1 /* 32-bit ELF support for ARM
2 Copyright (C) 1998-2018 Free Software Foundation, Inc.
4 This file is part of BFD, the Binary File Descriptor library.
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
19 MA 02110-1301, USA. */
25 #include "bfd_stdint.h"
26 #include "libiberty.h"
30 #include "elf-vxworks.h"
/* Name of the dynamic relocation section paired with section NAME:
   ".rel" NAME when HTAB (the BFD's elf32_arm_link_hash_table) uses
   REL-style relocations, ".rela" NAME otherwise.  */
#define RELOC_SECTION(HTAB, NAME) \
  (!(HTAB)->use_rel ? ".rela" NAME : ".rel" NAME)
/* Size of one external relocation record.  HTAB is the BFD's
   elf32_arm_link_hash_table; REL records are used when use_rel is
   set, RELA records otherwise.
   Fix: the `(HTAB)->use_rel' condition line was missing, leaving the
   macro expanding to a bare `? ... : ...' — a syntax error.  */
#define RELOC_SIZE(HTAB) \
  ((HTAB)->use_rel \
   ? sizeof (Elf32_External_Rel) \
   : sizeof (Elf32_External_Rela))
/* Function used to swap a relocation in.  HTAB is the BFD's
   elf32_arm_link_hash_table; selects the REL or RELA swapper to
   match RELOC_SIZE.
   Fix: the `(HTAB)->use_rel' condition line was missing, leaving the
   macro expanding to a bare `? ... : ...' — a syntax error.  */
#define SWAP_RELOC_IN(HTAB) \
  ((HTAB)->use_rel \
   ? bfd_elf32_swap_reloc_in \
   : bfd_elf32_swap_reloca_in)
/* Function used to swap a relocation out.  HTAB is the BFD's
   elf32_arm_link_hash_table; selects the REL or RELA swapper to
   match RELOC_SIZE.
   Fix: the `(HTAB)->use_rel' condition line was missing, leaving the
   macro expanding to a bare `? ... : ...' — a syntax error.  */
#define SWAP_RELOC_OUT(HTAB) \
  ((HTAB)->use_rel \
   ? bfd_elf32_swap_reloc_out \
   : bfd_elf32_swap_reloca_out)
/* Relocation decoding hooks for the generic ELF backend: only the
   REL-style hook is wired to this backend's elf32_arm_info_to_howto;
   the generic hook is deliberately left unset.  NOTE(review): confirm
   this REL/RELA hook split against the elf_backend_data conventions
   for this binutils version.  */
#define elf_info_to_howto NULL
#define elf_info_to_howto_rel elf32_arm_info_to_howto
/* Values used for the ELF header identification of ARM objects:
   ABI version 0 and the ARM OS/ABI code.  NOTE(review): presumably
   consumed when filling in e_ident[EI_ABIVERSION]/[EI_OSABI] —
   the use site is outside this chunk; verify.  */
#define ARM_ELF_ABI_VERSION 0
#define ARM_ELF_OS_ABI_VERSION ELFOSABI_ARM
/* The Adjusted Place, as defined by AAELF: the place X with its
   bottom two bits cleared, i.e. forced to 4-byte (word) alignment.  */
#define Pa(X) ((X) & 0xfffffffc)
68 static bfd_boolean
elf32_arm_write_section (bfd
*output_bfd
,
69 struct bfd_link_info
*link_info
,
/* Note: code such as elf32_arm_reloc_type_lookup expects to use e.g.
   R_ARM_PC24 as an index into this, and find the R_ARM_PC24 HOWTO
   in that slot.  */
77 static reloc_howto_type elf32_arm_howto_table_1
[] =
80 HOWTO (R_ARM_NONE
, /* type */
82 3, /* size (0 = byte, 1 = short, 2 = long) */
84 FALSE
, /* pc_relative */
86 complain_overflow_dont
,/* complain_on_overflow */
87 bfd_elf_generic_reloc
, /* special_function */
88 "R_ARM_NONE", /* name */
89 FALSE
, /* partial_inplace */
92 FALSE
), /* pcrel_offset */
94 HOWTO (R_ARM_PC24
, /* type */
96 2, /* size (0 = byte, 1 = short, 2 = long) */
98 TRUE
, /* pc_relative */
100 complain_overflow_signed
,/* complain_on_overflow */
101 bfd_elf_generic_reloc
, /* special_function */
102 "R_ARM_PC24", /* name */
103 FALSE
, /* partial_inplace */
104 0x00ffffff, /* src_mask */
105 0x00ffffff, /* dst_mask */
106 TRUE
), /* pcrel_offset */
108 /* 32 bit absolute */
109 HOWTO (R_ARM_ABS32
, /* type */
111 2, /* size (0 = byte, 1 = short, 2 = long) */
113 FALSE
, /* pc_relative */
115 complain_overflow_bitfield
,/* complain_on_overflow */
116 bfd_elf_generic_reloc
, /* special_function */
117 "R_ARM_ABS32", /* name */
118 FALSE
, /* partial_inplace */
119 0xffffffff, /* src_mask */
120 0xffffffff, /* dst_mask */
121 FALSE
), /* pcrel_offset */
123 /* standard 32bit pc-relative reloc */
124 HOWTO (R_ARM_REL32
, /* type */
126 2, /* size (0 = byte, 1 = short, 2 = long) */
128 TRUE
, /* pc_relative */
130 complain_overflow_bitfield
,/* complain_on_overflow */
131 bfd_elf_generic_reloc
, /* special_function */
132 "R_ARM_REL32", /* name */
133 FALSE
, /* partial_inplace */
134 0xffffffff, /* src_mask */
135 0xffffffff, /* dst_mask */
136 TRUE
), /* pcrel_offset */
138 /* 8 bit absolute - R_ARM_LDR_PC_G0 in AAELF */
139 HOWTO (R_ARM_LDR_PC_G0
, /* type */
141 0, /* size (0 = byte, 1 = short, 2 = long) */
143 TRUE
, /* pc_relative */
145 complain_overflow_dont
,/* complain_on_overflow */
146 bfd_elf_generic_reloc
, /* special_function */
147 "R_ARM_LDR_PC_G0", /* name */
148 FALSE
, /* partial_inplace */
149 0xffffffff, /* src_mask */
150 0xffffffff, /* dst_mask */
151 TRUE
), /* pcrel_offset */
153 /* 16 bit absolute */
154 HOWTO (R_ARM_ABS16
, /* type */
156 1, /* size (0 = byte, 1 = short, 2 = long) */
158 FALSE
, /* pc_relative */
160 complain_overflow_bitfield
,/* complain_on_overflow */
161 bfd_elf_generic_reloc
, /* special_function */
162 "R_ARM_ABS16", /* name */
163 FALSE
, /* partial_inplace */
164 0x0000ffff, /* src_mask */
165 0x0000ffff, /* dst_mask */
166 FALSE
), /* pcrel_offset */
168 /* 12 bit absolute */
169 HOWTO (R_ARM_ABS12
, /* type */
171 2, /* size (0 = byte, 1 = short, 2 = long) */
173 FALSE
, /* pc_relative */
175 complain_overflow_bitfield
,/* complain_on_overflow */
176 bfd_elf_generic_reloc
, /* special_function */
177 "R_ARM_ABS12", /* name */
178 FALSE
, /* partial_inplace */
179 0x00000fff, /* src_mask */
180 0x00000fff, /* dst_mask */
181 FALSE
), /* pcrel_offset */
183 HOWTO (R_ARM_THM_ABS5
, /* type */
185 1, /* size (0 = byte, 1 = short, 2 = long) */
187 FALSE
, /* pc_relative */
189 complain_overflow_bitfield
,/* complain_on_overflow */
190 bfd_elf_generic_reloc
, /* special_function */
191 "R_ARM_THM_ABS5", /* name */
192 FALSE
, /* partial_inplace */
193 0x000007e0, /* src_mask */
194 0x000007e0, /* dst_mask */
195 FALSE
), /* pcrel_offset */
198 HOWTO (R_ARM_ABS8
, /* type */
200 0, /* size (0 = byte, 1 = short, 2 = long) */
202 FALSE
, /* pc_relative */
204 complain_overflow_bitfield
,/* complain_on_overflow */
205 bfd_elf_generic_reloc
, /* special_function */
206 "R_ARM_ABS8", /* name */
207 FALSE
, /* partial_inplace */
208 0x000000ff, /* src_mask */
209 0x000000ff, /* dst_mask */
210 FALSE
), /* pcrel_offset */
212 HOWTO (R_ARM_SBREL32
, /* type */
214 2, /* size (0 = byte, 1 = short, 2 = long) */
216 FALSE
, /* pc_relative */
218 complain_overflow_dont
,/* complain_on_overflow */
219 bfd_elf_generic_reloc
, /* special_function */
220 "R_ARM_SBREL32", /* name */
221 FALSE
, /* partial_inplace */
222 0xffffffff, /* src_mask */
223 0xffffffff, /* dst_mask */
224 FALSE
), /* pcrel_offset */
226 HOWTO (R_ARM_THM_CALL
, /* type */
228 2, /* size (0 = byte, 1 = short, 2 = long) */
230 TRUE
, /* pc_relative */
232 complain_overflow_signed
,/* complain_on_overflow */
233 bfd_elf_generic_reloc
, /* special_function */
234 "R_ARM_THM_CALL", /* name */
235 FALSE
, /* partial_inplace */
236 0x07ff2fff, /* src_mask */
237 0x07ff2fff, /* dst_mask */
238 TRUE
), /* pcrel_offset */
240 HOWTO (R_ARM_THM_PC8
, /* type */
242 1, /* size (0 = byte, 1 = short, 2 = long) */
244 TRUE
, /* pc_relative */
246 complain_overflow_signed
,/* complain_on_overflow */
247 bfd_elf_generic_reloc
, /* special_function */
248 "R_ARM_THM_PC8", /* name */
249 FALSE
, /* partial_inplace */
250 0x000000ff, /* src_mask */
251 0x000000ff, /* dst_mask */
252 TRUE
), /* pcrel_offset */
254 HOWTO (R_ARM_BREL_ADJ
, /* type */
256 1, /* size (0 = byte, 1 = short, 2 = long) */
258 FALSE
, /* pc_relative */
260 complain_overflow_signed
,/* complain_on_overflow */
261 bfd_elf_generic_reloc
, /* special_function */
262 "R_ARM_BREL_ADJ", /* name */
263 FALSE
, /* partial_inplace */
264 0xffffffff, /* src_mask */
265 0xffffffff, /* dst_mask */
266 FALSE
), /* pcrel_offset */
268 HOWTO (R_ARM_TLS_DESC
, /* type */
270 2, /* size (0 = byte, 1 = short, 2 = long) */
272 FALSE
, /* pc_relative */
274 complain_overflow_bitfield
,/* complain_on_overflow */
275 bfd_elf_generic_reloc
, /* special_function */
276 "R_ARM_TLS_DESC", /* name */
277 FALSE
, /* partial_inplace */
278 0xffffffff, /* src_mask */
279 0xffffffff, /* dst_mask */
280 FALSE
), /* pcrel_offset */
282 HOWTO (R_ARM_THM_SWI8
, /* type */
284 0, /* size (0 = byte, 1 = short, 2 = long) */
286 FALSE
, /* pc_relative */
288 complain_overflow_signed
,/* complain_on_overflow */
289 bfd_elf_generic_reloc
, /* special_function */
290 "R_ARM_SWI8", /* name */
291 FALSE
, /* partial_inplace */
292 0x00000000, /* src_mask */
293 0x00000000, /* dst_mask */
294 FALSE
), /* pcrel_offset */
296 /* BLX instruction for the ARM. */
297 HOWTO (R_ARM_XPC25
, /* type */
299 2, /* size (0 = byte, 1 = short, 2 = long) */
301 TRUE
, /* pc_relative */
303 complain_overflow_signed
,/* complain_on_overflow */
304 bfd_elf_generic_reloc
, /* special_function */
305 "R_ARM_XPC25", /* name */
306 FALSE
, /* partial_inplace */
307 0x00ffffff, /* src_mask */
308 0x00ffffff, /* dst_mask */
309 TRUE
), /* pcrel_offset */
311 /* BLX instruction for the Thumb. */
312 HOWTO (R_ARM_THM_XPC22
, /* type */
314 2, /* size (0 = byte, 1 = short, 2 = long) */
316 TRUE
, /* pc_relative */
318 complain_overflow_signed
,/* complain_on_overflow */
319 bfd_elf_generic_reloc
, /* special_function */
320 "R_ARM_THM_XPC22", /* name */
321 FALSE
, /* partial_inplace */
322 0x07ff2fff, /* src_mask */
323 0x07ff2fff, /* dst_mask */
324 TRUE
), /* pcrel_offset */
326 /* Dynamic TLS relocations. */
328 HOWTO (R_ARM_TLS_DTPMOD32
, /* type */
330 2, /* size (0 = byte, 1 = short, 2 = long) */
332 FALSE
, /* pc_relative */
334 complain_overflow_bitfield
,/* complain_on_overflow */
335 bfd_elf_generic_reloc
, /* special_function */
336 "R_ARM_TLS_DTPMOD32", /* name */
337 TRUE
, /* partial_inplace */
338 0xffffffff, /* src_mask */
339 0xffffffff, /* dst_mask */
340 FALSE
), /* pcrel_offset */
342 HOWTO (R_ARM_TLS_DTPOFF32
, /* type */
344 2, /* size (0 = byte, 1 = short, 2 = long) */
346 FALSE
, /* pc_relative */
348 complain_overflow_bitfield
,/* complain_on_overflow */
349 bfd_elf_generic_reloc
, /* special_function */
350 "R_ARM_TLS_DTPOFF32", /* name */
351 TRUE
, /* partial_inplace */
352 0xffffffff, /* src_mask */
353 0xffffffff, /* dst_mask */
354 FALSE
), /* pcrel_offset */
356 HOWTO (R_ARM_TLS_TPOFF32
, /* type */
358 2, /* size (0 = byte, 1 = short, 2 = long) */
360 FALSE
, /* pc_relative */
362 complain_overflow_bitfield
,/* complain_on_overflow */
363 bfd_elf_generic_reloc
, /* special_function */
364 "R_ARM_TLS_TPOFF32", /* name */
365 TRUE
, /* partial_inplace */
366 0xffffffff, /* src_mask */
367 0xffffffff, /* dst_mask */
368 FALSE
), /* pcrel_offset */
370 /* Relocs used in ARM Linux */
372 HOWTO (R_ARM_COPY
, /* type */
374 2, /* size (0 = byte, 1 = short, 2 = long) */
376 FALSE
, /* pc_relative */
378 complain_overflow_bitfield
,/* complain_on_overflow */
379 bfd_elf_generic_reloc
, /* special_function */
380 "R_ARM_COPY", /* name */
381 TRUE
, /* partial_inplace */
382 0xffffffff, /* src_mask */
383 0xffffffff, /* dst_mask */
384 FALSE
), /* pcrel_offset */
386 HOWTO (R_ARM_GLOB_DAT
, /* type */
388 2, /* size (0 = byte, 1 = short, 2 = long) */
390 FALSE
, /* pc_relative */
392 complain_overflow_bitfield
,/* complain_on_overflow */
393 bfd_elf_generic_reloc
, /* special_function */
394 "R_ARM_GLOB_DAT", /* name */
395 TRUE
, /* partial_inplace */
396 0xffffffff, /* src_mask */
397 0xffffffff, /* dst_mask */
398 FALSE
), /* pcrel_offset */
400 HOWTO (R_ARM_JUMP_SLOT
, /* type */
402 2, /* size (0 = byte, 1 = short, 2 = long) */
404 FALSE
, /* pc_relative */
406 complain_overflow_bitfield
,/* complain_on_overflow */
407 bfd_elf_generic_reloc
, /* special_function */
408 "R_ARM_JUMP_SLOT", /* name */
409 TRUE
, /* partial_inplace */
410 0xffffffff, /* src_mask */
411 0xffffffff, /* dst_mask */
412 FALSE
), /* pcrel_offset */
414 HOWTO (R_ARM_RELATIVE
, /* type */
416 2, /* size (0 = byte, 1 = short, 2 = long) */
418 FALSE
, /* pc_relative */
420 complain_overflow_bitfield
,/* complain_on_overflow */
421 bfd_elf_generic_reloc
, /* special_function */
422 "R_ARM_RELATIVE", /* name */
423 TRUE
, /* partial_inplace */
424 0xffffffff, /* src_mask */
425 0xffffffff, /* dst_mask */
426 FALSE
), /* pcrel_offset */
428 HOWTO (R_ARM_GOTOFF32
, /* type */
430 2, /* size (0 = byte, 1 = short, 2 = long) */
432 FALSE
, /* pc_relative */
434 complain_overflow_bitfield
,/* complain_on_overflow */
435 bfd_elf_generic_reloc
, /* special_function */
436 "R_ARM_GOTOFF32", /* name */
437 TRUE
, /* partial_inplace */
438 0xffffffff, /* src_mask */
439 0xffffffff, /* dst_mask */
440 FALSE
), /* pcrel_offset */
442 HOWTO (R_ARM_GOTPC
, /* type */
444 2, /* size (0 = byte, 1 = short, 2 = long) */
446 TRUE
, /* pc_relative */
448 complain_overflow_bitfield
,/* complain_on_overflow */
449 bfd_elf_generic_reloc
, /* special_function */
450 "R_ARM_GOTPC", /* name */
451 TRUE
, /* partial_inplace */
452 0xffffffff, /* src_mask */
453 0xffffffff, /* dst_mask */
454 TRUE
), /* pcrel_offset */
456 HOWTO (R_ARM_GOT32
, /* type */
458 2, /* size (0 = byte, 1 = short, 2 = long) */
460 FALSE
, /* pc_relative */
462 complain_overflow_bitfield
,/* complain_on_overflow */
463 bfd_elf_generic_reloc
, /* special_function */
464 "R_ARM_GOT32", /* name */
465 TRUE
, /* partial_inplace */
466 0xffffffff, /* src_mask */
467 0xffffffff, /* dst_mask */
468 FALSE
), /* pcrel_offset */
470 HOWTO (R_ARM_PLT32
, /* type */
472 2, /* size (0 = byte, 1 = short, 2 = long) */
474 TRUE
, /* pc_relative */
476 complain_overflow_bitfield
,/* complain_on_overflow */
477 bfd_elf_generic_reloc
, /* special_function */
478 "R_ARM_PLT32", /* name */
479 FALSE
, /* partial_inplace */
480 0x00ffffff, /* src_mask */
481 0x00ffffff, /* dst_mask */
482 TRUE
), /* pcrel_offset */
484 HOWTO (R_ARM_CALL
, /* type */
486 2, /* size (0 = byte, 1 = short, 2 = long) */
488 TRUE
, /* pc_relative */
490 complain_overflow_signed
,/* complain_on_overflow */
491 bfd_elf_generic_reloc
, /* special_function */
492 "R_ARM_CALL", /* name */
493 FALSE
, /* partial_inplace */
494 0x00ffffff, /* src_mask */
495 0x00ffffff, /* dst_mask */
496 TRUE
), /* pcrel_offset */
498 HOWTO (R_ARM_JUMP24
, /* type */
500 2, /* size (0 = byte, 1 = short, 2 = long) */
502 TRUE
, /* pc_relative */
504 complain_overflow_signed
,/* complain_on_overflow */
505 bfd_elf_generic_reloc
, /* special_function */
506 "R_ARM_JUMP24", /* name */
507 FALSE
, /* partial_inplace */
508 0x00ffffff, /* src_mask */
509 0x00ffffff, /* dst_mask */
510 TRUE
), /* pcrel_offset */
512 HOWTO (R_ARM_THM_JUMP24
, /* type */
514 2, /* size (0 = byte, 1 = short, 2 = long) */
516 TRUE
, /* pc_relative */
518 complain_overflow_signed
,/* complain_on_overflow */
519 bfd_elf_generic_reloc
, /* special_function */
520 "R_ARM_THM_JUMP24", /* name */
521 FALSE
, /* partial_inplace */
522 0x07ff2fff, /* src_mask */
523 0x07ff2fff, /* dst_mask */
524 TRUE
), /* pcrel_offset */
526 HOWTO (R_ARM_BASE_ABS
, /* type */
528 2, /* size (0 = byte, 1 = short, 2 = long) */
530 FALSE
, /* pc_relative */
532 complain_overflow_dont
,/* complain_on_overflow */
533 bfd_elf_generic_reloc
, /* special_function */
534 "R_ARM_BASE_ABS", /* name */
535 FALSE
, /* partial_inplace */
536 0xffffffff, /* src_mask */
537 0xffffffff, /* dst_mask */
538 FALSE
), /* pcrel_offset */
540 HOWTO (R_ARM_ALU_PCREL7_0
, /* type */
542 2, /* size (0 = byte, 1 = short, 2 = long) */
544 TRUE
, /* pc_relative */
546 complain_overflow_dont
,/* complain_on_overflow */
547 bfd_elf_generic_reloc
, /* special_function */
548 "R_ARM_ALU_PCREL_7_0", /* name */
549 FALSE
, /* partial_inplace */
550 0x00000fff, /* src_mask */
551 0x00000fff, /* dst_mask */
552 TRUE
), /* pcrel_offset */
554 HOWTO (R_ARM_ALU_PCREL15_8
, /* type */
556 2, /* size (0 = byte, 1 = short, 2 = long) */
558 TRUE
, /* pc_relative */
560 complain_overflow_dont
,/* complain_on_overflow */
561 bfd_elf_generic_reloc
, /* special_function */
562 "R_ARM_ALU_PCREL_15_8",/* name */
563 FALSE
, /* partial_inplace */
564 0x00000fff, /* src_mask */
565 0x00000fff, /* dst_mask */
566 TRUE
), /* pcrel_offset */
568 HOWTO (R_ARM_ALU_PCREL23_15
, /* type */
570 2, /* size (0 = byte, 1 = short, 2 = long) */
572 TRUE
, /* pc_relative */
574 complain_overflow_dont
,/* complain_on_overflow */
575 bfd_elf_generic_reloc
, /* special_function */
576 "R_ARM_ALU_PCREL_23_15",/* name */
577 FALSE
, /* partial_inplace */
578 0x00000fff, /* src_mask */
579 0x00000fff, /* dst_mask */
580 TRUE
), /* pcrel_offset */
582 HOWTO (R_ARM_LDR_SBREL_11_0
, /* type */
584 2, /* size (0 = byte, 1 = short, 2 = long) */
586 FALSE
, /* pc_relative */
588 complain_overflow_dont
,/* complain_on_overflow */
589 bfd_elf_generic_reloc
, /* special_function */
590 "R_ARM_LDR_SBREL_11_0",/* name */
591 FALSE
, /* partial_inplace */
592 0x00000fff, /* src_mask */
593 0x00000fff, /* dst_mask */
594 FALSE
), /* pcrel_offset */
596 HOWTO (R_ARM_ALU_SBREL_19_12
, /* type */
598 2, /* size (0 = byte, 1 = short, 2 = long) */
600 FALSE
, /* pc_relative */
602 complain_overflow_dont
,/* complain_on_overflow */
603 bfd_elf_generic_reloc
, /* special_function */
604 "R_ARM_ALU_SBREL_19_12",/* name */
605 FALSE
, /* partial_inplace */
606 0x000ff000, /* src_mask */
607 0x000ff000, /* dst_mask */
608 FALSE
), /* pcrel_offset */
610 HOWTO (R_ARM_ALU_SBREL_27_20
, /* type */
612 2, /* size (0 = byte, 1 = short, 2 = long) */
614 FALSE
, /* pc_relative */
616 complain_overflow_dont
,/* complain_on_overflow */
617 bfd_elf_generic_reloc
, /* special_function */
618 "R_ARM_ALU_SBREL_27_20",/* name */
619 FALSE
, /* partial_inplace */
620 0x0ff00000, /* src_mask */
621 0x0ff00000, /* dst_mask */
622 FALSE
), /* pcrel_offset */
624 HOWTO (R_ARM_TARGET1
, /* type */
626 2, /* size (0 = byte, 1 = short, 2 = long) */
628 FALSE
, /* pc_relative */
630 complain_overflow_dont
,/* complain_on_overflow */
631 bfd_elf_generic_reloc
, /* special_function */
632 "R_ARM_TARGET1", /* name */
633 FALSE
, /* partial_inplace */
634 0xffffffff, /* src_mask */
635 0xffffffff, /* dst_mask */
636 FALSE
), /* pcrel_offset */
638 HOWTO (R_ARM_ROSEGREL32
, /* type */
640 2, /* size (0 = byte, 1 = short, 2 = long) */
642 FALSE
, /* pc_relative */
644 complain_overflow_dont
,/* complain_on_overflow */
645 bfd_elf_generic_reloc
, /* special_function */
646 "R_ARM_ROSEGREL32", /* name */
647 FALSE
, /* partial_inplace */
648 0xffffffff, /* src_mask */
649 0xffffffff, /* dst_mask */
650 FALSE
), /* pcrel_offset */
652 HOWTO (R_ARM_V4BX
, /* type */
654 2, /* size (0 = byte, 1 = short, 2 = long) */
656 FALSE
, /* pc_relative */
658 complain_overflow_dont
,/* complain_on_overflow */
659 bfd_elf_generic_reloc
, /* special_function */
660 "R_ARM_V4BX", /* name */
661 FALSE
, /* partial_inplace */
662 0xffffffff, /* src_mask */
663 0xffffffff, /* dst_mask */
664 FALSE
), /* pcrel_offset */
666 HOWTO (R_ARM_TARGET2
, /* type */
668 2, /* size (0 = byte, 1 = short, 2 = long) */
670 FALSE
, /* pc_relative */
672 complain_overflow_signed
,/* complain_on_overflow */
673 bfd_elf_generic_reloc
, /* special_function */
674 "R_ARM_TARGET2", /* name */
675 FALSE
, /* partial_inplace */
676 0xffffffff, /* src_mask */
677 0xffffffff, /* dst_mask */
678 TRUE
), /* pcrel_offset */
680 HOWTO (R_ARM_PREL31
, /* type */
682 2, /* size (0 = byte, 1 = short, 2 = long) */
684 TRUE
, /* pc_relative */
686 complain_overflow_signed
,/* complain_on_overflow */
687 bfd_elf_generic_reloc
, /* special_function */
688 "R_ARM_PREL31", /* name */
689 FALSE
, /* partial_inplace */
690 0x7fffffff, /* src_mask */
691 0x7fffffff, /* dst_mask */
692 TRUE
), /* pcrel_offset */
694 HOWTO (R_ARM_MOVW_ABS_NC
, /* type */
696 2, /* size (0 = byte, 1 = short, 2 = long) */
698 FALSE
, /* pc_relative */
700 complain_overflow_dont
,/* complain_on_overflow */
701 bfd_elf_generic_reloc
, /* special_function */
702 "R_ARM_MOVW_ABS_NC", /* name */
703 FALSE
, /* partial_inplace */
704 0x000f0fff, /* src_mask */
705 0x000f0fff, /* dst_mask */
706 FALSE
), /* pcrel_offset */
708 HOWTO (R_ARM_MOVT_ABS
, /* type */
710 2, /* size (0 = byte, 1 = short, 2 = long) */
712 FALSE
, /* pc_relative */
714 complain_overflow_bitfield
,/* complain_on_overflow */
715 bfd_elf_generic_reloc
, /* special_function */
716 "R_ARM_MOVT_ABS", /* name */
717 FALSE
, /* partial_inplace */
718 0x000f0fff, /* src_mask */
719 0x000f0fff, /* dst_mask */
720 FALSE
), /* pcrel_offset */
722 HOWTO (R_ARM_MOVW_PREL_NC
, /* type */
724 2, /* size (0 = byte, 1 = short, 2 = long) */
726 TRUE
, /* pc_relative */
728 complain_overflow_dont
,/* complain_on_overflow */
729 bfd_elf_generic_reloc
, /* special_function */
730 "R_ARM_MOVW_PREL_NC", /* name */
731 FALSE
, /* partial_inplace */
732 0x000f0fff, /* src_mask */
733 0x000f0fff, /* dst_mask */
734 TRUE
), /* pcrel_offset */
736 HOWTO (R_ARM_MOVT_PREL
, /* type */
738 2, /* size (0 = byte, 1 = short, 2 = long) */
740 TRUE
, /* pc_relative */
742 complain_overflow_bitfield
,/* complain_on_overflow */
743 bfd_elf_generic_reloc
, /* special_function */
744 "R_ARM_MOVT_PREL", /* name */
745 FALSE
, /* partial_inplace */
746 0x000f0fff, /* src_mask */
747 0x000f0fff, /* dst_mask */
748 TRUE
), /* pcrel_offset */
750 HOWTO (R_ARM_THM_MOVW_ABS_NC
, /* type */
752 2, /* size (0 = byte, 1 = short, 2 = long) */
754 FALSE
, /* pc_relative */
756 complain_overflow_dont
,/* complain_on_overflow */
757 bfd_elf_generic_reloc
, /* special_function */
758 "R_ARM_THM_MOVW_ABS_NC",/* name */
759 FALSE
, /* partial_inplace */
760 0x040f70ff, /* src_mask */
761 0x040f70ff, /* dst_mask */
762 FALSE
), /* pcrel_offset */
764 HOWTO (R_ARM_THM_MOVT_ABS
, /* type */
766 2, /* size (0 = byte, 1 = short, 2 = long) */
768 FALSE
, /* pc_relative */
770 complain_overflow_bitfield
,/* complain_on_overflow */
771 bfd_elf_generic_reloc
, /* special_function */
772 "R_ARM_THM_MOVT_ABS", /* name */
773 FALSE
, /* partial_inplace */
774 0x040f70ff, /* src_mask */
775 0x040f70ff, /* dst_mask */
776 FALSE
), /* pcrel_offset */
778 HOWTO (R_ARM_THM_MOVW_PREL_NC
,/* type */
780 2, /* size (0 = byte, 1 = short, 2 = long) */
782 TRUE
, /* pc_relative */
784 complain_overflow_dont
,/* complain_on_overflow */
785 bfd_elf_generic_reloc
, /* special_function */
786 "R_ARM_THM_MOVW_PREL_NC",/* name */
787 FALSE
, /* partial_inplace */
788 0x040f70ff, /* src_mask */
789 0x040f70ff, /* dst_mask */
790 TRUE
), /* pcrel_offset */
792 HOWTO (R_ARM_THM_MOVT_PREL
, /* type */
794 2, /* size (0 = byte, 1 = short, 2 = long) */
796 TRUE
, /* pc_relative */
798 complain_overflow_bitfield
,/* complain_on_overflow */
799 bfd_elf_generic_reloc
, /* special_function */
800 "R_ARM_THM_MOVT_PREL", /* name */
801 FALSE
, /* partial_inplace */
802 0x040f70ff, /* src_mask */
803 0x040f70ff, /* dst_mask */
804 TRUE
), /* pcrel_offset */
806 HOWTO (R_ARM_THM_JUMP19
, /* type */
808 2, /* size (0 = byte, 1 = short, 2 = long) */
810 TRUE
, /* pc_relative */
812 complain_overflow_signed
,/* complain_on_overflow */
813 bfd_elf_generic_reloc
, /* special_function */
814 "R_ARM_THM_JUMP19", /* name */
815 FALSE
, /* partial_inplace */
816 0x043f2fff, /* src_mask */
817 0x043f2fff, /* dst_mask */
818 TRUE
), /* pcrel_offset */
820 HOWTO (R_ARM_THM_JUMP6
, /* type */
822 1, /* size (0 = byte, 1 = short, 2 = long) */
824 TRUE
, /* pc_relative */
826 complain_overflow_unsigned
,/* complain_on_overflow */
827 bfd_elf_generic_reloc
, /* special_function */
828 "R_ARM_THM_JUMP6", /* name */
829 FALSE
, /* partial_inplace */
830 0x02f8, /* src_mask */
831 0x02f8, /* dst_mask */
832 TRUE
), /* pcrel_offset */
834 /* These are declared as 13-bit signed relocations because we can
835 address -4095 .. 4095(base) by altering ADDW to SUBW or vice
837 HOWTO (R_ARM_THM_ALU_PREL_11_0
,/* type */
839 2, /* size (0 = byte, 1 = short, 2 = long) */
841 TRUE
, /* pc_relative */
843 complain_overflow_dont
,/* complain_on_overflow */
844 bfd_elf_generic_reloc
, /* special_function */
845 "R_ARM_THM_ALU_PREL_11_0",/* name */
846 FALSE
, /* partial_inplace */
847 0xffffffff, /* src_mask */
848 0xffffffff, /* dst_mask */
849 TRUE
), /* pcrel_offset */
851 HOWTO (R_ARM_THM_PC12
, /* type */
853 2, /* size (0 = byte, 1 = short, 2 = long) */
855 TRUE
, /* pc_relative */
857 complain_overflow_dont
,/* complain_on_overflow */
858 bfd_elf_generic_reloc
, /* special_function */
859 "R_ARM_THM_PC12", /* name */
860 FALSE
, /* partial_inplace */
861 0xffffffff, /* src_mask */
862 0xffffffff, /* dst_mask */
863 TRUE
), /* pcrel_offset */
865 HOWTO (R_ARM_ABS32_NOI
, /* type */
867 2, /* size (0 = byte, 1 = short, 2 = long) */
869 FALSE
, /* pc_relative */
871 complain_overflow_dont
,/* complain_on_overflow */
872 bfd_elf_generic_reloc
, /* special_function */
873 "R_ARM_ABS32_NOI", /* name */
874 FALSE
, /* partial_inplace */
875 0xffffffff, /* src_mask */
876 0xffffffff, /* dst_mask */
877 FALSE
), /* pcrel_offset */
879 HOWTO (R_ARM_REL32_NOI
, /* type */
881 2, /* size (0 = byte, 1 = short, 2 = long) */
883 TRUE
, /* pc_relative */
885 complain_overflow_dont
,/* complain_on_overflow */
886 bfd_elf_generic_reloc
, /* special_function */
887 "R_ARM_REL32_NOI", /* name */
888 FALSE
, /* partial_inplace */
889 0xffffffff, /* src_mask */
890 0xffffffff, /* dst_mask */
891 FALSE
), /* pcrel_offset */
893 /* Group relocations. */
895 HOWTO (R_ARM_ALU_PC_G0_NC
, /* type */
897 2, /* size (0 = byte, 1 = short, 2 = long) */
899 TRUE
, /* pc_relative */
901 complain_overflow_dont
,/* complain_on_overflow */
902 bfd_elf_generic_reloc
, /* special_function */
903 "R_ARM_ALU_PC_G0_NC", /* name */
904 FALSE
, /* partial_inplace */
905 0xffffffff, /* src_mask */
906 0xffffffff, /* dst_mask */
907 TRUE
), /* pcrel_offset */
909 HOWTO (R_ARM_ALU_PC_G0
, /* type */
911 2, /* size (0 = byte, 1 = short, 2 = long) */
913 TRUE
, /* pc_relative */
915 complain_overflow_dont
,/* complain_on_overflow */
916 bfd_elf_generic_reloc
, /* special_function */
917 "R_ARM_ALU_PC_G0", /* name */
918 FALSE
, /* partial_inplace */
919 0xffffffff, /* src_mask */
920 0xffffffff, /* dst_mask */
921 TRUE
), /* pcrel_offset */
923 HOWTO (R_ARM_ALU_PC_G1_NC
, /* type */
925 2, /* size (0 = byte, 1 = short, 2 = long) */
927 TRUE
, /* pc_relative */
929 complain_overflow_dont
,/* complain_on_overflow */
930 bfd_elf_generic_reloc
, /* special_function */
931 "R_ARM_ALU_PC_G1_NC", /* name */
932 FALSE
, /* partial_inplace */
933 0xffffffff, /* src_mask */
934 0xffffffff, /* dst_mask */
935 TRUE
), /* pcrel_offset */
937 HOWTO (R_ARM_ALU_PC_G1
, /* type */
939 2, /* size (0 = byte, 1 = short, 2 = long) */
941 TRUE
, /* pc_relative */
943 complain_overflow_dont
,/* complain_on_overflow */
944 bfd_elf_generic_reloc
, /* special_function */
945 "R_ARM_ALU_PC_G1", /* name */
946 FALSE
, /* partial_inplace */
947 0xffffffff, /* src_mask */
948 0xffffffff, /* dst_mask */
949 TRUE
), /* pcrel_offset */
951 HOWTO (R_ARM_ALU_PC_G2
, /* type */
953 2, /* size (0 = byte, 1 = short, 2 = long) */
955 TRUE
, /* pc_relative */
957 complain_overflow_dont
,/* complain_on_overflow */
958 bfd_elf_generic_reloc
, /* special_function */
959 "R_ARM_ALU_PC_G2", /* name */
960 FALSE
, /* partial_inplace */
961 0xffffffff, /* src_mask */
962 0xffffffff, /* dst_mask */
963 TRUE
), /* pcrel_offset */
965 HOWTO (R_ARM_LDR_PC_G1
, /* type */
967 2, /* size (0 = byte, 1 = short, 2 = long) */
969 TRUE
, /* pc_relative */
971 complain_overflow_dont
,/* complain_on_overflow */
972 bfd_elf_generic_reloc
, /* special_function */
973 "R_ARM_LDR_PC_G1", /* name */
974 FALSE
, /* partial_inplace */
975 0xffffffff, /* src_mask */
976 0xffffffff, /* dst_mask */
977 TRUE
), /* pcrel_offset */
979 HOWTO (R_ARM_LDR_PC_G2
, /* type */
981 2, /* size (0 = byte, 1 = short, 2 = long) */
983 TRUE
, /* pc_relative */
985 complain_overflow_dont
,/* complain_on_overflow */
986 bfd_elf_generic_reloc
, /* special_function */
987 "R_ARM_LDR_PC_G2", /* name */
988 FALSE
, /* partial_inplace */
989 0xffffffff, /* src_mask */
990 0xffffffff, /* dst_mask */
991 TRUE
), /* pcrel_offset */
993 HOWTO (R_ARM_LDRS_PC_G0
, /* type */
995 2, /* size (0 = byte, 1 = short, 2 = long) */
997 TRUE
, /* pc_relative */
999 complain_overflow_dont
,/* complain_on_overflow */
1000 bfd_elf_generic_reloc
, /* special_function */
1001 "R_ARM_LDRS_PC_G0", /* name */
1002 FALSE
, /* partial_inplace */
1003 0xffffffff, /* src_mask */
1004 0xffffffff, /* dst_mask */
1005 TRUE
), /* pcrel_offset */
1007 HOWTO (R_ARM_LDRS_PC_G1
, /* type */
1009 2, /* size (0 = byte, 1 = short, 2 = long) */
1011 TRUE
, /* pc_relative */
1013 complain_overflow_dont
,/* complain_on_overflow */
1014 bfd_elf_generic_reloc
, /* special_function */
1015 "R_ARM_LDRS_PC_G1", /* name */
1016 FALSE
, /* partial_inplace */
1017 0xffffffff, /* src_mask */
1018 0xffffffff, /* dst_mask */
1019 TRUE
), /* pcrel_offset */
1021 HOWTO (R_ARM_LDRS_PC_G2
, /* type */
1023 2, /* size (0 = byte, 1 = short, 2 = long) */
1025 TRUE
, /* pc_relative */
1027 complain_overflow_dont
,/* complain_on_overflow */
1028 bfd_elf_generic_reloc
, /* special_function */
1029 "R_ARM_LDRS_PC_G2", /* name */
1030 FALSE
, /* partial_inplace */
1031 0xffffffff, /* src_mask */
1032 0xffffffff, /* dst_mask */
1033 TRUE
), /* pcrel_offset */
1035 HOWTO (R_ARM_LDC_PC_G0
, /* type */
1037 2, /* size (0 = byte, 1 = short, 2 = long) */
1039 TRUE
, /* pc_relative */
1041 complain_overflow_dont
,/* complain_on_overflow */
1042 bfd_elf_generic_reloc
, /* special_function */
1043 "R_ARM_LDC_PC_G0", /* name */
1044 FALSE
, /* partial_inplace */
1045 0xffffffff, /* src_mask */
1046 0xffffffff, /* dst_mask */
1047 TRUE
), /* pcrel_offset */
1049 HOWTO (R_ARM_LDC_PC_G1
, /* type */
1051 2, /* size (0 = byte, 1 = short, 2 = long) */
1053 TRUE
, /* pc_relative */
1055 complain_overflow_dont
,/* complain_on_overflow */
1056 bfd_elf_generic_reloc
, /* special_function */
1057 "R_ARM_LDC_PC_G1", /* name */
1058 FALSE
, /* partial_inplace */
1059 0xffffffff, /* src_mask */
1060 0xffffffff, /* dst_mask */
1061 TRUE
), /* pcrel_offset */
1063 HOWTO (R_ARM_LDC_PC_G2
, /* type */
1065 2, /* size (0 = byte, 1 = short, 2 = long) */
1067 TRUE
, /* pc_relative */
1069 complain_overflow_dont
,/* complain_on_overflow */
1070 bfd_elf_generic_reloc
, /* special_function */
1071 "R_ARM_LDC_PC_G2", /* name */
1072 FALSE
, /* partial_inplace */
1073 0xffffffff, /* src_mask */
1074 0xffffffff, /* dst_mask */
1075 TRUE
), /* pcrel_offset */
1077 HOWTO (R_ARM_ALU_SB_G0_NC
, /* type */
1079 2, /* size (0 = byte, 1 = short, 2 = long) */
1081 TRUE
, /* pc_relative */
1083 complain_overflow_dont
,/* complain_on_overflow */
1084 bfd_elf_generic_reloc
, /* special_function */
1085 "R_ARM_ALU_SB_G0_NC", /* name */
1086 FALSE
, /* partial_inplace */
1087 0xffffffff, /* src_mask */
1088 0xffffffff, /* dst_mask */
1089 TRUE
), /* pcrel_offset */
1091 HOWTO (R_ARM_ALU_SB_G0
, /* type */
1093 2, /* size (0 = byte, 1 = short, 2 = long) */
1095 TRUE
, /* pc_relative */
1097 complain_overflow_dont
,/* complain_on_overflow */
1098 bfd_elf_generic_reloc
, /* special_function */
1099 "R_ARM_ALU_SB_G0", /* name */
1100 FALSE
, /* partial_inplace */
1101 0xffffffff, /* src_mask */
1102 0xffffffff, /* dst_mask */
1103 TRUE
), /* pcrel_offset */
1105 HOWTO (R_ARM_ALU_SB_G1_NC
, /* type */
1107 2, /* size (0 = byte, 1 = short, 2 = long) */
1109 TRUE
, /* pc_relative */
1111 complain_overflow_dont
,/* complain_on_overflow */
1112 bfd_elf_generic_reloc
, /* special_function */
1113 "R_ARM_ALU_SB_G1_NC", /* name */
1114 FALSE
, /* partial_inplace */
1115 0xffffffff, /* src_mask */
1116 0xffffffff, /* dst_mask */
1117 TRUE
), /* pcrel_offset */
1119 HOWTO (R_ARM_ALU_SB_G1
, /* type */
1121 2, /* size (0 = byte, 1 = short, 2 = long) */
1123 TRUE
, /* pc_relative */
1125 complain_overflow_dont
,/* complain_on_overflow */
1126 bfd_elf_generic_reloc
, /* special_function */
1127 "R_ARM_ALU_SB_G1", /* name */
1128 FALSE
, /* partial_inplace */
1129 0xffffffff, /* src_mask */
1130 0xffffffff, /* dst_mask */
1131 TRUE
), /* pcrel_offset */
1133 HOWTO (R_ARM_ALU_SB_G2
, /* type */
1135 2, /* size (0 = byte, 1 = short, 2 = long) */
1137 TRUE
, /* pc_relative */
1139 complain_overflow_dont
,/* complain_on_overflow */
1140 bfd_elf_generic_reloc
, /* special_function */
1141 "R_ARM_ALU_SB_G2", /* name */
1142 FALSE
, /* partial_inplace */
1143 0xffffffff, /* src_mask */
1144 0xffffffff, /* dst_mask */
1145 TRUE
), /* pcrel_offset */
1147 HOWTO (R_ARM_LDR_SB_G0
, /* type */
1149 2, /* size (0 = byte, 1 = short, 2 = long) */
1151 TRUE
, /* pc_relative */
1153 complain_overflow_dont
,/* complain_on_overflow */
1154 bfd_elf_generic_reloc
, /* special_function */
1155 "R_ARM_LDR_SB_G0", /* name */
1156 FALSE
, /* partial_inplace */
1157 0xffffffff, /* src_mask */
1158 0xffffffff, /* dst_mask */
1159 TRUE
), /* pcrel_offset */
1161 HOWTO (R_ARM_LDR_SB_G1
, /* type */
1163 2, /* size (0 = byte, 1 = short, 2 = long) */
1165 TRUE
, /* pc_relative */
1167 complain_overflow_dont
,/* complain_on_overflow */
1168 bfd_elf_generic_reloc
, /* special_function */
1169 "R_ARM_LDR_SB_G1", /* name */
1170 FALSE
, /* partial_inplace */
1171 0xffffffff, /* src_mask */
1172 0xffffffff, /* dst_mask */
1173 TRUE
), /* pcrel_offset */
1175 HOWTO (R_ARM_LDR_SB_G2
, /* type */
1177 2, /* size (0 = byte, 1 = short, 2 = long) */
1179 TRUE
, /* pc_relative */
1181 complain_overflow_dont
,/* complain_on_overflow */
1182 bfd_elf_generic_reloc
, /* special_function */
1183 "R_ARM_LDR_SB_G2", /* name */
1184 FALSE
, /* partial_inplace */
1185 0xffffffff, /* src_mask */
1186 0xffffffff, /* dst_mask */
1187 TRUE
), /* pcrel_offset */
1189 HOWTO (R_ARM_LDRS_SB_G0
, /* type */
1191 2, /* size (0 = byte, 1 = short, 2 = long) */
1193 TRUE
, /* pc_relative */
1195 complain_overflow_dont
,/* complain_on_overflow */
1196 bfd_elf_generic_reloc
, /* special_function */
1197 "R_ARM_LDRS_SB_G0", /* name */
1198 FALSE
, /* partial_inplace */
1199 0xffffffff, /* src_mask */
1200 0xffffffff, /* dst_mask */
1201 TRUE
), /* pcrel_offset */
1203 HOWTO (R_ARM_LDRS_SB_G1
, /* type */
1205 2, /* size (0 = byte, 1 = short, 2 = long) */
1207 TRUE
, /* pc_relative */
1209 complain_overflow_dont
,/* complain_on_overflow */
1210 bfd_elf_generic_reloc
, /* special_function */
1211 "R_ARM_LDRS_SB_G1", /* name */
1212 FALSE
, /* partial_inplace */
1213 0xffffffff, /* src_mask */
1214 0xffffffff, /* dst_mask */
1215 TRUE
), /* pcrel_offset */
1217 HOWTO (R_ARM_LDRS_SB_G2
, /* type */
1219 2, /* size (0 = byte, 1 = short, 2 = long) */
1221 TRUE
, /* pc_relative */
1223 complain_overflow_dont
,/* complain_on_overflow */
1224 bfd_elf_generic_reloc
, /* special_function */
1225 "R_ARM_LDRS_SB_G2", /* name */
1226 FALSE
, /* partial_inplace */
1227 0xffffffff, /* src_mask */
1228 0xffffffff, /* dst_mask */
1229 TRUE
), /* pcrel_offset */
1231 HOWTO (R_ARM_LDC_SB_G0
, /* type */
1233 2, /* size (0 = byte, 1 = short, 2 = long) */
1235 TRUE
, /* pc_relative */
1237 complain_overflow_dont
,/* complain_on_overflow */
1238 bfd_elf_generic_reloc
, /* special_function */
1239 "R_ARM_LDC_SB_G0", /* name */
1240 FALSE
, /* partial_inplace */
1241 0xffffffff, /* src_mask */
1242 0xffffffff, /* dst_mask */
1243 TRUE
), /* pcrel_offset */
1245 HOWTO (R_ARM_LDC_SB_G1
, /* type */
1247 2, /* size (0 = byte, 1 = short, 2 = long) */
1249 TRUE
, /* pc_relative */
1251 complain_overflow_dont
,/* complain_on_overflow */
1252 bfd_elf_generic_reloc
, /* special_function */
1253 "R_ARM_LDC_SB_G1", /* name */
1254 FALSE
, /* partial_inplace */
1255 0xffffffff, /* src_mask */
1256 0xffffffff, /* dst_mask */
1257 TRUE
), /* pcrel_offset */
1259 HOWTO (R_ARM_LDC_SB_G2
, /* type */
1261 2, /* size (0 = byte, 1 = short, 2 = long) */
1263 TRUE
, /* pc_relative */
1265 complain_overflow_dont
,/* complain_on_overflow */
1266 bfd_elf_generic_reloc
, /* special_function */
1267 "R_ARM_LDC_SB_G2", /* name */
1268 FALSE
, /* partial_inplace */
1269 0xffffffff, /* src_mask */
1270 0xffffffff, /* dst_mask */
1271 TRUE
), /* pcrel_offset */
1273 /* End of group relocations. */
1275 HOWTO (R_ARM_MOVW_BREL_NC
, /* type */
1277 2, /* size (0 = byte, 1 = short, 2 = long) */
1279 FALSE
, /* pc_relative */
1281 complain_overflow_dont
,/* complain_on_overflow */
1282 bfd_elf_generic_reloc
, /* special_function */
1283 "R_ARM_MOVW_BREL_NC", /* name */
1284 FALSE
, /* partial_inplace */
1285 0x0000ffff, /* src_mask */
1286 0x0000ffff, /* dst_mask */
1287 FALSE
), /* pcrel_offset */
1289 HOWTO (R_ARM_MOVT_BREL
, /* type */
1291 2, /* size (0 = byte, 1 = short, 2 = long) */
1293 FALSE
, /* pc_relative */
1295 complain_overflow_bitfield
,/* complain_on_overflow */
1296 bfd_elf_generic_reloc
, /* special_function */
1297 "R_ARM_MOVT_BREL", /* name */
1298 FALSE
, /* partial_inplace */
1299 0x0000ffff, /* src_mask */
1300 0x0000ffff, /* dst_mask */
1301 FALSE
), /* pcrel_offset */
1303 HOWTO (R_ARM_MOVW_BREL
, /* type */
1305 2, /* size (0 = byte, 1 = short, 2 = long) */
1307 FALSE
, /* pc_relative */
1309 complain_overflow_dont
,/* complain_on_overflow */
1310 bfd_elf_generic_reloc
, /* special_function */
1311 "R_ARM_MOVW_BREL", /* name */
1312 FALSE
, /* partial_inplace */
1313 0x0000ffff, /* src_mask */
1314 0x0000ffff, /* dst_mask */
1315 FALSE
), /* pcrel_offset */
1317 HOWTO (R_ARM_THM_MOVW_BREL_NC
,/* type */
1319 2, /* size (0 = byte, 1 = short, 2 = long) */
1321 FALSE
, /* pc_relative */
1323 complain_overflow_dont
,/* complain_on_overflow */
1324 bfd_elf_generic_reloc
, /* special_function */
1325 "R_ARM_THM_MOVW_BREL_NC",/* name */
1326 FALSE
, /* partial_inplace */
1327 0x040f70ff, /* src_mask */
1328 0x040f70ff, /* dst_mask */
1329 FALSE
), /* pcrel_offset */
1331 HOWTO (R_ARM_THM_MOVT_BREL
, /* type */
1333 2, /* size (0 = byte, 1 = short, 2 = long) */
1335 FALSE
, /* pc_relative */
1337 complain_overflow_bitfield
,/* complain_on_overflow */
1338 bfd_elf_generic_reloc
, /* special_function */
1339 "R_ARM_THM_MOVT_BREL", /* name */
1340 FALSE
, /* partial_inplace */
1341 0x040f70ff, /* src_mask */
1342 0x040f70ff, /* dst_mask */
1343 FALSE
), /* pcrel_offset */
1345 HOWTO (R_ARM_THM_MOVW_BREL
, /* type */
1347 2, /* size (0 = byte, 1 = short, 2 = long) */
1349 FALSE
, /* pc_relative */
1351 complain_overflow_dont
,/* complain_on_overflow */
1352 bfd_elf_generic_reloc
, /* special_function */
1353 "R_ARM_THM_MOVW_BREL", /* name */
1354 FALSE
, /* partial_inplace */
1355 0x040f70ff, /* src_mask */
1356 0x040f70ff, /* dst_mask */
1357 FALSE
), /* pcrel_offset */
1359 HOWTO (R_ARM_TLS_GOTDESC
, /* type */
1361 2, /* size (0 = byte, 1 = short, 2 = long) */
1363 FALSE
, /* pc_relative */
1365 complain_overflow_bitfield
,/* complain_on_overflow */
1366 NULL
, /* special_function */
1367 "R_ARM_TLS_GOTDESC", /* name */
1368 TRUE
, /* partial_inplace */
1369 0xffffffff, /* src_mask */
1370 0xffffffff, /* dst_mask */
1371 FALSE
), /* pcrel_offset */
1373 HOWTO (R_ARM_TLS_CALL
, /* type */
1375 2, /* size (0 = byte, 1 = short, 2 = long) */
1377 FALSE
, /* pc_relative */
1379 complain_overflow_dont
,/* complain_on_overflow */
1380 bfd_elf_generic_reloc
, /* special_function */
1381 "R_ARM_TLS_CALL", /* name */
1382 FALSE
, /* partial_inplace */
1383 0x00ffffff, /* src_mask */
1384 0x00ffffff, /* dst_mask */
1385 FALSE
), /* pcrel_offset */
1387 HOWTO (R_ARM_TLS_DESCSEQ
, /* type */
1389 2, /* size (0 = byte, 1 = short, 2 = long) */
1391 FALSE
, /* pc_relative */
1393 complain_overflow_bitfield
,/* complain_on_overflow */
1394 bfd_elf_generic_reloc
, /* special_function */
1395 "R_ARM_TLS_DESCSEQ", /* name */
1396 FALSE
, /* partial_inplace */
1397 0x00000000, /* src_mask */
1398 0x00000000, /* dst_mask */
1399 FALSE
), /* pcrel_offset */
1401 HOWTO (R_ARM_THM_TLS_CALL
, /* type */
1403 2, /* size (0 = byte, 1 = short, 2 = long) */
1405 FALSE
, /* pc_relative */
1407 complain_overflow_dont
,/* complain_on_overflow */
1408 bfd_elf_generic_reloc
, /* special_function */
1409 "R_ARM_THM_TLS_CALL", /* name */
1410 FALSE
, /* partial_inplace */
1411 0x07ff07ff, /* src_mask */
1412 0x07ff07ff, /* dst_mask */
1413 FALSE
), /* pcrel_offset */
1415 HOWTO (R_ARM_PLT32_ABS
, /* type */
1417 2, /* size (0 = byte, 1 = short, 2 = long) */
1419 FALSE
, /* pc_relative */
1421 complain_overflow_dont
,/* complain_on_overflow */
1422 bfd_elf_generic_reloc
, /* special_function */
1423 "R_ARM_PLT32_ABS", /* name */
1424 FALSE
, /* partial_inplace */
1425 0xffffffff, /* src_mask */
1426 0xffffffff, /* dst_mask */
1427 FALSE
), /* pcrel_offset */
1429 HOWTO (R_ARM_GOT_ABS
, /* type */
1431 2, /* size (0 = byte, 1 = short, 2 = long) */
1433 FALSE
, /* pc_relative */
1435 complain_overflow_dont
,/* complain_on_overflow */
1436 bfd_elf_generic_reloc
, /* special_function */
1437 "R_ARM_GOT_ABS", /* name */
1438 FALSE
, /* partial_inplace */
1439 0xffffffff, /* src_mask */
1440 0xffffffff, /* dst_mask */
1441 FALSE
), /* pcrel_offset */
1443 HOWTO (R_ARM_GOT_PREL
, /* type */
1445 2, /* size (0 = byte, 1 = short, 2 = long) */
1447 TRUE
, /* pc_relative */
1449 complain_overflow_dont
, /* complain_on_overflow */
1450 bfd_elf_generic_reloc
, /* special_function */
1451 "R_ARM_GOT_PREL", /* name */
1452 FALSE
, /* partial_inplace */
1453 0xffffffff, /* src_mask */
1454 0xffffffff, /* dst_mask */
1455 TRUE
), /* pcrel_offset */
1457 HOWTO (R_ARM_GOT_BREL12
, /* type */
1459 2, /* size (0 = byte, 1 = short, 2 = long) */
1461 FALSE
, /* pc_relative */
1463 complain_overflow_bitfield
,/* complain_on_overflow */
1464 bfd_elf_generic_reloc
, /* special_function */
1465 "R_ARM_GOT_BREL12", /* name */
1466 FALSE
, /* partial_inplace */
1467 0x00000fff, /* src_mask */
1468 0x00000fff, /* dst_mask */
1469 FALSE
), /* pcrel_offset */
1471 HOWTO (R_ARM_GOTOFF12
, /* type */
1473 2, /* size (0 = byte, 1 = short, 2 = long) */
1475 FALSE
, /* pc_relative */
1477 complain_overflow_bitfield
,/* complain_on_overflow */
1478 bfd_elf_generic_reloc
, /* special_function */
1479 "R_ARM_GOTOFF12", /* name */
1480 FALSE
, /* partial_inplace */
1481 0x00000fff, /* src_mask */
1482 0x00000fff, /* dst_mask */
1483 FALSE
), /* pcrel_offset */
1485 EMPTY_HOWTO (R_ARM_GOTRELAX
), /* reserved for future GOT-load optimizations */
1487 /* GNU extension to record C++ vtable member usage */
1488 HOWTO (R_ARM_GNU_VTENTRY
, /* type */
1490 2, /* size (0 = byte, 1 = short, 2 = long) */
1492 FALSE
, /* pc_relative */
1494 complain_overflow_dont
, /* complain_on_overflow */
1495 _bfd_elf_rel_vtable_reloc_fn
, /* special_function */
1496 "R_ARM_GNU_VTENTRY", /* name */
1497 FALSE
, /* partial_inplace */
1500 FALSE
), /* pcrel_offset */
1502 /* GNU extension to record C++ vtable hierarchy */
1503 HOWTO (R_ARM_GNU_VTINHERIT
, /* type */
1505 2, /* size (0 = byte, 1 = short, 2 = long) */
1507 FALSE
, /* pc_relative */
1509 complain_overflow_dont
, /* complain_on_overflow */
1510 NULL
, /* special_function */
1511 "R_ARM_GNU_VTINHERIT", /* name */
1512 FALSE
, /* partial_inplace */
1515 FALSE
), /* pcrel_offset */
1517 HOWTO (R_ARM_THM_JUMP11
, /* type */
1519 1, /* size (0 = byte, 1 = short, 2 = long) */
1521 TRUE
, /* pc_relative */
1523 complain_overflow_signed
, /* complain_on_overflow */
1524 bfd_elf_generic_reloc
, /* special_function */
1525 "R_ARM_THM_JUMP11", /* name */
1526 FALSE
, /* partial_inplace */
1527 0x000007ff, /* src_mask */
1528 0x000007ff, /* dst_mask */
1529 TRUE
), /* pcrel_offset */
1531 HOWTO (R_ARM_THM_JUMP8
, /* type */
1533 1, /* size (0 = byte, 1 = short, 2 = long) */
1535 TRUE
, /* pc_relative */
1537 complain_overflow_signed
, /* complain_on_overflow */
1538 bfd_elf_generic_reloc
, /* special_function */
1539 "R_ARM_THM_JUMP8", /* name */
1540 FALSE
, /* partial_inplace */
1541 0x000000ff, /* src_mask */
1542 0x000000ff, /* dst_mask */
1543 TRUE
), /* pcrel_offset */
1545 /* TLS relocations */
1546 HOWTO (R_ARM_TLS_GD32
, /* type */
1548 2, /* size (0 = byte, 1 = short, 2 = long) */
1550 FALSE
, /* pc_relative */
1552 complain_overflow_bitfield
,/* complain_on_overflow */
1553 NULL
, /* special_function */
1554 "R_ARM_TLS_GD32", /* name */
1555 TRUE
, /* partial_inplace */
1556 0xffffffff, /* src_mask */
1557 0xffffffff, /* dst_mask */
1558 FALSE
), /* pcrel_offset */
1560 HOWTO (R_ARM_TLS_LDM32
, /* type */
1562 2, /* size (0 = byte, 1 = short, 2 = long) */
1564 FALSE
, /* pc_relative */
1566 complain_overflow_bitfield
,/* complain_on_overflow */
1567 bfd_elf_generic_reloc
, /* special_function */
1568 "R_ARM_TLS_LDM32", /* name */
1569 TRUE
, /* partial_inplace */
1570 0xffffffff, /* src_mask */
1571 0xffffffff, /* dst_mask */
1572 FALSE
), /* pcrel_offset */
1574 HOWTO (R_ARM_TLS_LDO32
, /* type */
1576 2, /* size (0 = byte, 1 = short, 2 = long) */
1578 FALSE
, /* pc_relative */
1580 complain_overflow_bitfield
,/* complain_on_overflow */
1581 bfd_elf_generic_reloc
, /* special_function */
1582 "R_ARM_TLS_LDO32", /* name */
1583 TRUE
, /* partial_inplace */
1584 0xffffffff, /* src_mask */
1585 0xffffffff, /* dst_mask */
1586 FALSE
), /* pcrel_offset */
1588 HOWTO (R_ARM_TLS_IE32
, /* type */
1590 2, /* size (0 = byte, 1 = short, 2 = long) */
1592 FALSE
, /* pc_relative */
1594 complain_overflow_bitfield
,/* complain_on_overflow */
1595 NULL
, /* special_function */
1596 "R_ARM_TLS_IE32", /* name */
1597 TRUE
, /* partial_inplace */
1598 0xffffffff, /* src_mask */
1599 0xffffffff, /* dst_mask */
1600 FALSE
), /* pcrel_offset */
1602 HOWTO (R_ARM_TLS_LE32
, /* type */
1604 2, /* size (0 = byte, 1 = short, 2 = long) */
1606 FALSE
, /* pc_relative */
1608 complain_overflow_bitfield
,/* complain_on_overflow */
1609 NULL
, /* special_function */
1610 "R_ARM_TLS_LE32", /* name */
1611 TRUE
, /* partial_inplace */
1612 0xffffffff, /* src_mask */
1613 0xffffffff, /* dst_mask */
1614 FALSE
), /* pcrel_offset */
1616 HOWTO (R_ARM_TLS_LDO12
, /* type */
1618 2, /* size (0 = byte, 1 = short, 2 = long) */
1620 FALSE
, /* pc_relative */
1622 complain_overflow_bitfield
,/* complain_on_overflow */
1623 bfd_elf_generic_reloc
, /* special_function */
1624 "R_ARM_TLS_LDO12", /* name */
1625 FALSE
, /* partial_inplace */
1626 0x00000fff, /* src_mask */
1627 0x00000fff, /* dst_mask */
1628 FALSE
), /* pcrel_offset */
1630 HOWTO (R_ARM_TLS_LE12
, /* type */
1632 2, /* size (0 = byte, 1 = short, 2 = long) */
1634 FALSE
, /* pc_relative */
1636 complain_overflow_bitfield
,/* complain_on_overflow */
1637 bfd_elf_generic_reloc
, /* special_function */
1638 "R_ARM_TLS_LE12", /* name */
1639 FALSE
, /* partial_inplace */
1640 0x00000fff, /* src_mask */
1641 0x00000fff, /* dst_mask */
1642 FALSE
), /* pcrel_offset */
1644 HOWTO (R_ARM_TLS_IE12GP
, /* type */
1646 2, /* size (0 = byte, 1 = short, 2 = long) */
1648 FALSE
, /* pc_relative */
1650 complain_overflow_bitfield
,/* complain_on_overflow */
1651 bfd_elf_generic_reloc
, /* special_function */
1652 "R_ARM_TLS_IE12GP", /* name */
1653 FALSE
, /* partial_inplace */
1654 0x00000fff, /* src_mask */
1655 0x00000fff, /* dst_mask */
1656 FALSE
), /* pcrel_offset */
1658 /* 112-127 private relocations. */
1676 /* R_ARM_ME_TOO, obsolete. */
1679 HOWTO (R_ARM_THM_TLS_DESCSEQ
, /* type */
1681 1, /* size (0 = byte, 1 = short, 2 = long) */
1683 FALSE
, /* pc_relative */
1685 complain_overflow_bitfield
,/* complain_on_overflow */
1686 bfd_elf_generic_reloc
, /* special_function */
1687 "R_ARM_THM_TLS_DESCSEQ",/* name */
1688 FALSE
, /* partial_inplace */
1689 0x00000000, /* src_mask */
1690 0x00000000, /* dst_mask */
1691 FALSE
), /* pcrel_offset */
1694 HOWTO (R_ARM_THM_ALU_ABS_G0_NC
,/* type. */
1695 0, /* rightshift. */
1696 1, /* size (0 = byte, 1 = short, 2 = long). */
1698 FALSE
, /* pc_relative. */
1700 complain_overflow_bitfield
,/* complain_on_overflow. */
1701 bfd_elf_generic_reloc
, /* special_function. */
1702 "R_ARM_THM_ALU_ABS_G0_NC",/* name. */
1703 FALSE
, /* partial_inplace. */
1704 0x00000000, /* src_mask. */
1705 0x00000000, /* dst_mask. */
1706 FALSE
), /* pcrel_offset. */
1707 HOWTO (R_ARM_THM_ALU_ABS_G1_NC
,/* type. */
1708 0, /* rightshift. */
1709 1, /* size (0 = byte, 1 = short, 2 = long). */
1711 FALSE
, /* pc_relative. */
1713 complain_overflow_bitfield
,/* complain_on_overflow. */
1714 bfd_elf_generic_reloc
, /* special_function. */
1715 "R_ARM_THM_ALU_ABS_G1_NC",/* name. */
1716 FALSE
, /* partial_inplace. */
1717 0x00000000, /* src_mask. */
1718 0x00000000, /* dst_mask. */
1719 FALSE
), /* pcrel_offset. */
1720 HOWTO (R_ARM_THM_ALU_ABS_G2_NC
,/* type. */
1721 0, /* rightshift. */
1722 1, /* size (0 = byte, 1 = short, 2 = long). */
1724 FALSE
, /* pc_relative. */
1726 complain_overflow_bitfield
,/* complain_on_overflow. */
1727 bfd_elf_generic_reloc
, /* special_function. */
1728 "R_ARM_THM_ALU_ABS_G2_NC",/* name. */
1729 FALSE
, /* partial_inplace. */
1730 0x00000000, /* src_mask. */
1731 0x00000000, /* dst_mask. */
1732 FALSE
), /* pcrel_offset. */
1733 HOWTO (R_ARM_THM_ALU_ABS_G3_NC
,/* type. */
1734 0, /* rightshift. */
1735 1, /* size (0 = byte, 1 = short, 2 = long). */
1737 FALSE
, /* pc_relative. */
1739 complain_overflow_bitfield
,/* complain_on_overflow. */
1740 bfd_elf_generic_reloc
, /* special_function. */
1741 "R_ARM_THM_ALU_ABS_G3_NC",/* name. */
1742 FALSE
, /* partial_inplace. */
1743 0x00000000, /* src_mask. */
1744 0x00000000, /* dst_mask. */
1745 FALSE
), /* pcrel_offset. */
1749 static reloc_howto_type elf32_arm_howto_table_2
[8] =
1751 HOWTO (R_ARM_IRELATIVE
, /* type */
1753 2, /* size (0 = byte, 1 = short, 2 = long) */
1755 FALSE
, /* pc_relative */
1757 complain_overflow_bitfield
,/* complain_on_overflow */
1758 bfd_elf_generic_reloc
, /* special_function */
1759 "R_ARM_IRELATIVE", /* name */
1760 TRUE
, /* partial_inplace */
1761 0xffffffff, /* src_mask */
1762 0xffffffff, /* dst_mask */
1763 FALSE
), /* pcrel_offset */
1764 HOWTO (R_ARM_GOTFUNCDESC
, /* type */
1766 2, /* size (0 = byte, 1 = short, 2 = long) */
1768 FALSE
, /* pc_relative */
1770 complain_overflow_bitfield
,/* complain_on_overflow */
1771 bfd_elf_generic_reloc
, /* special_function */
1772 "R_ARM_GOTFUNCDESC", /* name */
1773 FALSE
, /* partial_inplace */
1775 0xffffffff, /* dst_mask */
1776 FALSE
), /* pcrel_offset */
1777 HOWTO (R_ARM_GOTOFFFUNCDESC
, /* type */
1779 2, /* size (0 = byte, 1 = short, 2 = long) */
1781 FALSE
, /* pc_relative */
1783 complain_overflow_bitfield
,/* complain_on_overflow */
1784 bfd_elf_generic_reloc
, /* special_function */
1785 "R_ARM_GOTOFFFUNCDESC",/* name */
1786 FALSE
, /* partial_inplace */
1788 0xffffffff, /* dst_mask */
1789 FALSE
), /* pcrel_offset */
1790 HOWTO (R_ARM_FUNCDESC
, /* type */
1792 2, /* size (0 = byte, 1 = short, 2 = long) */
1794 FALSE
, /* pc_relative */
1796 complain_overflow_bitfield
,/* complain_on_overflow */
1797 bfd_elf_generic_reloc
, /* special_function */
1798 "R_ARM_FUNCDESC", /* name */
1799 FALSE
, /* partial_inplace */
1801 0xffffffff, /* dst_mask */
1802 FALSE
), /* pcrel_offset */
1803 HOWTO (R_ARM_FUNCDESC_VALUE
, /* type */
1805 2, /* size (0 = byte, 1 = short, 2 = long) */
1807 FALSE
, /* pc_relative */
1809 complain_overflow_bitfield
,/* complain_on_overflow */
1810 bfd_elf_generic_reloc
, /* special_function */
1811 "R_ARM_FUNCDESC_VALUE",/* name */
1812 FALSE
, /* partial_inplace */
1814 0xffffffff, /* dst_mask */
1815 FALSE
), /* pcrel_offset */
1816 HOWTO (R_ARM_TLS_GD32_FDPIC
, /* type */
1818 2, /* size (0 = byte, 1 = short, 2 = long) */
1820 FALSE
, /* pc_relative */
1822 complain_overflow_bitfield
,/* complain_on_overflow */
1823 bfd_elf_generic_reloc
, /* special_function */
1824 "R_ARM_TLS_GD32_FDPIC",/* name */
1825 FALSE
, /* partial_inplace */
1827 0xffffffff, /* dst_mask */
1828 FALSE
), /* pcrel_offset */
1829 HOWTO (R_ARM_TLS_LDM32_FDPIC
, /* type */
1831 2, /* size (0 = byte, 1 = short, 2 = long) */
1833 FALSE
, /* pc_relative */
1835 complain_overflow_bitfield
,/* complain_on_overflow */
1836 bfd_elf_generic_reloc
, /* special_function */
1837 "R_ARM_TLS_LDM32_FDPIC",/* name */
1838 FALSE
, /* partial_inplace */
1840 0xffffffff, /* dst_mask */
1841 FALSE
), /* pcrel_offset */
1842 HOWTO (R_ARM_TLS_IE32_FDPIC
, /* type */
1844 2, /* size (0 = byte, 1 = short, 2 = long) */
1846 FALSE
, /* pc_relative */
1848 complain_overflow_bitfield
,/* complain_on_overflow */
1849 bfd_elf_generic_reloc
, /* special_function */
1850 "R_ARM_TLS_IE32_FDPIC",/* name */
1851 FALSE
, /* partial_inplace */
1853 0xffffffff, /* dst_mask */
1854 FALSE
), /* pcrel_offset */
1857 /* 249-255 extended, currently unused, relocations: */
1858 static reloc_howto_type elf32_arm_howto_table_3
[4] =
1860 HOWTO (R_ARM_RREL32
, /* type */
1862 0, /* size (0 = byte, 1 = short, 2 = long) */
1864 FALSE
, /* pc_relative */
1866 complain_overflow_dont
,/* complain_on_overflow */
1867 bfd_elf_generic_reloc
, /* special_function */
1868 "R_ARM_RREL32", /* name */
1869 FALSE
, /* partial_inplace */
1872 FALSE
), /* pcrel_offset */
1874 HOWTO (R_ARM_RABS32
, /* type */
1876 0, /* size (0 = byte, 1 = short, 2 = long) */
1878 FALSE
, /* pc_relative */
1880 complain_overflow_dont
,/* complain_on_overflow */
1881 bfd_elf_generic_reloc
, /* special_function */
1882 "R_ARM_RABS32", /* name */
1883 FALSE
, /* partial_inplace */
1886 FALSE
), /* pcrel_offset */
1888 HOWTO (R_ARM_RPC24
, /* type */
1890 0, /* size (0 = byte, 1 = short, 2 = long) */
1892 FALSE
, /* pc_relative */
1894 complain_overflow_dont
,/* complain_on_overflow */
1895 bfd_elf_generic_reloc
, /* special_function */
1896 "R_ARM_RPC24", /* name */
1897 FALSE
, /* partial_inplace */
1900 FALSE
), /* pcrel_offset */
1902 HOWTO (R_ARM_RBASE
, /* type */
1904 0, /* size (0 = byte, 1 = short, 2 = long) */
1906 FALSE
, /* pc_relative */
1908 complain_overflow_dont
,/* complain_on_overflow */
1909 bfd_elf_generic_reloc
, /* special_function */
1910 "R_ARM_RBASE", /* name */
1911 FALSE
, /* partial_inplace */
1914 FALSE
) /* pcrel_offset */
1917 static reloc_howto_type
*
1918 elf32_arm_howto_from_type (unsigned int r_type
)
1920 if (r_type
< ARRAY_SIZE (elf32_arm_howto_table_1
))
1921 return &elf32_arm_howto_table_1
[r_type
];
1923 if (r_type
>= R_ARM_IRELATIVE
1924 && r_type
< R_ARM_IRELATIVE
+ ARRAY_SIZE (elf32_arm_howto_table_2
))
1925 return &elf32_arm_howto_table_2
[r_type
- R_ARM_IRELATIVE
];
1927 if (r_type
>= R_ARM_RREL32
1928 && r_type
< R_ARM_RREL32
+ ARRAY_SIZE (elf32_arm_howto_table_3
))
1929 return &elf32_arm_howto_table_3
[r_type
- R_ARM_RREL32
];
1935 elf32_arm_info_to_howto (bfd
* abfd
, arelent
* bfd_reloc
,
1936 Elf_Internal_Rela
* elf_reloc
)
1938 unsigned int r_type
;
1940 r_type
= ELF32_R_TYPE (elf_reloc
->r_info
);
1941 if ((bfd_reloc
->howto
= elf32_arm_howto_from_type (r_type
)) == NULL
)
1943 /* xgettext:c-format */
1944 _bfd_error_handler (_("%pB: unsupported relocation type %#x"),
1946 bfd_set_error (bfd_error_bad_value
);
1952 struct elf32_arm_reloc_map
1954 bfd_reloc_code_real_type bfd_reloc_val
;
1955 unsigned char elf_reloc_val
;
1958 /* All entries in this list must also be present in elf32_arm_howto_table. */
1959 static const struct elf32_arm_reloc_map elf32_arm_reloc_map
[] =
1961 {BFD_RELOC_NONE
, R_ARM_NONE
},
1962 {BFD_RELOC_ARM_PCREL_BRANCH
, R_ARM_PC24
},
1963 {BFD_RELOC_ARM_PCREL_CALL
, R_ARM_CALL
},
1964 {BFD_RELOC_ARM_PCREL_JUMP
, R_ARM_JUMP24
},
1965 {BFD_RELOC_ARM_PCREL_BLX
, R_ARM_XPC25
},
1966 {BFD_RELOC_THUMB_PCREL_BLX
, R_ARM_THM_XPC22
},
1967 {BFD_RELOC_32
, R_ARM_ABS32
},
1968 {BFD_RELOC_32_PCREL
, R_ARM_REL32
},
1969 {BFD_RELOC_8
, R_ARM_ABS8
},
1970 {BFD_RELOC_16
, R_ARM_ABS16
},
1971 {BFD_RELOC_ARM_OFFSET_IMM
, R_ARM_ABS12
},
1972 {BFD_RELOC_ARM_THUMB_OFFSET
, R_ARM_THM_ABS5
},
1973 {BFD_RELOC_THUMB_PCREL_BRANCH25
, R_ARM_THM_JUMP24
},
1974 {BFD_RELOC_THUMB_PCREL_BRANCH23
, R_ARM_THM_CALL
},
1975 {BFD_RELOC_THUMB_PCREL_BRANCH12
, R_ARM_THM_JUMP11
},
1976 {BFD_RELOC_THUMB_PCREL_BRANCH20
, R_ARM_THM_JUMP19
},
1977 {BFD_RELOC_THUMB_PCREL_BRANCH9
, R_ARM_THM_JUMP8
},
1978 {BFD_RELOC_THUMB_PCREL_BRANCH7
, R_ARM_THM_JUMP6
},
1979 {BFD_RELOC_ARM_GLOB_DAT
, R_ARM_GLOB_DAT
},
1980 {BFD_RELOC_ARM_JUMP_SLOT
, R_ARM_JUMP_SLOT
},
1981 {BFD_RELOC_ARM_RELATIVE
, R_ARM_RELATIVE
},
1982 {BFD_RELOC_ARM_GOTOFF
, R_ARM_GOTOFF32
},
1983 {BFD_RELOC_ARM_GOTPC
, R_ARM_GOTPC
},
1984 {BFD_RELOC_ARM_GOT_PREL
, R_ARM_GOT_PREL
},
1985 {BFD_RELOC_ARM_GOT32
, R_ARM_GOT32
},
1986 {BFD_RELOC_ARM_PLT32
, R_ARM_PLT32
},
1987 {BFD_RELOC_ARM_TARGET1
, R_ARM_TARGET1
},
1988 {BFD_RELOC_ARM_ROSEGREL32
, R_ARM_ROSEGREL32
},
1989 {BFD_RELOC_ARM_SBREL32
, R_ARM_SBREL32
},
1990 {BFD_RELOC_ARM_PREL31
, R_ARM_PREL31
},
1991 {BFD_RELOC_ARM_TARGET2
, R_ARM_TARGET2
},
1992 {BFD_RELOC_ARM_PLT32
, R_ARM_PLT32
},
1993 {BFD_RELOC_ARM_TLS_GOTDESC
, R_ARM_TLS_GOTDESC
},
1994 {BFD_RELOC_ARM_TLS_CALL
, R_ARM_TLS_CALL
},
1995 {BFD_RELOC_ARM_THM_TLS_CALL
, R_ARM_THM_TLS_CALL
},
1996 {BFD_RELOC_ARM_TLS_DESCSEQ
, R_ARM_TLS_DESCSEQ
},
1997 {BFD_RELOC_ARM_THM_TLS_DESCSEQ
, R_ARM_THM_TLS_DESCSEQ
},
1998 {BFD_RELOC_ARM_TLS_DESC
, R_ARM_TLS_DESC
},
1999 {BFD_RELOC_ARM_TLS_GD32
, R_ARM_TLS_GD32
},
2000 {BFD_RELOC_ARM_TLS_LDO32
, R_ARM_TLS_LDO32
},
2001 {BFD_RELOC_ARM_TLS_LDM32
, R_ARM_TLS_LDM32
},
2002 {BFD_RELOC_ARM_TLS_DTPMOD32
, R_ARM_TLS_DTPMOD32
},
2003 {BFD_RELOC_ARM_TLS_DTPOFF32
, R_ARM_TLS_DTPOFF32
},
2004 {BFD_RELOC_ARM_TLS_TPOFF32
, R_ARM_TLS_TPOFF32
},
2005 {BFD_RELOC_ARM_TLS_IE32
, R_ARM_TLS_IE32
},
2006 {BFD_RELOC_ARM_TLS_LE32
, R_ARM_TLS_LE32
},
2007 {BFD_RELOC_ARM_IRELATIVE
, R_ARM_IRELATIVE
},
2008 {BFD_RELOC_ARM_GOTFUNCDESC
, R_ARM_GOTFUNCDESC
},
2009 {BFD_RELOC_ARM_GOTOFFFUNCDESC
, R_ARM_GOTOFFFUNCDESC
},
2010 {BFD_RELOC_ARM_FUNCDESC
, R_ARM_FUNCDESC
},
2011 {BFD_RELOC_ARM_FUNCDESC_VALUE
, R_ARM_FUNCDESC_VALUE
},
2012 {BFD_RELOC_ARM_TLS_GD32_FDPIC
, R_ARM_TLS_GD32_FDPIC
},
2013 {BFD_RELOC_ARM_TLS_LDM32_FDPIC
, R_ARM_TLS_LDM32_FDPIC
},
2014 {BFD_RELOC_ARM_TLS_IE32_FDPIC
, R_ARM_TLS_IE32_FDPIC
},
2015 {BFD_RELOC_VTABLE_INHERIT
, R_ARM_GNU_VTINHERIT
},
2016 {BFD_RELOC_VTABLE_ENTRY
, R_ARM_GNU_VTENTRY
},
2017 {BFD_RELOC_ARM_MOVW
, R_ARM_MOVW_ABS_NC
},
2018 {BFD_RELOC_ARM_MOVT
, R_ARM_MOVT_ABS
},
2019 {BFD_RELOC_ARM_MOVW_PCREL
, R_ARM_MOVW_PREL_NC
},
2020 {BFD_RELOC_ARM_MOVT_PCREL
, R_ARM_MOVT_PREL
},
2021 {BFD_RELOC_ARM_THUMB_MOVW
, R_ARM_THM_MOVW_ABS_NC
},
2022 {BFD_RELOC_ARM_THUMB_MOVT
, R_ARM_THM_MOVT_ABS
},
2023 {BFD_RELOC_ARM_THUMB_MOVW_PCREL
, R_ARM_THM_MOVW_PREL_NC
},
2024 {BFD_RELOC_ARM_THUMB_MOVT_PCREL
, R_ARM_THM_MOVT_PREL
},
2025 {BFD_RELOC_ARM_ALU_PC_G0_NC
, R_ARM_ALU_PC_G0_NC
},
2026 {BFD_RELOC_ARM_ALU_PC_G0
, R_ARM_ALU_PC_G0
},
2027 {BFD_RELOC_ARM_ALU_PC_G1_NC
, R_ARM_ALU_PC_G1_NC
},
2028 {BFD_RELOC_ARM_ALU_PC_G1
, R_ARM_ALU_PC_G1
},
2029 {BFD_RELOC_ARM_ALU_PC_G2
, R_ARM_ALU_PC_G2
},
2030 {BFD_RELOC_ARM_LDR_PC_G0
, R_ARM_LDR_PC_G0
},
2031 {BFD_RELOC_ARM_LDR_PC_G1
, R_ARM_LDR_PC_G1
},
2032 {BFD_RELOC_ARM_LDR_PC_G2
, R_ARM_LDR_PC_G2
},
2033 {BFD_RELOC_ARM_LDRS_PC_G0
, R_ARM_LDRS_PC_G0
},
2034 {BFD_RELOC_ARM_LDRS_PC_G1
, R_ARM_LDRS_PC_G1
},
2035 {BFD_RELOC_ARM_LDRS_PC_G2
, R_ARM_LDRS_PC_G2
},
2036 {BFD_RELOC_ARM_LDC_PC_G0
, R_ARM_LDC_PC_G0
},
2037 {BFD_RELOC_ARM_LDC_PC_G1
, R_ARM_LDC_PC_G1
},
2038 {BFD_RELOC_ARM_LDC_PC_G2
, R_ARM_LDC_PC_G2
},
2039 {BFD_RELOC_ARM_ALU_SB_G0_NC
, R_ARM_ALU_SB_G0_NC
},
2040 {BFD_RELOC_ARM_ALU_SB_G0
, R_ARM_ALU_SB_G0
},
2041 {BFD_RELOC_ARM_ALU_SB_G1_NC
, R_ARM_ALU_SB_G1_NC
},
2042 {BFD_RELOC_ARM_ALU_SB_G1
, R_ARM_ALU_SB_G1
},
2043 {BFD_RELOC_ARM_ALU_SB_G2
, R_ARM_ALU_SB_G2
},
2044 {BFD_RELOC_ARM_LDR_SB_G0
, R_ARM_LDR_SB_G0
},
2045 {BFD_RELOC_ARM_LDR_SB_G1
, R_ARM_LDR_SB_G1
},
2046 {BFD_RELOC_ARM_LDR_SB_G2
, R_ARM_LDR_SB_G2
},
2047 {BFD_RELOC_ARM_LDRS_SB_G0
, R_ARM_LDRS_SB_G0
},
2048 {BFD_RELOC_ARM_LDRS_SB_G1
, R_ARM_LDRS_SB_G1
},
2049 {BFD_RELOC_ARM_LDRS_SB_G2
, R_ARM_LDRS_SB_G2
},
2050 {BFD_RELOC_ARM_LDC_SB_G0
, R_ARM_LDC_SB_G0
},
2051 {BFD_RELOC_ARM_LDC_SB_G1
, R_ARM_LDC_SB_G1
},
2052 {BFD_RELOC_ARM_LDC_SB_G2
, R_ARM_LDC_SB_G2
},
2053 {BFD_RELOC_ARM_V4BX
, R_ARM_V4BX
},
2054 {BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
, R_ARM_THM_ALU_ABS_G3_NC
},
2055 {BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC
, R_ARM_THM_ALU_ABS_G2_NC
},
2056 {BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC
, R_ARM_THM_ALU_ABS_G1_NC
},
2057 {BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
, R_ARM_THM_ALU_ABS_G0_NC
}
2060 static reloc_howto_type
*
2061 elf32_arm_reloc_type_lookup (bfd
*abfd ATTRIBUTE_UNUSED
,
2062 bfd_reloc_code_real_type code
)
2066 for (i
= 0; i
< ARRAY_SIZE (elf32_arm_reloc_map
); i
++)
2067 if (elf32_arm_reloc_map
[i
].bfd_reloc_val
== code
)
2068 return elf32_arm_howto_from_type (elf32_arm_reloc_map
[i
].elf_reloc_val
);
2073 static reloc_howto_type
*
2074 elf32_arm_reloc_name_lookup (bfd
*abfd ATTRIBUTE_UNUSED
,
2079 for (i
= 0; i
< ARRAY_SIZE (elf32_arm_howto_table_1
); i
++)
2080 if (elf32_arm_howto_table_1
[i
].name
!= NULL
2081 && strcasecmp (elf32_arm_howto_table_1
[i
].name
, r_name
) == 0)
2082 return &elf32_arm_howto_table_1
[i
];
2084 for (i
= 0; i
< ARRAY_SIZE (elf32_arm_howto_table_2
); i
++)
2085 if (elf32_arm_howto_table_2
[i
].name
!= NULL
2086 && strcasecmp (elf32_arm_howto_table_2
[i
].name
, r_name
) == 0)
2087 return &elf32_arm_howto_table_2
[i
];
2089 for (i
= 0; i
< ARRAY_SIZE (elf32_arm_howto_table_3
); i
++)
2090 if (elf32_arm_howto_table_3
[i
].name
!= NULL
2091 && strcasecmp (elf32_arm_howto_table_3
[i
].name
, r_name
) == 0)
2092 return &elf32_arm_howto_table_3
[i
];
2097 /* Support for core dump NOTE sections. */
2100 elf32_arm_nabi_grok_prstatus (bfd
*abfd
, Elf_Internal_Note
*note
)
2105 switch (note
->descsz
)
2110 case 148: /* Linux/ARM 32-bit. */
2112 elf_tdata (abfd
)->core
->signal
= bfd_get_16 (abfd
, note
->descdata
+ 12);
2115 elf_tdata (abfd
)->core
->lwpid
= bfd_get_32 (abfd
, note
->descdata
+ 24);
2124 /* Make a ".reg/999" section. */
2125 return _bfd_elfcore_make_pseudosection (abfd
, ".reg",
2126 size
, note
->descpos
+ offset
);
2130 elf32_arm_nabi_grok_psinfo (bfd
*abfd
, Elf_Internal_Note
*note
)
2132 switch (note
->descsz
)
2137 case 124: /* Linux/ARM elf_prpsinfo. */
2138 elf_tdata (abfd
)->core
->pid
2139 = bfd_get_32 (abfd
, note
->descdata
+ 12);
2140 elf_tdata (abfd
)->core
->program
2141 = _bfd_elfcore_strndup (abfd
, note
->descdata
+ 28, 16);
2142 elf_tdata (abfd
)->core
->command
2143 = _bfd_elfcore_strndup (abfd
, note
->descdata
+ 44, 80);
2146 /* Note that for some reason, a spurious space is tacked
2147 onto the end of the args in some (at least one anyway)
2148 implementations, so strip it off if it exists. */
2150 char *command
= elf_tdata (abfd
)->core
->command
;
2151 int n
= strlen (command
);
2153 if (0 < n
&& command
[n
- 1] == ' ')
2154 command
[n
- 1] = '\0';
2161 elf32_arm_nabi_write_core_note (bfd
*abfd
, char *buf
, int *bufsiz
,
2171 char data
[124] ATTRIBUTE_NONSTRING
;
2174 va_start (ap
, note_type
);
2175 memset (data
, 0, sizeof (data
));
2176 strncpy (data
+ 28, va_arg (ap
, const char *), 16);
2177 #if GCC_VERSION == 8000 || GCC_VERSION == 8001
2179 /* GCC 8.0 and 8.1 warn about 80 equals destination size with
2180 -Wstringop-truncation:
2181 https://gcc.gnu.org/bugzilla/show_bug.cgi?id=85643
2183 DIAGNOSTIC_IGNORE_STRINGOP_TRUNCATION
;
2185 strncpy (data
+ 44, va_arg (ap
, const char *), 80);
2186 #if GCC_VERSION == 8000 || GCC_VERSION == 8001
2191 return elfcore_write_note (abfd
, buf
, bufsiz
,
2192 "CORE", note_type
, data
, sizeof (data
));
2203 va_start (ap
, note_type
);
2204 memset (data
, 0, sizeof (data
));
2205 pid
= va_arg (ap
, long);
2206 bfd_put_32 (abfd
, pid
, data
+ 24);
2207 cursig
= va_arg (ap
, int);
2208 bfd_put_16 (abfd
, cursig
, data
+ 12);
2209 greg
= va_arg (ap
, const void *);
2210 memcpy (data
+ 72, greg
, 72);
2213 return elfcore_write_note (abfd
, buf
, bufsiz
,
2214 "CORE", note_type
, data
, sizeof (data
));
#define TARGET_LITTLE_SYM		arm_elf32_le_vec
#define TARGET_LITTLE_NAME		"elf32-littlearm"
#define TARGET_BIG_SYM			arm_elf32_be_vec
#define TARGET_BIG_NAME			"elf32-bigarm"

#define elf_backend_grok_prstatus	elf32_arm_nabi_grok_prstatus
#define elf_backend_grok_psinfo		elf32_arm_nabi_grok_psinfo
#define elf_backend_write_core_note	elf32_arm_nabi_write_core_note

typedef unsigned long int insn32;
typedef unsigned short int insn16;

/* In lieu of proper flags, assume all EABIv4 or later objects are
   interworkable.  */
#define INTERWORK_FLAG(abfd)  \
  (EF_ARM_EABI_VERSION (elf_elfheader (abfd)->e_flags) >= EF_ARM_EABI_VER4 \
  || (elf_elfheader (abfd)->e_flags & EF_ARM_INTERWORK) \
  || ((abfd)->flags & BFD_LINKER_CREATED))

/* The linker script knows the section names for placement.
   The entry_names are used to do simple name mangling on the stubs.
   Given a function name, and its type, the stub can be found. The
   name can be changed. The only requirement is the %s be present.  */
#define THUMB2ARM_GLUE_SECTION_NAME ".glue_7t"
#define THUMB2ARM_GLUE_ENTRY_NAME   "__%s_from_thumb"

#define ARM2THUMB_GLUE_SECTION_NAME ".glue_7"
#define ARM2THUMB_GLUE_ENTRY_NAME   "__%s_from_arm"

#define VFP11_ERRATUM_VENEER_SECTION_NAME ".vfp11_veneer"
#define VFP11_ERRATUM_VENEER_ENTRY_NAME   "__vfp11_veneer_%x"

#define STM32L4XX_ERRATUM_VENEER_SECTION_NAME ".text.stm32l4xx_veneer"
#define STM32L4XX_ERRATUM_VENEER_ENTRY_NAME "__stm32l4xx_veneer_%x"

#define ARM_BX_GLUE_SECTION_NAME ".v4_bx"
#define ARM_BX_GLUE_ENTRY_NAME   "__bx_r%d"

#define STUB_ENTRY_NAME   "__%s_veneer"

#define CMSE_PREFIX "__acle_se_"

/* The name of the dynamic interpreter.  This is put in the .interp
   section.  */
#define ELF_DYNAMIC_INTERPRETER     "/usr/lib/ld.so.1"

/* FDPIC default stack size.  */
#define DEFAULT_STACK_SIZE 0x8000
/* TLS trampoline inserted before a lazily-resolved TLS call.  */
static const unsigned long tls_trampoline [] =
{
  0xe08e0000,		/* add r0, lr, r0 */
  0xe5901004,		/* ldr r1, [r0,#4] */
  0xe12fff11,		/* bx  r1 */
};

/* Trampoline used to enter the lazy TLS descriptor resolver via the GOT.  */
static const unsigned long dl_tlsdesc_lazy_trampoline [] =
{
  0xe52d2004, /* push {r2} */
  0xe59f200c, /* ldr r2, [pc, #3f - . - 8] */
  0xe59f100c, /* ldr r1, [pc, #4f - . - 8] */
  0xe79f2002, /* 1: ldr r2, [pc, r2] */
  0xe081100f, /* 2: add r1, pc */
  0xe12fff12, /* bx r2 */
  0x00000014, /* 3: .word _GLOBAL_OFFSET_TABLE_ - 1b - 8
			+ dl_tlsdesc_lazy_resolver(GOT)  */
  0x00000018, /* 4: .word _GLOBAL_OFFSET_TABLE_ - 2b - 8 */
};
2288 /* ARM FDPIC PLT entry. */
2289 /* The last 5 words contain PLT lazy fragment code and data. */
2290 static const bfd_vma elf32_arm_fdpic_plt_entry
[] =
2292 0xe59fc008, /* ldr r12, .L1 */
2293 0xe08cc009, /* add r12, r12, r9 */
2294 0xe59c9004, /* ldr r9, [r12, #4] */
2295 0xe59cf000, /* ldr pc, [r12] */
2296 0x00000000, /* L1. .word foo(GOTOFFFUNCDESC) */
2297 0x00000000, /* L1. .word foo(funcdesc_value_reloc_offset) */
2298 0xe51fc00c, /* ldr r12, [pc, #-12] */
2299 0xe92d1000, /* push {r12} */
2300 0xe599c004, /* ldr r12, [r9, #4] */
2301 0xe599f000, /* ldr pc, [r9] */
2304 /* Thumb FDPIC PLT entry. */
2305 /* The last 5 words contain PLT lazy fragment code and data. */
2306 static const bfd_vma elf32_arm_fdpic_thumb_plt_entry
[] =
2308 0xc00cf8df, /* ldr.w r12, .L1 */
2309 0x0c09eb0c, /* add.w r12, r12, r9 */
2310 0x9004f8dc, /* ldr.w r9, [r12, #4] */
2311 0xf000f8dc, /* ldr.w pc, [r12] */
2312 0x00000000, /* .L1 .word foo(GOTOFFFUNCDESC) */
2313 0x00000000, /* .L2 .word foo(funcdesc_value_reloc_offset) */
2314 0xc008f85f, /* ldr.w r12, .L2 */
2315 0xcd04f84d, /* push {r12} */
2316 0xc004f8d9, /* ldr.w r12, [r9, #4] */
2317 0xf000f8d9, /* ldr.w pc, [r9] */
2320 #ifdef FOUR_WORD_PLT
2322 /* The first entry in a procedure linkage table looks like
2323 this. It is set up so that any shared library function that is
2324 called before the relocation has been set up calls the dynamic
2326 static const bfd_vma elf32_arm_plt0_entry
[] =
2328 0xe52de004, /* str lr, [sp, #-4]! */
2329 0xe59fe010, /* ldr lr, [pc, #16] */
2330 0xe08fe00e, /* add lr, pc, lr */
2331 0xe5bef008, /* ldr pc, [lr, #8]! */
2334 /* Subsequent entries in a procedure linkage table look like
2336 static const bfd_vma elf32_arm_plt_entry
[] =
2338 0xe28fc600, /* add ip, pc, #NN */
2339 0xe28cca00, /* add ip, ip, #NN */
2340 0xe5bcf000, /* ldr pc, [ip, #NN]! */
2341 0x00000000, /* unused */
2344 #else /* not FOUR_WORD_PLT */
2346 /* The first entry in a procedure linkage table looks like
2347 this. It is set up so that any shared library function that is
2348 called before the relocation has been set up calls the dynamic
2350 static const bfd_vma elf32_arm_plt0_entry
[] =
2352 0xe52de004, /* str lr, [sp, #-4]! */
2353 0xe59fe004, /* ldr lr, [pc, #4] */
2354 0xe08fe00e, /* add lr, pc, lr */
2355 0xe5bef008, /* ldr pc, [lr, #8]! */
2356 0x00000000, /* &GOT[0] - . */
2359 /* By default subsequent entries in a procedure linkage table look like
2360 this. Offsets that don't fit into 28 bits will cause link error. */
2361 static const bfd_vma elf32_arm_plt_entry_short
[] =
2363 0xe28fc600, /* add ip, pc, #0xNN00000 */
2364 0xe28cca00, /* add ip, ip, #0xNN000 */
2365 0xe5bcf000, /* ldr pc, [ip, #0xNNN]! */
2368 /* When explicitly asked, we'll use this "long" entry format
2369 which can cope with arbitrary displacements. */
2370 static const bfd_vma elf32_arm_plt_entry_long
[] =
2372 0xe28fc200, /* add ip, pc, #0xN0000000 */
2373 0xe28cc600, /* add ip, ip, #0xNN00000 */
2374 0xe28cca00, /* add ip, ip, #0xNN000 */
2375 0xe5bcf000, /* ldr pc, [ip, #0xNNN]! */
2378 static bfd_boolean elf32_arm_use_long_plt_entry
= FALSE
;
2380 #endif /* not FOUR_WORD_PLT */
2382 /* The first entry in a procedure linkage table looks like this.
2383 It is set up so that any shared library function that is called before the
2384 relocation has been set up calls the dynamic linker first. */
2385 static const bfd_vma elf32_thumb2_plt0_entry
[] =
2387 /* NOTE: As this is a mixture of 16-bit and 32-bit instructions,
2388 an instruction maybe encoded to one or two array elements. */
2389 0xf8dfb500, /* push {lr} */
2390 0x44fee008, /* ldr.w lr, [pc, #8] */
2392 0xff08f85e, /* ldr.w pc, [lr, #8]! */
2393 0x00000000, /* &GOT[0] - . */
2396 /* Subsequent entries in a procedure linkage table for thumb only target
2398 static const bfd_vma elf32_thumb2_plt_entry
[] =
2400 /* NOTE: As this is a mixture of 16-bit and 32-bit instructions,
2401 an instruction maybe encoded to one or two array elements. */
2402 0x0c00f240, /* movw ip, #0xNNNN */
2403 0x0c00f2c0, /* movt ip, #0xNNNN */
2404 0xf8dc44fc, /* add ip, pc */
2405 0xbf00f000 /* ldr.w pc, [ip] */
2409 /* The format of the first entry in the procedure linkage table
2410 for a VxWorks executable. */
2411 static const bfd_vma elf32_arm_vxworks_exec_plt0_entry
[] =
2413 0xe52dc008, /* str ip,[sp,#-8]! */
2414 0xe59fc000, /* ldr ip,[pc] */
2415 0xe59cf008, /* ldr pc,[ip,#8] */
2416 0x00000000, /* .long _GLOBAL_OFFSET_TABLE_ */
2419 /* The format of subsequent entries in a VxWorks executable. */
2420 static const bfd_vma elf32_arm_vxworks_exec_plt_entry
[] =
2422 0xe59fc000, /* ldr ip,[pc] */
2423 0xe59cf000, /* ldr pc,[ip] */
2424 0x00000000, /* .long @got */
2425 0xe59fc000, /* ldr ip,[pc] */
2426 0xea000000, /* b _PLT */
2427 0x00000000, /* .long @pltindex*sizeof(Elf32_Rela) */
2430 /* The format of entries in a VxWorks shared library. */
2431 static const bfd_vma elf32_arm_vxworks_shared_plt_entry
[] =
2433 0xe59fc000, /* ldr ip,[pc] */
2434 0xe79cf009, /* ldr pc,[ip,r9] */
2435 0x00000000, /* .long @got */
2436 0xe59fc000, /* ldr ip,[pc] */
2437 0xe599f008, /* ldr pc,[r9,#8] */
2438 0x00000000, /* .long @pltindex*sizeof(Elf32_Rela) */
2441 /* An initial stub used if the PLT entry is referenced from Thumb code. */
2442 #define PLT_THUMB_STUB_SIZE 4
2443 static const bfd_vma elf32_arm_plt_thumb_stub
[] =
2449 /* The entries in a PLT when using a DLL-based target with multiple
2451 static const bfd_vma elf32_arm_symbian_plt_entry
[] =
2453 0xe51ff004, /* ldr pc, [pc, #-4] */
2454 0x00000000, /* dcd R_ARM_GLOB_DAT(X) */
2457 /* The first entry in a procedure linkage table looks like
2458 this. It is set up so that any shared library function that is
2459 called before the relocation has been set up calls the dynamic
2461 static const bfd_vma elf32_arm_nacl_plt0_entry
[] =
2464 0xe300c000, /* movw ip, #:lower16:&GOT[2]-.+8 */
2465 0xe340c000, /* movt ip, #:upper16:&GOT[2]-.+8 */
2466 0xe08cc00f, /* add ip, ip, pc */
2467 0xe52dc008, /* str ip, [sp, #-8]! */
2468 /* Second bundle: */
2469 0xe3ccc103, /* bic ip, ip, #0xc0000000 */
2470 0xe59cc000, /* ldr ip, [ip] */
2471 0xe3ccc13f, /* bic ip, ip, #0xc000000f */
2472 0xe12fff1c, /* bx ip */
2474 0xe320f000, /* nop */
2475 0xe320f000, /* nop */
2476 0xe320f000, /* nop */
2478 0xe50dc004, /* str ip, [sp, #-4] */
2479 /* Fourth bundle: */
2480 0xe3ccc103, /* bic ip, ip, #0xc0000000 */
2481 0xe59cc000, /* ldr ip, [ip] */
2482 0xe3ccc13f, /* bic ip, ip, #0xc000000f */
2483 0xe12fff1c, /* bx ip */
2485 #define ARM_NACL_PLT_TAIL_OFFSET (11 * 4)
2487 /* Subsequent entries in a procedure linkage table look like this. */
2488 static const bfd_vma elf32_arm_nacl_plt_entry
[] =
2490 0xe300c000, /* movw ip, #:lower16:&GOT[n]-.+8 */
2491 0xe340c000, /* movt ip, #:upper16:&GOT[n]-.+8 */
2492 0xe08cc00f, /* add ip, ip, pc */
2493 0xea000000, /* b .Lplt_tail */
/* Maximum forward/backward reach of the various branch encodings,
   measured from the branch instruction's own address (hence the +8 / +4
   pipeline adjustments for ARM and Thumb respectively).  */
#define ARM_MAX_FWD_BRANCH_OFFSET  ((((1 << 23) - 1) << 2) + 8)
#define ARM_MAX_BWD_BRANCH_OFFSET  ((-((1 << 23) << 2)) + 8)
#define THM_MAX_FWD_BRANCH_OFFSET  ((1 << 22) -2 + 4)
#define THM_MAX_BWD_BRANCH_OFFSET  (-(1 << 22) + 4)
#define THM2_MAX_FWD_BRANCH_OFFSET (((1 << 24) - 2) + 4)
#define THM2_MAX_BWD_BRANCH_OFFSET (-(1 << 24) + 4)
#define THM2_MAX_FWD_COND_BRANCH_OFFSET (((1 << 20) -2) + 4)
#define THM2_MAX_BWD_COND_BRANCH_OFFSET (-(1 << 20) + 4)
2513 #define THUMB16_INSN(X) {(X), THUMB16_TYPE, R_ARM_NONE, 0}
2514 /* A bit of a hack. A Thumb conditional branch, in which the proper condition
2515 is inserted in arm_build_one_stub(). */
2516 #define THUMB16_BCOND_INSN(X) {(X), THUMB16_TYPE, R_ARM_NONE, 1}
2517 #define THUMB32_INSN(X) {(X), THUMB32_TYPE, R_ARM_NONE, 0}
2518 #define THUMB32_MOVT(X) {(X), THUMB32_TYPE, R_ARM_THM_MOVT_ABS, 0}
2519 #define THUMB32_MOVW(X) {(X), THUMB32_TYPE, R_ARM_THM_MOVW_ABS_NC, 0}
2520 #define THUMB32_B_INSN(X, Z) {(X), THUMB32_TYPE, R_ARM_THM_JUMP24, (Z)}
2521 #define ARM_INSN(X) {(X), ARM_TYPE, R_ARM_NONE, 0}
2522 #define ARM_REL_INSN(X, Z) {(X), ARM_TYPE, R_ARM_JUMP24, (Z)}
2523 #define DATA_WORD(X,Y,Z) {(X), DATA_TYPE, (Y), (Z)}
2528 enum stub_insn_type type
;
2529 unsigned int r_type
;
2533 /* Arm/Thumb -> Arm/Thumb long branch stub. On V5T and above, use blx
2534 to reach the stub if necessary. */
2535 static const insn_sequence elf32_arm_stub_long_branch_any_any
[] =
2537 ARM_INSN (0xe51ff004), /* ldr pc, [pc, #-4] */
2538 DATA_WORD (0, R_ARM_ABS32
, 0), /* dcd R_ARM_ABS32(X) */
2541 /* V4T Arm -> Thumb long branch stub. Used on V4T where blx is not
2543 static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb
[] =
2545 ARM_INSN (0xe59fc000), /* ldr ip, [pc, #0] */
2546 ARM_INSN (0xe12fff1c), /* bx ip */
2547 DATA_WORD (0, R_ARM_ABS32
, 0), /* dcd R_ARM_ABS32(X) */
2550 /* Thumb -> Thumb long branch stub. Used on M-profile architectures. */
2551 static const insn_sequence elf32_arm_stub_long_branch_thumb_only
[] =
2553 THUMB16_INSN (0xb401), /* push {r0} */
2554 THUMB16_INSN (0x4802), /* ldr r0, [pc, #8] */
2555 THUMB16_INSN (0x4684), /* mov ip, r0 */
2556 THUMB16_INSN (0xbc01), /* pop {r0} */
2557 THUMB16_INSN (0x4760), /* bx ip */
2558 THUMB16_INSN (0xbf00), /* nop */
2559 DATA_WORD (0, R_ARM_ABS32
, 0), /* dcd R_ARM_ABS32(X) */
2562 /* Thumb -> Thumb long branch stub in thumb2 encoding. Used on armv7. */
2563 static const insn_sequence elf32_arm_stub_long_branch_thumb2_only
[] =
2565 THUMB32_INSN (0xf85ff000), /* ldr.w pc, [pc, #-0] */
2566 DATA_WORD (0, R_ARM_ABS32
, 0), /* dcd R_ARM_ABS32(x) */
2569 /* Thumb -> Thumb long branch stub. Used for PureCode sections on Thumb2
2570 M-profile architectures. */
2571 static const insn_sequence elf32_arm_stub_long_branch_thumb2_only_pure
[] =
2573 THUMB32_MOVW (0xf2400c00), /* mov.w ip, R_ARM_MOVW_ABS_NC */
2574 THUMB32_MOVT (0xf2c00c00), /* movt ip, R_ARM_MOVT_ABS << 16 */
2575 THUMB16_INSN (0x4760), /* bx ip */
2578 /* V4T Thumb -> Thumb long branch stub. Using the stack is not
2580 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb
[] =
2582 THUMB16_INSN (0x4778), /* bx pc */
2583 THUMB16_INSN (0x46c0), /* nop */
2584 ARM_INSN (0xe59fc000), /* ldr ip, [pc, #0] */
2585 ARM_INSN (0xe12fff1c), /* bx ip */
2586 DATA_WORD (0, R_ARM_ABS32
, 0), /* dcd R_ARM_ABS32(X) */
2589 /* V4T Thumb -> ARM long branch stub. Used on V4T where blx is not
2591 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm
[] =
2593 THUMB16_INSN (0x4778), /* bx pc */
2594 THUMB16_INSN (0x46c0), /* nop */
2595 ARM_INSN (0xe51ff004), /* ldr pc, [pc, #-4] */
2596 DATA_WORD (0, R_ARM_ABS32
, 0), /* dcd R_ARM_ABS32(X) */
2599 /* V4T Thumb -> ARM short branch stub. Shorter variant of the above
2600 one, when the destination is close enough. */
2601 static const insn_sequence elf32_arm_stub_short_branch_v4t_thumb_arm
[] =
2603 THUMB16_INSN (0x4778), /* bx pc */
2604 THUMB16_INSN (0x46c0), /* nop */
2605 ARM_REL_INSN (0xea000000, -8), /* b (X-8) */
2608 /* ARM/Thumb -> ARM long branch stub, PIC. On V5T and above, use
2609 blx to reach the stub if necessary. */
2610 static const insn_sequence elf32_arm_stub_long_branch_any_arm_pic
[] =
2612 ARM_INSN (0xe59fc000), /* ldr ip, [pc] */
2613 ARM_INSN (0xe08ff00c), /* add pc, pc, ip */
2614 DATA_WORD (0, R_ARM_REL32
, -4), /* dcd R_ARM_REL32(X-4) */
2617 /* ARM/Thumb -> Thumb long branch stub, PIC. On V5T and above, use
2618 blx to reach the stub if necessary. We can not add into pc;
2619 it is not guaranteed to mode switch (different in ARMv6 and
2621 static const insn_sequence elf32_arm_stub_long_branch_any_thumb_pic
[] =
2623 ARM_INSN (0xe59fc004), /* ldr ip, [pc, #4] */
2624 ARM_INSN (0xe08fc00c), /* add ip, pc, ip */
2625 ARM_INSN (0xe12fff1c), /* bx ip */
2626 DATA_WORD (0, R_ARM_REL32
, 0), /* dcd R_ARM_REL32(X) */
2629 /* V4T ARM -> ARM long branch stub, PIC. */
2630 static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb_pic
[] =
2632 ARM_INSN (0xe59fc004), /* ldr ip, [pc, #4] */
2633 ARM_INSN (0xe08fc00c), /* add ip, pc, ip */
2634 ARM_INSN (0xe12fff1c), /* bx ip */
2635 DATA_WORD (0, R_ARM_REL32
, 0), /* dcd R_ARM_REL32(X) */
2638 /* V4T Thumb -> ARM long branch stub, PIC. */
2639 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm_pic
[] =
2641 THUMB16_INSN (0x4778), /* bx pc */
2642 THUMB16_INSN (0x46c0), /* nop */
2643 ARM_INSN (0xe59fc000), /* ldr ip, [pc, #0] */
2644 ARM_INSN (0xe08cf00f), /* add pc, ip, pc */
2645 DATA_WORD (0, R_ARM_REL32
, -4), /* dcd R_ARM_REL32(X) */
2648 /* Thumb -> Thumb long branch stub, PIC. Used on M-profile
2650 static const insn_sequence elf32_arm_stub_long_branch_thumb_only_pic
[] =
2652 THUMB16_INSN (0xb401), /* push {r0} */
2653 THUMB16_INSN (0x4802), /* ldr r0, [pc, #8] */
2654 THUMB16_INSN (0x46fc), /* mov ip, pc */
2655 THUMB16_INSN (0x4484), /* add ip, r0 */
2656 THUMB16_INSN (0xbc01), /* pop {r0} */
2657 THUMB16_INSN (0x4760), /* bx ip */
2658 DATA_WORD (0, R_ARM_REL32
, 4), /* dcd R_ARM_REL32(X) */
2661 /* V4T Thumb -> Thumb long branch stub, PIC. Using the stack is not
2663 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb_pic
[] =
2665 THUMB16_INSN (0x4778), /* bx pc */
2666 THUMB16_INSN (0x46c0), /* nop */
2667 ARM_INSN (0xe59fc004), /* ldr ip, [pc, #4] */
2668 ARM_INSN (0xe08fc00c), /* add ip, pc, ip */
2669 ARM_INSN (0xe12fff1c), /* bx ip */
2670 DATA_WORD (0, R_ARM_REL32
, 0), /* dcd R_ARM_REL32(X) */
2673 /* Thumb2/ARM -> TLS trampoline. Lowest common denominator, which is a
2674 long PIC stub. We can use r1 as a scratch -- and cannot use ip. */
2675 static const insn_sequence elf32_arm_stub_long_branch_any_tls_pic
[] =
2677 ARM_INSN (0xe59f1000), /* ldr r1, [pc] */
2678 ARM_INSN (0xe08ff001), /* add pc, pc, r1 */
2679 DATA_WORD (0, R_ARM_REL32
, -4), /* dcd R_ARM_REL32(X-4) */
2682 /* V4T Thumb -> TLS trampoline. lowest common denominator, which is a
2683 long PIC stub. We can use r1 as a scratch -- and cannot use ip. */
2684 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_tls_pic
[] =
2686 THUMB16_INSN (0x4778), /* bx pc */
2687 THUMB16_INSN (0x46c0), /* nop */
2688 ARM_INSN (0xe59f1000), /* ldr r1, [pc, #0] */
2689 ARM_INSN (0xe081f00f), /* add pc, r1, pc */
2690 DATA_WORD (0, R_ARM_REL32
, -4), /* dcd R_ARM_REL32(X) */
2693 /* NaCl ARM -> ARM long branch stub. */
2694 static const insn_sequence elf32_arm_stub_long_branch_arm_nacl
[] =
2696 ARM_INSN (0xe59fc00c), /* ldr ip, [pc, #12] */
2697 ARM_INSN (0xe3ccc13f), /* bic ip, ip, #0xc000000f */
2698 ARM_INSN (0xe12fff1c), /* bx ip */
2699 ARM_INSN (0xe320f000), /* nop */
2700 ARM_INSN (0xe125be70), /* bkpt 0x5be0 */
2701 DATA_WORD (0, R_ARM_ABS32
, 0), /* dcd R_ARM_ABS32(X) */
2702 DATA_WORD (0, R_ARM_NONE
, 0), /* .word 0 */
2703 DATA_WORD (0, R_ARM_NONE
, 0), /* .word 0 */
2706 /* NaCl ARM -> ARM long branch stub, PIC. */
2707 static const insn_sequence elf32_arm_stub_long_branch_arm_nacl_pic
[] =
2709 ARM_INSN (0xe59fc00c), /* ldr ip, [pc, #12] */
2710 ARM_INSN (0xe08cc00f), /* add ip, ip, pc */
2711 ARM_INSN (0xe3ccc13f), /* bic ip, ip, #0xc000000f */
2712 ARM_INSN (0xe12fff1c), /* bx ip */
2713 ARM_INSN (0xe125be70), /* bkpt 0x5be0 */
2714 DATA_WORD (0, R_ARM_REL32
, 8), /* dcd R_ARM_REL32(X+8) */
2715 DATA_WORD (0, R_ARM_NONE
, 0), /* .word 0 */
2716 DATA_WORD (0, R_ARM_NONE
, 0), /* .word 0 */
2719 /* Stub used for transition to secure state (aka SG veneer). */
2720 static const insn_sequence elf32_arm_stub_cmse_branch_thumb_only
[] =
2722 THUMB32_INSN (0xe97fe97f), /* sg. */
2723 THUMB32_B_INSN (0xf000b800, -4), /* b.w original_branch_dest. */
2727 /* Cortex-A8 erratum-workaround stubs. */
2729 /* Stub used for conditional branches (which may be beyond +/-1MB away, so we
2730 can't use a conditional branch to reach this stub). */
2732 static const insn_sequence elf32_arm_stub_a8_veneer_b_cond
[] =
2734 THUMB16_BCOND_INSN (0xd001), /* b<cond>.n true. */
2735 THUMB32_B_INSN (0xf000b800, -4), /* b.w insn_after_original_branch. */
2736 THUMB32_B_INSN (0xf000b800, -4) /* true: b.w original_branch_dest. */
2739 /* Stub used for b.w and bl.w instructions. */
2741 static const insn_sequence elf32_arm_stub_a8_veneer_b
[] =
2743 THUMB32_B_INSN (0xf000b800, -4) /* b.w original_branch_dest. */
2746 static const insn_sequence elf32_arm_stub_a8_veneer_bl
[] =
2748 THUMB32_B_INSN (0xf000b800, -4) /* b.w original_branch_dest. */
2751 /* Stub used for Thumb-2 blx.w instructions. We modified the original blx.w
2752 instruction (which switches to ARM mode) to point to this stub. Jump to the
2753 real destination using an ARM-mode branch. */
2755 static const insn_sequence elf32_arm_stub_a8_veneer_blx
[] =
2757 ARM_REL_INSN (0xea000000, -8) /* b original_branch_dest. */
/* For each section group there can be a specially created linker section
   to hold the stubs for that group.  The name of the stub section is based
   upon the name of another section within that group with the suffix below
   applied.

   PR 13049: STUB_SUFFIX used to be ".stub", but this allowed the user to
   create what appeared to be a linker stub section when it actually
   contained user code/data.  For example, consider this fragment:

     const char * stubborn_problems[] = { "np" };

   If this is compiled with "-fPIC -fdata-sections" then gcc produces a
   section called:

     .data.rel.local.stubborn_problems

   This then causes problems in arm32_arm_build_stubs() as it triggers:

      // Ignore non-stub sections.
      if (!strstr (stub_sec->name, STUB_SUFFIX))
	continue;

   And so the section would be ignored instead of being processed.  Hence
   the change in definition of STUB_SUFFIX to a name that cannot be a valid
   C identifier.  */
#define STUB_SUFFIX ".__stub"
2787 /* One entry per long/short branch stub defined above. */
2789 DEF_STUB(long_branch_any_any) \
2790 DEF_STUB(long_branch_v4t_arm_thumb) \
2791 DEF_STUB(long_branch_thumb_only) \
2792 DEF_STUB(long_branch_v4t_thumb_thumb) \
2793 DEF_STUB(long_branch_v4t_thumb_arm) \
2794 DEF_STUB(short_branch_v4t_thumb_arm) \
2795 DEF_STUB(long_branch_any_arm_pic) \
2796 DEF_STUB(long_branch_any_thumb_pic) \
2797 DEF_STUB(long_branch_v4t_thumb_thumb_pic) \
2798 DEF_STUB(long_branch_v4t_arm_thumb_pic) \
2799 DEF_STUB(long_branch_v4t_thumb_arm_pic) \
2800 DEF_STUB(long_branch_thumb_only_pic) \
2801 DEF_STUB(long_branch_any_tls_pic) \
2802 DEF_STUB(long_branch_v4t_thumb_tls_pic) \
2803 DEF_STUB(long_branch_arm_nacl) \
2804 DEF_STUB(long_branch_arm_nacl_pic) \
2805 DEF_STUB(cmse_branch_thumb_only) \
2806 DEF_STUB(a8_veneer_b_cond) \
2807 DEF_STUB(a8_veneer_b) \
2808 DEF_STUB(a8_veneer_bl) \
2809 DEF_STUB(a8_veneer_blx) \
2810 DEF_STUB(long_branch_thumb2_only) \
2811 DEF_STUB(long_branch_thumb2_only_pure)
2813 #define DEF_STUB(x) arm_stub_##x,
2814 enum elf32_arm_stub_type
2822 /* Note the first a8_veneer type. */
2823 const unsigned arm_stub_a8_veneer_lwm
= arm_stub_a8_veneer_b_cond
;
2827 const insn_sequence
* template_sequence
;
2831 #define DEF_STUB(x) {elf32_arm_stub_##x, ARRAY_SIZE(elf32_arm_stub_##x)},
2832 static const stub_def stub_definitions
[] =
2838 struct elf32_arm_stub_hash_entry
2840 /* Base hash table entry structure. */
2841 struct bfd_hash_entry root
;
2843 /* The stub section. */
2846 /* Offset within stub_sec of the beginning of this stub. */
2847 bfd_vma stub_offset
;
2849 /* Given the symbol's value and its section we can determine its final
2850 value when building the stubs (so the stub knows where to jump). */
2851 bfd_vma target_value
;
2852 asection
*target_section
;
2854 /* Same as above but for the source of the branch to the stub. Used for
2855 Cortex-A8 erratum workaround to patch it to branch to the stub. As
2856 such, source section does not need to be recorded since Cortex-A8 erratum
2857 workaround stubs are only generated when both source and target are in the
2859 bfd_vma source_value
;
2861 /* The instruction which caused this stub to be generated (only valid for
2862 Cortex-A8 erratum workaround stubs at present). */
2863 unsigned long orig_insn
;
2865 /* The stub type. */
2866 enum elf32_arm_stub_type stub_type
;
2867 /* Its encoding size in bytes. */
2870 const insn_sequence
*stub_template
;
2871 /* The size of the template (number of entries). */
2872 int stub_template_size
;
2874 /* The symbol table entry, if any, that this was derived from. */
2875 struct elf32_arm_link_hash_entry
*h
;
2877 /* Type of branch. */
2878 enum arm_st_branch_type branch_type
;
2880 /* Where this stub is being called from, or, in the case of combined
2881 stub sections, the first input section in the group. */
2884 /* The name for the local symbol at the start of this stub. The
2885 stub name in the hash table has to be unique; this does not, so
2886 it can be friendlier. */
2890 /* Used to build a map of a section. This is required for mixed-endian
2893 typedef struct elf32_elf_section_map
2898 elf32_arm_section_map
;
2900 /* Information about a VFP11 erratum veneer, or a branch to such a veneer. */
2904 VFP11_ERRATUM_BRANCH_TO_ARM_VENEER
,
2905 VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER
,
2906 VFP11_ERRATUM_ARM_VENEER
,
2907 VFP11_ERRATUM_THUMB_VENEER
2909 elf32_vfp11_erratum_type
;
2911 typedef struct elf32_vfp11_erratum_list
2913 struct elf32_vfp11_erratum_list
*next
;
2919 struct elf32_vfp11_erratum_list
*veneer
;
2920 unsigned int vfp_insn
;
2924 struct elf32_vfp11_erratum_list
*branch
;
2928 elf32_vfp11_erratum_type type
;
2930 elf32_vfp11_erratum_list
;
2932 /* Information about a STM32L4XX erratum veneer, or a branch to such a
2936 STM32L4XX_ERRATUM_BRANCH_TO_VENEER
,
2937 STM32L4XX_ERRATUM_VENEER
2939 elf32_stm32l4xx_erratum_type
;
2941 typedef struct elf32_stm32l4xx_erratum_list
2943 struct elf32_stm32l4xx_erratum_list
*next
;
2949 struct elf32_stm32l4xx_erratum_list
*veneer
;
2954 struct elf32_stm32l4xx_erratum_list
*branch
;
2958 elf32_stm32l4xx_erratum_type type
;
2960 elf32_stm32l4xx_erratum_list
;
2965 INSERT_EXIDX_CANTUNWIND_AT_END
2967 arm_unwind_edit_type
;
2969 /* A (sorted) list of edits to apply to an unwind table. */
2970 typedef struct arm_unwind_table_edit
2972 arm_unwind_edit_type type
;
2973 /* Note: we sometimes want to insert an unwind entry corresponding to a
2974 section different from the one we're currently writing out, so record the
2975 (text) section this edit relates to here. */
2976 asection
*linked_section
;
2978 struct arm_unwind_table_edit
*next
;
2980 arm_unwind_table_edit
;
2982 typedef struct _arm_elf_section_data
2984 /* Information about mapping symbols. */
2985 struct bfd_elf_section_data elf
;
2986 unsigned int mapcount
;
2987 unsigned int mapsize
;
2988 elf32_arm_section_map
*map
;
2989 /* Information about CPU errata. */
2990 unsigned int erratumcount
;
2991 elf32_vfp11_erratum_list
*erratumlist
;
2992 unsigned int stm32l4xx_erratumcount
;
2993 elf32_stm32l4xx_erratum_list
*stm32l4xx_erratumlist
;
2994 unsigned int additional_reloc_count
;
2995 /* Information about unwind tables. */
2998 /* Unwind info attached to a text section. */
3001 asection
*arm_exidx_sec
;
3004 /* Unwind info attached to an .ARM.exidx section. */
3007 arm_unwind_table_edit
*unwind_edit_list
;
3008 arm_unwind_table_edit
*unwind_edit_tail
;
3012 _arm_elf_section_data
;
3014 #define elf32_arm_section_data(sec) \
3015 ((_arm_elf_section_data *) elf_section_data (sec))
3017 /* A fix which might be required for Cortex-A8 Thumb-2 branch/TLB erratum.
3018 These fixes are subject to a relaxation procedure (in elf32_arm_size_stubs),
3019 so may be created multiple times: we use an array of these entries whilst
3020 relaxing which we can refresh easily, then create stubs for each potentially
3021 erratum-triggering instruction once we've settled on a solution. */
3023 struct a8_erratum_fix
3028 bfd_vma target_offset
;
3029 unsigned long orig_insn
;
3031 enum elf32_arm_stub_type stub_type
;
3032 enum arm_st_branch_type branch_type
;
3035 /* A table of relocs applied to branches which might trigger Cortex-A8
3038 struct a8_erratum_reloc
3041 bfd_vma destination
;
3042 struct elf32_arm_link_hash_entry
*hash
;
3043 const char *sym_name
;
3044 unsigned int r_type
;
3045 enum arm_st_branch_type branch_type
;
3046 bfd_boolean non_a8_stub
;
3049 /* The size of the thread control block. */
3052 /* ARM-specific information about a PLT entry, over and above the usual
3056 /* We reference count Thumb references to a PLT entry separately,
3057 so that we can emit the Thumb trampoline only if needed. */
3058 bfd_signed_vma thumb_refcount
;
3060 /* Some references from Thumb code may be eliminated by BL->BLX
3061 conversion, so record them separately. */
3062 bfd_signed_vma maybe_thumb_refcount
;
3064 /* How many of the recorded PLT accesses were from non-call relocations.
3065 This information is useful when deciding whether anything takes the
3066 address of an STT_GNU_IFUNC PLT. A value of 0 means that all
3067 non-call references to the function should resolve directly to the
3068 real runtime target. */
3069 unsigned int noncall_refcount
;
3071 /* Since PLT entries have variable size if the Thumb prologue is
3072 used, we need to record the index into .got.plt instead of
3073 recomputing it from the PLT offset. */
3074 bfd_signed_vma got_offset
;
3077 /* Information about an .iplt entry for a local STT_GNU_IFUNC symbol. */
3078 struct arm_local_iplt_info
3080 /* The information that is usually found in the generic ELF part of
3081 the hash table entry. */
3082 union gotplt_union root
;
3084 /* The information that is usually found in the ARM-specific part of
3085 the hash table entry. */
3086 struct arm_plt_info arm
;
3088 /* A list of all potential dynamic relocations against this symbol. */
3089 struct elf_dyn_relocs
*dyn_relocs
;
3092 /* Structure to handle FDPIC support for local functions. */
3093 struct fdpic_local
{
3094 unsigned int funcdesc_cnt
;
3095 unsigned int gotofffuncdesc_cnt
;
3096 int funcdesc_offset
;
3099 struct elf_arm_obj_tdata
3101 struct elf_obj_tdata root
;
3103 /* tls_type for each local got entry. */
3104 char *local_got_tls_type
;
3106 /* GOTPLT entries for TLS descriptors. */
3107 bfd_vma
*local_tlsdesc_gotent
;
3109 /* Information for local symbols that need entries in .iplt. */
3110 struct arm_local_iplt_info
**local_iplt
;
3112 /* Zero to warn when linking objects with incompatible enum sizes. */
3113 int no_enum_size_warning
;
3115 /* Zero to warn when linking objects with incompatible wchar_t sizes. */
3116 int no_wchar_size_warning
;
3118 /* Maintains FDPIC counters and funcdesc info. */
3119 struct fdpic_local
*local_fdpic_cnts
;
3122 #define elf_arm_tdata(bfd) \
3123 ((struct elf_arm_obj_tdata *) (bfd)->tdata.any)
3125 #define elf32_arm_local_got_tls_type(bfd) \
3126 (elf_arm_tdata (bfd)->local_got_tls_type)
3128 #define elf32_arm_local_tlsdesc_gotent(bfd) \
3129 (elf_arm_tdata (bfd)->local_tlsdesc_gotent)
3131 #define elf32_arm_local_iplt(bfd) \
3132 (elf_arm_tdata (bfd)->local_iplt)
3134 #define elf32_arm_local_fdpic_cnts(bfd) \
3135 (elf_arm_tdata (bfd)->local_fdpic_cnts)
3137 #define is_arm_elf(bfd) \
3138 (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
3139 && elf_tdata (bfd) != NULL \
3140 && elf_object_id (bfd) == ARM_ELF_DATA)
3143 elf32_arm_mkobject (bfd
*abfd
)
3145 return bfd_elf_allocate_object (abfd
, sizeof (struct elf_arm_obj_tdata
),
3149 #define elf32_arm_hash_entry(ent) ((struct elf32_arm_link_hash_entry *)(ent))
3151 /* Structure to handle FDPIC support for extern functions. */
3152 struct fdpic_global
{
3153 unsigned int gotofffuncdesc_cnt
;
3154 unsigned int gotfuncdesc_cnt
;
3155 unsigned int funcdesc_cnt
;
3156 int funcdesc_offset
;
3157 int gotfuncdesc_offset
;
3160 /* Arm ELF linker hash entry. */
3161 struct elf32_arm_link_hash_entry
3163 struct elf_link_hash_entry root
;
3165 /* Track dynamic relocs copied for this symbol. */
3166 struct elf_dyn_relocs
*dyn_relocs
;
3168 /* ARM-specific PLT information. */
3169 struct arm_plt_info plt
;
3171 #define GOT_UNKNOWN 0
3172 #define GOT_NORMAL 1
3173 #define GOT_TLS_GD 2
3174 #define GOT_TLS_IE 4
3175 #define GOT_TLS_GDESC 8
3176 #define GOT_TLS_GD_ANY_P(type) ((type & GOT_TLS_GD) || (type & GOT_TLS_GDESC))
3177 unsigned int tls_type
: 8;
3179 /* True if the symbol's PLT entry is in .iplt rather than .plt. */
3180 unsigned int is_iplt
: 1;
3182 unsigned int unused
: 23;
3184 /* Offset of the GOTPLT entry reserved for the TLS descriptor,
3185 starting at the end of the jump table. */
3186 bfd_vma tlsdesc_got
;
3188 /* The symbol marking the real symbol location for exported thumb
3189 symbols with Arm stubs. */
3190 struct elf_link_hash_entry
*export_glue
;
3192 /* A pointer to the most recently used stub hash entry against this
3194 struct elf32_arm_stub_hash_entry
*stub_cache
;
3196 /* Counter for FDPIC relocations against this symbol. */
3197 struct fdpic_global fdpic_cnts
;
3200 /* Traverse an arm ELF linker hash table. */
3201 #define elf32_arm_link_hash_traverse(table, func, info) \
3202 (elf_link_hash_traverse \
3204 (bfd_boolean (*) (struct elf_link_hash_entry *, void *)) (func), \
3207 /* Get the ARM elf linker hash table from a link_info structure. */
3208 #define elf32_arm_hash_table(info) \
3209 (elf_hash_table_id ((struct elf_link_hash_table *) ((info)->hash)) \
3210 == ARM_ELF_DATA ? ((struct elf32_arm_link_hash_table *) ((info)->hash)) : NULL)
3212 #define arm_stub_hash_lookup(table, string, create, copy) \
3213 ((struct elf32_arm_stub_hash_entry *) \
3214 bfd_hash_lookup ((table), (string), (create), (copy)))
3216 /* Array to keep track of which stub sections have been created, and
3217 information on stub grouping. */
3220 /* This is the section to which stubs in the group will be
3223 /* The stub section. */
3227 #define elf32_arm_compute_jump_table_size(htab) \
3228 ((htab)->next_tls_desc_index * 4)
3230 /* ARM ELF linker hash table. */
3231 struct elf32_arm_link_hash_table
3233 /* The main hash table. */
3234 struct elf_link_hash_table root
;
3236 /* The size in bytes of the section containing the Thumb-to-ARM glue. */
3237 bfd_size_type thumb_glue_size
;
3239 /* The size in bytes of the section containing the ARM-to-Thumb glue. */
3240 bfd_size_type arm_glue_size
;
3242 /* The size in bytes of section containing the ARMv4 BX veneers. */
3243 bfd_size_type bx_glue_size
;
3245 /* Offsets of ARMv4 BX veneers. Bit1 set if present, and Bit0 set when
3246 veneer has been populated. */
3247 bfd_vma bx_glue_offset
[15];
3249 /* The size in bytes of the section containing glue for VFP11 erratum
3251 bfd_size_type vfp11_erratum_glue_size
;
3253 /* The size in bytes of the section containing glue for STM32L4XX erratum
3255 bfd_size_type stm32l4xx_erratum_glue_size
;
3257 /* A table of fix locations for Cortex-A8 Thumb-2 branch/TLB erratum. This
3258 holds Cortex-A8 erratum fix locations between elf32_arm_size_stubs() and
3259 elf32_arm_write_section(). */
3260 struct a8_erratum_fix
*a8_erratum_fixes
;
3261 unsigned int num_a8_erratum_fixes
;
3263 /* An arbitrary input BFD chosen to hold the glue sections. */
3264 bfd
* bfd_of_glue_owner
;
3266 /* Nonzero to output a BE8 image. */
3269 /* Zero if R_ARM_TARGET1 means R_ARM_ABS32.
3270 Nonzero if R_ARM_TARGET1 means R_ARM_REL32. */
3273 /* The relocation to use for R_ARM_TARGET2 relocations. */
3276 /* 0 = Ignore R_ARM_V4BX.
3277 1 = Convert BX to MOV PC.
3278 2 = Generate v4 interworing stubs. */
3281 /* Whether we should fix the Cortex-A8 Thumb-2 branch/TLB erratum. */
3284 /* Whether we should fix the ARM1176 BLX immediate issue. */
3287 /* Nonzero if the ARM/Thumb BLX instructions are available for use. */
3290 /* What sort of code sequences we should look for which may trigger the
3291 VFP11 denorm erratum. */
3292 bfd_arm_vfp11_fix vfp11_fix
;
3294 /* Global counter for the number of fixes we have emitted. */
3295 int num_vfp11_fixes
;
3297 /* What sort of code sequences we should look for which may trigger the
3298 STM32L4XX erratum. */
3299 bfd_arm_stm32l4xx_fix stm32l4xx_fix
;
3301 /* Global counter for the number of fixes we have emitted. */
3302 int num_stm32l4xx_fixes
;
3304 /* Nonzero to force PIC branch veneers. */
3307 /* The number of bytes in the initial entry in the PLT. */
3308 bfd_size_type plt_header_size
;
3310 /* The number of bytes in the subsequent PLT etries. */
3311 bfd_size_type plt_entry_size
;
3313 /* True if the target system is VxWorks. */
3316 /* True if the target system is Symbian OS. */
3319 /* True if the target system is Native Client. */
3322 /* True if the target uses REL relocations. */
3323 bfd_boolean use_rel
;
3325 /* Nonzero if import library must be a secure gateway import library
3326 as per ARMv8-M Security Extensions. */
3329 /* The import library whose symbols' address must remain stable in
3330 the import library generated. */
3333 /* The index of the next unused R_ARM_TLS_DESC slot in .rel.plt. */
3334 bfd_vma next_tls_desc_index
;
3336 /* How many R_ARM_TLS_DESC relocations were generated so far. */
3337 bfd_vma num_tls_desc
;
3339 /* The (unloaded but important) VxWorks .rela.plt.unloaded section. */
3342 /* The offset into splt of the PLT entry for the TLS descriptor
3343 resolver. Special values are 0, if not necessary (or not found
3344 to be necessary yet), and -1 if needed but not determined
3346 bfd_vma dt_tlsdesc_plt
;
3348 /* The offset into sgot of the GOT entry used by the PLT entry
3350 bfd_vma dt_tlsdesc_got
;
3352 /* Offset in .plt section of tls_arm_trampoline. */
3353 bfd_vma tls_trampoline
;
3355 /* Data for R_ARM_TLS_LDM32/R_ARM_TLS_LDM32_FDPIC relocations. */
3358 bfd_signed_vma refcount
;
3362 /* Small local sym cache. */
3363 struct sym_cache sym_cache
;
3365 /* For convenience in allocate_dynrelocs. */
3368 /* The amount of space used by the reserved portion of the sgotplt
3369 section, plus whatever space is used by the jump slots. */
3370 bfd_vma sgotplt_jump_table_size
;
3372 /* The stub hash table. */
3373 struct bfd_hash_table stub_hash_table
;
3375 /* Linker stub bfd. */
3378 /* Linker call-backs. */
3379 asection
* (*add_stub_section
) (const char *, asection
*, asection
*,
3381 void (*layout_sections_again
) (void);
3383 /* Array to keep track of which stub sections have been created, and
3384 information on stub grouping. */
3385 struct map_stub
*stub_group
;
3387 /* Input stub section holding secure gateway veneers. */
3388 asection
*cmse_stub_sec
;
3390 /* Offset in cmse_stub_sec where new SG veneers (not in input import library)
3391 start to be allocated. */
3392 bfd_vma new_cmse_stub_offset
;
3394 /* Number of elements in stub_group. */
3395 unsigned int top_id
;
3397 /* Assorted information used by elf32_arm_size_stubs. */
3398 unsigned int bfd_count
;
3399 unsigned int top_index
;
3400 asection
**input_list
;
3402 /* True if the target system uses FDPIC. */
3405 /* Fixup section. Used for FDPIC. */
3409 /* Add an FDPIC read-only fixup. */
3411 arm_elf_add_rofixup (bfd
*output_bfd
, asection
*srofixup
, bfd_vma offset
)
3413 bfd_vma fixup_offset
;
3415 fixup_offset
= srofixup
->reloc_count
++ * 4;
3416 BFD_ASSERT (fixup_offset
< srofixup
->size
);
3417 bfd_put_32 (output_bfd
, offset
, srofixup
->contents
+ fixup_offset
);
/* Count the number of trailing zero bits in MASK.  Uses the compiler
   builtin when available; otherwise scans bits from the LSB upwards.
   Returns -1 in the fallback path when MASK is zero (the builtin's
   result is undefined for zero, so callers must not pass 0).  */

static inline int
ctz (unsigned int mask)
{
#if GCC_VERSION >= 3004
  return __builtin_ctz (mask);
#else
  unsigned int i;

  for (i = 0; i < 8 * sizeof (mask); i++)
    {
      if (mask & 0x1)
	return i;
      mask = (mask >> 1);
    }
  return -1;
#endif
}
/* Return the number of bits set in MASK.  Uses the compiler builtin
   when available; otherwise counts set bits one at a time from the
   LSB.  */

static inline int
elf32_arm_popcount (unsigned int mask)
{
#if GCC_VERSION >= 3004
  return __builtin_popcount (mask);
#else
  unsigned int i;
  int sum = 0;

  for (i = 0; i < 8 * sizeof (mask); i++)
    {
      if (mask & 0x1)
	sum++;
      mask = (mask >> 1);
    }
  return sum;
#endif
}
3457 static void elf32_arm_add_dynreloc (bfd
*output_bfd
, struct bfd_link_info
*info
,
3458 asection
*sreloc
, Elf_Internal_Rela
*rel
);
3461 arm_elf_fill_funcdesc(bfd
*output_bfd
,
3462 struct bfd_link_info
*info
,
3463 int *funcdesc_offset
,
3467 bfd_vma dynreloc_value
,
3470 if ((*funcdesc_offset
& 1) == 0)
3472 struct elf32_arm_link_hash_table
*globals
= elf32_arm_hash_table (info
);
3473 asection
*sgot
= globals
->root
.sgot
;
3475 if (bfd_link_pic(info
))
3477 asection
*srelgot
= globals
->root
.srelgot
;
3478 Elf_Internal_Rela outrel
;
3480 outrel
.r_info
= ELF32_R_INFO (dynindx
, R_ARM_FUNCDESC_VALUE
);
3481 outrel
.r_offset
= sgot
->output_section
->vma
+ sgot
->output_offset
+ offset
;
3482 outrel
.r_addend
= 0;
3484 elf32_arm_add_dynreloc (output_bfd
, info
, srelgot
, &outrel
);
3485 bfd_put_32 (output_bfd
, addr
, sgot
->contents
+ offset
);
3486 bfd_put_32 (output_bfd
, seg
, sgot
->contents
+ offset
+ 4);
3490 struct elf_link_hash_entry
*hgot
= globals
->root
.hgot
;
3491 bfd_vma got_value
= hgot
->root
.u
.def
.value
3492 + hgot
->root
.u
.def
.section
->output_section
->vma
3493 + hgot
->root
.u
.def
.section
->output_offset
;
3495 arm_elf_add_rofixup(output_bfd
, globals
->srofixup
,
3496 sgot
->output_section
->vma
+ sgot
->output_offset
3498 arm_elf_add_rofixup(output_bfd
, globals
->srofixup
,
3499 sgot
->output_section
->vma
+ sgot
->output_offset
3501 bfd_put_32 (output_bfd
, dynreloc_value
, sgot
->contents
+ offset
);
3502 bfd_put_32 (output_bfd
, got_value
, sgot
->contents
+ offset
+ 4);
3504 *funcdesc_offset
|= 1;
3508 /* Create an entry in an ARM ELF linker hash table. */
3510 static struct bfd_hash_entry
*
3511 elf32_arm_link_hash_newfunc (struct bfd_hash_entry
* entry
,
3512 struct bfd_hash_table
* table
,
3513 const char * string
)
3515 struct elf32_arm_link_hash_entry
* ret
=
3516 (struct elf32_arm_link_hash_entry
*) entry
;
3518 /* Allocate the structure if it has not already been allocated by a
3521 ret
= (struct elf32_arm_link_hash_entry
*)
3522 bfd_hash_allocate (table
, sizeof (struct elf32_arm_link_hash_entry
));
3524 return (struct bfd_hash_entry
*) ret
;
3526 /* Call the allocation method of the superclass. */
3527 ret
= ((struct elf32_arm_link_hash_entry
*)
3528 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry
*) ret
,
3532 ret
->dyn_relocs
= NULL
;
3533 ret
->tls_type
= GOT_UNKNOWN
;
3534 ret
->tlsdesc_got
= (bfd_vma
) -1;
3535 ret
->plt
.thumb_refcount
= 0;
3536 ret
->plt
.maybe_thumb_refcount
= 0;
3537 ret
->plt
.noncall_refcount
= 0;
3538 ret
->plt
.got_offset
= -1;
3539 ret
->is_iplt
= FALSE
;
3540 ret
->export_glue
= NULL
;
3542 ret
->stub_cache
= NULL
;
3544 ret
->fdpic_cnts
.gotofffuncdesc_cnt
= 0;
3545 ret
->fdpic_cnts
.gotfuncdesc_cnt
= 0;
3546 ret
->fdpic_cnts
.funcdesc_cnt
= 0;
3547 ret
->fdpic_cnts
.funcdesc_offset
= -1;
3548 ret
->fdpic_cnts
.gotfuncdesc_offset
= -1;
3551 return (struct bfd_hash_entry
*) ret
;
3554 /* Ensure that we have allocated bookkeeping structures for ABFD's local
3558 elf32_arm_allocate_local_sym_info (bfd
*abfd
)
3560 if (elf_local_got_refcounts (abfd
) == NULL
)
3562 bfd_size_type num_syms
;
3566 num_syms
= elf_tdata (abfd
)->symtab_hdr
.sh_info
;
3567 size
= num_syms
* (sizeof (bfd_signed_vma
)
3568 + sizeof (struct arm_local_iplt_info
*)
3571 + sizeof (struct fdpic_local
));
3572 data
= bfd_zalloc (abfd
, size
);
3576 elf32_arm_local_fdpic_cnts (abfd
) = (struct fdpic_local
*) data
;
3577 data
+= num_syms
* sizeof (struct fdpic_local
);
3579 elf_local_got_refcounts (abfd
) = (bfd_signed_vma
*) data
;
3580 data
+= num_syms
* sizeof (bfd_signed_vma
);
3582 elf32_arm_local_iplt (abfd
) = (struct arm_local_iplt_info
**) data
;
3583 data
+= num_syms
* sizeof (struct arm_local_iplt_info
*);
3585 elf32_arm_local_tlsdesc_gotent (abfd
) = (bfd_vma
*) data
;
3586 data
+= num_syms
* sizeof (bfd_vma
);
3588 elf32_arm_local_got_tls_type (abfd
) = data
;
3593 /* Return the .iplt information for local symbol R_SYMNDX, which belongs
3594 to input bfd ABFD. Create the information if it doesn't already exist.
3595 Return null if an allocation fails. */
3597 static struct arm_local_iplt_info
*
3598 elf32_arm_create_local_iplt (bfd
*abfd
, unsigned long r_symndx
)
3600 struct arm_local_iplt_info
**ptr
;
3602 if (!elf32_arm_allocate_local_sym_info (abfd
))
3605 BFD_ASSERT (r_symndx
< elf_tdata (abfd
)->symtab_hdr
.sh_info
);
3606 ptr
= &elf32_arm_local_iplt (abfd
)[r_symndx
];
3608 *ptr
= bfd_zalloc (abfd
, sizeof (**ptr
));
3612 /* Try to obtain PLT information for the symbol with index R_SYMNDX
3613 in ABFD's symbol table. If the symbol is global, H points to its
3614 hash table entry, otherwise H is null.
3616 Return true if the symbol does have PLT information. When returning
3617 true, point *ROOT_PLT at the target-independent reference count/offset
3618 union and *ARM_PLT at the ARM-specific information. */
3621 elf32_arm_get_plt_info (bfd
*abfd
, struct elf32_arm_link_hash_table
*globals
,
3622 struct elf32_arm_link_hash_entry
*h
,
3623 unsigned long r_symndx
, union gotplt_union
**root_plt
,
3624 struct arm_plt_info
**arm_plt
)
3626 struct arm_local_iplt_info
*local_iplt
;
3628 if (globals
->root
.splt
== NULL
&& globals
->root
.iplt
== NULL
)
3633 *root_plt
= &h
->root
.plt
;
3638 if (elf32_arm_local_iplt (abfd
) == NULL
)
3641 local_iplt
= elf32_arm_local_iplt (abfd
)[r_symndx
];
3642 if (local_iplt
== NULL
)
3645 *root_plt
= &local_iplt
->root
;
3646 *arm_plt
= &local_iplt
->arm
;
3650 static bfd_boolean
using_thumb_only (struct elf32_arm_link_hash_table
*globals
);
3652 /* Return true if the PLT described by ARM_PLT requires a Thumb stub
3656 elf32_arm_plt_needs_thumb_stub_p (struct bfd_link_info
*info
,
3657 struct arm_plt_info
*arm_plt
)
3659 struct elf32_arm_link_hash_table
*htab
;
3661 htab
= elf32_arm_hash_table (info
);
3663 return (!using_thumb_only(htab
) && (arm_plt
->thumb_refcount
!= 0
3664 || (!htab
->use_blx
&& arm_plt
->maybe_thumb_refcount
!= 0)));
3667 /* Return a pointer to the head of the dynamic reloc list that should
3668 be used for local symbol ISYM, which is symbol number R_SYMNDX in
3669 ABFD's symbol table. Return null if an error occurs. */
3671 static struct elf_dyn_relocs
**
3672 elf32_arm_get_local_dynreloc_list (bfd
*abfd
, unsigned long r_symndx
,
3673 Elf_Internal_Sym
*isym
)
3675 if (ELF32_ST_TYPE (isym
->st_info
) == STT_GNU_IFUNC
)
3677 struct arm_local_iplt_info
*local_iplt
;
3679 local_iplt
= elf32_arm_create_local_iplt (abfd
, r_symndx
);
3680 if (local_iplt
== NULL
)
3682 return &local_iplt
->dyn_relocs
;
3686 /* Track dynamic relocs needed for local syms too.
3687 We really need local syms available to do this
3692 s
= bfd_section_from_elf_index (abfd
, isym
->st_shndx
);
3696 vpp
= &elf_section_data (s
)->local_dynrel
;
3697 return (struct elf_dyn_relocs
**) vpp
;
3701 /* Initialize an entry in the stub hash table. */
3703 static struct bfd_hash_entry
*
3704 stub_hash_newfunc (struct bfd_hash_entry
*entry
,
3705 struct bfd_hash_table
*table
,
3708 /* Allocate the structure if it has not already been allocated by a
3712 entry
= (struct bfd_hash_entry
*)
3713 bfd_hash_allocate (table
, sizeof (struct elf32_arm_stub_hash_entry
));
3718 /* Call the allocation method of the superclass. */
3719 entry
= bfd_hash_newfunc (entry
, table
, string
);
3722 struct elf32_arm_stub_hash_entry
*eh
;
3724 /* Initialize the local fields. */
3725 eh
= (struct elf32_arm_stub_hash_entry
*) entry
;
3726 eh
->stub_sec
= NULL
;
3727 eh
->stub_offset
= (bfd_vma
) -1;
3728 eh
->source_value
= 0;
3729 eh
->target_value
= 0;
3730 eh
->target_section
= NULL
;
3732 eh
->stub_type
= arm_stub_none
;
3734 eh
->stub_template
= NULL
;
3735 eh
->stub_template_size
= -1;
3738 eh
->output_name
= NULL
;
3744 /* Create .got, .gotplt, and .rel(a).got sections in DYNOBJ, and set up
3745 shortcuts to them in our hash table. */
3748 create_got_section (bfd
*dynobj
, struct bfd_link_info
*info
)
3750 struct elf32_arm_link_hash_table
*htab
;
3752 htab
= elf32_arm_hash_table (info
);
3756 /* BPABI objects never have a GOT, or associated sections. */
3757 if (htab
->symbian_p
)
3760 if (! _bfd_elf_create_got_section (dynobj
, info
))
3763 /* Also create .rofixup. */
3766 htab
->srofixup
= bfd_make_section_with_flags (dynobj
, ".rofixup",
3767 (SEC_ALLOC
| SEC_LOAD
| SEC_HAS_CONTENTS
3768 | SEC_IN_MEMORY
| SEC_LINKER_CREATED
| SEC_READONLY
));
3769 if (htab
->srofixup
== NULL
|| ! bfd_set_section_alignment (dynobj
, htab
->srofixup
, 2))
3776 /* Create the .iplt, .rel(a).iplt and .igot.plt sections. */
3779 create_ifunc_sections (struct bfd_link_info
*info
)
3781 struct elf32_arm_link_hash_table
*htab
;
3782 const struct elf_backend_data
*bed
;
3787 htab
= elf32_arm_hash_table (info
);
3788 dynobj
= htab
->root
.dynobj
;
3789 bed
= get_elf_backend_data (dynobj
);
3790 flags
= bed
->dynamic_sec_flags
;
3792 if (htab
->root
.iplt
== NULL
)
3794 s
= bfd_make_section_anyway_with_flags (dynobj
, ".iplt",
3795 flags
| SEC_READONLY
| SEC_CODE
);
3797 || !bfd_set_section_alignment (dynobj
, s
, bed
->plt_alignment
))
3799 htab
->root
.iplt
= s
;
3802 if (htab
->root
.irelplt
== NULL
)
3804 s
= bfd_make_section_anyway_with_flags (dynobj
,
3805 RELOC_SECTION (htab
, ".iplt"),
3806 flags
| SEC_READONLY
);
3808 || !bfd_set_section_alignment (dynobj
, s
, bed
->s
->log_file_align
))
3810 htab
->root
.irelplt
= s
;
3813 if (htab
->root
.igotplt
== NULL
)
3815 s
= bfd_make_section_anyway_with_flags (dynobj
, ".igot.plt", flags
);
3817 || !bfd_set_section_alignment (dynobj
, s
, bed
->s
->log_file_align
))
3819 htab
->root
.igotplt
= s
;
3824 /* Determine if we're dealing with a Thumb only architecture. */
3827 using_thumb_only (struct elf32_arm_link_hash_table
*globals
)
3830 int profile
= bfd_elf_get_obj_attr_int (globals
->obfd
, OBJ_ATTR_PROC
,
3831 Tag_CPU_arch_profile
);
3834 return profile
== 'M';
3836 arch
= bfd_elf_get_obj_attr_int (globals
->obfd
, OBJ_ATTR_PROC
, Tag_CPU_arch
);
3838 /* Force return logic to be reviewed for each new architecture. */
3839 BFD_ASSERT (arch
<= TAG_CPU_ARCH_V8M_MAIN
);
3841 if (arch
== TAG_CPU_ARCH_V6_M
3842 || arch
== TAG_CPU_ARCH_V6S_M
3843 || arch
== TAG_CPU_ARCH_V7E_M
3844 || arch
== TAG_CPU_ARCH_V8M_BASE
3845 || arch
== TAG_CPU_ARCH_V8M_MAIN
)
3851 /* Determine if we're dealing with a Thumb-2 object. */
3854 using_thumb2 (struct elf32_arm_link_hash_table
*globals
)
3857 int thumb_isa
= bfd_elf_get_obj_attr_int (globals
->obfd
, OBJ_ATTR_PROC
,
3861 return thumb_isa
== 2;
3863 arch
= bfd_elf_get_obj_attr_int (globals
->obfd
, OBJ_ATTR_PROC
, Tag_CPU_arch
);
3865 /* Force return logic to be reviewed for each new architecture. */
3866 BFD_ASSERT (arch
<= TAG_CPU_ARCH_V8M_MAIN
);
3868 return (arch
== TAG_CPU_ARCH_V6T2
3869 || arch
== TAG_CPU_ARCH_V7
3870 || arch
== TAG_CPU_ARCH_V7E_M
3871 || arch
== TAG_CPU_ARCH_V8
3872 || arch
== TAG_CPU_ARCH_V8R
3873 || arch
== TAG_CPU_ARCH_V8M_MAIN
);
3876 /* Determine whether Thumb-2 BL instruction is available. */
3879 using_thumb2_bl (struct elf32_arm_link_hash_table
*globals
)
3882 bfd_elf_get_obj_attr_int (globals
->obfd
, OBJ_ATTR_PROC
, Tag_CPU_arch
);
3884 /* Force return logic to be reviewed for each new architecture. */
3885 BFD_ASSERT (arch
<= TAG_CPU_ARCH_V8M_MAIN
);
3887 /* Architecture was introduced after ARMv6T2 (eg. ARMv6-M). */
3888 return (arch
== TAG_CPU_ARCH_V6T2
3889 || arch
>= TAG_CPU_ARCH_V7
);
3892 /* Create .plt, .rel(a).plt, .got, .got.plt, .rel(a).got, .dynbss, and
3893 .rel(a).bss sections in DYNOBJ, and set up shortcuts to them in our
3897 elf32_arm_create_dynamic_sections (bfd
*dynobj
, struct bfd_link_info
*info
)
3899 struct elf32_arm_link_hash_table
*htab
;
3901 htab
= elf32_arm_hash_table (info
);
3905 if (!htab
->root
.sgot
&& !create_got_section (dynobj
, info
))
3908 if (!_bfd_elf_create_dynamic_sections (dynobj
, info
))
3911 if (htab
->vxworks_p
)
3913 if (!elf_vxworks_create_dynamic_sections (dynobj
, info
, &htab
->srelplt2
))
3916 if (bfd_link_pic (info
))
3918 htab
->plt_header_size
= 0;
3919 htab
->plt_entry_size
3920 = 4 * ARRAY_SIZE (elf32_arm_vxworks_shared_plt_entry
);
3924 htab
->plt_header_size
3925 = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt0_entry
);
3926 htab
->plt_entry_size
3927 = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt_entry
);
3930 if (elf_elfheader (dynobj
))
3931 elf_elfheader (dynobj
)->e_ident
[EI_CLASS
] = ELFCLASS32
;
3936 Test for thumb only architectures. Note - we cannot just call
3937 using_thumb_only() as the attributes in the output bfd have not been
3938 initialised at this point, so instead we use the input bfd. */
3939 bfd
* saved_obfd
= htab
->obfd
;
3941 htab
->obfd
= dynobj
;
3942 if (using_thumb_only (htab
))
3944 htab
->plt_header_size
= 4 * ARRAY_SIZE (elf32_thumb2_plt0_entry
);
3945 htab
->plt_entry_size
= 4 * ARRAY_SIZE (elf32_thumb2_plt_entry
);
3947 htab
->obfd
= saved_obfd
;
3950 if (htab
->fdpic_p
) {
3951 htab
->plt_header_size
= 0;
3952 if (info
->flags
& DF_BIND_NOW
)
3953 htab
->plt_entry_size
= 4 * (ARRAY_SIZE(elf32_arm_fdpic_plt_entry
) - 5);
3955 htab
->plt_entry_size
= 4 * ARRAY_SIZE(elf32_arm_fdpic_plt_entry
);
3958 if (!htab
->root
.splt
3959 || !htab
->root
.srelplt
3960 || !htab
->root
.sdynbss
3961 || (!bfd_link_pic (info
) && !htab
->root
.srelbss
))
3967 /* Copy the extra info we tack onto an elf_link_hash_entry. */
3970 elf32_arm_copy_indirect_symbol (struct bfd_link_info
*info
,
3971 struct elf_link_hash_entry
*dir
,
3972 struct elf_link_hash_entry
*ind
)
3974 struct elf32_arm_link_hash_entry
*edir
, *eind
;
3976 edir
= (struct elf32_arm_link_hash_entry
*) dir
;
3977 eind
= (struct elf32_arm_link_hash_entry
*) ind
;
3979 if (eind
->dyn_relocs
!= NULL
)
3981 if (edir
->dyn_relocs
!= NULL
)
3983 struct elf_dyn_relocs
**pp
;
3984 struct elf_dyn_relocs
*p
;
3986 /* Add reloc counts against the indirect sym to the direct sym
3987 list. Merge any entries against the same section. */
3988 for (pp
= &eind
->dyn_relocs
; (p
= *pp
) != NULL
; )
3990 struct elf_dyn_relocs
*q
;
3992 for (q
= edir
->dyn_relocs
; q
!= NULL
; q
= q
->next
)
3993 if (q
->sec
== p
->sec
)
3995 q
->pc_count
+= p
->pc_count
;
3996 q
->count
+= p
->count
;
4003 *pp
= edir
->dyn_relocs
;
4006 edir
->dyn_relocs
= eind
->dyn_relocs
;
4007 eind
->dyn_relocs
= NULL
;
4010 if (ind
->root
.type
== bfd_link_hash_indirect
)
4012 /* Copy over PLT info. */
4013 edir
->plt
.thumb_refcount
+= eind
->plt
.thumb_refcount
;
4014 eind
->plt
.thumb_refcount
= 0;
4015 edir
->plt
.maybe_thumb_refcount
+= eind
->plt
.maybe_thumb_refcount
;
4016 eind
->plt
.maybe_thumb_refcount
= 0;
4017 edir
->plt
.noncall_refcount
+= eind
->plt
.noncall_refcount
;
4018 eind
->plt
.noncall_refcount
= 0;
4020 /* Copy FDPIC counters. */
4021 edir
->fdpic_cnts
.gotofffuncdesc_cnt
+= eind
->fdpic_cnts
.gotofffuncdesc_cnt
;
4022 edir
->fdpic_cnts
.gotfuncdesc_cnt
+= eind
->fdpic_cnts
.gotfuncdesc_cnt
;
4023 edir
->fdpic_cnts
.funcdesc_cnt
+= eind
->fdpic_cnts
.funcdesc_cnt
;
4025 /* We should only allocate a function to .iplt once the final
4026 symbol information is known. */
4027 BFD_ASSERT (!eind
->is_iplt
);
4029 if (dir
->got
.refcount
<= 0)
4031 edir
->tls_type
= eind
->tls_type
;
4032 eind
->tls_type
= GOT_UNKNOWN
;
4036 _bfd_elf_link_hash_copy_indirect (info
, dir
, ind
);
4039 /* Destroy an ARM elf linker hash table. */
4042 elf32_arm_link_hash_table_free (bfd
*obfd
)
4044 struct elf32_arm_link_hash_table
*ret
4045 = (struct elf32_arm_link_hash_table
*) obfd
->link
.hash
;
4047 bfd_hash_table_free (&ret
->stub_hash_table
);
4048 _bfd_elf_link_hash_table_free (obfd
);
4051 /* Create an ARM elf linker hash table. */
4053 static struct bfd_link_hash_table
*
4054 elf32_arm_link_hash_table_create (bfd
*abfd
)
4056 struct elf32_arm_link_hash_table
*ret
;
4057 bfd_size_type amt
= sizeof (struct elf32_arm_link_hash_table
);
4059 ret
= (struct elf32_arm_link_hash_table
*) bfd_zmalloc (amt
);
4063 if (!_bfd_elf_link_hash_table_init (& ret
->root
, abfd
,
4064 elf32_arm_link_hash_newfunc
,
4065 sizeof (struct elf32_arm_link_hash_entry
),
4072 ret
->vfp11_fix
= BFD_ARM_VFP11_FIX_NONE
;
4073 ret
->stm32l4xx_fix
= BFD_ARM_STM32L4XX_FIX_NONE
;
4074 #ifdef FOUR_WORD_PLT
4075 ret
->plt_header_size
= 16;
4076 ret
->plt_entry_size
= 16;
4078 ret
->plt_header_size
= 20;
4079 ret
->plt_entry_size
= elf32_arm_use_long_plt_entry
? 16 : 12;
4081 ret
->use_rel
= TRUE
;
4085 if (!bfd_hash_table_init (&ret
->stub_hash_table
, stub_hash_newfunc
,
4086 sizeof (struct elf32_arm_stub_hash_entry
)))
4088 _bfd_elf_link_hash_table_free (abfd
);
4091 ret
->root
.root
.hash_table_free
= elf32_arm_link_hash_table_free
;
4093 return &ret
->root
.root
;
4096 /* Determine what kind of NOPs are available. */
4099 arch_has_arm_nop (struct elf32_arm_link_hash_table
*globals
)
4101 const int arch
= bfd_elf_get_obj_attr_int (globals
->obfd
, OBJ_ATTR_PROC
,
4104 /* Force return logic to be reviewed for each new architecture. */
4105 BFD_ASSERT (arch
<= TAG_CPU_ARCH_V8M_MAIN
);
4107 return (arch
== TAG_CPU_ARCH_V6T2
4108 || arch
== TAG_CPU_ARCH_V6K
4109 || arch
== TAG_CPU_ARCH_V7
4110 || arch
== TAG_CPU_ARCH_V8
4111 || arch
== TAG_CPU_ARCH_V8R
);
4115 arm_stub_is_thumb (enum elf32_arm_stub_type stub_type
)
4119 case arm_stub_long_branch_thumb_only
:
4120 case arm_stub_long_branch_thumb2_only
:
4121 case arm_stub_long_branch_thumb2_only_pure
:
4122 case arm_stub_long_branch_v4t_thumb_arm
:
4123 case arm_stub_short_branch_v4t_thumb_arm
:
4124 case arm_stub_long_branch_v4t_thumb_arm_pic
:
4125 case arm_stub_long_branch_v4t_thumb_tls_pic
:
4126 case arm_stub_long_branch_thumb_only_pic
:
4127 case arm_stub_cmse_branch_thumb_only
:
4138 /* Determine the type of stub needed, if any, for a call. */
4140 static enum elf32_arm_stub_type
4141 arm_type_of_stub (struct bfd_link_info
*info
,
4142 asection
*input_sec
,
4143 const Elf_Internal_Rela
*rel
,
4144 unsigned char st_type
,
4145 enum arm_st_branch_type
*actual_branch_type
,
4146 struct elf32_arm_link_hash_entry
*hash
,
4147 bfd_vma destination
,
4153 bfd_signed_vma branch_offset
;
4154 unsigned int r_type
;
4155 struct elf32_arm_link_hash_table
* globals
;
4156 bfd_boolean thumb2
, thumb2_bl
, thumb_only
;
4157 enum elf32_arm_stub_type stub_type
= arm_stub_none
;
4159 enum arm_st_branch_type branch_type
= *actual_branch_type
;
4160 union gotplt_union
*root_plt
;
4161 struct arm_plt_info
*arm_plt
;
4165 if (branch_type
== ST_BRANCH_LONG
)
4168 globals
= elf32_arm_hash_table (info
);
4169 if (globals
== NULL
)
4172 thumb_only
= using_thumb_only (globals
);
4173 thumb2
= using_thumb2 (globals
);
4174 thumb2_bl
= using_thumb2_bl (globals
);
4176 arch
= bfd_elf_get_obj_attr_int (globals
->obfd
, OBJ_ATTR_PROC
, Tag_CPU_arch
);
4178 /* True for architectures that implement the thumb2 movw instruction. */
4179 thumb2_movw
= thumb2
|| (arch
== TAG_CPU_ARCH_V8M_BASE
);
4181 /* Determine where the call point is. */
4182 location
= (input_sec
->output_offset
4183 + input_sec
->output_section
->vma
4186 r_type
= ELF32_R_TYPE (rel
->r_info
);
4188 /* ST_BRANCH_TO_ARM is nonsense to thumb-only targets when we
4189 are considering a function call relocation. */
4190 if (thumb_only
&& (r_type
== R_ARM_THM_CALL
|| r_type
== R_ARM_THM_JUMP24
4191 || r_type
== R_ARM_THM_JUMP19
)
4192 && branch_type
== ST_BRANCH_TO_ARM
)
4193 branch_type
= ST_BRANCH_TO_THUMB
;
4195 /* For TLS call relocs, it is the caller's responsibility to provide
4196 the address of the appropriate trampoline. */
4197 if (r_type
!= R_ARM_TLS_CALL
4198 && r_type
!= R_ARM_THM_TLS_CALL
4199 && elf32_arm_get_plt_info (input_bfd
, globals
, hash
,
4200 ELF32_R_SYM (rel
->r_info
), &root_plt
,
4202 && root_plt
->offset
!= (bfd_vma
) -1)
4206 if (hash
== NULL
|| hash
->is_iplt
)
4207 splt
= globals
->root
.iplt
;
4209 splt
= globals
->root
.splt
;
4214 /* Note when dealing with PLT entries: the main PLT stub is in
4215 ARM mode, so if the branch is in Thumb mode, another
4216 Thumb->ARM stub will be inserted later just before the ARM
4217 PLT stub. If a long branch stub is needed, we'll add a
4218 Thumb->Arm one and branch directly to the ARM PLT entry.
4219 Here, we have to check if a pre-PLT Thumb->ARM stub
4220 is needed and if it will be close enough. */
4222 destination
= (splt
->output_section
->vma
4223 + splt
->output_offset
4224 + root_plt
->offset
);
4227 /* Thumb branch/call to PLT: it can become a branch to ARM
4228 or to Thumb. We must perform the same checks and
4229 corrections as in elf32_arm_final_link_relocate. */
4230 if ((r_type
== R_ARM_THM_CALL
)
4231 || (r_type
== R_ARM_THM_JUMP24
))
4233 if (globals
->use_blx
4234 && r_type
== R_ARM_THM_CALL
4237 /* If the Thumb BLX instruction is available, convert
4238 the BL to a BLX instruction to call the ARM-mode
4240 branch_type
= ST_BRANCH_TO_ARM
;
4245 /* Target the Thumb stub before the ARM PLT entry. */
4246 destination
-= PLT_THUMB_STUB_SIZE
;
4247 branch_type
= ST_BRANCH_TO_THUMB
;
4252 branch_type
= ST_BRANCH_TO_ARM
;
4256 /* Calls to STT_GNU_IFUNC symbols should go through a PLT. */
4257 BFD_ASSERT (st_type
!= STT_GNU_IFUNC
);
4259 branch_offset
= (bfd_signed_vma
)(destination
- location
);
4261 if (r_type
== R_ARM_THM_CALL
|| r_type
== R_ARM_THM_JUMP24
4262 || r_type
== R_ARM_THM_TLS_CALL
|| r_type
== R_ARM_THM_JUMP19
)
4264 /* Handle cases where:
4265 - this call goes too far (different Thumb/Thumb2 max
4267 - it's a Thumb->Arm call and blx is not available, or it's a
4268 Thumb->Arm branch (not bl). A stub is needed in this case,
4269 but only if this call is not through a PLT entry. Indeed,
4270 PLT stubs handle mode switching already. */
4272 && (branch_offset
> THM_MAX_FWD_BRANCH_OFFSET
4273 || (branch_offset
< THM_MAX_BWD_BRANCH_OFFSET
)))
4275 && (branch_offset
> THM2_MAX_FWD_BRANCH_OFFSET
4276 || (branch_offset
< THM2_MAX_BWD_BRANCH_OFFSET
)))
4278 && (branch_offset
> THM2_MAX_FWD_COND_BRANCH_OFFSET
4279 || (branch_offset
< THM2_MAX_BWD_COND_BRANCH_OFFSET
))
4280 && (r_type
== R_ARM_THM_JUMP19
))
4281 || (branch_type
== ST_BRANCH_TO_ARM
4282 && (((r_type
== R_ARM_THM_CALL
4283 || r_type
== R_ARM_THM_TLS_CALL
) && !globals
->use_blx
)
4284 || (r_type
== R_ARM_THM_JUMP24
)
4285 || (r_type
== R_ARM_THM_JUMP19
))
4288 /* If we need to insert a Thumb-Thumb long branch stub to a
4289 PLT, use one that branches directly to the ARM PLT
4290 stub. If we pretended we'd use the pre-PLT Thumb->ARM
4291 stub, undo this now. */
4292 if ((branch_type
== ST_BRANCH_TO_THUMB
) && use_plt
&& !thumb_only
)
4294 branch_type
= ST_BRANCH_TO_ARM
;
4295 branch_offset
+= PLT_THUMB_STUB_SIZE
;
4298 if (branch_type
== ST_BRANCH_TO_THUMB
)
4300 /* Thumb to thumb. */
4303 if (input_sec
->flags
& SEC_ELF_PURECODE
)
4305 (_("%pB(%pA): warning: long branch veneers used in"
4306 " section with SHF_ARM_PURECODE section"
4307 " attribute is only supported for M-profile"
4308 " targets that implement the movw instruction"),
4309 input_bfd
, input_sec
);
4311 stub_type
= (bfd_link_pic (info
) | globals
->pic_veneer
)
4313 ? ((globals
->use_blx
4314 && (r_type
== R_ARM_THM_CALL
))
4315 /* V5T and above. Stub starts with ARM code, so
4316 we must be able to switch mode before
4317 reaching it, which is only possible for 'bl'
4318 (ie R_ARM_THM_CALL relocation). */
4319 ? arm_stub_long_branch_any_thumb_pic
4320 /* On V4T, use Thumb code only. */
4321 : arm_stub_long_branch_v4t_thumb_thumb_pic
)
4323 /* non-PIC stubs. */
4324 : ((globals
->use_blx
4325 && (r_type
== R_ARM_THM_CALL
))
4326 /* V5T and above. */
4327 ? arm_stub_long_branch_any_any
4329 : arm_stub_long_branch_v4t_thumb_thumb
);
4333 if (thumb2_movw
&& (input_sec
->flags
& SEC_ELF_PURECODE
))
4334 stub_type
= arm_stub_long_branch_thumb2_only_pure
;
4337 if (input_sec
->flags
& SEC_ELF_PURECODE
)
4339 (_("%pB(%pA): warning: long branch veneers used in"
4340 " section with SHF_ARM_PURECODE section"
4341 " attribute is only supported for M-profile"
4342 " targets that implement the movw instruction"),
4343 input_bfd
, input_sec
);
4345 stub_type
= (bfd_link_pic (info
) | globals
->pic_veneer
)
4347 ? arm_stub_long_branch_thumb_only_pic
4349 : (thumb2
? arm_stub_long_branch_thumb2_only
4350 : arm_stub_long_branch_thumb_only
);
4356 if (input_sec
->flags
& SEC_ELF_PURECODE
)
4358 (_("%pB(%pA): warning: long branch veneers used in"
4359 " section with SHF_ARM_PURECODE section"
4360 " attribute is only supported" " for M-profile"
4361 " targets that implement the movw instruction"),
4362 input_bfd
, input_sec
);
4366 && sym_sec
->owner
!= NULL
4367 && !INTERWORK_FLAG (sym_sec
->owner
))
4370 (_("%pB(%s): warning: interworking not enabled;"
4371 " first occurrence: %pB: %s call to %s"),
4372 sym_sec
->owner
, name
, input_bfd
, "Thumb", "ARM");
4376 (bfd_link_pic (info
) | globals
->pic_veneer
)
4378 ? (r_type
== R_ARM_THM_TLS_CALL
4379 /* TLS PIC stubs. */
4380 ? (globals
->use_blx
? arm_stub_long_branch_any_tls_pic
4381 : arm_stub_long_branch_v4t_thumb_tls_pic
)
4382 : ((globals
->use_blx
&& r_type
== R_ARM_THM_CALL
)
4383 /* V5T PIC and above. */
4384 ? arm_stub_long_branch_any_arm_pic
4386 : arm_stub_long_branch_v4t_thumb_arm_pic
))
4388 /* non-PIC stubs. */
4389 : ((globals
->use_blx
&& r_type
== R_ARM_THM_CALL
)
4390 /* V5T and above. */
4391 ? arm_stub_long_branch_any_any
4393 : arm_stub_long_branch_v4t_thumb_arm
);
4395 /* Handle v4t short branches. */
4396 if ((stub_type
== arm_stub_long_branch_v4t_thumb_arm
)
4397 && (branch_offset
<= THM_MAX_FWD_BRANCH_OFFSET
)
4398 && (branch_offset
>= THM_MAX_BWD_BRANCH_OFFSET
))
4399 stub_type
= arm_stub_short_branch_v4t_thumb_arm
;
4403 else if (r_type
== R_ARM_CALL
4404 || r_type
== R_ARM_JUMP24
4405 || r_type
== R_ARM_PLT32
4406 || r_type
== R_ARM_TLS_CALL
)
4408 if (input_sec
->flags
& SEC_ELF_PURECODE
)
4410 (_("%pB(%pA): warning: long branch veneers used in"
4411 " section with SHF_ARM_PURECODE section"
4412 " attribute is only supported for M-profile"
4413 " targets that implement the movw instruction"),
4414 input_bfd
, input_sec
);
4415 if (branch_type
== ST_BRANCH_TO_THUMB
)
4420 && sym_sec
->owner
!= NULL
4421 && !INTERWORK_FLAG (sym_sec
->owner
))
4424 (_("%pB(%s): warning: interworking not enabled;"
4425 " first occurrence: %pB: %s call to %s"),
4426 sym_sec
->owner
, name
, input_bfd
, "ARM", "Thumb");
4429 /* We have an extra 2-bytes reach because of
4430 the mode change (bit 24 (H) of BLX encoding). */
4431 if (branch_offset
> (ARM_MAX_FWD_BRANCH_OFFSET
+ 2)
4432 || (branch_offset
< ARM_MAX_BWD_BRANCH_OFFSET
)
4433 || (r_type
== R_ARM_CALL
&& !globals
->use_blx
)
4434 || (r_type
== R_ARM_JUMP24
)
4435 || (r_type
== R_ARM_PLT32
))
4437 stub_type
= (bfd_link_pic (info
) | globals
->pic_veneer
)
4439 ? ((globals
->use_blx
)
4440 /* V5T and above. */
4441 ? arm_stub_long_branch_any_thumb_pic
4443 : arm_stub_long_branch_v4t_arm_thumb_pic
)
4445 /* non-PIC stubs. */
4446 : ((globals
->use_blx
)
4447 /* V5T and above. */
4448 ? arm_stub_long_branch_any_any
4450 : arm_stub_long_branch_v4t_arm_thumb
);
4456 if (branch_offset
> ARM_MAX_FWD_BRANCH_OFFSET
4457 || (branch_offset
< ARM_MAX_BWD_BRANCH_OFFSET
))
4460 (bfd_link_pic (info
) | globals
->pic_veneer
)
4462 ? (r_type
== R_ARM_TLS_CALL
4464 ? arm_stub_long_branch_any_tls_pic
4466 ? arm_stub_long_branch_arm_nacl_pic
4467 : arm_stub_long_branch_any_arm_pic
))
4468 /* non-PIC stubs. */
4470 ? arm_stub_long_branch_arm_nacl
4471 : arm_stub_long_branch_any_any
);
4476 /* If a stub is needed, record the actual destination type. */
4477 if (stub_type
!= arm_stub_none
)
4478 *actual_branch_type
= branch_type
;
/* Build a name for an entry in the stub hash table.  The name uniquely
   identifies the stub: it encodes the id of the section containing the
   branch, the target (the symbol name for global symbols, or section id,
   symbol index and addend for local ones) and the stub type.  Returns a
   bfd_malloc'd string the caller owns, or NULL on allocation failure.  */

static char *
elf32_arm_stub_name (const asection *input_section,
		     const asection *sym_sec,
		     const struct elf32_arm_link_hash_entry *hash,
		     const Elf_Internal_Rela *rel,
		     enum elf32_arm_stub_type stub_type)
{
  char *stub_name;
  bfd_size_type len;

  if (hash)
    {
      /* Global symbol: key on section id, symbol name, addend and type.
	 Length budget: 8 hex digits, '_', name, '+', 8 hex digits, '_',
	 up to 2 digits of stub type, NUL.  */
      len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 8 + 1 + 2 + 1;
      stub_name = (char *) bfd_malloc (len);
      if (stub_name != NULL)
	sprintf (stub_name, "%08x_%s+%x_%d",
		 input_section->id & 0xffffffff,
		 hash->root.root.root.string,
		 (int) rel->r_addend & 0xffffffff,
		 stub_type);
    }
  else
    {
      /* Local symbol: key on section ids, symbol index, addend and type.  */
      len = 8 + 1 + 8 + 1 + 8 + 1 + 8 + 1 + 2 + 1;
      stub_name = (char *) bfd_malloc (len);
      if (stub_name != NULL)
	sprintf (stub_name, "%08x_%x:%x+%x_%d",
		 input_section->id & 0xffffffff,
		 sym_sec->id & 0xffffffff,
		 /* TLS call stubs are shared per-section, so use a zero
		    symbol index for them.  */
		 ELF32_R_TYPE (rel->r_info) == R_ARM_TLS_CALL
		 || ELF32_R_TYPE (rel->r_info) == R_ARM_THM_TLS_CALL
		 ? 0 : (int) ELF32_R_SYM (rel->r_info) & 0xffffffff,
		 (int) rel->r_addend & 0xffffffff,
		 stub_type);
    }

  return stub_name;
}
/* Look up an entry in the stub hash.  Stub entries are cached because
   creating the stub name takes a bit of time.  Returns NULL for
   non-code sections, on name allocation failure, or when no stub with
   that name exists in the table.  */

static struct elf32_arm_stub_hash_entry *
elf32_arm_get_stub_entry (const asection *input_section,
			  const asection *sym_sec,
			  struct elf_link_hash_entry *hash,
			  const Elf_Internal_Rela *rel,
			  struct elf32_arm_link_hash_table *htab,
			  enum elf32_arm_stub_type stub_type)
{
  struct elf32_arm_stub_hash_entry *stub_entry;
  struct elf32_arm_link_hash_entry *h = (struct elf32_arm_link_hash_entry *) hash;
  const asection *id_sec;

  /* Only code sections can contain branches needing stubs.  */
  if ((input_section->flags & SEC_CODE) == 0)
    return NULL;

  /* If this input section is part of a group of sections sharing one
     stub section, then use the id of the first section in the group.
     Stub names need to include a section id, as there may well be
     more than one stub used to reach say, printf, and we need to
     distinguish between them.  */
  BFD_ASSERT (input_section->id <= htab->top_id);
  id_sec = htab->stub_group[input_section->id].link_sec;

  /* Fast path: reuse the per-symbol single-entry cache when it matches
     the requested group and stub type.  */
  if (h != NULL && h->stub_cache != NULL
      && h->stub_cache->h == h
      && h->stub_cache->id_sec == id_sec
      && h->stub_cache->stub_type == stub_type)
    {
      stub_entry = h->stub_cache;
    }
  else
    {
      char *stub_name;

      stub_name = elf32_arm_stub_name (id_sec, sym_sec, h, rel, stub_type);
      if (stub_name == NULL)
	return NULL;

      stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table,
					 stub_name, FALSE, FALSE);
      if (h != NULL)
	h->stub_cache = stub_entry;

      free (stub_name);
    }

  return stub_entry;
}
/* Whether veneers of type STUB_TYPE require to be in a dedicated output
   section.  Only Secure Gateway (CMSE) veneers do; all other stub kinds
   are interspersed with the input sections they serve.  */

static bfd_boolean
arm_dedicated_stub_output_section_required (enum elf32_arm_stub_type stub_type)
{
  if (stub_type >= max_stub_type)
    abort ();  /* Should be unreachable.  */

  switch (stub_type)
    {
    case arm_stub_cmse_branch_thumb_only:
      return TRUE;

    default:
      return FALSE;
    }

  abort ();  /* Should be unreachable.  */
}
/* Required alignment (as a power of 2) for the dedicated section holding
   veneers of type STUB_TYPE, or 0 if veneers of this type are interspersed
   with input sections.  */

static int
arm_dedicated_stub_output_section_required_alignment
  (enum elf32_arm_stub_type stub_type)
{
  if (stub_type >= max_stub_type)
    abort ();  /* Should be unreachable.  */

  switch (stub_type)
    {
    /* Vectors of Secure Gateway veneers must be aligned on 32byte
       boundary.  */
    case arm_stub_cmse_branch_thumb_only:
      return 5;

    default:
      BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
      return 0;
    }

  abort ();  /* Should be unreachable.  */
}
/* Name of the dedicated output section to put veneers of type STUB_TYPE, or
   NULL if veneers of this type are interspersed with input sections.  */

static const char *
arm_dedicated_stub_output_section_name (enum elf32_arm_stub_type stub_type)
{
  if (stub_type >= max_stub_type)
    abort ();  /* Should be unreachable.  */

  switch (stub_type)
    {
    case arm_stub_cmse_branch_thumb_only:
      return ".gnu.sgstubs";

    default:
      BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
      return NULL;
    }

  abort ();  /* Should be unreachable.  */
}
/* If veneers of type STUB_TYPE should go in a dedicated output section,
   returns the address of the hash table field in HTAB holding a pointer to the
   corresponding input section.  Otherwise, returns NULL.  */

static asection **
arm_dedicated_stub_input_section_ptr (struct elf32_arm_link_hash_table *htab,
				      enum elf32_arm_stub_type stub_type)
{
  if (stub_type >= max_stub_type)
    abort ();  /* Should be unreachable.  */

  switch (stub_type)
    {
    case arm_stub_cmse_branch_thumb_only:
      return &htab->cmse_stub_sec;

    default:
      BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
      return NULL;
    }

  abort ();  /* Should be unreachable.  */
}
/* Find or create a stub section to contain a stub of type STUB_TYPE.  SECTION
   is the section that branch into veneer and can be NULL if stub should go in
   a dedicated output section.  Returns a pointer to the stub section, and the
   section to which the stub section will be attached (in *LINK_SEC_P).
   LINK_SEC_P may be NULL.  */

static asection *
elf32_arm_create_or_find_stub_sec (asection **link_sec_p, asection *section,
				   struct elf32_arm_link_hash_table *htab,
				   enum elf32_arm_stub_type stub_type)
{
  asection *link_sec, *out_sec, **stub_sec_p;
  const char *stub_sec_prefix;
  bfd_boolean dedicated_output_section =
    arm_dedicated_stub_output_section_required (stub_type);
  int align;

  if (dedicated_output_section)
    {
      bfd *output_bfd = htab->obfd;
      const char *out_sec_name =
	arm_dedicated_stub_output_section_name (stub_type);

      /* No grouping applies to dedicated veneer sections.  */
      link_sec = NULL;
      stub_sec_p = arm_dedicated_stub_input_section_ptr (htab, stub_type);
      stub_sec_prefix = out_sec_name;
      align = arm_dedicated_stub_output_section_required_alignment (stub_type);
      out_sec = bfd_get_section_by_name (output_bfd, out_sec_name);
      if (out_sec == NULL)
	{
	  /* The linker script must provide the dedicated output section;
	     we cannot invent its address here.  */
	  _bfd_error_handler (_("no address assigned to the veneers output "
				"section %s"), out_sec_name);
	  return NULL;
	}
    }
  else
    {
      /* Interspersed stubs: attach to the stub group's head section.  */
      BFD_ASSERT (section->id <= htab->top_id);
      link_sec = htab->stub_group[section->id].link_sec;
      BFD_ASSERT (link_sec != NULL);
      stub_sec_p = &htab->stub_group[section->id].stub_sec;
      if (*stub_sec_p == NULL)
	stub_sec_p = &htab->stub_group[link_sec->id].stub_sec;
      stub_sec_prefix = link_sec->name;
      out_sec = link_sec->output_section;
      /* NaCl requires 16-byte (2^4) bundle alignment for stubs.  */
      align = htab->nacl_p ? 4 : 3;
    }

  if (*stub_sec_p == NULL)
    {
      size_t namelen;
      bfd_size_type len;
      char *s_name;

      /* Build the stub section name: "<prefix>" STUB_SUFFIX.  */
      namelen = strlen (stub_sec_prefix);
      len = namelen + sizeof (STUB_SUFFIX);
      s_name = (char *) bfd_alloc (htab->stub_bfd, len);
      if (!s_name)
	return NULL;

      memcpy (s_name, stub_sec_prefix, namelen);
      memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
      *stub_sec_p = (*htab->add_stub_section) (s_name, out_sec, link_sec,
					       align);
      if (*stub_sec_p == NULL)
	return NULL;

      out_sec->flags |= SEC_ALLOC | SEC_LOAD | SEC_READONLY | SEC_CODE
			| SEC_HAS_CONTENTS | SEC_RELOC | SEC_IN_MEMORY
			| SEC_KEEP;
    }

  /* Remember the stub section for the branching section too, so later
     lookups for the same group find it directly.  */
  if (!dedicated_output_section)
    htab->stub_group[section->id].stub_sec = *stub_sec_p;

  if (link_sec_p)
    *link_sec_p = link_sec;

  return *stub_sec_p;
}
/* Add a new stub entry to the stub hash.  Not all fields of the new
   stub entry are initialised.  Returns NULL if the stub section could
   not be created or the hash table entry could not be allocated.  */

static struct elf32_arm_stub_hash_entry *
elf32_arm_add_stub (const char *stub_name, asection *section,
		    struct elf32_arm_link_hash_table *htab,
		    enum elf32_arm_stub_type stub_type)
{
  asection *link_sec;
  asection *stub_sec;
  struct elf32_arm_stub_hash_entry *stub_entry;

  stub_sec = elf32_arm_create_or_find_stub_sec (&link_sec, section, htab,
						stub_type);
  if (stub_sec == NULL)
    return NULL;

  /* Enter this entry into the linker stub hash table.  */
  stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
				     TRUE, FALSE);
  if (stub_entry == NULL)
    {
      /* SECTION may be NULL for dedicated-output-section stubs; fall back
	 to the stub section itself for the diagnostic.  */
      if (section == NULL)
	section = stub_sec;
      _bfd_error_handler (_("%pB: cannot create stub entry %s"),
			  section->owner, stub_name);
      return NULL;
    }

  stub_entry->stub_sec = stub_sec;
  /* -1 means "offset not yet assigned"; arm_build_one_stub allocates it.  */
  stub_entry->stub_offset = (bfd_vma) -1;
  stub_entry->id_sec = link_sec;

  return stub_entry;
}
4785 /* Store an Arm insn into an output section not processed by
4786 elf32_arm_write_section. */
4789 put_arm_insn (struct elf32_arm_link_hash_table
* htab
,
4790 bfd
* output_bfd
, bfd_vma val
, void * ptr
)
4792 if (htab
->byteswap_code
!= bfd_little_endian (output_bfd
))
4793 bfd_putl32 (val
, ptr
);
4795 bfd_putb32 (val
, ptr
);
4798 /* Store a 16-bit Thumb insn into an output section not processed by
4799 elf32_arm_write_section. */
4802 put_thumb_insn (struct elf32_arm_link_hash_table
* htab
,
4803 bfd
* output_bfd
, bfd_vma val
, void * ptr
)
4805 if (htab
->byteswap_code
!= bfd_little_endian (output_bfd
))
4806 bfd_putl16 (val
, ptr
);
4808 bfd_putb16 (val
, ptr
);
4811 /* Store a Thumb2 insn into an output section not processed by
4812 elf32_arm_write_section. */
4815 put_thumb2_insn (struct elf32_arm_link_hash_table
* htab
,
4816 bfd
* output_bfd
, bfd_vma val
, bfd_byte
* ptr
)
4818 /* T2 instructions are 16-bit streamed. */
4819 if (htab
->byteswap_code
!= bfd_little_endian (output_bfd
))
4821 bfd_putl16 ((val
>> 16) & 0xffff, ptr
);
4822 bfd_putl16 ((val
& 0xffff), ptr
+ 2);
4826 bfd_putb16 ((val
>> 16) & 0xffff, ptr
);
4827 bfd_putb16 ((val
& 0xffff), ptr
+ 2);
/* If it's possible to change R_TYPE to a more efficient access
   model, return the new reloc type.  Otherwise R_TYPE is returned
   unchanged.  Relaxation is only done for executable links where the
   symbol is not an undefined weak.  */

static int
elf32_arm_tls_transition (struct bfd_link_info *info, int r_type,
			  struct elf_link_hash_entry *h)
{
  /* No hash entry means the symbol resolves locally.  */
  int is_local = (h == NULL);

  /* PIC output or undefined weak symbols: the access model cannot be
     relaxed, keep the original relocation.  */
  if (bfd_link_pic (info)
      || (h && h->root.type == bfd_link_hash_undefweak))
    return r_type;

  /* We do not support relaxations for Old TLS models.  */
  switch (r_type)
    {
    case R_ARM_TLS_GOTDESC:
    case R_ARM_TLS_CALL:
    case R_ARM_THM_TLS_CALL:
    case R_ARM_TLS_DESCSEQ:
    case R_ARM_THM_TLS_DESCSEQ:
      /* TLS descriptor sequences relax to Local Exec for local symbols
	 and Initial Exec for global ones.  */
      return is_local ? R_ARM_TLS_LE32 : R_ARM_TLS_IE32;
    }

  return r_type;
}
4858 static bfd_reloc_status_type elf32_arm_final_link_relocate
4859 (reloc_howto_type
*, bfd
*, bfd
*, asection
*, bfd_byte
*,
4860 Elf_Internal_Rela
*, bfd_vma
, struct bfd_link_info
*, asection
*,
4861 const char *, unsigned char, enum arm_st_branch_type
,
4862 struct elf_link_hash_entry
*, bfd_boolean
*, char **);
/* Return the byte alignment required by a stub of type STUB_TYPE.
   Cortex-A8 branch veneers (other than BLX) are Thumb code and only
   need 2-byte alignment; ARM-mode stubs need 4; NaCl stubs must sit on
   16-byte bundle boundaries.  */

static unsigned int
arm_stub_required_alignment (enum elf32_arm_stub_type stub_type)
{
  switch (stub_type)
    {
    case arm_stub_a8_veneer_b_cond:
    case arm_stub_a8_veneer_b:
    case arm_stub_a8_veneer_bl:
      return 2;

    case arm_stub_long_branch_any_any:
    case arm_stub_long_branch_v4t_arm_thumb:
    case arm_stub_long_branch_thumb_only:
    case arm_stub_long_branch_thumb2_only:
    case arm_stub_long_branch_thumb2_only_pure:
    case arm_stub_long_branch_v4t_thumb_thumb:
    case arm_stub_long_branch_v4t_thumb_arm:
    case arm_stub_short_branch_v4t_thumb_arm:
    case arm_stub_long_branch_any_arm_pic:
    case arm_stub_long_branch_any_thumb_pic:
    case arm_stub_long_branch_v4t_thumb_thumb_pic:
    case arm_stub_long_branch_v4t_arm_thumb_pic:
    case arm_stub_long_branch_v4t_thumb_arm_pic:
    case arm_stub_long_branch_thumb_only_pic:
    case arm_stub_long_branch_any_tls_pic:
    case arm_stub_long_branch_v4t_thumb_tls_pic:
    case arm_stub_cmse_branch_thumb_only:
    case arm_stub_a8_veneer_blx:
      return 4;

    case arm_stub_long_branch_arm_nacl:
    case arm_stub_long_branch_arm_nacl_pic:
      return 16;

    default:
      abort ();  /* Should be unreachable.  */
    }
}
/* Returns whether stubs of type STUB_TYPE take over the symbol they are
   veneering (TRUE) or have their own symbol (FALSE).  Only Secure Gateway
   veneers claim the symbol.  */

static bfd_boolean
arm_stub_sym_claimed (enum elf32_arm_stub_type stub_type)
{
  if (stub_type >= max_stub_type)
    abort ();  /* Should be unreachable.  */

  switch (stub_type)
    {
    case arm_stub_cmse_branch_thumb_only:
      return TRUE;

    default:
      return FALSE;
    }

  abort ();  /* Should be unreachable.  */
}
/* Returns the padding needed for the dedicated section used stubs of type
   STUB_TYPE.  Secure Gateway veneer sections are padded to a 32-byte
   boundary; other stub types need no padding.  */

static int
arm_dedicated_stub_section_padding (enum elf32_arm_stub_type stub_type)
{
  if (stub_type >= max_stub_type)
    abort ();  /* Should be unreachable.  */

  switch (stub_type)
    {
    case arm_stub_cmse_branch_thumb_only:
      return 32;

    default:
      return 0;
    }

  abort ();  /* Should be unreachable.  */
}
/* If veneers of type STUB_TYPE should go in a dedicated output section,
   returns the address of the hash table field in HTAB holding the offset at
   which new veneers should be layed out in the stub section.  Returns NULL
   for interspersed stub types.  */

static bfd_vma *
arm_new_stubs_start_offset_ptr (struct elf32_arm_link_hash_table *htab,
				enum elf32_arm_stub_type stub_type)
{
  switch (stub_type)
    {
    case arm_stub_cmse_branch_thumb_only:
      return &htab->new_cmse_stub_offset;

    default:
      BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
      return NULL;
    }
}
/* Emit one stub into its stub section and apply the stub's relocations.
   Called via bfd_hash_traverse over the stub hash table; GEN_ENTRY is the
   stub hash entry and IN_ARG the bfd_link_info.  Returns TRUE to keep
   traversing, FALSE on hard failure.  */

static bfd_boolean
arm_build_one_stub (struct bfd_hash_entry *gen_entry,
		    void * in_arg)
{
  bfd_boolean removed_sg_veneer;
  struct elf32_arm_stub_hash_entry *stub_entry;
  struct elf32_arm_link_hash_table *globals;
  struct bfd_link_info *info;
  asection *stub_sec;
  bfd *stub_bfd;
  bfd_byte *loc;
  bfd_vma sym_value;
  int template_size;
  int size;
  const insn_sequence *template_sequence;
  int i;
  int stub_reloc_idx[MAXRELOCS] = {-1, -1};
  int stub_reloc_offset[MAXRELOCS] = {0, 0};
  int nrelocs = 0;
  int just_allocated = 0;

  /* Massage our args to the form they really have.  */
  stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
  info = (struct bfd_link_info *) in_arg;

  globals = elf32_arm_hash_table (info);
  if (globals == NULL)
    return FALSE;

  stub_sec = stub_entry->stub_sec;

  /* The traversal runs twice: once for 4-byte-aligned stubs, once for the
     2-byte-aligned Cortex-A8 ones, ordered by fix_cortex_a8's sign.  */
  if ((globals->fix_cortex_a8 < 0)
      != (arm_stub_required_alignment (stub_entry->stub_type) == 2))
    /* We have to do less-strictly-aligned fixes last.  */
    return TRUE;

  /* Assign a slot at the end of section if none assigned yet.  */
  if (stub_entry->stub_offset == (bfd_vma) -1)
    {
      stub_entry->stub_offset = stub_sec->size;
      just_allocated = 1;
    }
  loc = stub_sec->contents + stub_entry->stub_offset;

  stub_bfd = stub_sec->owner;

  /* This is the address of the stub destination.  */
  sym_value = (stub_entry->target_value
	       + stub_entry->target_section->output_offset
	       + stub_entry->target_section->output_section->vma);

  template_sequence = stub_entry->stub_template;
  template_size = stub_entry->stub_template_size;

  /* Emit each template element, recording which ones carry relocations.  */
  size = 0;
  for (i = 0; i < template_size; i++)
    {
      switch (template_sequence[i].type)
	{
	case THUMB16_TYPE:
	  {
	    bfd_vma data = (bfd_vma) template_sequence[i].data;
	    if (template_sequence[i].reloc_addend != 0)
	      {
		/* We've borrowed the reloc_addend field to mean we should
		   insert a condition code into this (Thumb-1 branch)
		   instruction.  See THUMB16_BCOND_INSN.  */
		BFD_ASSERT ((data & 0xff00) == 0xd000);
		data |= ((stub_entry->orig_insn >> 22) & 0xf) << 8;
	      }
	    bfd_put_16 (stub_bfd, data, loc + size);
	    size += 2;
	  }
	  break;

	case THUMB32_TYPE:
	  bfd_put_16 (stub_bfd,
		      (template_sequence[i].data >> 16) & 0xffff,
		      loc + size);
	  bfd_put_16 (stub_bfd, template_sequence[i].data & 0xffff,
		      loc + size + 2);
	  if (template_sequence[i].r_type != R_ARM_NONE)
	    {
	      stub_reloc_idx[nrelocs] = i;
	      stub_reloc_offset[nrelocs++] = size;
	    }
	  size += 4;
	  break;

	case ARM_TYPE:
	  bfd_put_32 (stub_bfd, template_sequence[i].data,
		      loc + size);
	  /* Handle cases where the target is encoded within the
	     instruction.  */
	  if (template_sequence[i].r_type == R_ARM_JUMP24)
	    {
	      stub_reloc_idx[nrelocs] = i;
	      stub_reloc_offset[nrelocs++] = size;
	    }
	  size += 4;
	  break;

	case DATA_TYPE:
	  bfd_put_32 (stub_bfd, template_sequence[i].data, loc + size);
	  stub_reloc_idx[nrelocs] = i;
	  stub_reloc_offset[nrelocs++] = size;
	  size += 4;
	  break;

	default:
	  BFD_FAIL ();
	  return FALSE;
	}
    }

  if (just_allocated)
    stub_sec->size += size;

  /* Stub size has already been computed in arm_size_one_stub.  Check
     consistency.  */
  BFD_ASSERT (size == stub_entry->stub_size);

  /* Destination is Thumb.  Force bit 0 to 1 to reflect this.  */
  if (stub_entry->branch_type == ST_BRANCH_TO_THUMB)
    sym_value |= 1;

  /* Assume non empty slots have at least one and at most MAXRELOCS entries
     to relocate in each stub.  */
  removed_sg_veneer =
    (size == 0 && stub_entry->stub_type == arm_stub_cmse_branch_thumb_only);
  BFD_ASSERT (removed_sg_veneer || (nrelocs != 0 && nrelocs <= MAXRELOCS));

  for (i = 0; i < nrelocs; i++)
    {
      Elf_Internal_Rela rel;
      bfd_boolean unresolved_reloc;
      char *error_message;
      bfd_vma points_to =
	sym_value + template_sequence[stub_reloc_idx[i]].reloc_addend;

      rel.r_offset = stub_entry->stub_offset + stub_reloc_offset[i];
      rel.r_info = ELF32_R_INFO (0,
				 template_sequence[stub_reloc_idx[i]].r_type);
      rel.r_addend = 0;

      if (stub_entry->stub_type == arm_stub_a8_veneer_b_cond && i == 0)
	/* The first relocation in the elf32_arm_stub_a8_veneer_b_cond[]
	   template should refer back to the instruction after the original
	   branch.  We use target_section as Cortex-A8 erratum workaround stubs
	   are only generated when both source and target are in the same
	   section.  */
	points_to = stub_entry->target_section->output_section->vma
		    + stub_entry->target_section->output_offset
		    + stub_entry->source_value;

      elf32_arm_final_link_relocate (elf32_arm_howto_from_type
	  (template_sequence[stub_reloc_idx[i]].r_type),
	   stub_bfd, info->output_bfd, stub_sec, stub_sec->contents, &rel,
	   points_to, info, stub_entry->target_section, "", STT_FUNC,
	   stub_entry->branch_type,
	   (struct elf_link_hash_entry *) stub_entry->h, &unresolved_reloc,
	   &error_message);
    }

  return TRUE;
}
/* Calculate the template, template size and instruction size for a stub.
   Return value is the instruction size.  *STUB_TEMPLATE and
   *STUB_TEMPLATE_SIZE are filled in when non-NULL.  */

static unsigned int
find_stub_size_and_template (enum elf32_arm_stub_type stub_type,
			     const insn_sequence **stub_template,
			     int *stub_template_size)
{
  const insn_sequence *template_sequence = NULL;
  int template_size = 0, i;
  unsigned int size;

  template_sequence = stub_definitions[stub_type].template_sequence;
  if (stub_template)
    *stub_template = template_sequence;

  template_size = stub_definitions[stub_type].template_size;
  if (stub_template_size)
    *stub_template_size = template_size;

  /* Sum the encoded size of every element of the template: Thumb-1
     halfwords are 2 bytes, everything else is 4.  */
  size = 0;
  for (i = 0; i < template_size; i++)
    {
      switch (template_sequence[i].type)
	{
	case THUMB16_TYPE:
	  size += 2;
	  break;

	case ARM_TYPE:
	case THUMB32_TYPE:
	case DATA_TYPE:
	  size += 4;
	  break;

	default:
	  BFD_FAIL ();
	  return 0;
	}
    }

  return size;
}
/* As above, but don't actually build the stub.  Just bump offset so
   we know stub section sizes.  Called via bfd_hash_traverse; always
   returns TRUE to continue the traversal.  */

static bfd_boolean
arm_size_one_stub (struct bfd_hash_entry *gen_entry,
		   void *in_arg ATTRIBUTE_UNUSED)
{
  struct elf32_arm_stub_hash_entry *stub_entry;
  const insn_sequence *template_sequence;
  int template_size, size;

  /* Massage our args to the form they really have.  */
  stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;

  BFD_ASSERT((stub_entry->stub_type > arm_stub_none)
	     && stub_entry->stub_type < ARRAY_SIZE(stub_definitions));

  size = find_stub_size_and_template (stub_entry->stub_type, &template_sequence,
				      &template_size);

  /* Initialized to -1.  Null size indicates an empty slot full of zeros.  */
  if (stub_entry->stub_template_size)
    {
      stub_entry->stub_size = size;
      stub_entry->stub_template = template_sequence;
      stub_entry->stub_template_size = template_size;
    }

  /* Already accounted for.  */
  if (stub_entry->stub_offset != (bfd_vma) -1)
    return TRUE;

  /* Round stub size up to 8-byte granularity before adding it to the
     stub section's running size.  */
  size = (size + 7) & ~7;
  stub_entry->stub_sec->size += size;

  return TRUE;
}
/* External entry points for sizing and building linker stubs.  */

/* Set up various things so that we can make a list of input sections
   for each output section included in the link.  Returns -1 on error,
   0 when no stubs will be needed, and 1 on success.  */

static int
elf32_arm_setup_section_lists (bfd *output_bfd,
			       struct bfd_link_info *info)
{
  bfd *input_bfd;
  unsigned int bfd_count;
  unsigned int top_id, top_index;
  asection *section;
  asection **input_list, **list;
  bfd_size_type amt;
  struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);

  if (htab == NULL)
    return 0;
  if (! is_elf_hash_table (htab))
    return 0;

  /* Count the number of input BFDs and find the top input section id.  */
  for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0;
       input_bfd != NULL;
       input_bfd = input_bfd->link.next)
    {
      bfd_count += 1;
      for (section = input_bfd->sections;
	   section != NULL;
	   section = section->next)
	{
	  if (top_id < section->id)
	    top_id = section->id;
	}
    }
  htab->bfd_count = bfd_count;

  /* One stub-group slot per possible input section id.  */
  amt = sizeof (struct map_stub) * (top_id + 1);
  htab->stub_group = (struct map_stub *) bfd_zmalloc (amt);
  if (htab->stub_group == NULL)
    return -1;
  htab->top_id = top_id;

  /* We can't use output_bfd->section_count here to find the top output
     section index as some sections may have been removed, and
     _bfd_strip_section_from_output doesn't renumber the indices.  */
  for (section = output_bfd->sections, top_index = 0;
       section != NULL;
       section = section->next)
    {
      if (top_index < section->index)
	top_index = section->index;
    }

  htab->top_index = top_index;
  amt = sizeof (asection *) * (top_index + 1);
  input_list = (asection **) bfd_malloc (amt);
  htab->input_list = input_list;
  if (input_list == NULL)
    return -1;

  /* For sections we aren't interested in, mark their entries with a
     value we can check later.  */
  list = input_list + top_index;
  do
    *list = bfd_abs_section_ptr;
  while (list-- != input_list);

  /* Code output sections get a NULL list head, ready to collect their
     input sections in elf32_arm_next_input_section.  */
  for (section = output_bfd->sections;
       section != NULL;
       section = section->next)
    {
      if ((section->flags & SEC_CODE) != 0)
	input_list[section->index] = NULL;
    }

  return 1;
}
/* The linker repeatedly calls this function for each input section,
   in the order that input sections are linked into output sections.
   Build lists of input sections to determine groupings between which
   we may insert linker stubs.  */

static void
elf32_arm_next_input_section (struct bfd_link_info *info,
			      asection *isec)
{
  struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);

  if (htab == NULL)
    return;

  if (isec->output_section->index <= htab->top_index)
    {
      asection **list = htab->input_list + isec->output_section->index;

      /* bfd_abs_section_ptr marks output sections we are not interested
	 in (see elf32_arm_setup_section_lists).  */
      if (*list != bfd_abs_section_ptr && (isec->flags & SEC_CODE) != 0)
	{
	  /* Steal the link_sec pointer for our list.  */
#define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
	  /* This happens to make the list in reverse order,
	     which we reverse later.  */
	  PREV_SEC (isec) = *list;
	  *list = isec;
	}
    }
}
/* See whether we can group stub sections together.  Grouping stub
   sections may result in fewer stubs.  More importantly, we need to
   put all .init* and .fini* stubs at the end of the .init or
   .fini output sections respectively, because glibc splits the
   _init and _fini functions into multiple parts.  Putting a stub in
   the middle of a function is not a good idea.  */

static void
group_sections (struct elf32_arm_link_hash_table *htab,
		bfd_size_type stub_group_size,
		bfd_boolean stubs_always_after_branch)
{
  asection **list = htab->input_list;

  do
    {
      asection *tail = *list;
      asection *head;

      /* Skip output sections marked as uninteresting.  */
      if (tail == bfd_abs_section_ptr)
	continue;

      /* Reverse the list: we must avoid placing stubs at the
	 beginning of the section because the beginning of the text
	 section may be required for an interrupt vector in bare metal
	 code.  */
#define NEXT_SEC PREV_SEC
      head = NULL;
      while (tail != NULL)
	{
	  /* Pop from tail.  */
	  asection *item = tail;
	  tail = PREV_SEC (item);

	  /* Push on head.  */
	  NEXT_SEC (item) = head;
	  head = item;
	}

      while (head != NULL)
	{
	  asection *curr;
	  asection *next;
	  bfd_vma stub_group_start = head->output_offset;
	  bfd_vma end_of_next;

	  curr = head;
	  while (NEXT_SEC (curr) != NULL)
	    {
	      next = NEXT_SEC (curr);
	      end_of_next = next->output_offset + next->size;
	      if (end_of_next - stub_group_start >= stub_group_size)
		/* End of NEXT is too far from start, so stop.  */
		break;
	      /* Add NEXT to the group.  */
	      curr = next;
	    }

	  /* OK, the size from the start to the start of CURR is less
	     than stub_group_size and thus can be handled by one stub
	     section.  (Or the head section is itself larger than
	     stub_group_size, in which case we may be toast.)
	     We should really be keeping track of the total size of
	     stubs added here, as stubs contribute to the final output
	     section size.  */
	  do
	    {
	      next = NEXT_SEC (head);
	      /* Set up this stub group.  */
	      htab->stub_group[head->id].link_sec = curr;
	    }
	  while (head != curr && (head = next) != NULL);

	  /* But wait, there's more!  Input sections up to stub_group_size
	     bytes after the stub section can be handled by it too.  */
	  if (!stubs_always_after_branch)
	    {
	      stub_group_start = curr->output_offset + curr->size;

	      while (next != NULL)
		{
		  end_of_next = next->output_offset + next->size;
		  if (end_of_next - stub_group_start >= stub_group_size)
		    /* End of NEXT is too far from stubs, so stop.  */
		    break;
		  /* Add NEXT to the stub group.  */
		  head = next;
		  next = NEXT_SEC (head);
		  htab->stub_group[head->id].link_sec = curr;
		}
	    }
	  head = next;
	}
    }
  while (list++ != htab->input_list + htab->top_index);

  free (htab->input_list);
#undef PREV_SEC
#undef NEXT_SEC
}
5427 /* Comparison function for sorting/searching relocations relating to Cortex-A8
5431 a8_reloc_compare (const void *a
, const void *b
)
5433 const struct a8_erratum_reloc
*ra
= (const struct a8_erratum_reloc
*) a
;
5434 const struct a8_erratum_reloc
*rb
= (const struct a8_erratum_reloc
*) b
;
5436 if (ra
->from
< rb
->from
)
5438 else if (ra
->from
> rb
->from
)
5444 static struct elf_link_hash_entry
*find_thumb_glue (struct bfd_link_info
*,
5445 const char *, char **);
/* Helper function to scan code for sequences which might trigger the Cortex-A8
   branch/TLB erratum.  Fill in the table described by A8_FIXES_P,
   NUM_A8_FIXES_P, A8_FIX_TABLE_SIZE_P.  Returns true if an error occurs, false
   otherwise.  */

static bfd_boolean
cortex_a8_erratum_scan (bfd *input_bfd,
			struct bfd_link_info *info,
			struct a8_erratum_fix **a8_fixes_p,
			unsigned int *num_a8_fixes_p,
			unsigned int *a8_fix_table_size_p,
			struct a8_erratum_reloc *a8_relocs,
			unsigned int num_a8_relocs,
			unsigned prev_num_a8_fixes,
			bfd_boolean *stub_changed_p)
{
  asection *section;
  struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
  struct a8_erratum_fix *a8_fixes = *a8_fixes_p;
  unsigned int num_a8_fixes = *num_a8_fixes_p;
  unsigned int a8_fix_table_size = *a8_fix_table_size_p;

  if (htab == NULL)
    return FALSE;

  for (section = input_bfd->sections;
       section != NULL;
       section = section->next)
    {
      bfd_byte *contents = NULL;
      struct _arm_elf_section_data *sec_data;
      unsigned int span;
      bfd_vma base_vma;

      /* Only scan real, executable, kept code sections.  */
      if (elf_section_type (section) != SHT_PROGBITS
	  || (elf_section_flags (section) & SHF_EXECINSTR) == 0
	  || (section->flags & SEC_EXCLUDE) != 0
	  || (section->sec_info_type == SEC_INFO_TYPE_JUST_SYMS)
	  || (section->output_section == bfd_abs_section_ptr))
	continue;

      base_vma = section->output_section->vma + section->output_offset;

      if (elf_section_data (section)->this_hdr.contents != NULL)
	contents = elf_section_data (section)->this_hdr.contents;
      else if (! bfd_malloc_and_get_section (input_bfd, section, &contents))
	return TRUE;

      sec_data = elf32_arm_section_data (section);

      /* Walk the mapping-symbol spans; only Thumb code spans matter.  */
      for (span = 0; span < sec_data->mapcount; span++)
	{
	  unsigned int span_start = sec_data->map[span].vma;
	  unsigned int span_end = (span == sec_data->mapcount - 1)
	    ? section->size : sec_data->map[span + 1].vma;
	  unsigned int i;
	  char span_type = sec_data->map[span].type;
	  bfd_boolean last_was_32bit = FALSE, last_was_branch = FALSE;

	  if (span_type != 't')
	    continue;

	  /* Span is entirely within a single 4KB region: skip scanning.  */
	  if (((base_vma + span_start) & ~0xfff)
	      == ((base_vma + span_end) & ~0xfff))
	    continue;

	  /* Scan for 32-bit Thumb-2 branches which span two 4K regions, where:

	     * The opcode is BLX.W, BL.W, B.W, Bcc.W
	     * The branch target is in the same 4KB region as the
	       first half of the branch.
	     * The instruction before the branch is a 32-bit
	       length non-branch instruction.  */
	  for (i = span_start; i < span_end;)
	    {
	      unsigned int insn = bfd_getl16 (&contents[i]);
	      bfd_boolean insn_32bit = FALSE, is_blx = FALSE, is_b = FALSE;
	      bfd_boolean is_bl = FALSE, is_bcc = FALSE, is_32bit_branch;

	      if ((insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000)
		insn_32bit = TRUE;

	      if (insn_32bit)
		{
		  /* Load the rest of the insn (in manual-friendly order).  */
		  insn = (insn << 16) | bfd_getl16 (&contents[i + 2]);

		  /* Encoding T4: B<c>.W.  */
		  is_b = (insn & 0xf800d000) == 0xf0009000;
		  /* Encoding T1: BL<c>.W.  */
		  is_bl = (insn & 0xf800d000) == 0xf000d000;
		  /* Encoding T2: BLX<c>.W.  */
		  is_blx = (insn & 0xf800d000) == 0xf000c000;
		  /* Encoding T3: B<c>.W (not permitted in IT block).  */
		  is_bcc = (insn & 0xf800d000) == 0xf0008000
			   && (insn & 0x07f00000) != 0x03800000;
		}

	      is_32bit_branch = is_b || is_bl || is_blx || is_bcc;

	      /* The erratum pattern: a 32-bit branch whose first halfword is
		 the last halfword of a 4KB page, preceded by a 32-bit
		 non-branch instruction.  */
	      if (((base_vma + i) & 0xfff) == 0xffe
		  && insn_32bit
		  && is_32bit_branch
		  && last_was_32bit
		  && ! last_was_branch)
		{
		  bfd_signed_vma offset = 0;
		  bfd_boolean force_target_arm = FALSE;
		  bfd_boolean force_target_thumb = FALSE;
		  bfd_vma target;
		  enum elf32_arm_stub_type stub_type = arm_stub_none;
		  struct a8_erratum_reloc key, *found;
		  bfd_boolean use_plt = FALSE;

		  key.from = base_vma + i;
		  found = (struct a8_erratum_reloc *)
		      bsearch (&key, a8_relocs, num_a8_relocs,
			       sizeof (struct a8_erratum_reloc),
			       &a8_reloc_compare);

		  if (found)
		    {
		      char *error_message = NULL;
		      struct elf_link_hash_entry *entry;

		      /* We don't care about the error returned from this
			 function, only if there is glue or not.  */
		      entry = find_thumb_glue (info, found->sym_name,
					       &error_message);

		      if (entry)
			found->non_a8_stub = TRUE;

		      /* Keep a simpler condition, for the sake of clarity.  */
		      if (htab->root.splt != NULL && found->hash != NULL
			  && found->hash->root.plt.offset != (bfd_vma) -1)
			use_plt = TRUE;

		      if (found->r_type == R_ARM_THM_CALL)
			{
			  if (found->branch_type == ST_BRANCH_TO_ARM
			      || use_plt)
			    force_target_arm = TRUE;
			  else
			    force_target_thumb = TRUE;
			}
		    }

		  /* Check if we have an offending branch instruction.  */

		  if (found && found->non_a8_stub)
		    /* We've already made a stub for this instruction, e.g.
		       it's a long branch or a Thumb->ARM stub.  Assume that
		       stub will suffice to work around the A8 erratum (see
		       setting of always_after_branch above).  */
		    ;
		  else if (is_bcc)
		    {
		      /* Decode the Bcc.W (encoding T3) branch offset.  */
		      offset = (insn & 0x7ff) << 1;
		      offset |= (insn & 0x3f0000) >> 4;
		      offset |= (insn & 0x2000) ? 0x40000 : 0;
		      offset |= (insn & 0x800) ? 0x80000 : 0;
		      offset |= (insn & 0x4000000) ? 0x100000 : 0;
		      if (offset & 0x100000)
			offset |= ~ ((bfd_signed_vma) 0xfffff);
		      stub_type = arm_stub_a8_veneer_b_cond;
		    }
		  else if (is_b || is_bl || is_blx)
		    {
		      /* Decode the B.W/BL.W/BLX.W (encoding T4/T1/T2)
			 offset, including the I1/I2 bits derived from S,
			 J1 and J2.  */
		      int s = (insn & 0x4000000) != 0;
		      int j1 = (insn & 0x2000) != 0;
		      int j2 = (insn & 0x800) != 0;
		      int i1 = !(j1 ^ s);
		      int i2 = !(j2 ^ s);

		      offset = (insn & 0x7ff) << 1;
		      offset |= (insn & 0x3ff0000) >> 4;
		      offset |= i2 << 22;
		      offset |= i1 << 23;
		      offset |= s << 24;
		      if (offset & 0x1000000)
			offset |= ~ ((bfd_signed_vma) 0xffffff);

		      if (is_blx)
			offset &= ~ ((bfd_signed_vma) 3);

		      stub_type = is_blx ? arm_stub_a8_veneer_blx :
			is_bl ? arm_stub_a8_veneer_bl : arm_stub_a8_veneer_b;
		    }

		  if (stub_type != arm_stub_none)
		    {
		      bfd_vma pc_for_insn = base_vma + i + 4;

		      /* The original instruction is a BL, but the target is
			 an ARM instruction.  If we were not making a stub,
			 the BL would have been converted to a BLX.  Use the
			 BLX stub instead in that case.  */
		      if (htab->use_blx && force_target_arm
			  && stub_type == arm_stub_a8_veneer_bl)
			{
			  stub_type = arm_stub_a8_veneer_blx;
			  is_blx = TRUE;
			  is_bl = FALSE;
			}
		      /* Conversely, if the original instruction was
			 BLX but the target is Thumb mode, use the BL
			 stub.  */
		      else if (force_target_thumb
			       && stub_type == arm_stub_a8_veneer_blx)
			{
			  stub_type = arm_stub_a8_veneer_bl;
			  is_blx = FALSE;
			  is_bl = TRUE;
			}

		      if (is_blx)
			pc_for_insn &= ~ ((bfd_vma) 3);

		      /* If we found a relocation, use the proper destination,
			 not the offset in the (unrelocated) instruction.
			 Note this is always done if we switched the stub type
			 above.  */
		      if (found)
			offset =
			  (bfd_signed_vma) (found->destination - pc_for_insn);

		      /* If the stub will use a Thumb-mode branch to a
			 PLT target, redirect it to the preceding Thumb
			 entry point.  */
		      if (stub_type != arm_stub_a8_veneer_blx && use_plt)
			offset -= PLT_THUMB_STUB_SIZE;

		      target = pc_for_insn + offset;

		      /* The BLX stub is ARM-mode code.  Adjust the offset to
			 take the different PC value (+8 instead of +4) into
			 account.  */
		      if (stub_type == arm_stub_a8_veneer_blx)
			offset += 4;

		      /* Only branches landing in the same 4KB page as the
			 straddling branch trigger the erratum.  */
		      if (((base_vma + i) & ~0xfff) == (target & ~0xfff))
			{
			  char *stub_name = NULL;

			  if (num_a8_fixes == a8_fix_table_size)
			    {
			      a8_fix_table_size *= 2;
			      a8_fixes = (struct a8_erratum_fix *)
				  bfd_realloc (a8_fixes,
					       sizeof (struct a8_erratum_fix)
					       * a8_fix_table_size);
			    }

			  if (num_a8_fixes < prev_num_a8_fixes)
			    {
			      /* If we're doing a subsequent scan,
				 check if we've found the same fix as
				 before, and try and reuse the stub
				 name.  */
			      stub_name = a8_fixes[num_a8_fixes].stub_name;
			      if ((a8_fixes[num_a8_fixes].section != section)
				  || (a8_fixes[num_a8_fixes].offset != i))
				{
				  free (stub_name);
				  stub_name = NULL;
				  *stub_changed_p = TRUE;
				}
			    }

			  if (!stub_name)
			    {
			      stub_name = (char *) bfd_malloc (8 + 1 + 8 + 1);
			      if (stub_name != NULL)
				sprintf (stub_name, "%x:%x", section->id, i);
			    }

			  a8_fixes[num_a8_fixes].input_bfd = input_bfd;
			  a8_fixes[num_a8_fixes].section = section;
			  a8_fixes[num_a8_fixes].offset = i;
			  a8_fixes[num_a8_fixes].target_offset =
			    target - base_vma;
			  a8_fixes[num_a8_fixes].orig_insn = insn;
			  a8_fixes[num_a8_fixes].stub_name = stub_name;
			  a8_fixes[num_a8_fixes].stub_type = stub_type;
			  a8_fixes[num_a8_fixes].branch_type =
			    is_blx ? ST_BRANCH_TO_ARM : ST_BRANCH_TO_THUMB;

			  num_a8_fixes++;
			}
		    }
		}

	      i += insn_32bit ? 4 : 2;
	      last_was_32bit = insn_32bit;
	      last_was_branch = is_32bit_branch;
	    }
	}

      /* Free section contents only if we allocated them ourselves.  */
      if (elf_section_data (section)->this_hdr.contents == NULL)
	free (contents);
    }

  *a8_fixes_p = a8_fixes;
  *num_a8_fixes_p = num_a8_fixes;
  *a8_fix_table_size_p = a8_fix_table_size;

  return FALSE;
}
5758 /* Create or update a stub entry depending on whether the stub can already be
5759 found in HTAB. The stub is identified by:
5760 - its type STUB_TYPE
5761 - its source branch (note that several can share the same stub) whose
5762 section and relocation (if any) are given by SECTION and IRELA
5764 - its target symbol whose input section, hash, name, value and branch type
5765 are given in SYM_SEC, HASH, SYM_NAME, SYM_VALUE and BRANCH_TYPE
5768 If found, the value of the stub's target symbol is updated from SYM_VALUE
5769 and *NEW_STUB is set to FALSE. Otherwise, *NEW_STUB is set to
5770 TRUE and the stub entry is initialized.
5772 Returns the stub that was created or updated, or NULL if an error
5775 static struct elf32_arm_stub_hash_entry
*
5776 elf32_arm_create_stub (struct elf32_arm_link_hash_table
*htab
,
5777 enum elf32_arm_stub_type stub_type
, asection
*section
,
5778 Elf_Internal_Rela
*irela
, asection
*sym_sec
,
5779 struct elf32_arm_link_hash_entry
*hash
, char *sym_name
,
5780 bfd_vma sym_value
, enum arm_st_branch_type branch_type
,
5781 bfd_boolean
*new_stub
)
5783 const asection
*id_sec
;
5785 struct elf32_arm_stub_hash_entry
*stub_entry
;
5786 unsigned int r_type
;
5787 bfd_boolean sym_claimed
= arm_stub_sym_claimed (stub_type
);
5789 BFD_ASSERT (stub_type
!= arm_stub_none
);
5793 stub_name
= sym_name
;
5797 BFD_ASSERT (section
);
5798 BFD_ASSERT (section
->id
<= htab
->top_id
);
5800 /* Support for grouping stub sections. */
5801 id_sec
= htab
->stub_group
[section
->id
].link_sec
;
5803 /* Get the name of this stub. */
5804 stub_name
= elf32_arm_stub_name (id_sec
, sym_sec
, hash
, irela
,
5810 stub_entry
= arm_stub_hash_lookup (&htab
->stub_hash_table
, stub_name
, FALSE
,
5812 /* The proper stub has already been created, just update its value. */
5813 if (stub_entry
!= NULL
)
5817 stub_entry
->target_value
= sym_value
;
5821 stub_entry
= elf32_arm_add_stub (stub_name
, section
, htab
, stub_type
);
5822 if (stub_entry
== NULL
)
5829 stub_entry
->target_value
= sym_value
;
5830 stub_entry
->target_section
= sym_sec
;
5831 stub_entry
->stub_type
= stub_type
;
5832 stub_entry
->h
= hash
;
5833 stub_entry
->branch_type
= branch_type
;
5836 stub_entry
->output_name
= sym_name
;
5839 if (sym_name
== NULL
)
5840 sym_name
= "unnamed";
5841 stub_entry
->output_name
= (char *)
5842 bfd_alloc (htab
->stub_bfd
, sizeof (THUMB2ARM_GLUE_ENTRY_NAME
)
5843 + strlen (sym_name
));
5844 if (stub_entry
->output_name
== NULL
)
5850 /* For historical reasons, use the existing names for ARM-to-Thumb and
5851 Thumb-to-ARM stubs. */
5852 r_type
= ELF32_R_TYPE (irela
->r_info
);
5853 if ((r_type
== (unsigned int) R_ARM_THM_CALL
5854 || r_type
== (unsigned int) R_ARM_THM_JUMP24
5855 || r_type
== (unsigned int) R_ARM_THM_JUMP19
)
5856 && branch_type
== ST_BRANCH_TO_ARM
)
5857 sprintf (stub_entry
->output_name
, THUMB2ARM_GLUE_ENTRY_NAME
, sym_name
);
5858 else if ((r_type
== (unsigned int) R_ARM_CALL
5859 || r_type
== (unsigned int) R_ARM_JUMP24
)
5860 && branch_type
== ST_BRANCH_TO_THUMB
)
5861 sprintf (stub_entry
->output_name
, ARM2THUMB_GLUE_ENTRY_NAME
, sym_name
);
5863 sprintf (stub_entry
->output_name
, STUB_ENTRY_NAME
, sym_name
);
5870 /* Scan symbols in INPUT_BFD to identify secure entry functions needing a
5871 gateway veneer to transition from non secure to secure state and create them
5874 "ARMv8-M Security Extensions: Requirements on Development Tools" document
5875 defines the conditions that govern Secure Gateway veneer creation for a
5876 given symbol <SYM> as follows:
5877 - it has function type
5878 - it has non local binding
5879 - a symbol named __acle_se_<SYM> (called special symbol) exists with the
5880 same type, binding and value as <SYM> (called normal symbol).
5881 An entry function can handle secure state transition itself in which case
5882 its special symbol would have a different value from the normal symbol.
5884 OUT_ATTR gives the output attributes, SYM_HASHES the symbol index to hash
5885 entry mapping while HTAB gives the name to hash entry mapping.
5886 *CMSE_STUB_CREATED is increased by the number of secure gateway veneer
5889 The return value gives whether a stub failed to be allocated. */
5892 cmse_scan (bfd
*input_bfd
, struct elf32_arm_link_hash_table
*htab
,
5893 obj_attribute
*out_attr
, struct elf_link_hash_entry
**sym_hashes
,
5894 int *cmse_stub_created
)
5896 const struct elf_backend_data
*bed
;
5897 Elf_Internal_Shdr
*symtab_hdr
;
5898 unsigned i
, j
, sym_count
, ext_start
;
5899 Elf_Internal_Sym
*cmse_sym
, *local_syms
;
5900 struct elf32_arm_link_hash_entry
*hash
, *cmse_hash
= NULL
;
5901 enum arm_st_branch_type branch_type
;
5902 char *sym_name
, *lsym_name
;
5905 struct elf32_arm_stub_hash_entry
*stub_entry
;
5906 bfd_boolean is_v8m
, new_stub
, cmse_invalid
, ret
= TRUE
;
5908 bed
= get_elf_backend_data (input_bfd
);
5909 symtab_hdr
= &elf_tdata (input_bfd
)->symtab_hdr
;
5910 sym_count
= symtab_hdr
->sh_size
/ bed
->s
->sizeof_sym
;
5911 ext_start
= symtab_hdr
->sh_info
;
5912 is_v8m
= (out_attr
[Tag_CPU_arch
].i
>= TAG_CPU_ARCH_V8M_BASE
5913 && out_attr
[Tag_CPU_arch_profile
].i
== 'M');
5915 local_syms
= (Elf_Internal_Sym
*) symtab_hdr
->contents
;
5916 if (local_syms
== NULL
)
5917 local_syms
= bfd_elf_get_elf_syms (input_bfd
, symtab_hdr
,
5918 symtab_hdr
->sh_info
, 0, NULL
, NULL
,
5920 if (symtab_hdr
->sh_info
&& local_syms
== NULL
)
5924 for (i
= 0; i
< sym_count
; i
++)
5926 cmse_invalid
= FALSE
;
5930 cmse_sym
= &local_syms
[i
];
5931 /* Not a special symbol. */
5932 if (!ARM_GET_SYM_CMSE_SPCL (cmse_sym
->st_target_internal
))
5934 sym_name
= bfd_elf_string_from_elf_section (input_bfd
,
5935 symtab_hdr
->sh_link
,
5937 /* Special symbol with local binding. */
5938 cmse_invalid
= TRUE
;
5942 cmse_hash
= elf32_arm_hash_entry (sym_hashes
[i
- ext_start
]);
5943 sym_name
= (char *) cmse_hash
->root
.root
.root
.string
;
5945 /* Not a special symbol. */
5946 if (!ARM_GET_SYM_CMSE_SPCL (cmse_hash
->root
.target_internal
))
5949 /* Special symbol has incorrect binding or type. */
5950 if ((cmse_hash
->root
.root
.type
!= bfd_link_hash_defined
5951 && cmse_hash
->root
.root
.type
!= bfd_link_hash_defweak
)
5952 || cmse_hash
->root
.type
!= STT_FUNC
)
5953 cmse_invalid
= TRUE
;
5958 _bfd_error_handler (_("%pB: special symbol `%s' only allowed for "
5959 "ARMv8-M architecture or later"),
5960 input_bfd
, sym_name
);
5961 is_v8m
= TRUE
; /* Avoid multiple warning. */
5967 _bfd_error_handler (_("%pB: invalid special symbol `%s'; it must be"
5968 " a global or weak function symbol"),
5969 input_bfd
, sym_name
);
5975 sym_name
+= strlen (CMSE_PREFIX
);
5976 hash
= (struct elf32_arm_link_hash_entry
*)
5977 elf_link_hash_lookup (&(htab
)->root
, sym_name
, FALSE
, FALSE
, TRUE
);
5979 /* No associated normal symbol or it is neither global nor weak. */
5981 || (hash
->root
.root
.type
!= bfd_link_hash_defined
5982 && hash
->root
.root
.type
!= bfd_link_hash_defweak
)
5983 || hash
->root
.type
!= STT_FUNC
)
5985 /* Initialize here to avoid warning about use of possibly
5986 uninitialized variable. */
5991 /* Searching for a normal symbol with local binding. */
5992 for (; j
< ext_start
; j
++)
5995 bfd_elf_string_from_elf_section (input_bfd
,
5996 symtab_hdr
->sh_link
,
5997 local_syms
[j
].st_name
);
5998 if (!strcmp (sym_name
, lsym_name
))
6003 if (hash
|| j
< ext_start
)
6006 (_("%pB: invalid standard symbol `%s'; it must be "
6007 "a global or weak function symbol"),
6008 input_bfd
, sym_name
);
6012 (_("%pB: absent standard symbol `%s'"), input_bfd
, sym_name
);
6018 sym_value
= hash
->root
.root
.u
.def
.value
;
6019 section
= hash
->root
.root
.u
.def
.section
;
6021 if (cmse_hash
->root
.root
.u
.def
.section
!= section
)
6024 (_("%pB: `%s' and its special symbol are in different sections"),
6025 input_bfd
, sym_name
);
6028 if (cmse_hash
->root
.root
.u
.def
.value
!= sym_value
)
6029 continue; /* Ignore: could be an entry function starting with SG. */
6031 /* If this section is a link-once section that will be discarded, then
6032 don't create any stubs. */
6033 if (section
->output_section
== NULL
)
6036 (_("%pB: entry function `%s' not output"), input_bfd
, sym_name
);
6040 if (hash
->root
.size
== 0)
6043 (_("%pB: entry function `%s' is empty"), input_bfd
, sym_name
);
6049 branch_type
= ARM_GET_SYM_BRANCH_TYPE (hash
->root
.target_internal
);
6051 = elf32_arm_create_stub (htab
, arm_stub_cmse_branch_thumb_only
,
6052 NULL
, NULL
, section
, hash
, sym_name
,
6053 sym_value
, branch_type
, &new_stub
);
6055 if (stub_entry
== NULL
)
6059 BFD_ASSERT (new_stub
);
6060 (*cmse_stub_created
)++;
6064 if (!symtab_hdr
->contents
)
6069 /* Return TRUE iff a symbol identified by its linker HASH entry is a secure
6070 code entry function, ie can be called from non secure code without using a
6074 cmse_entry_fct_p (struct elf32_arm_link_hash_entry
*hash
)
6076 bfd_byte contents
[4];
6077 uint32_t first_insn
;
6082 /* Defined symbol of function type. */
6083 if (hash
->root
.root
.type
!= bfd_link_hash_defined
6084 && hash
->root
.root
.type
!= bfd_link_hash_defweak
)
6086 if (hash
->root
.type
!= STT_FUNC
)
6089 /* Read first instruction. */
6090 section
= hash
->root
.root
.u
.def
.section
;
6091 abfd
= section
->owner
;
6092 offset
= hash
->root
.root
.u
.def
.value
- section
->vma
;
6093 if (!bfd_get_section_contents (abfd
, section
, contents
, offset
,
6097 first_insn
= bfd_get_32 (abfd
, contents
);
6099 /* Starts by SG instruction. */
6100 return first_insn
== 0xe97fe97f;
6103 /* Output the name (in symbol table) of the veneer GEN_ENTRY if it is a new
6104 secure gateway veneers (ie. the veneers was not in the input import library)
6105 and there is no output import library (GEN_INFO->out_implib_bfd is NULL. */
6108 arm_list_new_cmse_stub (struct bfd_hash_entry
*gen_entry
, void *gen_info
)
6110 struct elf32_arm_stub_hash_entry
*stub_entry
;
6111 struct bfd_link_info
*info
;
6113 /* Massage our args to the form they really have. */
6114 stub_entry
= (struct elf32_arm_stub_hash_entry
*) gen_entry
;
6115 info
= (struct bfd_link_info
*) gen_info
;
6117 if (info
->out_implib_bfd
)
6120 if (stub_entry
->stub_type
!= arm_stub_cmse_branch_thumb_only
)
6123 if (stub_entry
->stub_offset
== (bfd_vma
) -1)
6124 _bfd_error_handler (" %s", stub_entry
->output_name
);
6129 /* Set offset of each secure gateway veneers so that its address remain
6130 identical to the one in the input import library referred by
6131 HTAB->in_implib_bfd. A warning is issued for veneers that disappeared
6132 (present in input import library but absent from the executable being
6133 linked) or if new veneers appeared and there is no output import library
6134 (INFO->out_implib_bfd is NULL and *CMSE_STUB_CREATED is bigger than the
6135 number of secure gateway veneers found in the input import library.
6137 The function returns whether an error occurred. If no error occurred,
6138 *CMSE_STUB_CREATED gives the number of SG veneers created by both cmse_scan
6139 and this function and HTAB->new_cmse_stub_offset is set to the biggest
6140 veneer observed set for new veneers to be layed out after. */
6143 set_cmse_veneer_addr_from_implib (struct bfd_link_info
*info
,
6144 struct elf32_arm_link_hash_table
*htab
,
6145 int *cmse_stub_created
)
6152 asection
*stub_out_sec
;
6153 bfd_boolean ret
= TRUE
;
6154 Elf_Internal_Sym
*intsym
;
6155 const char *out_sec_name
;
6156 bfd_size_type cmse_stub_size
;
6157 asymbol
**sympp
= NULL
, *sym
;
6158 struct elf32_arm_link_hash_entry
*hash
;
6159 const insn_sequence
*cmse_stub_template
;
6160 struct elf32_arm_stub_hash_entry
*stub_entry
;
6161 int cmse_stub_template_size
, new_cmse_stubs_created
= *cmse_stub_created
;
6162 bfd_vma veneer_value
, stub_offset
, next_cmse_stub_offset
;
6163 bfd_vma cmse_stub_array_start
= (bfd_vma
) -1, cmse_stub_sec_vma
= 0;
6165 /* No input secure gateway import library. */
6166 if (!htab
->in_implib_bfd
)
6169 in_implib_bfd
= htab
->in_implib_bfd
;
6170 if (!htab
->cmse_implib
)
6172 _bfd_error_handler (_("%pB: --in-implib only supported for Secure "
6173 "Gateway import libraries"), in_implib_bfd
);
6177 /* Get symbol table size. */
6178 symsize
= bfd_get_symtab_upper_bound (in_implib_bfd
);
6182 /* Read in the input secure gateway import library's symbol table. */
6183 sympp
= (asymbol
**) xmalloc (symsize
);
6184 symcount
= bfd_canonicalize_symtab (in_implib_bfd
, sympp
);
6191 htab
->new_cmse_stub_offset
= 0;
6193 find_stub_size_and_template (arm_stub_cmse_branch_thumb_only
,
6194 &cmse_stub_template
,
6195 &cmse_stub_template_size
);
6197 arm_dedicated_stub_output_section_name (arm_stub_cmse_branch_thumb_only
);
6199 bfd_get_section_by_name (htab
->obfd
, out_sec_name
);
6200 if (stub_out_sec
!= NULL
)
6201 cmse_stub_sec_vma
= stub_out_sec
->vma
;
6203 /* Set addresses of veneers mentionned in input secure gateway import
6204 library's symbol table. */
6205 for (i
= 0; i
< symcount
; i
++)
6209 sym_name
= (char *) bfd_asymbol_name (sym
);
6210 intsym
= &((elf_symbol_type
*) sym
)->internal_elf_sym
;
6212 if (sym
->section
!= bfd_abs_section_ptr
6213 || !(flags
& (BSF_GLOBAL
| BSF_WEAK
))
6214 || (flags
& BSF_FUNCTION
) != BSF_FUNCTION
6215 || (ARM_GET_SYM_BRANCH_TYPE (intsym
->st_target_internal
)
6216 != ST_BRANCH_TO_THUMB
))
6218 _bfd_error_handler (_("%pB: invalid import library entry: `%s'; "
6219 "symbol should be absolute, global and "
6220 "refer to Thumb functions"),
6221 in_implib_bfd
, sym_name
);
6226 veneer_value
= bfd_asymbol_value (sym
);
6227 stub_offset
= veneer_value
- cmse_stub_sec_vma
;
6228 stub_entry
= arm_stub_hash_lookup (&htab
->stub_hash_table
, sym_name
,
6230 hash
= (struct elf32_arm_link_hash_entry
*)
6231 elf_link_hash_lookup (&(htab
)->root
, sym_name
, FALSE
, FALSE
, TRUE
);
6233 /* Stub entry should have been created by cmse_scan or the symbol be of
6234 a secure function callable from non secure code. */
6235 if (!stub_entry
&& !hash
)
6237 bfd_boolean new_stub
;
6240 (_("entry function `%s' disappeared from secure code"), sym_name
);
6241 hash
= (struct elf32_arm_link_hash_entry
*)
6242 elf_link_hash_lookup (&(htab
)->root
, sym_name
, TRUE
, TRUE
, TRUE
);
6244 = elf32_arm_create_stub (htab
, arm_stub_cmse_branch_thumb_only
,
6245 NULL
, NULL
, bfd_abs_section_ptr
, hash
,
6246 sym_name
, veneer_value
,
6247 ST_BRANCH_TO_THUMB
, &new_stub
);
6248 if (stub_entry
== NULL
)
6252 BFD_ASSERT (new_stub
);
6253 new_cmse_stubs_created
++;
6254 (*cmse_stub_created
)++;
6256 stub_entry
->stub_template_size
= stub_entry
->stub_size
= 0;
6257 stub_entry
->stub_offset
= stub_offset
;
6259 /* Symbol found is not callable from non secure code. */
6260 else if (!stub_entry
)
6262 if (!cmse_entry_fct_p (hash
))
6264 _bfd_error_handler (_("`%s' refers to a non entry function"),
6272 /* Only stubs for SG veneers should have been created. */
6273 BFD_ASSERT (stub_entry
->stub_type
== arm_stub_cmse_branch_thumb_only
);
6275 /* Check visibility hasn't changed. */
6276 if (!!(flags
& BSF_GLOBAL
)
6277 != (hash
->root
.root
.type
== bfd_link_hash_defined
))
6279 (_("%pB: visibility of symbol `%s' has changed"), in_implib_bfd
,
6282 stub_entry
->stub_offset
= stub_offset
;
6285 /* Size should match that of a SG veneer. */
6286 if (intsym
->st_size
!= cmse_stub_size
)
6288 _bfd_error_handler (_("%pB: incorrect size for symbol `%s'"),
6289 in_implib_bfd
, sym_name
);
6293 /* Previous veneer address is before current SG veneer section. */
6294 if (veneer_value
< cmse_stub_sec_vma
)
6296 /* Avoid offset underflow. */
6298 stub_entry
->stub_offset
= 0;
6303 /* Complain if stub offset not a multiple of stub size. */
6304 if (stub_offset
% cmse_stub_size
)
6307 (_("offset of veneer for entry function `%s' not a multiple of "
6308 "its size"), sym_name
);
6315 new_cmse_stubs_created
--;
6316 if (veneer_value
< cmse_stub_array_start
)
6317 cmse_stub_array_start
= veneer_value
;
6318 next_cmse_stub_offset
= stub_offset
+ ((cmse_stub_size
+ 7) & ~7);
6319 if (next_cmse_stub_offset
> htab
->new_cmse_stub_offset
)
6320 htab
->new_cmse_stub_offset
= next_cmse_stub_offset
;
6323 if (!info
->out_implib_bfd
&& new_cmse_stubs_created
!= 0)
6325 BFD_ASSERT (new_cmse_stubs_created
> 0);
6327 (_("new entry function(s) introduced but no output import library "
6329 bfd_hash_traverse (&htab
->stub_hash_table
, arm_list_new_cmse_stub
, info
);
6332 if (cmse_stub_array_start
!= cmse_stub_sec_vma
)
6335 (_("start address of `%s' is different from previous link"),
6345 /* Determine and set the size of the stub section for a final link.
6347 The basic idea here is to examine all the relocations looking for
6348 PC-relative calls to a target that is unreachable with a "bl"
6352 elf32_arm_size_stubs (bfd
*output_bfd
,
6354 struct bfd_link_info
*info
,
6355 bfd_signed_vma group_size
,
6356 asection
* (*add_stub_section
) (const char *, asection
*,
6359 void (*layout_sections_again
) (void))
6361 bfd_boolean ret
= TRUE
;
6362 obj_attribute
*out_attr
;
6363 int cmse_stub_created
= 0;
6364 bfd_size_type stub_group_size
;
6365 bfd_boolean m_profile
, stubs_always_after_branch
, first_veneer_scan
= TRUE
;
6366 struct elf32_arm_link_hash_table
*htab
= elf32_arm_hash_table (info
);
6367 struct a8_erratum_fix
*a8_fixes
= NULL
;
6368 unsigned int num_a8_fixes
= 0, a8_fix_table_size
= 10;
6369 struct a8_erratum_reloc
*a8_relocs
= NULL
;
6370 unsigned int num_a8_relocs
= 0, a8_reloc_table_size
= 10, i
;
6375 if (htab
->fix_cortex_a8
)
6377 a8_fixes
= (struct a8_erratum_fix
*)
6378 bfd_zmalloc (sizeof (struct a8_erratum_fix
) * a8_fix_table_size
);
6379 a8_relocs
= (struct a8_erratum_reloc
*)
6380 bfd_zmalloc (sizeof (struct a8_erratum_reloc
) * a8_reloc_table_size
);
6383 /* Propagate mach to stub bfd, because it may not have been
6384 finalized when we created stub_bfd. */
6385 bfd_set_arch_mach (stub_bfd
, bfd_get_arch (output_bfd
),
6386 bfd_get_mach (output_bfd
));
6388 /* Stash our params away. */
6389 htab
->stub_bfd
= stub_bfd
;
6390 htab
->add_stub_section
= add_stub_section
;
6391 htab
->layout_sections_again
= layout_sections_again
;
6392 stubs_always_after_branch
= group_size
< 0;
6394 out_attr
= elf_known_obj_attributes_proc (output_bfd
);
6395 m_profile
= out_attr
[Tag_CPU_arch_profile
].i
== 'M';
6397 /* The Cortex-A8 erratum fix depends on stubs not being in the same 4K page
6398 as the first half of a 32-bit branch straddling two 4K pages. This is a
6399 crude way of enforcing that. */
6400 if (htab
->fix_cortex_a8
)
6401 stubs_always_after_branch
= 1;
6404 stub_group_size
= -group_size
;
6406 stub_group_size
= group_size
;
6408 if (stub_group_size
== 1)
6410 /* Default values. */
6411 /* Thumb branch range is +-4MB has to be used as the default
6412 maximum size (a given section can contain both ARM and Thumb
6413 code, so the worst case has to be taken into account).
6415 This value is 24K less than that, which allows for 2025
6416 12-byte stubs. If we exceed that, then we will fail to link.
6417 The user will have to relink with an explicit group size
6419 stub_group_size
= 4170000;
6422 group_sections (htab
, stub_group_size
, stubs_always_after_branch
);
6424 /* If we're applying the cortex A8 fix, we need to determine the
6425 program header size now, because we cannot change it later --
6426 that could alter section placements. Notice the A8 erratum fix
6427 ends up requiring the section addresses to remain unchanged
6428 modulo the page size. That's something we cannot represent
6429 inside BFD, and we don't want to force the section alignment to
6430 be the page size. */
6431 if (htab
->fix_cortex_a8
)
6432 (*htab
->layout_sections_again
) ();
6437 unsigned int bfd_indx
;
6439 enum elf32_arm_stub_type stub_type
;
6440 bfd_boolean stub_changed
= FALSE
;
6441 unsigned prev_num_a8_fixes
= num_a8_fixes
;
6444 for (input_bfd
= info
->input_bfds
, bfd_indx
= 0;
6446 input_bfd
= input_bfd
->link
.next
, bfd_indx
++)
6448 Elf_Internal_Shdr
*symtab_hdr
;
6450 Elf_Internal_Sym
*local_syms
= NULL
;
6452 if (!is_arm_elf (input_bfd
)
6453 || (elf_dyn_lib_class (input_bfd
) & DYN_AS_NEEDED
) != 0)
6458 /* We'll need the symbol table in a second. */
6459 symtab_hdr
= &elf_tdata (input_bfd
)->symtab_hdr
;
6460 if (symtab_hdr
->sh_info
== 0)
6463 /* Limit scan of symbols to object file whose profile is
6464 Microcontroller to not hinder performance in the general case. */
6465 if (m_profile
&& first_veneer_scan
)
6467 struct elf_link_hash_entry
**sym_hashes
;
6469 sym_hashes
= elf_sym_hashes (input_bfd
);
6470 if (!cmse_scan (input_bfd
, htab
, out_attr
, sym_hashes
,
6471 &cmse_stub_created
))
6472 goto error_ret_free_local
;
6474 if (cmse_stub_created
!= 0)
6475 stub_changed
= TRUE
;
6478 /* Walk over each section attached to the input bfd. */
6479 for (section
= input_bfd
->sections
;
6481 section
= section
->next
)
6483 Elf_Internal_Rela
*internal_relocs
, *irelaend
, *irela
;
6485 /* If there aren't any relocs, then there's nothing more
6487 if ((section
->flags
& SEC_RELOC
) == 0
6488 || section
->reloc_count
== 0
6489 || (section
->flags
& SEC_CODE
) == 0)
6492 /* If this section is a link-once section that will be
6493 discarded, then don't create any stubs. */
6494 if (section
->output_section
== NULL
6495 || section
->output_section
->owner
!= output_bfd
)
6498 /* Get the relocs. */
6500 = _bfd_elf_link_read_relocs (input_bfd
, section
, NULL
,
6501 NULL
, info
->keep_memory
);
6502 if (internal_relocs
== NULL
)
6503 goto error_ret_free_local
;
6505 /* Now examine each relocation. */
6506 irela
= internal_relocs
;
6507 irelaend
= irela
+ section
->reloc_count
;
6508 for (; irela
< irelaend
; irela
++)
6510 unsigned int r_type
, r_indx
;
6513 bfd_vma destination
;
6514 struct elf32_arm_link_hash_entry
*hash
;
6515 const char *sym_name
;
6516 unsigned char st_type
;
6517 enum arm_st_branch_type branch_type
;
6518 bfd_boolean created_stub
= FALSE
;
6520 r_type
= ELF32_R_TYPE (irela
->r_info
);
6521 r_indx
= ELF32_R_SYM (irela
->r_info
);
6523 if (r_type
>= (unsigned int) R_ARM_max
)
6525 bfd_set_error (bfd_error_bad_value
);
6526 error_ret_free_internal
:
6527 if (elf_section_data (section
)->relocs
== NULL
)
6528 free (internal_relocs
);
6530 error_ret_free_local
:
6531 if (local_syms
!= NULL
6532 && (symtab_hdr
->contents
6533 != (unsigned char *) local_syms
))
6539 if (r_indx
>= symtab_hdr
->sh_info
)
6540 hash
= elf32_arm_hash_entry
6541 (elf_sym_hashes (input_bfd
)
6542 [r_indx
- symtab_hdr
->sh_info
]);
6544 /* Only look for stubs on branch instructions, or
6545 non-relaxed TLSCALL */
6546 if ((r_type
!= (unsigned int) R_ARM_CALL
)
6547 && (r_type
!= (unsigned int) R_ARM_THM_CALL
)
6548 && (r_type
!= (unsigned int) R_ARM_JUMP24
)
6549 && (r_type
!= (unsigned int) R_ARM_THM_JUMP19
)
6550 && (r_type
!= (unsigned int) R_ARM_THM_XPC22
)
6551 && (r_type
!= (unsigned int) R_ARM_THM_JUMP24
)
6552 && (r_type
!= (unsigned int) R_ARM_PLT32
)
6553 && !((r_type
== (unsigned int) R_ARM_TLS_CALL
6554 || r_type
== (unsigned int) R_ARM_THM_TLS_CALL
)
6555 && r_type
== elf32_arm_tls_transition
6556 (info
, r_type
, &hash
->root
)
6557 && ((hash
? hash
->tls_type
6558 : (elf32_arm_local_got_tls_type
6559 (input_bfd
)[r_indx
]))
6560 & GOT_TLS_GDESC
) != 0))
6563 /* Now determine the call target, its name, value,
6570 if (r_type
== (unsigned int) R_ARM_TLS_CALL
6571 || r_type
== (unsigned int) R_ARM_THM_TLS_CALL
)
6573 /* A non-relaxed TLS call. The target is the
6574 plt-resident trampoline and nothing to do
6576 BFD_ASSERT (htab
->tls_trampoline
> 0);
6577 sym_sec
= htab
->root
.splt
;
6578 sym_value
= htab
->tls_trampoline
;
6581 branch_type
= ST_BRANCH_TO_ARM
;
6585 /* It's a local symbol. */
6586 Elf_Internal_Sym
*sym
;
6588 if (local_syms
== NULL
)
6591 = (Elf_Internal_Sym
*) symtab_hdr
->contents
;
6592 if (local_syms
== NULL
)
6594 = bfd_elf_get_elf_syms (input_bfd
, symtab_hdr
,
6595 symtab_hdr
->sh_info
, 0,
6597 if (local_syms
== NULL
)
6598 goto error_ret_free_internal
;
6601 sym
= local_syms
+ r_indx
;
6602 if (sym
->st_shndx
== SHN_UNDEF
)
6603 sym_sec
= bfd_und_section_ptr
;
6604 else if (sym
->st_shndx
== SHN_ABS
)
6605 sym_sec
= bfd_abs_section_ptr
;
6606 else if (sym
->st_shndx
== SHN_COMMON
)
6607 sym_sec
= bfd_com_section_ptr
;
6610 bfd_section_from_elf_index (input_bfd
, sym
->st_shndx
);
6613 /* This is an undefined symbol. It can never
6617 if (ELF_ST_TYPE (sym
->st_info
) != STT_SECTION
)
6618 sym_value
= sym
->st_value
;
6619 destination
= (sym_value
+ irela
->r_addend
6620 + sym_sec
->output_offset
6621 + sym_sec
->output_section
->vma
);
6622 st_type
= ELF_ST_TYPE (sym
->st_info
);
6624 ARM_GET_SYM_BRANCH_TYPE (sym
->st_target_internal
);
6626 = bfd_elf_string_from_elf_section (input_bfd
,
6627 symtab_hdr
->sh_link
,
6632 /* It's an external symbol. */
6633 while (hash
->root
.root
.type
== bfd_link_hash_indirect
6634 || hash
->root
.root
.type
== bfd_link_hash_warning
)
6635 hash
= ((struct elf32_arm_link_hash_entry
*)
6636 hash
->root
.root
.u
.i
.link
);
6638 if (hash
->root
.root
.type
== bfd_link_hash_defined
6639 || hash
->root
.root
.type
== bfd_link_hash_defweak
)
6641 sym_sec
= hash
->root
.root
.u
.def
.section
;
6642 sym_value
= hash
->root
.root
.u
.def
.value
;
6644 struct elf32_arm_link_hash_table
*globals
=
6645 elf32_arm_hash_table (info
);
6647 /* For a destination in a shared library,
6648 use the PLT stub as target address to
6649 decide whether a branch stub is
6652 && globals
->root
.splt
!= NULL
6654 && hash
->root
.plt
.offset
!= (bfd_vma
) -1)
6656 sym_sec
= globals
->root
.splt
;
6657 sym_value
= hash
->root
.plt
.offset
;
6658 if (sym_sec
->output_section
!= NULL
)
6659 destination
= (sym_value
6660 + sym_sec
->output_offset
6661 + sym_sec
->output_section
->vma
);
6663 else if (sym_sec
->output_section
!= NULL
)
6664 destination
= (sym_value
+ irela
->r_addend
6665 + sym_sec
->output_offset
6666 + sym_sec
->output_section
->vma
);
6668 else if ((hash
->root
.root
.type
== bfd_link_hash_undefined
)
6669 || (hash
->root
.root
.type
== bfd_link_hash_undefweak
))
6671 /* For a shared library, use the PLT stub as
6672 target address to decide whether a long
6673 branch stub is needed.
6674 For absolute code, they cannot be handled. */
6675 struct elf32_arm_link_hash_table
*globals
=
6676 elf32_arm_hash_table (info
);
6679 && globals
->root
.splt
!= NULL
6681 && hash
->root
.plt
.offset
!= (bfd_vma
) -1)
6683 sym_sec
= globals
->root
.splt
;
6684 sym_value
= hash
->root
.plt
.offset
;
6685 if (sym_sec
->output_section
!= NULL
)
6686 destination
= (sym_value
6687 + sym_sec
->output_offset
6688 + sym_sec
->output_section
->vma
);
6695 bfd_set_error (bfd_error_bad_value
);
6696 goto error_ret_free_internal
;
6698 st_type
= hash
->root
.type
;
6700 ARM_GET_SYM_BRANCH_TYPE (hash
->root
.target_internal
);
6701 sym_name
= hash
->root
.root
.root
.string
;
6706 bfd_boolean new_stub
;
6707 struct elf32_arm_stub_hash_entry
*stub_entry
;
6709 /* Determine what (if any) linker stub is needed. */
6710 stub_type
= arm_type_of_stub (info
, section
, irela
,
6711 st_type
, &branch_type
,
6712 hash
, destination
, sym_sec
,
6713 input_bfd
, sym_name
);
6714 if (stub_type
== arm_stub_none
)
6717 /* We've either created a stub for this reloc already,
6718 or we are about to. */
6720 elf32_arm_create_stub (htab
, stub_type
, section
, irela
,
6722 (char *) sym_name
, sym_value
,
6723 branch_type
, &new_stub
);
6725 created_stub
= stub_entry
!= NULL
;
6727 goto error_ret_free_internal
;
6731 stub_changed
= TRUE
;
6735 /* Look for relocations which might trigger Cortex-A8
6737 if (htab
->fix_cortex_a8
6738 && (r_type
== (unsigned int) R_ARM_THM_JUMP24
6739 || r_type
== (unsigned int) R_ARM_THM_JUMP19
6740 || r_type
== (unsigned int) R_ARM_THM_CALL
6741 || r_type
== (unsigned int) R_ARM_THM_XPC22
))
6743 bfd_vma from
= section
->output_section
->vma
6744 + section
->output_offset
6747 if ((from
& 0xfff) == 0xffe)
6749 /* Found a candidate. Note we haven't checked the
6750 destination is within 4K here: if we do so (and
6751 don't create an entry in a8_relocs) we can't tell
6752 that a branch should have been relocated when
6754 if (num_a8_relocs
== a8_reloc_table_size
)
6756 a8_reloc_table_size
*= 2;
6757 a8_relocs
= (struct a8_erratum_reloc
*)
6758 bfd_realloc (a8_relocs
,
6759 sizeof (struct a8_erratum_reloc
)
6760 * a8_reloc_table_size
);
6763 a8_relocs
[num_a8_relocs
].from
= from
;
6764 a8_relocs
[num_a8_relocs
].destination
= destination
;
6765 a8_relocs
[num_a8_relocs
].r_type
= r_type
;
6766 a8_relocs
[num_a8_relocs
].branch_type
= branch_type
;
6767 a8_relocs
[num_a8_relocs
].sym_name
= sym_name
;
6768 a8_relocs
[num_a8_relocs
].non_a8_stub
= created_stub
;
6769 a8_relocs
[num_a8_relocs
].hash
= hash
;
6776 /* We're done with the internal relocs, free them. */
6777 if (elf_section_data (section
)->relocs
== NULL
)
6778 free (internal_relocs
);
6781 if (htab
->fix_cortex_a8
)
6783 /* Sort relocs which might apply to Cortex-A8 erratum. */
6784 qsort (a8_relocs
, num_a8_relocs
,
6785 sizeof (struct a8_erratum_reloc
),
6788 /* Scan for branches which might trigger Cortex-A8 erratum. */
6789 if (cortex_a8_erratum_scan (input_bfd
, info
, &a8_fixes
,
6790 &num_a8_fixes
, &a8_fix_table_size
,
6791 a8_relocs
, num_a8_relocs
,
6792 prev_num_a8_fixes
, &stub_changed
)
6794 goto error_ret_free_local
;
6797 if (local_syms
!= NULL
6798 && symtab_hdr
->contents
!= (unsigned char *) local_syms
)
6800 if (!info
->keep_memory
)
6803 symtab_hdr
->contents
= (unsigned char *) local_syms
;
6807 if (first_veneer_scan
6808 && !set_cmse_veneer_addr_from_implib (info
, htab
,
6809 &cmse_stub_created
))
6812 if (prev_num_a8_fixes
!= num_a8_fixes
)
6813 stub_changed
= TRUE
;
6818 /* OK, we've added some stubs. Find out the new size of the
6820 for (stub_sec
= htab
->stub_bfd
->sections
;
6822 stub_sec
= stub_sec
->next
)
6824 /* Ignore non-stub sections. */
6825 if (!strstr (stub_sec
->name
, STUB_SUFFIX
))
6831 /* Add new SG veneers after those already in the input import
6833 for (stub_type
= arm_stub_none
+ 1; stub_type
< max_stub_type
;
6836 bfd_vma
*start_offset_p
;
6837 asection
**stub_sec_p
;
6839 start_offset_p
= arm_new_stubs_start_offset_ptr (htab
, stub_type
);
6840 stub_sec_p
= arm_dedicated_stub_input_section_ptr (htab
, stub_type
);
6841 if (start_offset_p
== NULL
)
6844 BFD_ASSERT (stub_sec_p
!= NULL
);
6845 if (*stub_sec_p
!= NULL
)
6846 (*stub_sec_p
)->size
= *start_offset_p
;
6849 /* Compute stub section size, considering padding. */
6850 bfd_hash_traverse (&htab
->stub_hash_table
, arm_size_one_stub
, htab
);
6851 for (stub_type
= arm_stub_none
+ 1; stub_type
< max_stub_type
;
6855 asection
**stub_sec_p
;
6857 padding
= arm_dedicated_stub_section_padding (stub_type
);
6858 stub_sec_p
= arm_dedicated_stub_input_section_ptr (htab
, stub_type
);
6859 /* Skip if no stub input section or no stub section padding
6861 if ((stub_sec_p
!= NULL
&& *stub_sec_p
== NULL
) || padding
== 0)
6863 /* Stub section padding required but no dedicated section. */
6864 BFD_ASSERT (stub_sec_p
);
6866 size
= (*stub_sec_p
)->size
;
6867 size
= (size
+ padding
- 1) & ~(padding
- 1);
6868 (*stub_sec_p
)->size
= size
;
6871 /* Add Cortex-A8 erratum veneers to stub section sizes too. */
6872 if (htab
->fix_cortex_a8
)
6873 for (i
= 0; i
< num_a8_fixes
; i
++)
6875 stub_sec
= elf32_arm_create_or_find_stub_sec (NULL
,
6876 a8_fixes
[i
].section
, htab
, a8_fixes
[i
].stub_type
);
6878 if (stub_sec
== NULL
)
6882 += find_stub_size_and_template (a8_fixes
[i
].stub_type
, NULL
,
6887 /* Ask the linker to do its stuff. */
6888 (*htab
->layout_sections_again
) ();
6889 first_veneer_scan
= FALSE
;
6892 /* Add stubs for Cortex-A8 erratum fixes now. */
6893 if (htab
->fix_cortex_a8
)
6895 for (i
= 0; i
< num_a8_fixes
; i
++)
6897 struct elf32_arm_stub_hash_entry
*stub_entry
;
6898 char *stub_name
= a8_fixes
[i
].stub_name
;
6899 asection
*section
= a8_fixes
[i
].section
;
6900 unsigned int section_id
= a8_fixes
[i
].section
->id
;
6901 asection
*link_sec
= htab
->stub_group
[section_id
].link_sec
;
6902 asection
*stub_sec
= htab
->stub_group
[section_id
].stub_sec
;
6903 const insn_sequence
*template_sequence
;
6904 int template_size
, size
= 0;
6906 stub_entry
= arm_stub_hash_lookup (&htab
->stub_hash_table
, stub_name
,
6908 if (stub_entry
== NULL
)
6910 _bfd_error_handler (_("%pB: cannot create stub entry %s"),
6911 section
->owner
, stub_name
);
6915 stub_entry
->stub_sec
= stub_sec
;
6916 stub_entry
->stub_offset
= (bfd_vma
) -1;
6917 stub_entry
->id_sec
= link_sec
;
6918 stub_entry
->stub_type
= a8_fixes
[i
].stub_type
;
6919 stub_entry
->source_value
= a8_fixes
[i
].offset
;
6920 stub_entry
->target_section
= a8_fixes
[i
].section
;
6921 stub_entry
->target_value
= a8_fixes
[i
].target_offset
;
6922 stub_entry
->orig_insn
= a8_fixes
[i
].orig_insn
;
6923 stub_entry
->branch_type
= a8_fixes
[i
].branch_type
;
6925 size
= find_stub_size_and_template (a8_fixes
[i
].stub_type
,
6929 stub_entry
->stub_size
= size
;
6930 stub_entry
->stub_template
= template_sequence
;
6931 stub_entry
->stub_template_size
= template_size
;
6934 /* Stash the Cortex-A8 erratum fix array for use later in
6935 elf32_arm_write_section(). */
6936 htab
->a8_erratum_fixes
= a8_fixes
;
6937 htab
->num_a8_erratum_fixes
= num_a8_fixes
;
6941 htab
->a8_erratum_fixes
= NULL
;
6942 htab
->num_a8_erratum_fixes
= 0;
/* NOTE(review): this block is a damaged extraction -- the embedded original
   line numbers jump (6950->6954, 6981->6987, ...), so braces, returns and
   several statements are missing and the block cannot compile as-is.
   Restore the gaps from upstream before building; only comments added here.
   Visible behavior: zero-allocates contents for every stub section of
   htab->stub_bfd whose name contains STUB_SUFFIX, resets SG-veneer section
   sizes from the recorded start offsets, then builds each stub by
   traversing htab->stub_hash_table with arm_build_one_stub (a second
   traversal handles Cortex-A8 stubs when htab->fix_cortex_a8 is set).  */
6947 /* Build all the stubs associated with the current output file.  The
6948 stubs are kept in a hash table attached to the main linker hash
6949 table.  We also set up the .plt entries for statically linked PIC
6950 functions here.  This function is called via arm_elf_finish in the
6954 elf32_arm_build_stubs (struct bfd_link_info
*info
)
6957 struct bfd_hash_table
*table
;
6958 enum elf32_arm_stub_type stub_type
;
6959 struct elf32_arm_link_hash_table
*htab
;
6961 htab
= elf32_arm_hash_table (info
);
6965 for (stub_sec
= htab
->stub_bfd
->sections
;
6967 stub_sec
= stub_sec
->next
)
6971 /* Ignore non-stub sections.  */
6972 if (!strstr (stub_sec
->name
, STUB_SUFFIX
))
6975 /* Allocate memory to hold the linker stubs.  Zeroing the stub sections
6976 must at least be done for stub section requiring padding and for SG
6977 veneers to ensure that a non secure code branching to a removed SG
6978 veneer causes an error.  */
6979 size
= stub_sec
->size
;
6980 stub_sec
->contents
= (unsigned char *) bfd_zalloc (htab
->stub_bfd
, size
);
6981 if (stub_sec
->contents
== NULL
&& size
!= 0)
6987 /* Add new SG veneers after those already in the input import library.  */
6988 for (stub_type
= arm_stub_none
+ 1; stub_type
< max_stub_type
; stub_type
++)
6990 bfd_vma
*start_offset_p
;
6991 asection
**stub_sec_p
;
6993 start_offset_p
= arm_new_stubs_start_offset_ptr (htab
, stub_type
);
6994 stub_sec_p
= arm_dedicated_stub_input_section_ptr (htab
, stub_type
);
6995 if (start_offset_p
== NULL
)
6998 BFD_ASSERT (stub_sec_p
!= NULL
);
6999 if (*stub_sec_p
!= NULL
)
7000 (*stub_sec_p
)->size
= *start_offset_p
;
7003 /* Build the stubs as directed by the stub hash table.  */
7004 table
= &htab
->stub_hash_table
;
7005 bfd_hash_traverse (table
, arm_build_one_stub
, info
);
7006 if (htab
->fix_cortex_a8
)
7008 /* Place the cortex a8 stubs last.  */
7009 htab
->fix_cortex_a8
= -1;
7010 bfd_hash_traverse (table
, arm_build_one_stub
, info
);
/* NOTE(review): fragmentary extraction -- original line numbers jump
   (7019->7021, 7029->7032, ends at 7045 with no visible return), so the
   NAME parameter declaration, NULL-check bodies and the trailing
   free/return are missing; restore from upstream before compiling.
   Visible behavior: builds the Thumb->ARM glue symbol name with sprintf
   on THUMB2ARM_GLUE_ENTRY_NAME, looks it up in the link hash table, and
   on failure formats *error_message via asprintf (falling back to
   bfd_errmsg on asprintf failure).  Only comments added here.  */
7016 /* Locate the Thumb encoded calling stub for NAME.  */
7018 static struct elf_link_hash_entry
*
7019 find_thumb_glue (struct bfd_link_info
*link_info
,
7021 char **error_message
)
7024 struct elf_link_hash_entry
*hash
;
7025 struct elf32_arm_link_hash_table
*hash_table
;
7027 /* We need a pointer to the armelf specific hash table.  */
7028 hash_table
= elf32_arm_hash_table (link_info
);
7029 if (hash_table
== NULL
)
7032 tmp_name
= (char *) bfd_malloc ((bfd_size_type
) strlen (name
)
7033 + strlen (THUMB2ARM_GLUE_ENTRY_NAME
) + 1);
7035 BFD_ASSERT (tmp_name
);
7037 sprintf (tmp_name
, THUMB2ARM_GLUE_ENTRY_NAME
, name
);
7039 hash
= elf_link_hash_lookup
7040 (&(hash_table
)->root
, tmp_name
, FALSE
, FALSE
, TRUE
);
7043 && asprintf (error_message
, _("unable to find %s glue '%s' for '%s'"),
7044 "Thumb", tmp_name
, name
) == -1)
7045 *error_message
= (char *) bfd_errmsg (bfd_error_system_call
);
/* NOTE(review): fragmentary extraction -- mirrors find_thumb_glue above
   but for ARM->Thumb glue (ARM2THUMB_GLUE_ENTRY_NAME).  The NAME
   parameter, NULL-check bodies and the trailing free/return were dropped
   by the extraction (line numbers jump 7055->7057, 7065->7068, end 7081);
   restore from upstream before compiling.  Only comments added here.  */
7052 /* Locate the ARM encoded calling stub for NAME.  */
7054 static struct elf_link_hash_entry
*
7055 find_arm_glue (struct bfd_link_info
*link_info
,
7057 char **error_message
)
7060 struct elf_link_hash_entry
*myh
;
7061 struct elf32_arm_link_hash_table
*hash_table
;
7063 /* We need a pointer to the elfarm specific hash table.  */
7064 hash_table
= elf32_arm_hash_table (link_info
);
7065 if (hash_table
== NULL
)
7068 tmp_name
= (char *) bfd_malloc ((bfd_size_type
) strlen (name
)
7069 + strlen (ARM2THUMB_GLUE_ENTRY_NAME
) + 1);
7071 BFD_ASSERT (tmp_name
);
7073 sprintf (tmp_name
, ARM2THUMB_GLUE_ENTRY_NAME
, name
);
7075 myh
= elf_link_hash_lookup
7076 (&(hash_table
)->root
, tmp_name
, FALSE
, FALSE
, TRUE
);
7079 && asprintf (error_message
, _("unable to find %s glue '%s' for '%s'"),
7080 "ARM", tmp_name
, name
) == -1)
7081 *error_message
= (char *) bfd_errmsg (bfd_error_system_call
);
/* NOTE(review): glue/veneer instruction templates and size macros for the
   interworking stubs emitted below.  Extraction dropped lines inside the
   two explanatory comment blocks (their closing delimiters are missing,
   original numbers jump 7095->7102, 7133->7143), which will swallow the
   intervening #defines until restored from upstream.  The insn32/insn16
   values are raw ARM/Thumb instruction encodings (e.g. 0xe12fff1c is
   "bx r12") -- presumably unchanged from upstream; verify on restore.  */
7088 /* ARM->Thumb glue (static images):
7092 ldr r12, __func_addr
7095 .word func @ behave as if you saw a ARM_32 reloc.
7102 .word func @ behave as if you saw a ARM_32 reloc.
7104 (relocatable images)
7107 ldr r12, __func_offset
7113 #define ARM2THUMB_STATIC_GLUE_SIZE 12
7114 static const insn32 a2t1_ldr_insn
= 0xe59fc000;
7115 static const insn32 a2t2_bx_r12_insn
= 0xe12fff1c;
7116 static const insn32 a2t3_func_addr_insn
= 0x00000001;
7118 #define ARM2THUMB_V5_STATIC_GLUE_SIZE 8
7119 static const insn32 a2t1v5_ldr_insn
= 0xe51ff004;
7120 static const insn32 a2t2v5_func_addr_insn
= 0x00000001;
7122 #define ARM2THUMB_PIC_GLUE_SIZE 16
7123 static const insn32 a2t1p_ldr_insn
= 0xe59fc004;
7124 static const insn32 a2t2p_add_pc_insn
= 0xe08cc00f;
7125 static const insn32 a2t3p_bx_r12_insn
= 0xe12fff1c;
7127 /* Thumb->ARM:  Thumb->(non-interworking aware) ARM
7131 __func_from_thumb: __func_from_thumb:
7133 nop ldr r6, __func_addr
7143 #define THUMB2ARM_GLUE_SIZE 8
7144 static const insn16 t2a1_bx_pc_insn
= 0x4778;
7145 static const insn16 t2a2_noop_insn
= 0x46c0;
7146 static const insn32 t2a3_b_insn
= 0xea000000;
7148 #define VFP11_ERRATUM_VENEER_SIZE 8
7149 #define STM32L4XX_ERRATUM_LDM_VENEER_SIZE 16
7150 #define STM32L4XX_ERRATUM_VLDM_VENEER_SIZE 24
7152 #define ARM_BX_VENEER_SIZE 12
7153 static const insn32 armbx1_tst_insn
= 0xe3100001;
7154 static const insn32 armbx2_moveq_insn
= 0x01a0f000;
7155 static const insn32 armbx3_bx_insn
= 0xe12fff10;
/* NOTE(review): fragmentary extraction -- the return type, the early-exit
   branch for empty sections and the closing brace were dropped (numbers
   jump 7162->7166, 7171->7176, end at 7184); restore from upstream.
   Visible behavior: for a zero-size glue section the section is marked
   SEC_EXCLUDE; otherwise bfd_alloc'd CONTENTS of SIZE bytes are attached
   to the section named NAME (asserted to already have that size).
   Only comments added here.  */
7157 #ifndef ELFARM_NABI_C_INCLUDED
7159 arm_allocate_glue_section_space (bfd
* abfd
, bfd_size_type size
, const char * name
)
7162 bfd_byte
* contents
;
7166 /* Do not include empty glue sections in the output.  */
7169 s
= bfd_get_linker_section (abfd
, name
);
7171 s
->flags
|= SEC_EXCLUDE
;
7176 BFD_ASSERT (abfd
!= NULL
);
7178 s
= bfd_get_linker_section (abfd
, name
);
7179 BFD_ASSERT (s
!= NULL
);
7181 contents
= (bfd_byte
*) bfd_alloc (abfd
, size
);
7183 BFD_ASSERT (s
->size
== size
);
7184 s
->contents
= contents
;
/* NOTE(review): fragmentary extraction -- the return type line, braces and
   final return were dropped (no line 7187, ends at 7213); restore from
   upstream.  Visible behavior: allocates backing storage for all five glue
   sections (ARM2THUMB, THUMB2ARM, VFP11 erratum, STM32L4XX erratum, BX
   veneers) in the glue-owner bfd, using the sizes accumulated in the
   link hash table by the record_* functions below.  Comments only.  */
7188 bfd_elf32_arm_allocate_interworking_sections (struct bfd_link_info
* info
)
7190 struct elf32_arm_link_hash_table
* globals
;
7192 globals
= elf32_arm_hash_table (info
);
7193 BFD_ASSERT (globals
!= NULL
);
7195 arm_allocate_glue_section_space (globals
->bfd_of_glue_owner
,
7196 globals
->arm_glue_size
,
7197 ARM2THUMB_GLUE_SECTION_NAME
);
7199 arm_allocate_glue_section_space (globals
->bfd_of_glue_owner
,
7200 globals
->thumb_glue_size
,
7201 THUMB2ARM_GLUE_SECTION_NAME
);
7203 arm_allocate_glue_section_space (globals
->bfd_of_glue_owner
,
7204 globals
->vfp11_erratum_glue_size
,
7205 VFP11_ERRATUM_VENEER_SECTION_NAME
);
7207 arm_allocate_glue_section_space (globals
->bfd_of_glue_owner
,
7208 globals
->stm32l4xx_erratum_glue_size
,
7209 STM32L4XX_ERRATUM_VENEER_SECTION_NAME
);
7211 arm_allocate_glue_section_space (globals
->bfd_of_glue_owner
,
7212 globals
->bx_glue_size
,
7213 ARM_BX_GLUE_SECTION_NAME
);
/* NOTE(review): fragmentary extraction -- local declarations (tmp_name,
   val, s, size), the already-seen early return, the free of tmp_name and
   the final return were dropped (numbers jump 7225->7228, 7251->7255,
   7283->7286, end 7286); restore from upstream before compiling.
   Visible behavior: for Arm->Thumb interworking to symbol H, creates (or
   finds) a local STT_FUNC glue symbol in the ARM2THUMB glue section at
   the current arm_glue_size (+1 marks "not yet emitted"), then grows
   arm_glue_size by the PIC, BLX or static stub size depending on link
   type.  Only comments added here.  */
7218 /* Allocate space and symbols for calling a Thumb function from Arm mode.
7219 returns the symbol identifying the stub.  */
7221 static struct elf_link_hash_entry
*
7222 record_arm_to_thumb_glue (struct bfd_link_info
* link_info
,
7223 struct elf_link_hash_entry
* h
)
7225 const char * name
= h
->root
.root
.string
;
7228 struct elf_link_hash_entry
* myh
;
7229 struct bfd_link_hash_entry
* bh
;
7230 struct elf32_arm_link_hash_table
* globals
;
7234 globals
= elf32_arm_hash_table (link_info
);
7235 BFD_ASSERT (globals
!= NULL
);
7236 BFD_ASSERT (globals
->bfd_of_glue_owner
!= NULL
);
7238 s
= bfd_get_linker_section
7239 (globals
->bfd_of_glue_owner
, ARM2THUMB_GLUE_SECTION_NAME
);
7241 BFD_ASSERT (s
!= NULL
);
7243 tmp_name
= (char *) bfd_malloc ((bfd_size_type
) strlen (name
)
7244 + strlen (ARM2THUMB_GLUE_ENTRY_NAME
) + 1);
7246 BFD_ASSERT (tmp_name
);
7248 sprintf (tmp_name
, ARM2THUMB_GLUE_ENTRY_NAME
, name
);
7250 myh
= elf_link_hash_lookup
7251 (&(globals
)->root
, tmp_name
, FALSE
, FALSE
, TRUE
);
7255 /* We've already seen this guy.  */
7260 /* The only trick here is using hash_table->arm_glue_size as the value.
7261 Even though the section isn't allocated yet, this is where we will be
7262 putting it.  The +1 on the value marks that the stub has not been
7263 output yet - not that it is a Thumb function.  */
7265 val
= globals
->arm_glue_size
+ 1;
7266 _bfd_generic_link_add_one_symbol (link_info
, globals
->bfd_of_glue_owner
,
7267 tmp_name
, BSF_GLOBAL
, s
, val
,
7268 NULL
, TRUE
, FALSE
, &bh
);
7270 myh
= (struct elf_link_hash_entry
*) bh
;
7271 myh
->type
= ELF_ST_INFO (STB_LOCAL
, STT_FUNC
);
7272 myh
->forced_local
= 1;
7276 if (bfd_link_pic (link_info
)
7277 || globals
->root
.is_relocatable_executable
7278 || globals
->pic_veneer
)
7279 size
= ARM2THUMB_PIC_GLUE_SIZE
;
7280 else if (globals
->use_blx
)
7281 size
= ARM2THUMB_V5_STATIC_GLUE_SIZE
;
7283 size
= ARM2THUMB_STATIC_GLUE_SIZE
;
7286 globals
->arm_glue_size
+= size
;
/* NOTE(review): fragmentary extraction -- the return type, local
   declarations (s, tmp_name, val), the "reg == 15" early return body and
   the closing brace were dropped (numbers jump 7294->7297, 7303->7307,
   7312->7315); restore from upstream before compiling.
   Visible behavior: lazily allocates one ARMv4 "bx rN" veneer per
   register REG in the ARM_BX glue section, records its offset (|2 --
   presumably a marker bit, verify against upstream) in
   globals->bx_glue_offset[reg], and adds a local STT_FUNC symbol for it.
   Only comments added here.  */
7291 /* Allocate space for ARMv4 BX veneers.  */
7294 record_arm_bx_glue (struct bfd_link_info
* link_info
, int reg
)
7297 struct elf32_arm_link_hash_table
*globals
;
7299 struct elf_link_hash_entry
*myh
;
7300 struct bfd_link_hash_entry
*bh
;
7303 /* BX PC does not need a veneer.  */
7307 globals
= elf32_arm_hash_table (link_info
);
7308 BFD_ASSERT (globals
!= NULL
);
7309 BFD_ASSERT (globals
->bfd_of_glue_owner
!= NULL
);
7311 /* Check if this veneer has already been allocated.  */
7312 if (globals
->bx_glue_offset
[reg
])
7315 s
= bfd_get_linker_section
7316 (globals
->bfd_of_glue_owner
, ARM_BX_GLUE_SECTION_NAME
);
7318 BFD_ASSERT (s
!= NULL
);
7320 /* Add symbol for veneer.  */
7322 bfd_malloc ((bfd_size_type
) strlen (ARM_BX_GLUE_ENTRY_NAME
) + 1);
7324 BFD_ASSERT (tmp_name
);
7326 sprintf (tmp_name
, ARM_BX_GLUE_ENTRY_NAME
, reg
);
7328 myh
= elf_link_hash_lookup
7329 (&(globals
)->root
, tmp_name
, FALSE
, FALSE
, FALSE
);
7331 BFD_ASSERT (myh
== NULL
);
7334 val
= globals
->bx_glue_size
;
7335 _bfd_generic_link_add_one_symbol (link_info
, globals
->bfd_of_glue_owner
,
7336 tmp_name
, BSF_FUNCTION
| BSF_LOCAL
, s
, val
,
7337 NULL
, TRUE
, FALSE
, &bh
);
7339 myh
= (struct elf_link_hash_entry
*) bh
;
7340 myh
->type
= ELF_ST_INFO (STB_LOCAL
, STT_FUNC
);
7341 myh
->forced_local
= 1;
7343 s
->size
+= ARM_BX_VENEER_SIZE
;
7344 globals
->bx_glue_offset
[reg
] = globals
->bx_glue_size
| 2;
7345 globals
->bx_glue_size
+= ARM_BX_VENEER_SIZE
;
/* NOTE(review): fragmentary extraction -- the return type, braces and the
   NULL-checks after the two allocations were dropped (numbers jump
   7362->7365, 7372->7377); restore from upstream before compiling.
   Visible behavior: appends a mapping-symbol entry (TYPE is the mapping
   class character, e.g. 'a'/'t'/'d'; VMA its address) to SEC's
   _arm_elf_section_data map, growing the array geometrically (doubling
   mapsize) via bfd_realloc_or_free.  Only comments added here.  */
7349 /* Add an entry to the code/data map for section SEC.  */
7352 elf32_arm_section_map_add (asection
*sec
, char type
, bfd_vma vma
)
7354 struct _arm_elf_section_data
*sec_data
= elf32_arm_section_data (sec
);
7355 unsigned int newidx
;
7357 if (sec_data
->map
== NULL
)
7359 sec_data
->map
= (elf32_arm_section_map
*)
7360 bfd_malloc (sizeof (elf32_arm_section_map
));
7361 sec_data
->mapcount
= 0;
7362 sec_data
->mapsize
= 1;
7365 newidx
= sec_data
->mapcount
++;
7367 if (sec_data
->mapcount
> sec_data
->mapsize
)
7369 sec_data
->mapsize
*= 2;
7370 sec_data
->map
= (elf32_arm_section_map
*)
7371 bfd_realloc_or_free (sec_data
->map
, sec_data
->mapsize
7372 * sizeof (elf32_arm_section_map
));
7377 sec_data
->map
[newidx
].vma
= vma
;
7378 sec_data
->map
[newidx
].type
= type
;
/* NOTE(review): fragmentary extraction -- the return type, the branch_bfd
   parameter declaration, local declarations (s, tmp_name, val), several
   condition lines around the second symbol lookup (7455->7462), argument
   tails of _bfd_generic_link_add_one_symbol calls, the free of tmp_name
   and the final "return val" were dropped; restore from upstream.
   Visible behavior: allocates a VFP11 denorm-erratum veneer in the VFP11
   glue section -- creates the veneer symbol and a "_r" return symbol,
   links the veneer record into both the branch record (branch->u.b.veneer)
   and the section's erratum list, emits a '$a' mapping symbol the first
   time the glue section grows, then bumps the glue size and fix counter.
   Only comments added here.  */
7383 /* Record information about a VFP11 denorm-erratum veneer.  Only ARM-mode
7384 veneers are handled for now.  */
7387 record_vfp11_erratum_veneer (struct bfd_link_info
*link_info
,
7388 elf32_vfp11_erratum_list
*branch
,
7390 asection
*branch_sec
,
7391 unsigned int offset
)
7394 struct elf32_arm_link_hash_table
*hash_table
;
7396 struct elf_link_hash_entry
*myh
;
7397 struct bfd_link_hash_entry
*bh
;
7399 struct _arm_elf_section_data
*sec_data
;
7400 elf32_vfp11_erratum_list
*newerr
;
7402 hash_table
= elf32_arm_hash_table (link_info
);
7403 BFD_ASSERT (hash_table
!= NULL
);
7404 BFD_ASSERT (hash_table
->bfd_of_glue_owner
!= NULL
);
7406 s
= bfd_get_linker_section
7407 (hash_table
->bfd_of_glue_owner
, VFP11_ERRATUM_VENEER_SECTION_NAME
);
7409 sec_data
= elf32_arm_section_data (s
);
7411 BFD_ASSERT (s
!= NULL
);
7413 tmp_name
= (char *) bfd_malloc ((bfd_size_type
) strlen
7414 (VFP11_ERRATUM_VENEER_ENTRY_NAME
) + 10);
7416 BFD_ASSERT (tmp_name
);
7418 sprintf (tmp_name
, VFP11_ERRATUM_VENEER_ENTRY_NAME
,
7419 hash_table
->num_vfp11_fixes
);
7421 myh
= elf_link_hash_lookup
7422 (&(hash_table
)->root
, tmp_name
, FALSE
, FALSE
, FALSE
);
7424 BFD_ASSERT (myh
== NULL
);
7427 val
= hash_table
->vfp11_erratum_glue_size
;
7428 _bfd_generic_link_add_one_symbol (link_info
, hash_table
->bfd_of_glue_owner
,
7429 tmp_name
, BSF_FUNCTION
| BSF_LOCAL
, s
, val
,
7430 NULL
, TRUE
, FALSE
, &bh
);
7432 myh
= (struct elf_link_hash_entry
*) bh
;
7433 myh
->type
= ELF_ST_INFO (STB_LOCAL
, STT_FUNC
);
7434 myh
->forced_local
= 1;
7436 /* Link veneer back to calling location.  */
7437 sec_data
->erratumcount
+= 1;
7438 newerr
= (elf32_vfp11_erratum_list
*)
7439 bfd_zmalloc (sizeof (elf32_vfp11_erratum_list
));
7441 newerr
->type
= VFP11_ERRATUM_ARM_VENEER
;
7443 newerr
->u
.v
.branch
= branch
;
7444 newerr
->u
.v
.id
= hash_table
->num_vfp11_fixes
;
7445 branch
->u
.b
.veneer
= newerr
;
7447 newerr
->next
= sec_data
->erratumlist
;
7448 sec_data
->erratumlist
= newerr
;
7450 /* A symbol for the return from the veneer.  */
7451 sprintf (tmp_name
, VFP11_ERRATUM_VENEER_ENTRY_NAME
"_r",
7452 hash_table
->num_vfp11_fixes
);
7454 myh
= elf_link_hash_lookup
7455 (&(hash_table
)->root
, tmp_name
, FALSE
, FALSE
, FALSE
);
7462 _bfd_generic_link_add_one_symbol (link_info
, branch_bfd
, tmp_name
, BSF_LOCAL
,
7463 branch_sec
, val
, NULL
, TRUE
, FALSE
, &bh
);
7465 myh
= (struct elf_link_hash_entry
*) bh
;
7466 myh
->type
= ELF_ST_INFO (STB_LOCAL
, STT_FUNC
);
7467 myh
->forced_local
= 1;
7471 /* Generate a mapping symbol for the veneer section, and explicitly add an
7472 entry for that symbol to the code/data map for the section.  */
7473 if (hash_table
->vfp11_erratum_glue_size
== 0)
7476 /* FIXME: Creates an ARM symbol.  Thumb mode will need attention if it
7477 ever requires this erratum fix.  */
7478 _bfd_generic_link_add_one_symbol (link_info
,
7479 hash_table
->bfd_of_glue_owner
, "$a",
7480 BSF_LOCAL
, s
, 0, NULL
,
7483 myh
= (struct elf_link_hash_entry
*) bh
;
7484 myh
->type
= ELF_ST_INFO (STB_LOCAL
, STT_NOTYPE
);
7485 myh
->forced_local
= 1;
7487 /* The elf32_arm_init_maps function only cares about symbols from input
7488 BFDs.  We must make a note of this generated mapping symbol
7489 ourselves so that code byteswapping works properly in
7490 elf32_arm_write_section.  */
7491 elf32_arm_section_map_add (s
, 'a', 0);
7494 s
->size
+= VFP11_ERRATUM_VENEER_SIZE
;
7495 hash_table
->vfp11_erratum_glue_size
+= VFP11_ERRATUM_VENEER_SIZE
;
7496 hash_table
->num_vfp11_fixes
++;
7498 /* The offset of the veneer.  */
/* NOTE(review): fragmentary extraction -- same drop pattern as the VFP11
   twin above (return type, branch_bfd parameter, locals s/tmp_name/val,
   the second-lookup condition 7575->7582, free/return tail all missing);
   restore from upstream before compiling.
   Visible behavior: allocates a STM32L4XX erratum veneer (Thumb-only,
   hence the '$t' mapping symbol) of caller-supplied VENEER_SIZE in the
   STM32L4XX glue section, creates the veneer and "_r" return symbols,
   chains the record into the branch and the section's
   stm32l4xx_erratumlist, and bumps the glue size and fix counter.
   Only comments added here.  */
7502 /* Record information about a STM32L4XX STM erratum veneer.  Only THUMB-mode
7503 veneers need to be handled because used only in Cortex-M.  */
7506 record_stm32l4xx_erratum_veneer (struct bfd_link_info
*link_info
,
7507 elf32_stm32l4xx_erratum_list
*branch
,
7509 asection
*branch_sec
,
7510 unsigned int offset
,
7511 bfd_size_type veneer_size
)
7514 struct elf32_arm_link_hash_table
*hash_table
;
7516 struct elf_link_hash_entry
*myh
;
7517 struct bfd_link_hash_entry
*bh
;
7519 struct _arm_elf_section_data
*sec_data
;
7520 elf32_stm32l4xx_erratum_list
*newerr
;
7522 hash_table
= elf32_arm_hash_table (link_info
);
7523 BFD_ASSERT (hash_table
!= NULL
);
7524 BFD_ASSERT (hash_table
->bfd_of_glue_owner
!= NULL
);
7526 s
= bfd_get_linker_section
7527 (hash_table
->bfd_of_glue_owner
, STM32L4XX_ERRATUM_VENEER_SECTION_NAME
);
7529 BFD_ASSERT (s
!= NULL
);
7531 sec_data
= elf32_arm_section_data (s
);
7533 tmp_name
= (char *) bfd_malloc ((bfd_size_type
) strlen
7534 (STM32L4XX_ERRATUM_VENEER_ENTRY_NAME
) + 10);
7536 BFD_ASSERT (tmp_name
);
7538 sprintf (tmp_name
, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME
,
7539 hash_table
->num_stm32l4xx_fixes
);
7541 myh
= elf_link_hash_lookup
7542 (&(hash_table
)->root
, tmp_name
, FALSE
, FALSE
, FALSE
);
7544 BFD_ASSERT (myh
== NULL
);
7547 val
= hash_table
->stm32l4xx_erratum_glue_size
;
7548 _bfd_generic_link_add_one_symbol (link_info
, hash_table
->bfd_of_glue_owner
,
7549 tmp_name
, BSF_FUNCTION
| BSF_LOCAL
, s
, val
,
7550 NULL
, TRUE
, FALSE
, &bh
);
7552 myh
= (struct elf_link_hash_entry
*) bh
;
7553 myh
->type
= ELF_ST_INFO (STB_LOCAL
, STT_FUNC
);
7554 myh
->forced_local
= 1;
7556 /* Link veneer back to calling location.  */
7557 sec_data
->stm32l4xx_erratumcount
+= 1;
7558 newerr
= (elf32_stm32l4xx_erratum_list
*)
7559 bfd_zmalloc (sizeof (elf32_stm32l4xx_erratum_list
));
7561 newerr
->type
= STM32L4XX_ERRATUM_VENEER
;
7563 newerr
->u
.v
.branch
= branch
;
7564 newerr
->u
.v
.id
= hash_table
->num_stm32l4xx_fixes
;
7565 branch
->u
.b
.veneer
= newerr
;
7567 newerr
->next
= sec_data
->stm32l4xx_erratumlist
;
7568 sec_data
->stm32l4xx_erratumlist
= newerr
;
7570 /* A symbol for the return from the veneer.  */
7571 sprintf (tmp_name
, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME
"_r",
7572 hash_table
->num_stm32l4xx_fixes
);
7574 myh
= elf_link_hash_lookup
7575 (&(hash_table
)->root
, tmp_name
, FALSE
, FALSE
, FALSE
);
7582 _bfd_generic_link_add_one_symbol (link_info
, branch_bfd
, tmp_name
, BSF_LOCAL
,
7583 branch_sec
, val
, NULL
, TRUE
, FALSE
, &bh
);
7585 myh
= (struct elf_link_hash_entry
*) bh
;
7586 myh
->type
= ELF_ST_INFO (STB_LOCAL
, STT_FUNC
);
7587 myh
->forced_local
= 1;
7591 /* Generate a mapping symbol for the veneer section, and explicitly add an
7592 entry for that symbol to the code/data map for the section.  */
7593 if (hash_table
->stm32l4xx_erratum_glue_size
== 0)
7596 /* Creates a THUMB symbol since there is no other choice.  */
7597 _bfd_generic_link_add_one_symbol (link_info
,
7598 hash_table
->bfd_of_glue_owner
, "$t",
7599 BSF_LOCAL
, s
, 0, NULL
,
7602 myh
= (struct elf_link_hash_entry
*) bh
;
7603 myh
->type
= ELF_ST_INFO (STB_LOCAL
, STT_NOTYPE
);
7604 myh
->forced_local
= 1;
7606 /* The elf32_arm_init_maps function only cares about symbols from input
7607 BFDs.  We must make a note of this generated mapping symbol
7608 ourselves so that code byteswapping works properly in
7609 elf32_arm_write_section.  */
7610 elf32_arm_section_map_add (s
, 't', 0);
7613 s
->size
+= veneer_size
;
7614 hash_table
->stm32l4xx_erratum_glue_size
+= veneer_size
;
7615 hash_table
->num_stm32l4xx_fixes
++;
7617 /* The offset of the veneer.  */
/* NOTE(review): fragmentary extraction -- the function's return type, the
   early "already exists" return, the sec == NULL half of the failure
   condition, the gc_mark assignment and the returns were dropped (numbers
   jump 7632->7637, 7644 -> end); restore from upstream.
   Visible behavior: creates (if absent) a linker-created read-only code
   section NAME with ARM_GLUE_SECTION_FLAGS and 4-byte (2^2) alignment.
   Only comments added here.  */
7621 #define ARM_GLUE_SECTION_FLAGS \
7622 (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_CODE \
7623 | SEC_READONLY | SEC_LINKER_CREATED)
7625 /* Create a fake section for use by the ARM backend of the linker.  */
7628 arm_make_glue_section (bfd
* abfd
, const char * name
)
7632 sec
= bfd_get_linker_section (abfd
, name
);
7637 sec
= bfd_make_section_anyway_with_flags (abfd
, name
, ARM_GLUE_SECTION_FLAGS
);
7640 || !bfd_set_section_alignment (abfd
, sec
, 2))
7643 /* Set the gc mark to prevent the section from being removed by garbage
7644 collection, despite the fact that no relocs refer to this section.  */
/* NOTE(review): fragmentary extraction -- the return type line and braces
   were dropped; restore from upstream.  Visible behavior: sets the
   file-scope flag elf32_arm_use_long_plt_entry so later PLT generation
   uses the long entry format.  Only comments added here.  */
7650 /* Set size of .plt entries.  This function is called from the
7651 linker scripts in ld/emultempl/{armelf}.em.  */
7654 bfd_elf32_arm_use_long_plt (void)
7656 elf32_arm_use_long_plt_entry
= TRUE
;
/* NOTE(review): fragmentary extraction -- the return type, the relocatable
   early-return body, the final combination of addglue/dostm32l4xx and the
   return were dropped (numbers jump 7673->7676, 7679->7685); restore from
   upstream.  Visible behavior: creates the four interworking/erratum glue
   sections in ABFD, plus the STM32L4XX veneer section when that fix is
   enabled in the hash table; skipped entirely for relocatable links.
   Only comments added here.  */
7659 /* Add the glue sections to ABFD.  This function is called from the
7660 linker scripts in ld/emultempl/{armelf}.em.  */
7663 bfd_elf32_arm_add_glue_sections_to_bfd (bfd
*abfd
,
7664 struct bfd_link_info
*info
)
7666 struct elf32_arm_link_hash_table
*globals
= elf32_arm_hash_table (info
);
7667 bfd_boolean dostm32l4xx
= globals
7668 && globals
->stm32l4xx_fix
!= BFD_ARM_STM32L4XX_FIX_NONE
;
7669 bfd_boolean addglue
;
7671 /* If we are only performing a partial
7672 link do not bother adding the glue.  */
7673 if (bfd_link_relocatable (info
))
7676 addglue
= arm_make_glue_section (abfd
, ARM2THUMB_GLUE_SECTION_NAME
)
7677 && arm_make_glue_section (abfd
, THUMB2ARM_GLUE_SECTION_NAME
)
7678 && arm_make_glue_section (abfd
, VFP11_ERRATUM_VENEER_SECTION_NAME
)
7679 && arm_make_glue_section (abfd
, ARM_BX_GLUE_SECTION_NAME
);
7685 && arm_make_glue_section (abfd
, STM32L4XX_ERRATUM_VENEER_SECTION_NAME
);
/* NOTE(review): fragmentary extraction -- return type, the out_sec local
   declaration, loop braces and early-return bodies were dropped (numbers
   jump 7701->7704, 7709->7712); restore from upstream.
   Visible behavior: for every stub type needing a dedicated output
   section, looks that section up in the output bfd and ORs in SEC_KEEP so
   strip_excluded_output_sections () does not delete it before veneers are
   created.  Skipped for relocatable links.  Only comments added here.  */
7688 /* Mark output sections of veneers needing a dedicated one with SEC_KEEP.  This
7689 ensures they are not marked for deletion by
7690 strip_excluded_output_sections () when veneers are going to be created
7691 later.  Not doing so would trigger assert on empty section size in
7692 lang_size_sections_1 ().  */
7695 bfd_elf32_arm_keep_private_stub_output_sections (struct bfd_link_info
*info
)
7697 enum elf32_arm_stub_type stub_type
;
7699 /* If we are only performing a partial
7700 link do not bother adding the glue.  */
7701 if (bfd_link_relocatable (info
))
7704 for (stub_type
= arm_stub_none
+ 1; stub_type
< max_stub_type
; stub_type
++)
7707 const char *out_sec_name
;
7709 if (!arm_dedicated_stub_output_section_required (stub_type
))
7712 out_sec_name
= arm_dedicated_stub_output_section_name (stub_type
);
7713 out_sec
= bfd_get_section_by_name (info
->output_bfd
, out_sec_name
);
7714 if (out_sec
!= NULL
)
7715 out_sec
->flags
|= SEC_KEEP
;
/* NOTE(review): fragmentary extraction -- return type, early-return bodies
   (relocatable link; glue owner already chosen) and the final return were
   dropped (numbers jump 7730->7733, 7739->7742); restore from upstream.
   Visible behavior: records ABFD as the bfd that will own the glue
   sections (first non-dynamic input bfd wins; asserts ABFD is not
   DYNAMIC).  Only comments added here.  */
7719 /* Select a BFD to be used to hold the sections used by the glue code.
7720 This function is called from the linker scripts in ld/emultempl/
7724 bfd_elf32_arm_get_bfd_for_interworking (bfd
*abfd
, struct bfd_link_info
*info
)
7726 struct elf32_arm_link_hash_table
*globals
;
7728 /* If we are only performing a partial link
7729 do not bother getting a bfd to hold the glue.  */
7730 if (bfd_link_relocatable (info
))
7733 /* Make sure we don't attach the glue sections to a dynamic object.  */
7734 BFD_ASSERT (!(abfd
->flags
& DYNAMIC
));
7736 globals
= elf32_arm_hash_table (info
);
7737 BFD_ASSERT (globals
!= NULL
);
7739 if (globals
->bfd_of_glue_owner
!= NULL
)
7742 /* Save the bfd for later use.  */
7743 globals
->bfd_of_glue_owner
= abfd
;
/* NOTE(review): fragmentary extraction -- the static return type, the
   cpu_arch local declaration, the trailing argument of
   bfd_elf_get_obj_attr_int and the else branch structure were dropped
   (numbers jump 7753->7756, 7759->7763); restore from upstream.
   Visible behavior: enables globals->use_blx from the object's
   Tag_CPU_arch attribute -- with fix_arm1176 only for V6T2 or >V6K,
   otherwise for anything newer than ARMv4T.  Only comments added here.  */
7749 check_use_blx (struct elf32_arm_link_hash_table
*globals
)
7753 cpu_arch
= bfd_elf_get_obj_attr_int (globals
->obfd
, OBJ_ATTR_PROC
,
7756 if (globals
->fix_arm1176
)
7758 if (cpu_arch
== TAG_CPU_ARCH_V6T2
|| cpu_arch
> TAG_CPU_ARCH_V6K
)
7759 globals
->use_blx
= 1;
7763 if (cpu_arch
> TAG_CPU_ARCH_V4T
)
7764 globals
->use_blx
= 1;
/* NOTE(review): fragmentary extraction -- the return type, several locals
   (sec, r_type, reg, h checks), loop braces, `continue`s, the error path
   labels and the final returns were dropped (numbers jump throughout,
   e.g. 7775->7778, 7806->7811, 7863->7867); restore from upstream.
   Visible behavior: pre-allocation reloc scan over every section of ABFD.
   For each R_ARM_V4BX reloc (when fix_v4bx >= 2) it reads the insn to get
   the register and records a BX veneer; for each R_ARM_PC24 against a
   global Thumb-target symbol without a PLT entry it records ARM->Thumb
   glue.  Section contents and relocs are freed afterwards unless cached
   in elf_section_data.  Skipped for relocatable links, non-big-endian
   BE8, or when no glue owner exists.  Only comments added here.  */
7769 bfd_elf32_arm_process_before_allocation (bfd
*abfd
,
7770 struct bfd_link_info
*link_info
)
7772 Elf_Internal_Shdr
*symtab_hdr
;
7773 Elf_Internal_Rela
*internal_relocs
= NULL
;
7774 Elf_Internal_Rela
*irel
, *irelend
;
7775 bfd_byte
*contents
= NULL
;
7778 struct elf32_arm_link_hash_table
*globals
;
7780 /* If we are only performing a partial link do not bother
7781 to construct any glue.  */
7782 if (bfd_link_relocatable (link_info
))
7785 /* Here we have a bfd that is to be included on the link.  We have a
7786 hook to do reloc rummaging, before section sizes are nailed down.  */
7787 globals
= elf32_arm_hash_table (link_info
);
7788 BFD_ASSERT (globals
!= NULL
);
7790 check_use_blx (globals
);
7792 if (globals
->byteswap_code
&& !bfd_big_endian (abfd
))
7794 _bfd_error_handler (_("%pB: BE8 images only valid in big-endian mode"),
7799 /* PR 5398: If we have not decided to include any loadable sections in
7800 the output then we will not have a glue owner bfd.  This is OK, it
7801 just means that there is nothing else for us to do here.  */
7802 if (globals
->bfd_of_glue_owner
== NULL
)
7805 /* Rummage around all the relocs and map the glue vectors.  */
7806 sec
= abfd
->sections
;
7811 for (; sec
!= NULL
; sec
= sec
->next
)
7813 if (sec
->reloc_count
== 0)
7816 if ((sec
->flags
& SEC_EXCLUDE
) != 0)
7819 symtab_hdr
= & elf_symtab_hdr (abfd
);
7821 /* Load the relocs.  */
7823 = _bfd_elf_link_read_relocs (abfd
, sec
, NULL
, NULL
, FALSE
);
7825 if (internal_relocs
== NULL
)
7828 irelend
= internal_relocs
+ sec
->reloc_count
;
7829 for (irel
= internal_relocs
; irel
< irelend
; irel
++)
7832 unsigned long r_index
;
7834 struct elf_link_hash_entry
*h
;
7836 r_type
= ELF32_R_TYPE (irel
->r_info
);
7837 r_index
= ELF32_R_SYM (irel
->r_info
);
7839 /* These are the only relocation types we care about.  */
7840 if ( r_type
!= R_ARM_PC24
7841 && (r_type
!= R_ARM_V4BX
|| globals
->fix_v4bx
< 2))
7844 /* Get the section contents if we haven't done so already.  */
7845 if (contents
== NULL
)
7847 /* Get cached copy if it exists.  */
7848 if (elf_section_data (sec
)->this_hdr
.contents
!= NULL
)
7849 contents
= elf_section_data (sec
)->this_hdr
.contents
;
7852 /* Go get them off disk.  */
7853 if (! bfd_malloc_and_get_section (abfd
, sec
, &contents
))
7858 if (r_type
== R_ARM_V4BX
)
7862 reg
= bfd_get_32 (abfd
, contents
+ irel
->r_offset
) & 0xf;
7863 record_arm_bx_glue (link_info
, reg
);
7867 /* If the relocation is not against a symbol it cannot concern us.  */
7870 /* We don't care about local symbols.  */
7871 if (r_index
< symtab_hdr
->sh_info
)
7874 /* This is an external symbol.  */
7875 r_index
-= symtab_hdr
->sh_info
;
7876 h
= (struct elf_link_hash_entry
*)
7877 elf_sym_hashes (abfd
)[r_index
];
7879 /* If the relocation is against a static symbol it must be within
7880 the current section and so cannot be a cross ARM/Thumb relocation.  */
7884 /* If the call will go through a PLT entry then we do not need
7886 if (globals
->root
.splt
!= NULL
&& h
->plt
.offset
!= (bfd_vma
) -1)
7892 /* This one is a call from arm code.  We need to look up
7893 the target of the call.  If it is a thumb target, we
7895 if (ARM_GET_SYM_BRANCH_TYPE (h
->target_internal
)
7896 == ST_BRANCH_TO_THUMB
)
7897 record_arm_to_thumb_glue (link_info
, h
);
7905 if (contents
!= NULL
7906 && elf_section_data (sec
)->this_hdr
.contents
!= contents
)
7910 if (internal_relocs
!= NULL
7911 && elf_section_data (sec
)->relocs
!= internal_relocs
)
7912 free (internal_relocs
);
7913 internal_relocs
= NULL
;
7919 if (contents
!= NULL
7920 && elf_section_data (sec
)->this_hdr
.contents
!= contents
)
7922 if (internal_relocs
!= NULL
7923 && elf_section_data (sec
)->relocs
!= internal_relocs
)
7924 free (internal_relocs
);
/* NOTE(review): fragmentary extraction -- the return type, early-return
   bodies, the tail arguments of bfd_elf_get_elf_syms, the name local,
   the sec != NULL guard and the final free of isymbuf were dropped
   (numbers jump 7941->7944, 7953->7956, 7963->7967); restore upstream.
   Visible behavior: reads all local symbols of a (non-dynamic) ARM ELF
   input bfd and, for each local ARM special mapping symbol ($a/$t/$d...),
   records (name[1], st_value) in the owning section's code/data map via
   elf32_arm_section_map_add.  Only comments added here.  */
7931 /* Initialise maps of ARM/Thumb/data for input BFDs.  */
7934 bfd_elf32_arm_init_maps (bfd
*abfd
)
7936 Elf_Internal_Sym
*isymbuf
;
7937 Elf_Internal_Shdr
*hdr
;
7938 unsigned int i
, localsyms
;
7940 /* PR 7093: Make sure that we are dealing with an arm elf binary.  */
7941 if (! is_arm_elf (abfd
))
7944 if ((abfd
->flags
& DYNAMIC
) != 0)
7947 hdr
= & elf_symtab_hdr (abfd
);
7948 localsyms
= hdr
->sh_info
;
7950 /* Obtain a buffer full of symbols for this BFD.  The hdr->sh_info field
7951 should contain the number of local symbols, which should come before any
7952 global symbols.  Mapping symbols are always local.  */
7953 isymbuf
= bfd_elf_get_elf_syms (abfd
, hdr
, localsyms
, 0, NULL
, NULL
,
7956 /* No internal symbols read?  Skip this BFD.  */
7957 if (isymbuf
== NULL
)
7960 for (i
= 0; i
< localsyms
; i
++)
7962 Elf_Internal_Sym
*isym
= &isymbuf
[i
];
7963 asection
*sec
= bfd_section_from_elf_index (abfd
, isym
->st_shndx
);
7967 && ELF_ST_BIND (isym
->st_info
) == STB_LOCAL
)
7969 name
= bfd_elf_string_from_elf_section (abfd
,
7970 hdr
->sh_link
, isym
->st_name
);
7972 if (bfd_is_arm_special_symbol_name (name
,
7973 BFD_ARM_SPECIAL_SYM_TYPE_MAP
))
7974 elf32_arm_section_map_add (sec
, name
[1], isym
->st_value
);
/* NOTE(review): fragmentary extraction -- the return type, the early
   return for globals == NULL, the else keyword before the final
   assignment and braces were dropped (numbers jump 7989->7992,
   7998->8000); restore from upstream.  Visible behavior: when the user
   left fix_cortex_a8 at its "auto" value (-1), enables the Cortex-A8
   branch erratum workaround only for ARMv7 with an 'A' (or unspecified)
   profile attribute, else disables it.  Only comments added here.  */
7980 /* Auto-select enabling of Cortex-A8 erratum fix if the user didn't explicitly
7981 say what they wanted.  */
7984 bfd_elf32_arm_set_cortex_a8_fix (bfd
*obfd
, struct bfd_link_info
*link_info
)
7986 struct elf32_arm_link_hash_table
*globals
= elf32_arm_hash_table (link_info
);
7987 obj_attribute
*out_attr
= elf_known_obj_attributes_proc (obfd
);
7989 if (globals
== NULL
)
7992 if (globals
->fix_cortex_a8
== -1)
7994 /* Turn on Cortex-A8 erratum workaround for ARMv7-A.  */
7995 if (out_attr
[Tag_CPU_arch
].i
== TAG_CPU_ARCH_V7
7996 && (out_attr
[Tag_CPU_arch_profile
].i
== 'A'
7997 || out_attr
[Tag_CPU_arch_profile
].i
== 0))
7998 globals
->fix_cortex_a8
= 1;
8000 globals
->fix_cortex_a8
= 0;
/* NOTE(review): fragmentary extraction -- the return type, the early
   return for globals == NULL, the switch braces, `break`s and default
   label were dropped (numbers jump 8011->8013, 8020->8024); restore
   from upstream.  Visible behavior: disables the VFP11 denormal erratum
   workaround on ARMv7+ (warning if the user explicitly asked for it),
   and resolves the "default" setting to "none" on older architectures
   (the user must opt in explicitly).  Only comments added here.  */
8006 bfd_elf32_arm_set_vfp11_fix (bfd
*obfd
, struct bfd_link_info
*link_info
)
8008 struct elf32_arm_link_hash_table
*globals
= elf32_arm_hash_table (link_info
);
8009 obj_attribute
*out_attr
= elf_known_obj_attributes_proc (obfd
);
8011 if (globals
== NULL
)
8013 /* We assume that ARMv7+ does not need the VFP11 denorm erratum fix.  */
8014 if (out_attr
[Tag_CPU_arch
].i
>= TAG_CPU_ARCH_V7
)
8016 switch (globals
->vfp11_fix
)
8018 case BFD_ARM_VFP11_FIX_DEFAULT
:
8019 case BFD_ARM_VFP11_FIX_NONE
:
8020 globals
->vfp11_fix
= BFD_ARM_VFP11_FIX_NONE
;
8024 /* Give a warning, but do as the user requests anyway.  */
8025 _bfd_error_handler (_("%pB: warning: selected VFP11 erratum "
8026 "workaround is not necessary for target architecture"), obfd
);
8029 else if (globals
->vfp11_fix
== BFD_ARM_VFP11_FIX_DEFAULT
)
8030 /* For earlier architectures, we might need the workaround, but do not
8031 enable it by default.  If users is running with broken hardware, they
8032 must enable the erratum fix explicitly.  */
8033 globals
->vfp11_fix
= BFD_ARM_VFP11_FIX_NONE
;
/* NOTE(review): fragmentary extraction -- the return type, the early
   return for globals == NULL, the _bfd_error_handler call's opening line
   (8051) and braces were dropped (numbers jump 8042->8045, 8050->8052);
   restore from upstream.  Visible behavior: warns when the STM32L4XX
   erratum workaround was requested for an architecture other than
   ARMv7E-M / 'M' profile (only Cortex-M4 class parts may need it); the
   setting itself is honored.  Only comments added here.  */
8037 bfd_elf32_arm_set_stm32l4xx_fix (bfd
*obfd
, struct bfd_link_info
*link_info
)
8039 struct elf32_arm_link_hash_table
*globals
= elf32_arm_hash_table (link_info
);
8040 obj_attribute
*out_attr
= elf_known_obj_attributes_proc (obfd
);
8042 if (globals
== NULL
)
8045 /* We assume only Cortex-M4 may require the fix.  */
8046 if (out_attr
[Tag_CPU_arch
].i
!= TAG_CPU_ARCH_V7E_M
8047 || out_attr
[Tag_CPU_arch_profile
].i
!= 'M')
8049 if (globals
->stm32l4xx_fix
!= BFD_ARM_STM32L4XX_FIX_NONE
)
8050 /* Give a warning, but do as the user requests anyway.  */
8052 (_("%pB: warning: selected STM32L4XX erratum "
8053 "workaround is not necessary for target architecture"), obfd
);
/* NOTE(review): fragmentary extraction -- the enum bfd_arm_vfp11_pipe
   body (8058-8064), the function's static return type, the X parameter
   declaration and the if/else around the two returns were dropped; a
   complete copy exists upstream.  Visible behavior: decodes a VFP
   register number from instruction bits -- doubles map to 32..63 using
   the 4-bit RX field plus extension bit X as bit 4, singles to 0..31
   with X as the low bit.  Only comments added here.  */
8057 enum bfd_arm_vfp11_pipe
8065 /* Return a VFP register number.  This is encoded as RX:X for single-precision
8066 registers, or X:RX for double-precision registers, where RX is the group of
8067 four bits in the instruction encoding and X is the single extension bit.
8068 RX and X fields are specified using their lowest (starting) bit.  The return
8071 0...31: single-precision registers s0...s31
8072 32...63: double-precision registers d0...d31.
8074 Although X should be zero for VFP11 (encoding d0...d15 only), we might
8075 encounter VFP3 instructions, so we allow the full range for DP registers.  */
8078 bfd_arm_vfp11_regno (unsigned int insn
, bfd_boolean is_double
, unsigned int rx
,
8082 return (((insn
>> rx
) & 0xf) | (((insn
>> x
) & 1) << 4)) + 32;
8084 return (((insn
>> rx
) & 0xf) << 1) | ((insn
>> x
) & 1);
/* Set bits in *WMASK according to a register number REG as encoded by
   bfd_arm_vfp11_regno().  Ignore d16-d31.  */

static void
bfd_arm_vfp11_write_mask (unsigned int *wmask, unsigned int reg)
{
  if (reg < 32)
    /* Single-precision register s0..s31: one bit per register.  Use an
       unsigned literal: "1 << 31" would left-shift into the sign bit of
       a signed int, which is undefined behaviour.  */
    *wmask |= 1u << reg;
  else if (reg < 48)
    /* Double-precision register d0..d15: mark both SP halves.  "3u"
       avoids signed overflow when the shift count reaches 30.  */
    *wmask |= 3u << ((reg - 32) * 2);
}
8099 /* Return TRUE if WMASK overwrites anything in REGS. */
8102 bfd_arm_vfp11_antidependency (unsigned int wmask
, int *regs
, int numregs
)
8106 for (i
= 0; i
< numregs
; i
++)
8108 unsigned int reg
= regs
[i
];
8110 if (reg
< 32 && (wmask
& (1 << reg
)) != 0)
8118 if ((wmask
& (3 << (reg
* 2))) != 0)
8125 /* In this function, we're interested in two things: finding input registers
8126 for VFP data-processing instructions, and finding the set of registers which
8127 arbitrary VFP instructions may write to. We use a 32-bit unsigned int to
8128 hold the written set, so FLDM etc. are easy to deal with (we're only
8129 interested in 32 SP registers or 16 dp registers, due to the VFP version
8130 implemented by the chip in question). DP registers are marked by setting
8131 both SP registers in the write mask). */
8133 static enum bfd_arm_vfp11_pipe
8134 bfd_arm_vfp11_insn_decode (unsigned int insn
, unsigned int *destmask
, int *regs
,
8137 enum bfd_arm_vfp11_pipe vpipe
= VFP11_BAD
;
8138 bfd_boolean is_double
= ((insn
& 0xf00) == 0xb00) ? 1 : 0;
8140 if ((insn
& 0x0f000e10) == 0x0e000a00) /* A data-processing insn. */
8143 unsigned int fd
= bfd_arm_vfp11_regno (insn
, is_double
, 12, 22);
8144 unsigned int fm
= bfd_arm_vfp11_regno (insn
, is_double
, 0, 5);
8146 pqrs
= ((insn
& 0x00800000) >> 20)
8147 | ((insn
& 0x00300000) >> 19)
8148 | ((insn
& 0x00000040) >> 6);
8152 case 0: /* fmac[sd]. */
8153 case 1: /* fnmac[sd]. */
8154 case 2: /* fmsc[sd]. */
8155 case 3: /* fnmsc[sd]. */
8157 bfd_arm_vfp11_write_mask (destmask
, fd
);
8159 regs
[1] = bfd_arm_vfp11_regno (insn
, is_double
, 16, 7); /* Fn. */
8164 case 4: /* fmul[sd]. */
8165 case 5: /* fnmul[sd]. */
8166 case 6: /* fadd[sd]. */
8167 case 7: /* fsub[sd]. */
8171 case 8: /* fdiv[sd]. */
8174 bfd_arm_vfp11_write_mask (destmask
, fd
);
8175 regs
[0] = bfd_arm_vfp11_regno (insn
, is_double
, 16, 7); /* Fn. */
8180 case 15: /* extended opcode. */
8182 unsigned int extn
= ((insn
>> 15) & 0x1e)
8183 | ((insn
>> 7) & 1);
8187 case 0: /* fcpy[sd]. */
8188 case 1: /* fabs[sd]. */
8189 case 2: /* fneg[sd]. */
8190 case 8: /* fcmp[sd]. */
8191 case 9: /* fcmpe[sd]. */
8192 case 10: /* fcmpz[sd]. */
8193 case 11: /* fcmpez[sd]. */
8194 case 16: /* fuito[sd]. */
8195 case 17: /* fsito[sd]. */
8196 case 24: /* ftoui[sd]. */
8197 case 25: /* ftouiz[sd]. */
8198 case 26: /* ftosi[sd]. */
8199 case 27: /* ftosiz[sd]. */
8200 /* These instructions will not bounce due to underflow. */
8205 case 3: /* fsqrt[sd]. */
8206 /* fsqrt cannot underflow, but it can (perhaps) overwrite
8207 registers to cause the erratum in previous instructions. */
8208 bfd_arm_vfp11_write_mask (destmask
, fd
);
8212 case 15: /* fcvt{ds,sd}. */
8216 bfd_arm_vfp11_write_mask (destmask
, fd
);
8218 /* Only FCVTSD can underflow. */
8219 if ((insn
& 0x100) != 0)
8238 /* Two-register transfer. */
8239 else if ((insn
& 0x0fe00ed0) == 0x0c400a10)
8241 unsigned int fm
= bfd_arm_vfp11_regno (insn
, is_double
, 0, 5);
8243 if ((insn
& 0x100000) == 0)
8246 bfd_arm_vfp11_write_mask (destmask
, fm
);
8249 bfd_arm_vfp11_write_mask (destmask
, fm
);
8250 bfd_arm_vfp11_write_mask (destmask
, fm
+ 1);
8256 else if ((insn
& 0x0e100e00) == 0x0c100a00) /* A load insn. */
8258 int fd
= bfd_arm_vfp11_regno (insn
, is_double
, 12, 22);
8259 unsigned int puw
= ((insn
>> 21) & 0x1) | (((insn
>> 23) & 3) << 1);
8263 case 0: /* Two-reg transfer. We should catch these above. */
8266 case 2: /* fldm[sdx]. */
8270 unsigned int i
, offset
= insn
& 0xff;
8275 for (i
= fd
; i
< fd
+ offset
; i
++)
8276 bfd_arm_vfp11_write_mask (destmask
, i
);
8280 case 4: /* fld[sd]. */
8282 bfd_arm_vfp11_write_mask (destmask
, fd
);
8291 /* Single-register transfer. Note L==0. */
8292 else if ((insn
& 0x0f100e10) == 0x0e000a10)
8294 unsigned int opcode
= (insn
>> 21) & 7;
8295 unsigned int fn
= bfd_arm_vfp11_regno (insn
, is_double
, 16, 7);
8299 case 0: /* fmsr/fmdlr. */
8300 case 1: /* fmdhr. */
8301 /* Mark fmdhr and fmdlr as writing to the whole of the DP
8302 destination register. I don't know if this is exactly right,
8303 but it is the conservative choice. */
8304 bfd_arm_vfp11_write_mask (destmask
, fn
);
8318 static int elf32_arm_compare_mapping (const void * a
, const void * b
);
8321 /* Look for potentially-troublesome code sequences which might trigger the
8322 VFP11 denormal/antidependency erratum. See, e.g., the ARM1136 errata sheet
8323 (available from ARM) for details of the erratum. A short version is
8324 described in ld.texinfo. */
8327 bfd_elf32_arm_vfp11_erratum_scan (bfd
*abfd
, struct bfd_link_info
*link_info
)
8330 bfd_byte
*contents
= NULL
;
8332 int regs
[3], numregs
= 0;
8333 struct elf32_arm_link_hash_table
*globals
= elf32_arm_hash_table (link_info
);
8334 int use_vector
= (globals
->vfp11_fix
== BFD_ARM_VFP11_FIX_VECTOR
);
8336 if (globals
== NULL
)
8339 /* We use a simple FSM to match troublesome VFP11 instruction sequences.
8340 The states transition as follows:
8342 0 -> 1 (vector) or 0 -> 2 (scalar)
8343 A VFP FMAC-pipeline instruction has been seen. Fill
8344 regs[0]..regs[numregs-1] with its input operands. Remember this
8345 instruction in 'first_fmac'.
8348 Any instruction, except for a VFP instruction which overwrites
8353 A VFP instruction has been seen which overwrites any of regs[*].
8354 We must make a veneer! Reset state to 0 before examining next
8358 If we fail to match anything in state 2, reset to state 0 and reset
8359 the instruction pointer to the instruction after 'first_fmac'.
8361 If the VFP11 vector mode is in use, there must be at least two unrelated
8362 instructions between anti-dependent VFP11 instructions to properly avoid
8363 triggering the erratum, hence the use of the extra state 1. */
8365 /* If we are only performing a partial link do not bother
8366 to construct any glue. */
8367 if (bfd_link_relocatable (link_info
))
8370 /* Skip if this bfd does not correspond to an ELF image. */
8371 if (! is_arm_elf (abfd
))
8374 /* We should have chosen a fix type by the time we get here. */
8375 BFD_ASSERT (globals
->vfp11_fix
!= BFD_ARM_VFP11_FIX_DEFAULT
);
8377 if (globals
->vfp11_fix
== BFD_ARM_VFP11_FIX_NONE
)
8380 /* Skip this BFD if it corresponds to an executable or dynamic object. */
8381 if ((abfd
->flags
& (EXEC_P
| DYNAMIC
)) != 0)
8384 for (sec
= abfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
8386 unsigned int i
, span
, first_fmac
= 0, veneer_of_insn
= 0;
8387 struct _arm_elf_section_data
*sec_data
;
8389 /* If we don't have executable progbits, we're not interested in this
8390 section. Also skip if section is to be excluded. */
8391 if (elf_section_type (sec
) != SHT_PROGBITS
8392 || (elf_section_flags (sec
) & SHF_EXECINSTR
) == 0
8393 || (sec
->flags
& SEC_EXCLUDE
) != 0
8394 || sec
->sec_info_type
== SEC_INFO_TYPE_JUST_SYMS
8395 || sec
->output_section
== bfd_abs_section_ptr
8396 || strcmp (sec
->name
, VFP11_ERRATUM_VENEER_SECTION_NAME
) == 0)
8399 sec_data
= elf32_arm_section_data (sec
);
8401 if (sec_data
->mapcount
== 0)
8404 if (elf_section_data (sec
)->this_hdr
.contents
!= NULL
)
8405 contents
= elf_section_data (sec
)->this_hdr
.contents
;
8406 else if (! bfd_malloc_and_get_section (abfd
, sec
, &contents
))
8409 qsort (sec_data
->map
, sec_data
->mapcount
, sizeof (elf32_arm_section_map
),
8410 elf32_arm_compare_mapping
);
8412 for (span
= 0; span
< sec_data
->mapcount
; span
++)
8414 unsigned int span_start
= sec_data
->map
[span
].vma
;
8415 unsigned int span_end
= (span
== sec_data
->mapcount
- 1)
8416 ? sec
->size
: sec_data
->map
[span
+ 1].vma
;
8417 char span_type
= sec_data
->map
[span
].type
;
8419 /* FIXME: Only ARM mode is supported at present. We may need to
8420 support Thumb-2 mode also at some point. */
8421 if (span_type
!= 'a')
8424 for (i
= span_start
; i
< span_end
;)
8426 unsigned int next_i
= i
+ 4;
8427 unsigned int insn
= bfd_big_endian (abfd
)
8428 ? (contents
[i
] << 24)
8429 | (contents
[i
+ 1] << 16)
8430 | (contents
[i
+ 2] << 8)
8432 : (contents
[i
+ 3] << 24)
8433 | (contents
[i
+ 2] << 16)
8434 | (contents
[i
+ 1] << 8)
8436 unsigned int writemask
= 0;
8437 enum bfd_arm_vfp11_pipe vpipe
;
8442 vpipe
= bfd_arm_vfp11_insn_decode (insn
, &writemask
, regs
,
8444 /* I'm assuming the VFP11 erratum can trigger with denorm
8445 operands on either the FMAC or the DS pipeline. This might
8446 lead to slightly overenthusiastic veneer insertion. */
8447 if (vpipe
== VFP11_FMAC
|| vpipe
== VFP11_DS
)
8449 state
= use_vector
? 1 : 2;
8451 veneer_of_insn
= insn
;
8457 int other_regs
[3], other_numregs
;
8458 vpipe
= bfd_arm_vfp11_insn_decode (insn
, &writemask
,
8461 if (vpipe
!= VFP11_BAD
8462 && bfd_arm_vfp11_antidependency (writemask
, regs
,
8472 int other_regs
[3], other_numregs
;
8473 vpipe
= bfd_arm_vfp11_insn_decode (insn
, &writemask
,
8476 if (vpipe
!= VFP11_BAD
8477 && bfd_arm_vfp11_antidependency (writemask
, regs
,
8483 next_i
= first_fmac
+ 4;
8489 abort (); /* Should be unreachable. */
8494 elf32_vfp11_erratum_list
*newerr
=(elf32_vfp11_erratum_list
*)
8495 bfd_zmalloc (sizeof (elf32_vfp11_erratum_list
));
8497 elf32_arm_section_data (sec
)->erratumcount
+= 1;
8499 newerr
->u
.b
.vfp_insn
= veneer_of_insn
;
8504 newerr
->type
= VFP11_ERRATUM_BRANCH_TO_ARM_VENEER
;
8511 record_vfp11_erratum_veneer (link_info
, newerr
, abfd
, sec
,
8516 newerr
->next
= sec_data
->erratumlist
;
8517 sec_data
->erratumlist
= newerr
;
8526 if (contents
!= NULL
8527 && elf_section_data (sec
)->this_hdr
.contents
!= contents
)
8535 if (contents
!= NULL
8536 && elf_section_data (sec
)->this_hdr
.contents
!= contents
)
8542 /* Find virtual-memory addresses for VFP11 erratum veneers and return locations
8543 after sections have been laid out, using specially-named symbols. */
8546 bfd_elf32_arm_vfp11_fix_veneer_locations (bfd
*abfd
,
8547 struct bfd_link_info
*link_info
)
8550 struct elf32_arm_link_hash_table
*globals
;
8553 if (bfd_link_relocatable (link_info
))
8556 /* Skip if this bfd does not correspond to an ELF image. */
8557 if (! is_arm_elf (abfd
))
8560 globals
= elf32_arm_hash_table (link_info
);
8561 if (globals
== NULL
)
8564 tmp_name
= (char *) bfd_malloc ((bfd_size_type
) strlen
8565 (VFP11_ERRATUM_VENEER_ENTRY_NAME
) + 10);
8567 for (sec
= abfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
8569 struct _arm_elf_section_data
*sec_data
= elf32_arm_section_data (sec
);
8570 elf32_vfp11_erratum_list
*errnode
= sec_data
->erratumlist
;
8572 for (; errnode
!= NULL
; errnode
= errnode
->next
)
8574 struct elf_link_hash_entry
*myh
;
8577 switch (errnode
->type
)
8579 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER
:
8580 case VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER
:
8581 /* Find veneer symbol. */
8582 sprintf (tmp_name
, VFP11_ERRATUM_VENEER_ENTRY_NAME
,
8583 errnode
->u
.b
.veneer
->u
.v
.id
);
8585 myh
= elf_link_hash_lookup
8586 (&(globals
)->root
, tmp_name
, FALSE
, FALSE
, TRUE
);
8589 _bfd_error_handler (_("%pB: unable to find %s veneer `%s'"),
8590 abfd
, "VFP11", tmp_name
);
8592 vma
= myh
->root
.u
.def
.section
->output_section
->vma
8593 + myh
->root
.u
.def
.section
->output_offset
8594 + myh
->root
.u
.def
.value
;
8596 errnode
->u
.b
.veneer
->vma
= vma
;
8599 case VFP11_ERRATUM_ARM_VENEER
:
8600 case VFP11_ERRATUM_THUMB_VENEER
:
8601 /* Find return location. */
8602 sprintf (tmp_name
, VFP11_ERRATUM_VENEER_ENTRY_NAME
"_r",
8605 myh
= elf_link_hash_lookup
8606 (&(globals
)->root
, tmp_name
, FALSE
, FALSE
, TRUE
);
8609 _bfd_error_handler (_("%pB: unable to find %s veneer `%s'"),
8610 abfd
, "VFP11", tmp_name
);
8612 vma
= myh
->root
.u
.def
.section
->output_section
->vma
8613 + myh
->root
.u
.def
.section
->output_offset
8614 + myh
->root
.u
.def
.value
;
8616 errnode
->u
.v
.branch
->vma
= vma
;
8628 /* Find virtual-memory addresses for STM32L4XX erratum veneers and
8629 return locations after sections have been laid out, using
8630 specially-named symbols. */
8633 bfd_elf32_arm_stm32l4xx_fix_veneer_locations (bfd
*abfd
,
8634 struct bfd_link_info
*link_info
)
8637 struct elf32_arm_link_hash_table
*globals
;
8640 if (bfd_link_relocatable (link_info
))
8643 /* Skip if this bfd does not correspond to an ELF image. */
8644 if (! is_arm_elf (abfd
))
8647 globals
= elf32_arm_hash_table (link_info
);
8648 if (globals
== NULL
)
8651 tmp_name
= (char *) bfd_malloc ((bfd_size_type
) strlen
8652 (STM32L4XX_ERRATUM_VENEER_ENTRY_NAME
) + 10);
8654 for (sec
= abfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
8656 struct _arm_elf_section_data
*sec_data
= elf32_arm_section_data (sec
);
8657 elf32_stm32l4xx_erratum_list
*errnode
= sec_data
->stm32l4xx_erratumlist
;
8659 for (; errnode
!= NULL
; errnode
= errnode
->next
)
8661 struct elf_link_hash_entry
*myh
;
8664 switch (errnode
->type
)
8666 case STM32L4XX_ERRATUM_BRANCH_TO_VENEER
:
8667 /* Find veneer symbol. */
8668 sprintf (tmp_name
, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME
,
8669 errnode
->u
.b
.veneer
->u
.v
.id
);
8671 myh
= elf_link_hash_lookup
8672 (&(globals
)->root
, tmp_name
, FALSE
, FALSE
, TRUE
);
8675 _bfd_error_handler (_("%pB: unable to find %s veneer `%s'"),
8676 abfd
, "STM32L4XX", tmp_name
);
8678 vma
= myh
->root
.u
.def
.section
->output_section
->vma
8679 + myh
->root
.u
.def
.section
->output_offset
8680 + myh
->root
.u
.def
.value
;
8682 errnode
->u
.b
.veneer
->vma
= vma
;
8685 case STM32L4XX_ERRATUM_VENEER
:
8686 /* Find return location. */
8687 sprintf (tmp_name
, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME
"_r",
8690 myh
= elf_link_hash_lookup
8691 (&(globals
)->root
, tmp_name
, FALSE
, FALSE
, TRUE
);
8694 _bfd_error_handler (_("%pB: unable to find %s veneer `%s'"),
8695 abfd
, "STM32L4XX", tmp_name
);
8697 vma
= myh
->root
.u
.def
.section
->output_section
->vma
8698 + myh
->root
.u
.def
.section
->output_offset
8699 + myh
->root
.u
.def
.value
;
8701 errnode
->u
.v
.branch
->vma
= vma
;
8713 static inline bfd_boolean
8714 is_thumb2_ldmia (const insn32 insn
)
8716 /* Encoding T2: LDM<c>.W <Rn>{!},<registers>
8717 1110 - 1000 - 10W1 - rrrr - PM (0) l - llll - llll - llll. */
8718 return (insn
& 0xffd02000) == 0xe8900000;
8721 static inline bfd_boolean
8722 is_thumb2_ldmdb (const insn32 insn
)
8724 /* Encoding T1: LDMDB<c> <Rn>{!},<registers>
8725 1110 - 1001 - 00W1 - rrrr - PM (0) l - llll - llll - llll. */
8726 return (insn
& 0xffd02000) == 0xe9100000;
8729 static inline bfd_boolean
8730 is_thumb2_vldm (const insn32 insn
)
8732 /* A6.5 Extension register load or store instruction
8734 We look for SP 32-bit and DP 64-bit registers.
8735 Encoding T1 VLDM{mode}<c> <Rn>{!}, <list>
8736 <list> is consecutive 64-bit registers
8737 1110 - 110P - UDW1 - rrrr - vvvv - 1011 - iiii - iiii
8738 Encoding T2 VLDM{mode}<c> <Rn>{!}, <list>
8739 <list> is consecutive 32-bit registers
8740 1110 - 110P - UDW1 - rrrr - vvvv - 1010 - iiii - iiii
8741 if P==0 && U==1 && W==1 && Rn=1101 VPOP
8742 if PUW=010 || PUW=011 || PUW=101 VLDM. */
8744 (((insn
& 0xfe100f00) == 0xec100b00) ||
8745 ((insn
& 0xfe100f00) == 0xec100a00))
8746 && /* (IA without !). */
8747 (((((insn
<< 7) >> 28) & 0xd) == 0x4)
8748 /* (IA with !), includes VPOP (when reg number is SP). */
8749 || ((((insn
<< 7) >> 28) & 0xd) == 0x5)
8751 || ((((insn
<< 7) >> 28) & 0xd) == 0x9));
8754 /* STM STM32L4XX erratum : This function assumes that it receives an LDM or
8756 - computes the number and the mode of memory accesses
8757 - decides if the replacement should be done:
8758 . replaces only if > 8-word accesses
8759 . or (testing purposes only) replaces all accesses. */
8762 stm32l4xx_need_create_replacing_stub (const insn32 insn
,
8763 bfd_arm_stm32l4xx_fix stm32l4xx_fix
)
8767 /* The field encoding the register list is the same for both LDMIA
8768 and LDMDB encodings. */
8769 if (is_thumb2_ldmia (insn
) || is_thumb2_ldmdb (insn
))
8770 nb_words
= elf32_arm_popcount (insn
& 0x0000ffff);
8771 else if (is_thumb2_vldm (insn
))
8772 nb_words
= (insn
& 0xff);
8774 /* DEFAULT mode accounts for the real bug condition situation,
8775 ALL mode inserts stubs for each LDM/VLDM instruction (testing). */
8777 (stm32l4xx_fix
== BFD_ARM_STM32L4XX_FIX_DEFAULT
) ? nb_words
> 8 :
8778 (stm32l4xx_fix
== BFD_ARM_STM32L4XX_FIX_ALL
) ? TRUE
: FALSE
;
8781 /* Look for potentially-troublesome code sequences which might trigger
8782 the STM STM32L4XX erratum. */
8785 bfd_elf32_arm_stm32l4xx_erratum_scan (bfd
*abfd
,
8786 struct bfd_link_info
*link_info
)
8789 bfd_byte
*contents
= NULL
;
8790 struct elf32_arm_link_hash_table
*globals
= elf32_arm_hash_table (link_info
);
8792 if (globals
== NULL
)
8795 /* If we are only performing a partial link do not bother
8796 to construct any glue. */
8797 if (bfd_link_relocatable (link_info
))
8800 /* Skip if this bfd does not correspond to an ELF image. */
8801 if (! is_arm_elf (abfd
))
8804 if (globals
->stm32l4xx_fix
== BFD_ARM_STM32L4XX_FIX_NONE
)
8807 /* Skip this BFD if it corresponds to an executable or dynamic object. */
8808 if ((abfd
->flags
& (EXEC_P
| DYNAMIC
)) != 0)
8811 for (sec
= abfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
8813 unsigned int i
, span
;
8814 struct _arm_elf_section_data
*sec_data
;
8816 /* If we don't have executable progbits, we're not interested in this
8817 section. Also skip if section is to be excluded. */
8818 if (elf_section_type (sec
) != SHT_PROGBITS
8819 || (elf_section_flags (sec
) & SHF_EXECINSTR
) == 0
8820 || (sec
->flags
& SEC_EXCLUDE
) != 0
8821 || sec
->sec_info_type
== SEC_INFO_TYPE_JUST_SYMS
8822 || sec
->output_section
== bfd_abs_section_ptr
8823 || strcmp (sec
->name
, STM32L4XX_ERRATUM_VENEER_SECTION_NAME
) == 0)
8826 sec_data
= elf32_arm_section_data (sec
);
8828 if (sec_data
->mapcount
== 0)
8831 if (elf_section_data (sec
)->this_hdr
.contents
!= NULL
)
8832 contents
= elf_section_data (sec
)->this_hdr
.contents
;
8833 else if (! bfd_malloc_and_get_section (abfd
, sec
, &contents
))
8836 qsort (sec_data
->map
, sec_data
->mapcount
, sizeof (elf32_arm_section_map
),
8837 elf32_arm_compare_mapping
);
8839 for (span
= 0; span
< sec_data
->mapcount
; span
++)
8841 unsigned int span_start
= sec_data
->map
[span
].vma
;
8842 unsigned int span_end
= (span
== sec_data
->mapcount
- 1)
8843 ? sec
->size
: sec_data
->map
[span
+ 1].vma
;
8844 char span_type
= sec_data
->map
[span
].type
;
8845 int itblock_current_pos
= 0;
8847 /* Only Thumb2 mode need be supported with this CM4 specific
8848 code, we should not encounter any arm mode eg span_type
8850 if (span_type
!= 't')
8853 for (i
= span_start
; i
< span_end
;)
8855 unsigned int insn
= bfd_get_16 (abfd
, &contents
[i
]);
8856 bfd_boolean insn_32bit
= FALSE
;
8857 bfd_boolean is_ldm
= FALSE
;
8858 bfd_boolean is_vldm
= FALSE
;
8859 bfd_boolean is_not_last_in_it_block
= FALSE
;
8861 /* The first 16-bits of all 32-bit thumb2 instructions start
8862 with opcode[15..13]=0b111 and the encoded op1 can be anything
8863 except opcode[12..11]!=0b00.
8864 See 32-bit Thumb instruction encoding. */
8865 if ((insn
& 0xe000) == 0xe000 && (insn
& 0x1800) != 0x0000)
8868 /* Compute the predicate that tells if the instruction
8869 is concerned by the IT block
8870 - Creates an error if there is a ldm that is not
8871 last in the IT block thus cannot be replaced
8872 - Otherwise we can create a branch at the end of the
8873 IT block, it will be controlled naturally by IT
8874 with the proper pseudo-predicate
8875 - So the only interesting predicate is the one that
8876 tells that we are not on the last item of an IT
8878 if (itblock_current_pos
!= 0)
8879 is_not_last_in_it_block
= !!--itblock_current_pos
;
8883 /* Load the rest of the insn (in manual-friendly order). */
8884 insn
= (insn
<< 16) | bfd_get_16 (abfd
, &contents
[i
+ 2]);
8885 is_ldm
= is_thumb2_ldmia (insn
) || is_thumb2_ldmdb (insn
);
8886 is_vldm
= is_thumb2_vldm (insn
);
8888 /* Veneers are created for (v)ldm depending on
8889 option flags and memory accesses conditions; but
8890 if the instruction is not the last instruction of
8891 an IT block, we cannot create a jump there, so we
8893 if ((is_ldm
|| is_vldm
)
8894 && stm32l4xx_need_create_replacing_stub
8895 (insn
, globals
->stm32l4xx_fix
))
8897 if (is_not_last_in_it_block
)
8900 /* xgettext:c-format */
8901 (_("%pB(%pA+%#x): error: multiple load detected"
8902 " in non-last IT block instruction:"
8903 " STM32L4XX veneer cannot be generated; "
8904 "use gcc option -mrestrict-it to generate"
8905 " only one instruction per IT block"),
8910 elf32_stm32l4xx_erratum_list
*newerr
=
8911 (elf32_stm32l4xx_erratum_list
*)
8913 (sizeof (elf32_stm32l4xx_erratum_list
));
8915 elf32_arm_section_data (sec
)
8916 ->stm32l4xx_erratumcount
+= 1;
8917 newerr
->u
.b
.insn
= insn
;
8918 /* We create only thumb branches. */
8920 STM32L4XX_ERRATUM_BRANCH_TO_VENEER
;
8921 record_stm32l4xx_erratum_veneer
8922 (link_info
, newerr
, abfd
, sec
,
8925 STM32L4XX_ERRATUM_LDM_VENEER_SIZE
:
8926 STM32L4XX_ERRATUM_VLDM_VENEER_SIZE
);
8928 newerr
->next
= sec_data
->stm32l4xx_erratumlist
;
8929 sec_data
->stm32l4xx_erratumlist
= newerr
;
8936 IT blocks are only encoded in T1
8937 Encoding T1: IT{x{y{z}}} <firstcond>
8938 1 0 1 1 - 1 1 1 1 - firstcond - mask
8939 if mask = '0000' then see 'related encodings'
8940 We don't deal with UNPREDICTABLE, just ignore these.
8941 There can be no nested IT blocks so an IT block
8942 is naturally a new one for which it is worth
8943 computing its size. */
8944 bfd_boolean is_newitblock
= ((insn
& 0xff00) == 0xbf00)
8945 && ((insn
& 0x000f) != 0x0000);
8946 /* If we have a new IT block we compute its size. */
8949 /* Compute the number of instructions controlled
8950 by the IT block, it will be used to decide
8951 whether we are inside an IT block or not. */
8952 unsigned int mask
= insn
& 0x000f;
8953 itblock_current_pos
= 4 - ctz (mask
);
8957 i
+= insn_32bit
? 4 : 2;
8961 if (contents
!= NULL
8962 && elf_section_data (sec
)->this_hdr
.contents
!= contents
)
8970 if (contents
!= NULL
8971 && elf_section_data (sec
)->this_hdr
.contents
!= contents
)
8977 /* Set target relocation values needed during linking. */
8980 bfd_elf32_arm_set_target_params (struct bfd
*output_bfd
,
8981 struct bfd_link_info
*link_info
,
8982 struct elf32_arm_params
*params
)
8984 struct elf32_arm_link_hash_table
*globals
;
8986 globals
= elf32_arm_hash_table (link_info
);
8987 if (globals
== NULL
)
8990 globals
->target1_is_rel
= params
->target1_is_rel
;
8991 if (globals
->fdpic_p
)
8992 globals
->target2_reloc
= R_ARM_GOT32
;
8993 else if (strcmp (params
->target2_type
, "rel") == 0)
8994 globals
->target2_reloc
= R_ARM_REL32
;
8995 else if (strcmp (params
->target2_type
, "abs") == 0)
8996 globals
->target2_reloc
= R_ARM_ABS32
;
8997 else if (strcmp (params
->target2_type
, "got-rel") == 0)
8998 globals
->target2_reloc
= R_ARM_GOT_PREL
;
9001 _bfd_error_handler (_("invalid TARGET2 relocation type '%s'"),
9002 params
->target2_type
);
9004 globals
->fix_v4bx
= params
->fix_v4bx
;
9005 globals
->use_blx
|= params
->use_blx
;
9006 globals
->vfp11_fix
= params
->vfp11_denorm_fix
;
9007 globals
->stm32l4xx_fix
= params
->stm32l4xx_fix
;
9008 if (globals
->fdpic_p
)
9009 globals
->pic_veneer
= 1;
9011 globals
->pic_veneer
= params
->pic_veneer
;
9012 globals
->fix_cortex_a8
= params
->fix_cortex_a8
;
9013 globals
->fix_arm1176
= params
->fix_arm1176
;
9014 globals
->cmse_implib
= params
->cmse_implib
;
9015 globals
->in_implib_bfd
= params
->in_implib_bfd
;
9017 BFD_ASSERT (is_arm_elf (output_bfd
));
9018 elf_arm_tdata (output_bfd
)->no_enum_size_warning
9019 = params
->no_enum_size_warning
;
9020 elf_arm_tdata (output_bfd
)->no_wchar_size_warning
9021 = params
->no_wchar_size_warning
;
9024 /* Replace the target offset of a Thumb bl or b.w instruction. */
9027 insert_thumb_branch (bfd
*abfd
, long int offset
, bfd_byte
*insn
)
9033 BFD_ASSERT ((offset
& 1) == 0);
9035 upper
= bfd_get_16 (abfd
, insn
);
9036 lower
= bfd_get_16 (abfd
, insn
+ 2);
9037 reloc_sign
= (offset
< 0) ? 1 : 0;
9038 upper
= (upper
& ~(bfd_vma
) 0x7ff)
9039 | ((offset
>> 12) & 0x3ff)
9040 | (reloc_sign
<< 10);
9041 lower
= (lower
& ~(bfd_vma
) 0x2fff)
9042 | (((!((offset
>> 23) & 1)) ^ reloc_sign
) << 13)
9043 | (((!((offset
>> 22) & 1)) ^ reloc_sign
) << 11)
9044 | ((offset
>> 1) & 0x7ff);
9045 bfd_put_16 (abfd
, upper
, insn
);
9046 bfd_put_16 (abfd
, lower
, insn
+ 2);
9049 /* Thumb code calling an ARM function. */
9052 elf32_thumb_to_arm_stub (struct bfd_link_info
* info
,
9056 asection
* input_section
,
9057 bfd_byte
* hit_data
,
9060 bfd_signed_vma addend
,
9062 char **error_message
)
9066 long int ret_offset
;
9067 struct elf_link_hash_entry
* myh
;
9068 struct elf32_arm_link_hash_table
* globals
;
9070 myh
= find_thumb_glue (info
, name
, error_message
);
9074 globals
= elf32_arm_hash_table (info
);
9075 BFD_ASSERT (globals
!= NULL
);
9076 BFD_ASSERT (globals
->bfd_of_glue_owner
!= NULL
);
9078 my_offset
= myh
->root
.u
.def
.value
;
9080 s
= bfd_get_linker_section (globals
->bfd_of_glue_owner
,
9081 THUMB2ARM_GLUE_SECTION_NAME
);
9083 BFD_ASSERT (s
!= NULL
);
9084 BFD_ASSERT (s
->contents
!= NULL
);
9085 BFD_ASSERT (s
->output_section
!= NULL
);
9087 if ((my_offset
& 0x01) == 0x01)
9090 && sym_sec
->owner
!= NULL
9091 && !INTERWORK_FLAG (sym_sec
->owner
))
9094 (_("%pB(%s): warning: interworking not enabled;"
9095 " first occurrence: %pB: %s call to %s"),
9096 sym_sec
->owner
, name
, input_bfd
, "Thumb", "ARM");
9102 myh
->root
.u
.def
.value
= my_offset
;
9104 put_thumb_insn (globals
, output_bfd
, (bfd_vma
) t2a1_bx_pc_insn
,
9105 s
->contents
+ my_offset
);
9107 put_thumb_insn (globals
, output_bfd
, (bfd_vma
) t2a2_noop_insn
,
9108 s
->contents
+ my_offset
+ 2);
9111 /* Address of destination of the stub. */
9112 ((bfd_signed_vma
) val
)
9114 /* Offset from the start of the current section
9115 to the start of the stubs. */
9117 /* Offset of the start of this stub from the start of the stubs. */
9119 /* Address of the start of the current section. */
9120 + s
->output_section
->vma
)
9121 /* The branch instruction is 4 bytes into the stub. */
9123 /* ARM branches work from the pc of the instruction + 8. */
9126 put_arm_insn (globals
, output_bfd
,
9127 (bfd_vma
) t2a3_b_insn
| ((ret_offset
>> 2) & 0x00FFFFFF),
9128 s
->contents
+ my_offset
+ 4);
9131 BFD_ASSERT (my_offset
<= globals
->thumb_glue_size
);
9133 /* Now go back and fix up the original BL insn to point to here. */
9135 /* Address of where the stub is located. */
9136 (s
->output_section
->vma
+ s
->output_offset
+ my_offset
)
9137 /* Address of where the BL is located. */
9138 - (input_section
->output_section
->vma
+ input_section
->output_offset
9140 /* Addend in the relocation. */
9142 /* Biassing for PC-relative addressing. */
9145 insert_thumb_branch (input_bfd
, ret_offset
, hit_data
- input_section
->vma
);
9150 /* Populate an Arm to Thumb stub. Returns the stub symbol. */
9152 static struct elf_link_hash_entry
*
9153 elf32_arm_create_thumb_stub (struct bfd_link_info
* info
,
9160 char ** error_message
)
9163 long int ret_offset
;
9164 struct elf_link_hash_entry
* myh
;
9165 struct elf32_arm_link_hash_table
* globals
;
9167 myh
= find_arm_glue (info
, name
, error_message
);
9171 globals
= elf32_arm_hash_table (info
);
9172 BFD_ASSERT (globals
!= NULL
);
9173 BFD_ASSERT (globals
->bfd_of_glue_owner
!= NULL
);
9175 my_offset
= myh
->root
.u
.def
.value
;
9177 if ((my_offset
& 0x01) == 0x01)
9180 && sym_sec
->owner
!= NULL
9181 && !INTERWORK_FLAG (sym_sec
->owner
))
9184 (_("%pB(%s): warning: interworking not enabled;"
9185 " first occurrence: %pB: %s call to %s"),
9186 sym_sec
->owner
, name
, input_bfd
, "ARM", "Thumb");
9190 myh
->root
.u
.def
.value
= my_offset
;
9192 if (bfd_link_pic (info
)
9193 || globals
->root
.is_relocatable_executable
9194 || globals
->pic_veneer
)
9196 /* For relocatable objects we can't use absolute addresses,
9197 so construct the address from a relative offset. */
9198 /* TODO: If the offset is small it's probably worth
9199 constructing the address with adds. */
9200 put_arm_insn (globals
, output_bfd
, (bfd_vma
) a2t1p_ldr_insn
,
9201 s
->contents
+ my_offset
);
9202 put_arm_insn (globals
, output_bfd
, (bfd_vma
) a2t2p_add_pc_insn
,
9203 s
->contents
+ my_offset
+ 4);
9204 put_arm_insn (globals
, output_bfd
, (bfd_vma
) a2t3p_bx_r12_insn
,
9205 s
->contents
+ my_offset
+ 8);
9206 /* Adjust the offset by 4 for the position of the add,
9207 and 8 for the pipeline offset. */
9208 ret_offset
= (val
- (s
->output_offset
9209 + s
->output_section
->vma
9212 bfd_put_32 (output_bfd
, ret_offset
,
9213 s
->contents
+ my_offset
+ 12);
9215 else if (globals
->use_blx
)
9217 put_arm_insn (globals
, output_bfd
, (bfd_vma
) a2t1v5_ldr_insn
,
9218 s
->contents
+ my_offset
);
9220 /* It's a thumb address. Add the low order bit. */
9221 bfd_put_32 (output_bfd
, val
| a2t2v5_func_addr_insn
,
9222 s
->contents
+ my_offset
+ 4);
9226 put_arm_insn (globals
, output_bfd
, (bfd_vma
) a2t1_ldr_insn
,
9227 s
->contents
+ my_offset
);
9229 put_arm_insn (globals
, output_bfd
, (bfd_vma
) a2t2_bx_r12_insn
,
9230 s
->contents
+ my_offset
+ 4);
9232 /* It's a thumb address. Add the low order bit. */
9233 bfd_put_32 (output_bfd
, val
| a2t3_func_addr_insn
,
9234 s
->contents
+ my_offset
+ 8);
9240 BFD_ASSERT (my_offset
<= globals
->arm_glue_size
);
9245 /* Arm code calling a Thumb function. */
9248 elf32_arm_to_thumb_stub (struct bfd_link_info
* info
,
9252 asection
* input_section
,
9253 bfd_byte
* hit_data
,
9256 bfd_signed_vma addend
,
9258 char **error_message
)
9260 unsigned long int tmp
;
9263 long int ret_offset
;
9264 struct elf_link_hash_entry
* myh
;
9265 struct elf32_arm_link_hash_table
* globals
;
9267 globals
= elf32_arm_hash_table (info
);
9268 BFD_ASSERT (globals
!= NULL
);
9269 BFD_ASSERT (globals
->bfd_of_glue_owner
!= NULL
);
9271 s
= bfd_get_linker_section (globals
->bfd_of_glue_owner
,
9272 ARM2THUMB_GLUE_SECTION_NAME
);
9273 BFD_ASSERT (s
!= NULL
);
9274 BFD_ASSERT (s
->contents
!= NULL
);
9275 BFD_ASSERT (s
->output_section
!= NULL
);
9277 myh
= elf32_arm_create_thumb_stub (info
, name
, input_bfd
, output_bfd
,
9278 sym_sec
, val
, s
, error_message
);
9282 my_offset
= myh
->root
.u
.def
.value
;
9283 tmp
= bfd_get_32 (input_bfd
, hit_data
);
9284 tmp
= tmp
& 0xFF000000;
9286 /* Somehow these are both 4 too far, so subtract 8. */
9287 ret_offset
= (s
->output_offset
9289 + s
->output_section
->vma
9290 - (input_section
->output_offset
9291 + input_section
->output_section
->vma
9295 tmp
= tmp
| ((ret_offset
>> 2) & 0x00FFFFFF);
9297 bfd_put_32 (output_bfd
, (bfd_vma
) tmp
, hit_data
- input_section
->vma
);
9302 /* Populate Arm stub for an exported Thumb function. */
/* NOTE(review): garbled extraction -- the return type, locals
   S/SEC/VAL, several braces, the final &error_message argument of the
   trailing call and the return statement were dropped (the embedded
   upstream numbering jumps).  This is an elf_link_hash_traverse
   callback: H is the symbol being visited and INF is really the
   bfd_link_info, as the cast below shows.  */
9305 elf32_arm_to_thumb_export_stub (struct elf_link_hash_entry
*h
, void * inf
)
9307 struct bfd_link_info
* info
= (struct bfd_link_info
*) inf
;
9309 struct elf_link_hash_entry
* myh
;
9310 struct elf32_arm_link_hash_entry
*eh
;
9311 struct elf32_arm_link_hash_table
* globals
;
9314 char *error_message
;
9316 eh
= elf32_arm_hash_entry (h
);
9317 /* Allocate stubs for exported Thumb functions on v4t. */
/* Symbols without reserved export glue are skipped -- NOTE(review):
   the body of this branch is incomplete in this extraction.  */
9318 if (eh
->export_glue
== NULL
)
9321 globals
= elf32_arm_hash_table (info
);
9322 BFD_ASSERT (globals
!= NULL
);
9323 BFD_ASSERT (globals
->bfd_of_glue_owner
!= NULL
);
9325 s
= bfd_get_linker_section (globals
->bfd_of_glue_owner
,
9326 ARM2THUMB_GLUE_SECTION_NAME
);
9327 BFD_ASSERT (s
!= NULL
);
9328 BFD_ASSERT (s
->contents
!= NULL
);
9329 BFD_ASSERT (s
->output_section
!= NULL
);
/* Resolve the output VMA of the previously reserved glue for this
   exported Thumb symbol.  */
9331 sec
= eh
->export_glue
->root
.u
.def
.section
;
9333 BFD_ASSERT (sec
->output_section
!= NULL
);
9335 val
= eh
->export_glue
->root
.u
.def
.value
+ sec
->output_offset
9336 + sec
->output_section
->vma
;
/* Create the ARM-callable stub targeting the Thumb definition.
   NOTE(review): the call is cut off here; upstream passes
   &error_message as the final argument and the function then
   returns.  */
9338 myh
= elf32_arm_create_thumb_stub (info
, h
->root
.root
.string
,
9339 h
->root
.u
.def
.section
->owner
,
9340 globals
->obfd
, sec
, val
, s
,
9346 /* Populate ARMv4 BX veneers. Returns the absolute address of the veneer. */
/* NOTE(review): garbled extraction -- the return type and the locals
   S/P/GLUE_ADDR plus braces were dropped.  bx_glue_offset[reg] packs
   state flags in its two low bits: bit 1 = "veneer reserved" (asserted
   below), bit 0 = "veneer already written" (set once populated, tested
   before writing).  */
9349 elf32_arm_bx_glue (struct bfd_link_info
* info
, int reg
)
9354 struct elf32_arm_link_hash_table
*globals
;
9356 globals
= elf32_arm_hash_table (info
);
9357 BFD_ASSERT (globals
!= NULL
);
9358 BFD_ASSERT (globals
->bfd_of_glue_owner
!= NULL
);
9360 s
= bfd_get_linker_section (globals
->bfd_of_glue_owner
,
9361 ARM_BX_GLUE_SECTION_NAME
);
9362 BFD_ASSERT (s
!= NULL
);
9363 BFD_ASSERT (s
->contents
!= NULL
);
9364 BFD_ASSERT (s
->output_section
!= NULL
);
/* The veneer must have been reserved during an earlier pass.  */
9366 BFD_ASSERT (globals
->bx_glue_offset
[reg
] & 2);
/* Strip the flag bits to get the veneer's offset within S.  */
9368 glue_addr
= globals
->bx_glue_offset
[reg
] & ~(bfd_vma
)3;
/* Write the three-instruction veneer only once (tst / moveq / bx,
   each parameterized by REG).  */
9370 if ((globals
->bx_glue_offset
[reg
] & 1) == 0)
9372 p
= s
->contents
+ glue_addr
;
9373 bfd_put_32 (globals
->obfd
, armbx1_tst_insn
+ (reg
<< 16), p
);
9374 bfd_put_32 (globals
->obfd
, armbx2_moveq_insn
+ reg
, p
+ 4);
9375 bfd_put_32 (globals
->obfd
, armbx3_bx_insn
+ reg
, p
+ 8);
9376 globals
->bx_glue_offset
[reg
] |= 1;
/* Absolute output address of the veneer.  */
9379 return glue_addr
+ s
->output_section
->vma
+ s
->output_offset
;
9382 /* Generate Arm stubs for exported Thumb symbols. */
/* NOTE(review): garbled extraction -- the return type, braces and the
   early-return bodies were dropped.  Backend begin_write_processing
   hook: when called from the ELF linker (LINK_INFO non-NULL) and the
   target lacks BLX, it walks the hash table creating ARM stubs for
   exported Thumb functions via elf32_arm_to_thumb_export_stub.  */
9384 elf32_arm_begin_write_processing (bfd
*abfd ATTRIBUTE_UNUSED
,
9385 struct bfd_link_info
*link_info
)
9387 struct elf32_arm_link_hash_table
* globals
;
9389 if (link_info
== NULL
)
9390 /* Ignore this if we are not called by the ELF backend linker. */
9393 globals
= elf32_arm_hash_table (link_info
);
9394 if (globals
== NULL
)
9397 /* If blx is available then exported Thumb symbols are OK and there is
9399 if (globals
->use_blx
)
/* NOTE(review): the traversal's final argument is cut off here.  */
9402 elf_link_hash_traverse (&globals
->root
, elf32_arm_to_thumb_export_stub
,
9406 /* Reserve space for COUNT dynamic relocations in relocation section SRELOC. */
/* NOTE(review): garbled extraction -- the return type and braces were
   dropped.  Simply grows SRELOC by COUNT entries of the target's
   relocation size; dynamic sections must already exist.  */
9410 elf32_arm_allocate_dynrelocs (struct bfd_link_info
*info
, asection
*sreloc
,
9411 bfd_size_type count
)
9413 struct elf32_arm_link_hash_table
*htab
;
9415 htab
= elf32_arm_hash_table (info
);
9416 BFD_ASSERT (htab
->root
.dynamic_sections_created
);
/* RELOC_SIZE picks Elf32_External_Rel vs. Rela per htab->use_rel.  */
9419 sreloc
->size
+= RELOC_SIZE (htab
) * count
;
9422 /* Reserve space for COUNT R_ARM_IRELATIVE relocations. If the link is
9423 dynamic, the relocations should go in SRELOC, otherwise they should
9424 go in the special .rel.iplt section. */
/* NOTE(review): garbled extraction -- the return type, braces and the
   else-structure were dropped; the two size increments below are the
   static-link (.rel.iplt) and dynamic-link (SRELOC) arms.  */
9427 elf32_arm_allocate_irelocs (struct bfd_link_info
*info
, asection
*sreloc
,
9428 bfd_size_type count
)
9430 struct elf32_arm_link_hash_table
*htab
;
9432 htab
= elf32_arm_hash_table (info
);
9433 if (!htab
->root
.dynamic_sections_created
)
9434 htab
->root
.irelplt
->size
+= RELOC_SIZE (htab
) * count
;
9437 BFD_ASSERT (sreloc
!= NULL
);
9438 sreloc
->size
+= RELOC_SIZE (htab
) * count
;
9442 /* Add relocation REL to the end of relocation section SRELOC. */
/* NOTE(review): garbled extraction -- the return type, the local LOC
   declaration and braces were dropped.  Appends REL (swapped to
   external form) at SRELOC's current reloc_count slot, bumping the
   count.  */
9445 elf32_arm_add_dynreloc (bfd
*output_bfd
, struct bfd_link_info
*info
,
9446 asection
*sreloc
, Elf_Internal_Rela
*rel
)
9449 struct elf32_arm_link_hash_table
*htab
;
9451 htab
= elf32_arm_hash_table (info
);
/* In a static link, IRELATIVE relocations are redirected to the
   special .rel.iplt section.  */
9452 if (!htab
->root
.dynamic_sections_created
9453 && ELF32_R_TYPE (rel
->r_info
) == R_ARM_IRELATIVE
)
9454 sreloc
= htab
->root
.irelplt
;
9457 loc
= sreloc
->contents
;
9458 loc
+= sreloc
->reloc_count
++ * RELOC_SIZE (htab
);
/* NOTE(review): the body of this overflow check was dropped by the
   extraction; upstream aborts here -- confirm.  */
9459 if (sreloc
->reloc_count
* RELOC_SIZE (htab
) > sreloc
->size
)
/* SWAP_RELOC_OUT converts to the on-disk REL/RELA encoding.  */
9461 SWAP_RELOC_OUT (htab
) (output_bfd
, rel
, loc
);
9464 /* Allocate room for a PLT entry described by ROOT_PLT and ARM_PLT.
9465 IS_IPLT_ENTRY says whether the entry belongs to .iplt rather than
/* NOTE(review): garbled extraction -- the return type, the locals
   SPLT/SGOTPLT, the if/else structure selecting the .iplt vs. .plt
   branches, and many braces were dropped (embedded numbering jumps).
   The two assignments of SPLT/SGOTPLT below are the IS_IPLT_ENTRY and
   regular-PLT arms respectively.  */
9469 elf32_arm_allocate_plt_entry (struct bfd_link_info
*info
,
9470 bfd_boolean is_iplt_entry
,
9471 union gotplt_union
*root_plt
,
9472 struct arm_plt_info
*arm_plt
)
9474 struct elf32_arm_link_hash_table
*htab
;
9478 htab
= elf32_arm_hash_table (info
);
/* IFUNC entries live in .iplt/.igot.plt.  */
9482 splt
= htab
->root
.iplt
;
9483 sgotplt
= htab
->root
.igotplt
;
9485 /* NaCl uses a special first entry in .iplt too. */
9486 if (htab
->nacl_p
&& splt
->size
== 0)
9487 splt
->size
+= htab
->plt_header_size
;
9489 /* Allocate room for an R_ARM_IRELATIVE relocation in .rel.iplt. */
9490 elf32_arm_allocate_irelocs (info
, htab
->root
.irelplt
, 1);
/* Regular entries live in .plt/.got.plt.  */
9494 splt
= htab
->root
.splt
;
9495 sgotplt
= htab
->root
.sgotplt
;
9499 /* Allocate room for R_ARM_FUNCDESC_VALUE. */
9500 /* For lazy binding, relocations will be put into .rel.plt, in
9501 .rel.got otherwise. */
9502 /* FIXME: today we don't support lazy binding so put it in .rel.got */
9503 if (info
->flags
& DF_BIND_NOW
)
9504 elf32_arm_allocate_dynrelocs (info
, htab
->root
.srelgot
, 1);
9506 elf32_arm_allocate_dynrelocs (info
, htab
->root
.srelplt
, 1);
9510 /* Allocate room for an R_JUMP_SLOT relocation in .rel.plt. */
9511 elf32_arm_allocate_dynrelocs (info
, htab
->root
.srelplt
, 1);
9514 /* If this is the first .plt entry, make room for the special
9516 if (splt
->size
== 0)
9517 splt
->size
+= htab
->plt_header_size
;
9519 htab
->next_tls_desc_index
++;
9522 /* Allocate the PLT entry itself, including any leading Thumb stub. */
9523 if (elf32_arm_plt_needs_thumb_stub_p (info
, arm_plt
))
9524 splt
->size
+= PLT_THUMB_STUB_SIZE
;
9525 root_plt
->offset
= splt
->size
;
9526 splt
->size
+= htab
->plt_entry_size
;
/* Symbian has no .got.plt; everyone else records a GOT slot for the
   entry.  NOTE(review): the branch structure around the two
   got_offset assignments was dropped by the extraction.  */
9528 if (!htab
->symbian_p
)
9530 /* We also need to make an entry in the .got.plt section, which
9531 will be placed in the .got section by the linker script. */
9533 arm_plt
->got_offset
= sgotplt
->size
;
9535 arm_plt
->got_offset
= sgotplt
->size
- 8 * htab
->num_tls_desc
;
9537 /* Function descriptor takes 64 bits in GOT. */
9545 arm_movw_immediate (bfd_vma value
)
9547 return (value
& 0x00000fff) | ((value
& 0x0000f000) << 4);
9551 arm_movt_immediate (bfd_vma value
)
9553 return ((value
& 0x0fff0000) >> 16) | ((value
& 0xf0000000) >> 12);
9556 /* Fill in a PLT entry and its associated GOT slot. If DYNINDX == -1,
9557 the entry lives in .iplt and resolves to (*SYM_VALUE)().
9558 Otherwise, DYNINDX is the index of the symbol in the dynamic
9559 symbol table and SYM_VALUE is undefined.
9561 ROOT_PLT points to the offset of the PLT entry from the start of its
9562 section (.iplt or .plt). ARM_PLT points to the symbol's ARM-specific
9563 bookkeeping information.
9565 Returns FALSE if there was a problem. */
/* NOTE(review): garbled extraction -- the return type, locals
   SPLT/SGOT/SREL/LOC/PTR/PLT_INDEX/I/VAL, the if/else selecting .iplt
   vs. .plt, most braces and several statement tails were dropped
   (the embedded upstream line numbering jumps throughout).  Verify
   any change against upstream bfd/elf32-arm.c.  */
9568 elf32_arm_populate_plt_entry (bfd
*output_bfd
, struct bfd_link_info
*info
,
9569 union gotplt_union
*root_plt
,
9570 struct arm_plt_info
*arm_plt
,
9571 int dynindx
, bfd_vma sym_value
)
9573 struct elf32_arm_link_hash_table
*htab
;
9579 Elf_Internal_Rela rel
;
9580 bfd_vma plt_header_size
;
9581 bfd_vma got_header_size
;
9583 htab
= elf32_arm_hash_table (info
);
9585 /* Pick the appropriate sections and sizes. */
/* IFUNC (.iplt) case.  */
9588 splt
= htab
->root
.iplt
;
9589 sgot
= htab
->root
.igotplt
;
9590 srel
= htab
->root
.irelplt
;
9592 /* There are no reserved entries in .igot.plt, and no special
9593 first entry in .iplt. */
9594 got_header_size
= 0;
9595 plt_header_size
= 0;
/* Regular (.plt) case.  */
9599 splt
= htab
->root
.splt
;
9600 sgot
= htab
->root
.sgotplt
;
9601 srel
= htab
->root
.srelplt
;
9603 got_header_size
= get_elf_backend_data (output_bfd
)->got_header_size
;
9604 plt_header_size
= htab
->plt_header_size
;
9606 BFD_ASSERT (splt
!= NULL
&& srel
!= NULL
);
9608 /* Fill in the entry in the procedure linkage table. */
9609 if (htab
->symbian_p
)
9611 BFD_ASSERT (dynindx
>= 0);
9612 put_arm_insn (htab
, output_bfd
,
9613 elf32_arm_symbian_plt_entry
[0],
9614 splt
->contents
+ root_plt
->offset
);
9615 bfd_put_32 (output_bfd
,
9616 elf32_arm_symbian_plt_entry
[1],
9617 splt
->contents
+ root_plt
->offset
+ 4);
9619 /* Fill in the entry in the .rel.plt section. */
9620 rel
.r_offset
= (splt
->output_section
->vma
9621 + splt
->output_offset
9622 + root_plt
->offset
+ 4);
9623 rel
.r_info
= ELF32_R_INFO (dynindx
, R_ARM_GLOB_DAT
);
9625 /* Get the index in the procedure linkage table which
9626 corresponds to this symbol. This is the index of this symbol
9627 in all the symbols for which we are making plt entries. The
9628 first entry in the procedure linkage table is reserved. */
9629 plt_index
= ((root_plt
->offset
- plt_header_size
)
9630 / htab
->plt_entry_size
);
/* Non-Symbian targets: compute GOT slot and PLT addresses.  */
9634 bfd_vma got_offset
, got_address
, plt_address
;
9635 bfd_vma got_displacement
, initial_got_entry
;
9638 BFD_ASSERT (sgot
!= NULL
);
9640 /* Get the offset into the .(i)got.plt table of the entry that
9641 corresponds to this function. */
/* Mask off the low "populated" flag bit.  */
9642 got_offset
= (arm_plt
->got_offset
& -2);
9644 /* Get the index in the procedure linkage table which
9645 corresponds to this symbol. This is the index of this symbol
9646 in all the symbols for which we are making plt entries.
9647 After the reserved .got.plt entries, all symbols appear in
9648 the same order as in .plt. */
9650 /* Function descriptor takes 8 bytes. */
9651 plt_index
= (got_offset
- got_header_size
) / 8;
9653 plt_index
= (got_offset
- got_header_size
) / 4;
9655 /* Calculate the address of the GOT entry. */
/* NOTE(review): the "+ got_offset);" tail of this expression was
   dropped by the extraction.  */
9656 got_address
= (sgot
->output_section
->vma
9657 + sgot
->output_offset
9660 /* ...and the address of the PLT entry. */
9661 plt_address
= (splt
->output_section
->vma
9662 + splt
->output_offset
9663 + root_plt
->offset
);
9665 ptr
= splt
->contents
+ root_plt
->offset
;
/* VxWorks shared-library PLT: patch the template word by word;
   words 2 and 5 are data, the rest are instructions.  */
9666 if (htab
->vxworks_p
&& bfd_link_pic (info
))
9671 for (i
= 0; i
!= htab
->plt_entry_size
/ 4; i
++, ptr
+= 4)
9673 val
= elf32_arm_vxworks_shared_plt_entry
[i
];
9675 val
|= got_address
- sgot
->output_section
->vma
;
9677 val
|= plt_index
* RELOC_SIZE (htab
);
9678 if (i
== 2 || i
== 5)
9679 bfd_put_32 (output_bfd
, val
, ptr
);
9681 put_arm_insn (htab
, output_bfd
, val
, ptr
);
/* VxWorks executable PLT.  */
9684 else if (htab
->vxworks_p
)
9689 for (i
= 0; i
!= htab
->plt_entry_size
/ 4; i
++, ptr
+= 4)
9691 val
= elf32_arm_vxworks_exec_plt_entry
[i
];
9695 val
|= 0xffffff & -((root_plt
->offset
+ i
* 4 + 8) >> 2);
9697 val
|= plt_index
* RELOC_SIZE (htab
);
9698 if (i
== 2 || i
== 5)
9699 bfd_put_32 (output_bfd
, val
, ptr
);
9701 put_arm_insn (htab
, output_bfd
, val
, ptr
);
9704 loc
= (htab
->srelplt2
->contents
9705 + (plt_index
* 2 + 1) * RELOC_SIZE (htab
));
9707 /* Create the .rela.plt.unloaded R_ARM_ABS32 relocation
9708 referencing the GOT for this PLT entry. */
9709 rel
.r_offset
= plt_address
+ 8;
9710 rel
.r_info
= ELF32_R_INFO (htab
->root
.hgot
->indx
, R_ARM_ABS32
);
9711 rel
.r_addend
= got_offset
;
9712 SWAP_RELOC_OUT (htab
) (output_bfd
, &rel
, loc
);
9713 loc
+= RELOC_SIZE (htab
);
9715 /* Create the R_ARM_ABS32 relocation referencing the
9716 beginning of the PLT for this GOT entry. */
9717 rel
.r_offset
= got_address
;
9718 rel
.r_info
= ELF32_R_INFO (htab
->root
.hplt
->indx
, R_ARM_ABS32
);
9720 SWAP_RELOC_OUT (htab
) (output_bfd
, &rel
, loc
);
/* Native Client PLT: movw/movt the GOT displacement, then branch to
   the shared tail of the initial PLT slot.  */
9722 else if (htab
->nacl_p
)
9724 /* Calculate the displacement between the PLT slot and the
9725 common tail that's part of the special initial PLT slot. */
9726 int32_t tail_displacement
9727 = ((splt
->output_section
->vma
+ splt
->output_offset
9728 + ARM_NACL_PLT_TAIL_OFFSET
)
9729 - (plt_address
+ htab
->plt_entry_size
+ 4));
9730 BFD_ASSERT ((tail_displacement
& 3) == 0);
9731 tail_displacement
>>= 2;
9733 BFD_ASSERT ((tail_displacement
& 0xff000000) == 0
9734 || (-tail_displacement
& 0xff000000) == 0);
9736 /* Calculate the displacement between the PLT slot and the entry
9737 in the GOT. The offset accounts for the value produced by
9738 adding to pc in the penultimate instruction of the PLT stub. */
9739 got_displacement
= (got_address
9740 - (plt_address
+ htab
->plt_entry_size
));
9742 /* NaCl does not support interworking at all. */
9743 BFD_ASSERT (!elf32_arm_plt_needs_thumb_stub_p (info
, arm_plt
));
/* NOTE(review): the destination-pointer argument of each of the four
   put_arm_insn calls below was dropped by the extraction.  */
9745 put_arm_insn (htab
, output_bfd
,
9746 elf32_arm_nacl_plt_entry
[0]
9747 | arm_movw_immediate (got_displacement
),
9749 put_arm_insn (htab
, output_bfd
,
9750 elf32_arm_nacl_plt_entry
[1]
9751 | arm_movt_immediate (got_displacement
),
9753 put_arm_insn (htab
, output_bfd
,
9754 elf32_arm_nacl_plt_entry
[2],
9756 put_arm_insn (htab
, output_bfd
,
9757 elf32_arm_nacl_plt_entry
[3]
9758 | (tail_displacement
& 0x00ffffff),
/* FDPIC PLT: 32-bit instruction templates plus inline funcdesc
   offsets; the lazy-binding tail is emitted only without
   DF_BIND_NOW.  */
9761 else if (htab
->fdpic_p
)
9763 const bfd_vma
*plt_entry
= using_thumb_only(htab
)
9764 ? elf32_arm_fdpic_thumb_plt_entry
9765 : elf32_arm_fdpic_plt_entry
;
9767 /* Fill-up Thumb stub if needed. */
9768 if (elf32_arm_plt_needs_thumb_stub_p (info
, arm_plt
))
9770 put_thumb_insn (htab
, output_bfd
,
9771 elf32_arm_plt_thumb_stub
[0], ptr
- 4);
9772 put_thumb_insn (htab
, output_bfd
,
9773 elf32_arm_plt_thumb_stub
[1], ptr
- 2);
9775 /* As we are using 32 bit instructions even for the Thumb
9776 version, we have to use 'put_arm_insn' instead of
9777 'put_thumb_insn'. */
9778 put_arm_insn(htab
, output_bfd
, plt_entry
[0], ptr
+ 0);
9779 put_arm_insn(htab
, output_bfd
, plt_entry
[1], ptr
+ 4);
9780 put_arm_insn(htab
, output_bfd
, plt_entry
[2], ptr
+ 8);
9781 put_arm_insn(htab
, output_bfd
, plt_entry
[3], ptr
+ 12);
9782 bfd_put_32 (output_bfd
, got_offset
, ptr
+ 16);
9784 if (!(info
->flags
& DF_BIND_NOW
))
9786 /* funcdesc_value_reloc_offset. */
9787 bfd_put_32 (output_bfd
,
9788 htab
->root
.srelplt
->reloc_count
* RELOC_SIZE (htab
),
9790 put_arm_insn(htab
, output_bfd
, plt_entry
[6], ptr
+ 24);
9791 put_arm_insn(htab
, output_bfd
, plt_entry
[7], ptr
+ 28);
9792 put_arm_insn(htab
, output_bfd
, plt_entry
[8], ptr
+ 32);
9793 put_arm_insn(htab
, output_bfd
, plt_entry
[9], ptr
+ 36);
/* Thumb-only (v7-M etc.) PLT.  */
9796 else if (using_thumb_only (htab
))
9798 /* PR ld/16017: Generate thumb only PLT entries. */
9799 if (!using_thumb2 (htab
))
9801 /* FIXME: We ought to be able to generate thumb-1 PLT
9803 _bfd_error_handler (_("%pB: warning: thumb-1 mode PLT generation not currently supported"),
9808 /* Calculate the displacement between the PLT slot and the entry in
9809 the GOT. The 12-byte offset accounts for the value produced by
9810 adding to pc in the 3rd instruction of the PLT stub. */
9811 got_displacement
= got_address
- (plt_address
+ 12);
9813 /* As we are using 32 bit instructions we have to use 'put_arm_insn'
9814 instead of 'put_thumb_insn'. */
/* The displacement bits are scattered per the Thumb2 movw/movt
   immediate encoding.  */
9815 put_arm_insn (htab
, output_bfd
,
9816 elf32_thumb2_plt_entry
[0]
9817 | ((got_displacement
& 0x000000ff) << 16)
9818 | ((got_displacement
& 0x00000700) << 20)
9819 | ((got_displacement
& 0x00000800) >> 1)
9820 | ((got_displacement
& 0x0000f000) >> 12),
9822 put_arm_insn (htab
, output_bfd
,
9823 elf32_thumb2_plt_entry
[1]
9824 | ((got_displacement
& 0x00ff0000) )
9825 | ((got_displacement
& 0x07000000) << 4)
9826 | ((got_displacement
& 0x08000000) >> 17)
9827 | ((got_displacement
& 0xf0000000) >> 28),
9829 put_arm_insn (htab
, output_bfd
,
9830 elf32_thumb2_plt_entry
[2],
9832 put_arm_insn (htab
, output_bfd
,
9833 elf32_thumb2_plt_entry
[3],
/* Classic ARM PLT (short or long form).  */
9838 /* Calculate the displacement between the PLT slot and the
9839 entry in the GOT. The eight-byte offset accounts for the
9840 value produced by adding to pc in the first instruction
9842 got_displacement
= got_address
- (plt_address
+ 8);
9844 if (elf32_arm_plt_needs_thumb_stub_p (info
, arm_plt
))
9846 put_thumb_insn (htab
, output_bfd
,
9847 elf32_arm_plt_thumb_stub
[0], ptr
- 4);
9848 put_thumb_insn (htab
, output_bfd
,
9849 elf32_arm_plt_thumb_stub
[1], ptr
- 2);
9852 if (!elf32_arm_use_long_plt_entry
)
9854 BFD_ASSERT ((got_displacement
& 0xf0000000) == 0);
9856 put_arm_insn (htab
, output_bfd
,
9857 elf32_arm_plt_entry_short
[0]
9858 | ((got_displacement
& 0x0ff00000) >> 20),
9860 put_arm_insn (htab
, output_bfd
,
9861 elf32_arm_plt_entry_short
[1]
9862 | ((got_displacement
& 0x000ff000) >> 12),
9864 put_arm_insn (htab
, output_bfd
,
9865 elf32_arm_plt_entry_short
[2]
9866 | (got_displacement
& 0x00000fff),
9868 #ifdef FOUR_WORD_PLT
9869 bfd_put_32 (output_bfd
, elf32_arm_plt_entry_short
[3], ptr
+ 12);
9874 put_arm_insn (htab
, output_bfd
,
9875 elf32_arm_plt_entry_long
[0]
9876 | ((got_displacement
& 0xf0000000) >> 28),
9878 put_arm_insn (htab
, output_bfd
,
9879 elf32_arm_plt_entry_long
[1]
9880 | ((got_displacement
& 0x0ff00000) >> 20),
9882 put_arm_insn (htab
, output_bfd
,
9883 elf32_arm_plt_entry_long
[2]
9884 | ((got_displacement
& 0x000ff000) >> 12),
9886 put_arm_insn (htab
, output_bfd
,
9887 elf32_arm_plt_entry_long
[3]
9888 | (got_displacement
& 0x00000fff),
9893 /* Fill in the entry in the .rel(a).(i)plt section. */
9894 rel
.r_offset
= got_address
;
9898 /* .igot.plt entries use IRELATIVE relocations against SYM_VALUE.
9899 The dynamic linker or static executable then calls SYM_VALUE
9900 to determine the correct run-time value of the .igot.plt entry. */
9901 rel
.r_info
= ELF32_R_INFO (0, R_ARM_IRELATIVE
);
9902 initial_got_entry
= sym_value
;
9906 /* For FDPIC we will have to resolve a R_ARM_FUNCDESC_VALUE
9907 used by PLT entry. */
9910 rel
.r_info
= ELF32_R_INFO (dynindx
, R_ARM_FUNCDESC_VALUE
);
9911 initial_got_entry
= 0;
/* Regular lazy-binding GOT slot initially points at the PLT header.  */
9915 rel
.r_info
= ELF32_R_INFO (dynindx
, R_ARM_JUMP_SLOT
);
9916 initial_got_entry
= (splt
->output_section
->vma
9917 + splt
->output_offset
);
9921 /* Fill in the entry in the global offset table. */
9922 bfd_put_32 (output_bfd
, initial_got_entry
,
9923 sgot
->contents
+ got_offset
);
9925 if (htab
->fdpic_p
&& !(info
->flags
& DF_BIND_NOW
))
9927 /* Setup initial funcdesc value. */
9928 /* FIXME: we don't support lazy binding because there is a
9929 race condition between both words getting written and
9930 some other thread attempting to read them. The ARM
9931 architecture does not have an atomic 64 bit load/store
9932 instruction that could be used to prevent it; it is
9933 recommended that threaded FDPIC applications run with the
9934 LD_BIND_NOW environment variable set. */
9935 bfd_put_32(output_bfd
, plt_address
+ 0x18,
9936 sgot
->contents
+ got_offset
);
9937 bfd_put_32(output_bfd
, -1 /*TODO*/,
9938 sgot
->contents
+ got_offset
+ 4);
/* Emit the dynamic relocation for the slot just filled in.  */
9943 elf32_arm_add_dynreloc (output_bfd
, info
, srel
, &rel
);
9948 /* For FDPIC we put PLT relocations into .rel.got when not
9949 lazy binding otherwise we put them in .rel.plt. For now,
9950 we don't support lazy binding so put it in .rel.got. */
9951 if (info
->flags
& DF_BIND_NOW
)
9952 elf32_arm_add_dynreloc(output_bfd
, info
, htab
->root
.srelgot
, &rel
);
9954 elf32_arm_add_dynreloc(output_bfd
, info
, htab
->root
.srelplt
, &rel
);
/* Symbian path writes the reloc directly at its PLT index.  */
9958 loc
= srel
->contents
+ plt_index
* RELOC_SIZE (htab
);
9959 SWAP_RELOC_OUT (htab
) (output_bfd
, &rel
, loc
);
9966 /* Some relocations map to different relocations depending on the
9967 target. Return the real relocation. */
/* NOTE(review): heavily truncated by the extraction -- upstream this
   is a switch over the relocation type that maps R_ARM_TARGET1 (using
   target1_is_rel) and R_ARM_TARGET2 (via globals->target2_reloc);
   only fragments of both arms survive here.  */
9970 arm_real_reloc_type (struct elf32_arm_link_hash_table
* globals
,
9976 if (globals
->target1_is_rel
)
9982 return globals
->target2_reloc
;
9989 /* Return the base VMA address which should be subtracted from real addresses
9990 when resolving @dtpoff relocation.
9991 This is PT_TLS segment p_vaddr. */
9994 dtpoff_base (struct bfd_link_info
*info
)
9996 /* If tls_sec is NULL, we should have signalled an error already. */
9997 if (elf_hash_table (info
)->tls_sec
== NULL
)
9999 return elf_hash_table (info
)->tls_sec
->vma
;
10002 /* Return the relocation value for @tpoff relocation
10003 if STT_TLS virtual address is ADDRESS. */
10006 tpoff (struct bfd_link_info
*info
, bfd_vma address
)
10008 struct elf_link_hash_table
*htab
= elf_hash_table (info
);
10011 /* If tls_sec is NULL, we should have signalled an error already. */
10012 if (htab
->tls_sec
== NULL
)
10014 base
= align_power ((bfd_vma
) TCB_SIZE
, htab
->tls_sec
->alignment_power
);
10015 return address
- htab
->tls_sec
->vma
+ base
;
10018 /* Perform an R_ARM_ABS12 relocation on the field pointed to by DATA.
10019 VALUE is the relocation value. */
10021 static bfd_reloc_status_type
10022 elf32_arm_abs12_reloc (bfd
*abfd
, void *data
, bfd_vma value
)
10025 return bfd_reloc_overflow
;
10027 value
|= bfd_get_32 (abfd
, data
) & 0xfffff000;
10028 bfd_put_32 (abfd
, value
, data
);
10029 return bfd_reloc_ok
;
10032 /* Handle TLS relaxations. Relaxing is possible for symbols that use
10033 R_ARM_GOTDESC, R_ARM_{,THM_}TLS_CALL or
10034 R_ARM_{,THM_}TLS_DESCSEQ relocations, during a static link.
10036 Return bfd_reloc_ok if we're done, bfd_reloc_continue if the caller
10037 is to then call final_link_relocate. Return other values in the
10040 FIXME:When --emit-relocs is in effect, we'll emit relocs describing
10041 the pre-relaxed code. It would be nice if the relocs were updated
10042 to match the optimization. */
/* NOTE(review): garbled extraction -- case labels, braces, several
   branch conditions (is_local tests) and the _bfd_error_handler
   function names were dropped; the embedded upstream numbering jumps
   mark the gaps.  Verify against upstream before editing.  */
10044 static bfd_reloc_status_type
10045 elf32_arm_tls_relax (struct elf32_arm_link_hash_table
*globals
,
10046 bfd
*input_bfd
, asection
*input_sec
, bfd_byte
*contents
,
10047 Elf_Internal_Rela
*rel
, unsigned long is_local
)
10049 unsigned long insn
;
10051 switch (ELF32_R_TYPE (rel
->r_info
))
10054 return bfd_reloc_notsupported
;
10056 case R_ARM_TLS_GOTDESC
:
/* Adjust the stored addend for the PC bias of the referencing
   instruction: -5 for Thumb, -8 for ARM.  */
10061 insn
= bfd_get_32 (input_bfd
, contents
+ rel
->r_offset
);
10063 insn
-= 5; /* THUMB */
10065 insn
-= 8; /* ARM */
10067 bfd_put_32 (input_bfd
, insn
, contents
+ rel
->r_offset
);
10068 return bfd_reloc_continue
;
10070 case R_ARM_THM_TLS_DESCSEQ
:
/* Rewrite each instruction of the Thumb TLS descriptor sequence;
   0x46c0 is the Thumb NOP encoding (mov r8, r8).  */
10072 insn
= bfd_get_16 (input_bfd
, contents
+ rel
->r_offset
);
10073 if ((insn
& 0xff78) == 0x4478) /* add rx, pc */
10077 bfd_put_16 (input_bfd
, 0x46c0, contents
+ rel
->r_offset
);
10079 else if ((insn
& 0xffc0) == 0x6840) /* ldr rx,[ry,#4] */
10083 bfd_put_16 (input_bfd
, 0x46c0, contents
+ rel
->r_offset
);
10086 bfd_put_16 (input_bfd
, insn
& 0xf83f, contents
+ rel
->r_offset
);
10088 else if ((insn
& 0xff87) == 0x4780) /* blx rx */
10092 bfd_put_16 (input_bfd
, 0x46c0, contents
+ rel
->r_offset
);
10095 bfd_put_16 (input_bfd
, 0x4600 | (insn
& 0x78),
10096 contents
+ rel
->r_offset
);
/* Unexpected instruction: report and bail out.  */
10100 if ((insn
& 0xf000) == 0xf000 || (insn
& 0xf800) == 0xe800)
10101 /* It's a 32 bit instruction, fetch the rest of it for
10102 error generation. */
10103 insn
= (insn
<< 16)
10104 | bfd_get_16 (input_bfd
, contents
+ rel
->r_offset
+ 2);
10106 /* xgettext:c-format */
10107 (_("%pB(%pA+%#" PRIx64
"): "
10108 "unexpected %s instruction '%#lx' in TLS trampoline"),
10109 input_bfd
, input_sec
, (uint64_t) rel
->r_offset
,
10111 return bfd_reloc_notsupported
;
10115 case R_ARM_TLS_DESCSEQ
:
/* ARM TLS descriptor sequence; 0xe1a00000 is the ARM NOP encoding
   (mov r0, r0).  */
10117 insn
= bfd_get_32 (input_bfd
, contents
+ rel
->r_offset
);
10118 if ((insn
& 0xffff0ff0) == 0xe08f0000) /* add rx,pc,ry */
10122 bfd_put_32 (input_bfd
, 0xe1a00000 | (insn
& 0xffff),
10123 contents
+ rel
->r_offset
);
10125 else if ((insn
& 0xfff00fff) == 0xe5900004) /* ldr rx,[ry,#4]*/
10129 bfd_put_32 (input_bfd
, 0xe1a00000, contents
+ rel
->r_offset
);
10132 bfd_put_32 (input_bfd
, insn
& 0xfffff000,
10133 contents
+ rel
->r_offset
);
10135 else if ((insn
& 0xfffffff0) == 0xe12fff30) /* blx rx */
10139 bfd_put_32 (input_bfd
, 0xe1a00000, contents
+ rel
->r_offset
);
10142 bfd_put_32 (input_bfd
, 0xe1a00000 | (insn
& 0xf),
10143 contents
+ rel
->r_offset
);
10148 /* xgettext:c-format */
10149 (_("%pB(%pA+%#" PRIx64
"): "
10150 "unexpected %s instruction '%#lx' in TLS trampoline"),
10151 input_bfd
, input_sec
, (uint64_t) rel
->r_offset
,
10153 return bfd_reloc_notsupported
;
10157 case R_ARM_TLS_CALL
:
10158 /* GD->IE relaxation, turn the instruction into 'nop' or
10159 'ldr r0, [pc,r0]' */
10160 insn
= is_local
? 0xe1a00000 : 0xe79f0000;
10161 bfd_put_32 (input_bfd
, insn
, contents
+ rel
->r_offset
);
10164 case R_ARM_THM_TLS_CALL
:
10165 /* GD->IE relaxation. */
/* NOTE(review): the is_local / thumb1 branches and the insn value
   computation were dropped by the extraction here.  */
10167 /* add r0,pc; ldr r0, [r0] */
10169 else if (using_thumb2 (globals
))
10176 bfd_put_16 (input_bfd
, insn
>> 16, contents
+ rel
->r_offset
);
10177 bfd_put_16 (input_bfd
, insn
& 0xffff, contents
+ rel
->r_offset
+ 2);
10180 return bfd_reloc_ok
;
10183 /* For a given value of n, calculate the value of G_n as required to
10184 deal with group relocations. We return it in the form of an
10185 encoded constant-and-rotation, together with the final residual. If n is
10186 specified as less than zero, then final_residual is filled with the
10187 input value and no further action is performed. */
/* NOTE(review): garbled extraction -- the return type, the locals
   CURRENT_N/G_N/SHIFT/MSB, braces, the n < 0 early-out, the shift
   computation after the msb scan, and the residual update at the loop
   bottom were all dropped (embedded numbering jumps).  */
10190 calculate_group_reloc_mask (bfd_vma value
, int n
, bfd_vma
*final_residual
)
10194 bfd_vma encoded_g_n
= 0;
10195 bfd_vma residual
= value
; /* Also known as Y_n. */
/* Peel off one 8-bit (aligned to 2 bits) group per iteration.  */
10197 for (current_n
= 0; current_n
<= n
; current_n
++)
10201 /* Calculate which part of the value to mask. */
10208 /* Determine the most significant bit in the residual and
10209 align the resulting value to a 2-bit boundary. */
10210 for (msb
= 30; msb
>= 0; msb
-= 2)
10211 if (residual
& (3 << msb
))
10214 /* The desired shift is now (msb - 6), or zero, whichever
10221 /* Calculate g_n in 32-bit as well as encoded constant+rotation form. */
10222 g_n
= residual
& (0xff << shift
);
10223 encoded_g_n
= (g_n
>> shift
)
10224 | ((g_n
<= 0xff ? 0 : (32 - shift
) / 2) << 8);
10226 /* Calculate the residual for the next time around. */
/* Hand back the final residual (Y_n) to the caller.  */
10230 *final_residual
= residual
;
10232 return encoded_g_n
;
10235 /* Given an ARM instruction, determine whether it is an ADD or a SUB.
10236 Returns 1 if it is an ADD, -1 if it is a SUB, and 0 otherwise. */
10239 identify_add_or_sub (bfd_vma insn
)
10241 int opcode
= insn
& 0x1e00000;
10243 if (opcode
== 1 << 23) /* ADD */
10246 if (opcode
== 1 << 22) /* SUB */
10252 /* Perform a relocation as part of a final link. */
10254 static bfd_reloc_status_type
10255 elf32_arm_final_link_relocate (reloc_howto_type
* howto
,
10258 asection
* input_section
,
10259 bfd_byte
* contents
,
10260 Elf_Internal_Rela
* rel
,
10262 struct bfd_link_info
* info
,
10263 asection
* sym_sec
,
10264 const char * sym_name
,
10265 unsigned char st_type
,
10266 enum arm_st_branch_type branch_type
,
10267 struct elf_link_hash_entry
* h
,
10268 bfd_boolean
* unresolved_reloc_p
,
10269 char ** error_message
)
10271 unsigned long r_type
= howto
->type
;
10272 unsigned long r_symndx
;
10273 bfd_byte
* hit_data
= contents
+ rel
->r_offset
;
10274 bfd_vma
* local_got_offsets
;
10275 bfd_vma
* local_tlsdesc_gotents
;
10278 asection
* sreloc
= NULL
;
10279 asection
* srelgot
;
10281 bfd_signed_vma signed_addend
;
10282 unsigned char dynreloc_st_type
;
10283 bfd_vma dynreloc_value
;
10284 struct elf32_arm_link_hash_table
* globals
;
10285 struct elf32_arm_link_hash_entry
*eh
;
10286 union gotplt_union
*root_plt
;
10287 struct arm_plt_info
*arm_plt
;
10288 bfd_vma plt_offset
;
10289 bfd_vma gotplt_offset
;
10290 bfd_boolean has_iplt_entry
;
10291 bfd_boolean resolved_to_zero
;
10293 globals
= elf32_arm_hash_table (info
);
10294 if (globals
== NULL
)
10295 return bfd_reloc_notsupported
;
10297 BFD_ASSERT (is_arm_elf (input_bfd
));
10298 BFD_ASSERT (howto
!= NULL
);
10300 /* Some relocation types map to different relocations depending on the
10301 target. We pick the right one here. */
10302 r_type
= arm_real_reloc_type (globals
, r_type
);
10304 /* It is possible to have linker relaxations on some TLS access
10305 models. Update our information here. */
10306 r_type
= elf32_arm_tls_transition (info
, r_type
, h
);
10308 if (r_type
!= howto
->type
)
10309 howto
= elf32_arm_howto_from_type (r_type
);
10311 eh
= (struct elf32_arm_link_hash_entry
*) h
;
10312 sgot
= globals
->root
.sgot
;
10313 local_got_offsets
= elf_local_got_offsets (input_bfd
);
10314 local_tlsdesc_gotents
= elf32_arm_local_tlsdesc_gotent (input_bfd
);
10316 if (globals
->root
.dynamic_sections_created
)
10317 srelgot
= globals
->root
.srelgot
;
10321 r_symndx
= ELF32_R_SYM (rel
->r_info
);
10323 if (globals
->use_rel
)
10325 addend
= bfd_get_32 (input_bfd
, hit_data
) & howto
->src_mask
;
10327 if (addend
& ((howto
->src_mask
+ 1) >> 1))
10329 signed_addend
= -1;
10330 signed_addend
&= ~ howto
->src_mask
;
10331 signed_addend
|= addend
;
10334 signed_addend
= addend
;
10337 addend
= signed_addend
= rel
->r_addend
;
10339 /* ST_BRANCH_TO_ARM is nonsense to thumb-only targets when we
10340 are resolving a function call relocation. */
10341 if (using_thumb_only (globals
)
10342 && (r_type
== R_ARM_THM_CALL
10343 || r_type
== R_ARM_THM_JUMP24
)
10344 && branch_type
== ST_BRANCH_TO_ARM
)
10345 branch_type
= ST_BRANCH_TO_THUMB
;
10347 /* Record the symbol information that should be used in dynamic
10349 dynreloc_st_type
= st_type
;
10350 dynreloc_value
= value
;
10351 if (branch_type
== ST_BRANCH_TO_THUMB
)
10352 dynreloc_value
|= 1;
10354 /* Find out whether the symbol has a PLT. Set ST_VALUE, BRANCH_TYPE and
10355 VALUE appropriately for relocations that we resolve at link time. */
10356 has_iplt_entry
= FALSE
;
10357 if (elf32_arm_get_plt_info (input_bfd
, globals
, eh
, r_symndx
, &root_plt
,
10359 && root_plt
->offset
!= (bfd_vma
) -1)
10361 plt_offset
= root_plt
->offset
;
10362 gotplt_offset
= arm_plt
->got_offset
;
10364 if (h
== NULL
|| eh
->is_iplt
)
10366 has_iplt_entry
= TRUE
;
10367 splt
= globals
->root
.iplt
;
10369 /* Populate .iplt entries here, because not all of them will
10370 be seen by finish_dynamic_symbol. The lower bit is set if
10371 we have already populated the entry. */
10372 if (plt_offset
& 1)
10376 if (elf32_arm_populate_plt_entry (output_bfd
, info
, root_plt
, arm_plt
,
10377 -1, dynreloc_value
))
10378 root_plt
->offset
|= 1;
10380 return bfd_reloc_notsupported
;
10383 /* Static relocations always resolve to the .iplt entry. */
10384 st_type
= STT_FUNC
;
10385 value
= (splt
->output_section
->vma
10386 + splt
->output_offset
10388 branch_type
= ST_BRANCH_TO_ARM
;
10390 /* If there are non-call relocations that resolve to the .iplt
10391 entry, then all dynamic ones must too. */
10392 if (arm_plt
->noncall_refcount
!= 0)
10394 dynreloc_st_type
= st_type
;
10395 dynreloc_value
= value
;
10399 /* We populate the .plt entry in finish_dynamic_symbol. */
10400 splt
= globals
->root
.splt
;
10405 plt_offset
= (bfd_vma
) -1;
10406 gotplt_offset
= (bfd_vma
) -1;
10409 resolved_to_zero
= (h
!= NULL
10410 && UNDEFWEAK_NO_DYNAMIC_RELOC (info
, h
));
10415 /* We don't need to find a value for this symbol. It's just a
10417 *unresolved_reloc_p
= FALSE
;
10418 return bfd_reloc_ok
;
10421 if (!globals
->vxworks_p
)
10422 return elf32_arm_abs12_reloc (input_bfd
, hit_data
, value
+ addend
);
10423 /* Fall through. */
10427 case R_ARM_ABS32_NOI
:
10429 case R_ARM_REL32_NOI
:
10435 /* Handle relocations which should use the PLT entry. ABS32/REL32
10436 will use the symbol's value, which may point to a PLT entry, but we
10437 don't need to handle that here. If we created a PLT entry, all
10438 branches in this object should go to it, except if the PLT is too
10439 far away, in which case a long branch stub should be inserted. */
10440 if ((r_type
!= R_ARM_ABS32
&& r_type
!= R_ARM_REL32
10441 && r_type
!= R_ARM_ABS32_NOI
&& r_type
!= R_ARM_REL32_NOI
10442 && r_type
!= R_ARM_CALL
10443 && r_type
!= R_ARM_JUMP24
10444 && r_type
!= R_ARM_PLT32
)
10445 && plt_offset
!= (bfd_vma
) -1)
10447 /* If we've created a .plt section, and assigned a PLT entry
10448 to this function, it must either be a STT_GNU_IFUNC reference
10449 or not be known to bind locally. In other cases, we should
10450 have cleared the PLT entry by now. */
10451 BFD_ASSERT (has_iplt_entry
|| !SYMBOL_CALLS_LOCAL (info
, h
));
10453 value
= (splt
->output_section
->vma
10454 + splt
->output_offset
10456 *unresolved_reloc_p
= FALSE
;
10457 return _bfd_final_link_relocate (howto
, input_bfd
, input_section
,
10458 contents
, rel
->r_offset
, value
,
10462 /* When generating a shared object or relocatable executable, these
10463 relocations are copied into the output file to be resolved at
10465 if ((bfd_link_pic (info
)
10466 || globals
->root
.is_relocatable_executable
10467 || globals
->fdpic_p
)
10468 && (input_section
->flags
& SEC_ALLOC
)
10469 && !(globals
->vxworks_p
10470 && strcmp (input_section
->output_section
->name
,
10472 && ((r_type
!= R_ARM_REL32
&& r_type
!= R_ARM_REL32_NOI
)
10473 || !SYMBOL_CALLS_LOCAL (info
, h
))
10474 && !(input_bfd
== globals
->stub_bfd
10475 && strstr (input_section
->name
, STUB_SUFFIX
))
10477 || (ELF_ST_VISIBILITY (h
->other
) == STV_DEFAULT
10478 && !resolved_to_zero
)
10479 || h
->root
.type
!= bfd_link_hash_undefweak
)
10480 && r_type
!= R_ARM_PC24
10481 && r_type
!= R_ARM_CALL
10482 && r_type
!= R_ARM_JUMP24
10483 && r_type
!= R_ARM_PREL31
10484 && r_type
!= R_ARM_PLT32
)
10486 Elf_Internal_Rela outrel
;
10487 bfd_boolean skip
, relocate
;
10490 if ((r_type
== R_ARM_REL32
|| r_type
== R_ARM_REL32_NOI
)
10491 && !h
->def_regular
)
10493 char *v
= _("shared object");
10495 if (bfd_link_executable (info
))
10496 v
= _("PIE executable");
10499 (_("%pB: relocation %s against external or undefined symbol `%s'"
10500 " can not be used when making a %s; recompile with -fPIC"), input_bfd
,
10501 elf32_arm_howto_table_1
[r_type
].name
, h
->root
.root
.string
, v
);
10502 return bfd_reloc_notsupported
;
10505 *unresolved_reloc_p
= FALSE
;
10507 if (sreloc
== NULL
&& globals
->root
.dynamic_sections_created
)
10509 sreloc
= _bfd_elf_get_dynamic_reloc_section (input_bfd
, input_section
,
10510 ! globals
->use_rel
);
10512 if (sreloc
== NULL
)
10513 return bfd_reloc_notsupported
;
10519 outrel
.r_addend
= addend
;
10521 _bfd_elf_section_offset (output_bfd
, info
, input_section
,
10523 if (outrel
.r_offset
== (bfd_vma
) -1)
10525 else if (outrel
.r_offset
== (bfd_vma
) -2)
10526 skip
= TRUE
, relocate
= TRUE
;
10527 outrel
.r_offset
+= (input_section
->output_section
->vma
10528 + input_section
->output_offset
);
10531 memset (&outrel
, 0, sizeof outrel
);
10533 && h
->dynindx
!= -1
10534 && (!bfd_link_pic (info
)
10535 || !(bfd_link_pie (info
)
10536 || SYMBOLIC_BIND (info
, h
))
10537 || !h
->def_regular
))
10538 outrel
.r_info
= ELF32_R_INFO (h
->dynindx
, r_type
);
10543 /* This symbol is local, or marked to become local. */
10544 BFD_ASSERT (r_type
== R_ARM_ABS32
|| r_type
== R_ARM_ABS32_NOI
10545 || (globals
->fdpic_p
&& !bfd_link_pic(info
)));
10546 if (globals
->symbian_p
)
10550 /* On Symbian OS, the data segment and text segment
10551 can be relocated independently. Therefore, we
10552 must indicate the segment to which this
10553 relocation is relative. The BPABI allows us to
10554 use any symbol in the right segment; we just use
10555 the section symbol as it is convenient. (We
10556 cannot use the symbol given by "h" directly as it
10557 will not appear in the dynamic symbol table.)
10559 Note that the dynamic linker ignores the section
10560 symbol value, so we don't subtract osec->vma
10561 from the emitted reloc addend. */
10563 osec
= sym_sec
->output_section
;
10565 osec
= input_section
->output_section
;
10566 symbol
= elf_section_data (osec
)->dynindx
;
10569 struct elf_link_hash_table
*htab
= elf_hash_table (info
);
10571 if ((osec
->flags
& SEC_READONLY
) == 0
10572 && htab
->data_index_section
!= NULL
)
10573 osec
= htab
->data_index_section
;
10575 osec
= htab
->text_index_section
;
10576 symbol
= elf_section_data (osec
)->dynindx
;
10578 BFD_ASSERT (symbol
!= 0);
10581 /* On SVR4-ish systems, the dynamic loader cannot
10582 relocate the text and data segments independently,
10583 so the symbol does not matter. */
10585 if (dynreloc_st_type
== STT_GNU_IFUNC
)
10586 /* We have an STT_GNU_IFUNC symbol that doesn't resolve
10587 to the .iplt entry. Instead, every non-call reference
10588 must use an R_ARM_IRELATIVE relocation to obtain the
10589 correct run-time address. */
10590 outrel
.r_info
= ELF32_R_INFO (symbol
, R_ARM_IRELATIVE
);
10591 else if (globals
->fdpic_p
&& !bfd_link_pic(info
))
10594 outrel
.r_info
= ELF32_R_INFO (symbol
, R_ARM_RELATIVE
);
10595 if (globals
->use_rel
)
10598 outrel
.r_addend
+= dynreloc_value
;
10602 arm_elf_add_rofixup(output_bfd
, globals
->srofixup
, outrel
.r_offset
);
10604 elf32_arm_add_dynreloc (output_bfd
, info
, sreloc
, &outrel
);
10606 /* If this reloc is against an external symbol, we do not want to
10607 fiddle with the addend. Otherwise, we need to include the symbol
10608 value so that it becomes an addend for the dynamic reloc. */
10610 return bfd_reloc_ok
;
10612 return _bfd_final_link_relocate (howto
, input_bfd
, input_section
,
10613 contents
, rel
->r_offset
,
10614 dynreloc_value
, (bfd_vma
) 0);
10616 else switch (r_type
)
10619 return elf32_arm_abs12_reloc (input_bfd
, hit_data
, value
+ addend
);
10621 case R_ARM_XPC25
: /* Arm BLX instruction. */
10624 case R_ARM_PC24
: /* Arm B/BL instruction. */
10627 struct elf32_arm_stub_hash_entry
*stub_entry
= NULL
;
10629 if (r_type
== R_ARM_XPC25
)
10631 /* Check for Arm calling Arm function. */
10632 /* FIXME: Should we translate the instruction into a BL
10633 instruction instead ? */
10634 if (branch_type
!= ST_BRANCH_TO_THUMB
)
10636 (_("\%pB: warning: %s BLX instruction targets"
10637 " %s function '%s'"),
10639 "ARM", h
? h
->root
.root
.string
: "(local)");
10641 else if (r_type
== R_ARM_PC24
)
10643 /* Check for Arm calling Thumb function. */
10644 if (branch_type
== ST_BRANCH_TO_THUMB
)
10646 if (elf32_arm_to_thumb_stub (info
, sym_name
, input_bfd
,
10647 output_bfd
, input_section
,
10648 hit_data
, sym_sec
, rel
->r_offset
,
10649 signed_addend
, value
,
10651 return bfd_reloc_ok
;
10653 return bfd_reloc_dangerous
;
10657 /* Check if a stub has to be inserted because the
10658 destination is too far or we are changing mode. */
10659 if ( r_type
== R_ARM_CALL
10660 || r_type
== R_ARM_JUMP24
10661 || r_type
== R_ARM_PLT32
)
10663 enum elf32_arm_stub_type stub_type
= arm_stub_none
;
10664 struct elf32_arm_link_hash_entry
*hash
;
10666 hash
= (struct elf32_arm_link_hash_entry
*) h
;
10667 stub_type
= arm_type_of_stub (info
, input_section
, rel
,
10668 st_type
, &branch_type
,
10669 hash
, value
, sym_sec
,
10670 input_bfd
, sym_name
);
10672 if (stub_type
!= arm_stub_none
)
10674 /* The target is out of reach, so redirect the
10675 branch to the local stub for this function. */
10676 stub_entry
= elf32_arm_get_stub_entry (input_section
,
10681 if (stub_entry
!= NULL
)
10682 value
= (stub_entry
->stub_offset
10683 + stub_entry
->stub_sec
->output_offset
10684 + stub_entry
->stub_sec
->output_section
->vma
);
10686 if (plt_offset
!= (bfd_vma
) -1)
10687 *unresolved_reloc_p
= FALSE
;
10692 /* If the call goes through a PLT entry, make sure to
10693 check distance to the right destination address. */
10694 if (plt_offset
!= (bfd_vma
) -1)
10696 value
= (splt
->output_section
->vma
10697 + splt
->output_offset
10699 *unresolved_reloc_p
= FALSE
;
10700 /* The PLT entry is in ARM mode, regardless of the
10701 target function. */
10702 branch_type
= ST_BRANCH_TO_ARM
;
10707 /* The ARM ELF ABI says that this reloc is computed as: S - P + A
10709 S is the address of the symbol in the relocation.
10710 P is address of the instruction being relocated.
10711 A is the addend (extracted from the instruction) in bytes.
10713 S is held in 'value'.
10714 P is the base address of the section containing the
10715 instruction plus the offset of the reloc into that
10717 (input_section->output_section->vma +
10718 input_section->output_offset +
10720 A is the addend, converted into bytes, ie:
10721 (signed_addend * 4)
10723 Note: None of these operations have knowledge of the pipeline
10724 size of the processor, thus it is up to the assembler to
10725 encode this information into the addend. */
10726 value
-= (input_section
->output_section
->vma
10727 + input_section
->output_offset
);
10728 value
-= rel
->r_offset
;
10729 if (globals
->use_rel
)
10730 value
+= (signed_addend
<< howto
->size
);
10732 /* RELA addends do not have to be adjusted by howto->size. */
10733 value
+= signed_addend
;
10735 signed_addend
= value
;
10736 signed_addend
>>= howto
->rightshift
;
10738 /* A branch to an undefined weak symbol is turned into a jump to
10739 the next instruction unless a PLT entry will be created.
10740 Do the same for local undefined symbols (but not for STN_UNDEF).
10741 The jump to the next instruction is optimized as a NOP depending
10742 on the architecture. */
10743 if (h
? (h
->root
.type
== bfd_link_hash_undefweak
10744 && plt_offset
== (bfd_vma
) -1)
10745 : r_symndx
!= STN_UNDEF
&& bfd_is_und_section (sym_sec
))
10747 value
= (bfd_get_32 (input_bfd
, hit_data
) & 0xf0000000);
10749 if (arch_has_arm_nop (globals
))
10750 value
|= 0x0320f000;
10752 value
|= 0x01a00000; /* Using pre-UAL nop: mov r0, r0. */
10756 /* Perform a signed range check. */
10757 if ( signed_addend
> ((bfd_signed_vma
) (howto
->dst_mask
>> 1))
10758 || signed_addend
< - ((bfd_signed_vma
) ((howto
->dst_mask
+ 1) >> 1)))
10759 return bfd_reloc_overflow
;
10761 addend
= (value
& 2);
10763 value
= (signed_addend
& howto
->dst_mask
)
10764 | (bfd_get_32 (input_bfd
, hit_data
) & (~ howto
->dst_mask
));
10766 if (r_type
== R_ARM_CALL
)
10768 /* Set the H bit in the BLX instruction. */
10769 if (branch_type
== ST_BRANCH_TO_THUMB
)
10772 value
|= (1 << 24);
10774 value
&= ~(bfd_vma
)(1 << 24);
10777 /* Select the correct instruction (BL or BLX). */
10778 /* Only if we are not handling a BL to a stub. In this
10779 case, mode switching is performed by the stub. */
10780 if (branch_type
== ST_BRANCH_TO_THUMB
&& !stub_entry
)
10781 value
|= (1 << 28);
10782 else if (stub_entry
|| branch_type
!= ST_BRANCH_UNKNOWN
)
10784 value
&= ~(bfd_vma
)(1 << 28);
10785 value
|= (1 << 24);
10794 if (branch_type
== ST_BRANCH_TO_THUMB
)
10798 case R_ARM_ABS32_NOI
:
10804 if (branch_type
== ST_BRANCH_TO_THUMB
)
10806 value
-= (input_section
->output_section
->vma
10807 + input_section
->output_offset
+ rel
->r_offset
);
10810 case R_ARM_REL32_NOI
:
10812 value
-= (input_section
->output_section
->vma
10813 + input_section
->output_offset
+ rel
->r_offset
);
10817 value
-= (input_section
->output_section
->vma
10818 + input_section
->output_offset
+ rel
->r_offset
);
10819 value
+= signed_addend
;
10820 if (! h
|| h
->root
.type
!= bfd_link_hash_undefweak
)
10822 /* Check for overflow. */
10823 if ((value
^ (value
>> 1)) & (1 << 30))
10824 return bfd_reloc_overflow
;
10826 value
&= 0x7fffffff;
10827 value
|= (bfd_get_32 (input_bfd
, hit_data
) & 0x80000000);
10828 if (branch_type
== ST_BRANCH_TO_THUMB
)
10833 bfd_put_32 (input_bfd
, value
, hit_data
);
10834 return bfd_reloc_ok
;
10837 /* PR 16202: Refetch the addend using the correct size. */
10838 if (globals
->use_rel
)
10839 addend
= bfd_get_8 (input_bfd
, hit_data
);
10842 /* There is no way to tell whether the user intended to use a signed or
10843 unsigned addend. When checking for overflow we accept either,
10844 as specified by the AAELF. */
10845 if ((long) value
> 0xff || (long) value
< -0x80)
10846 return bfd_reloc_overflow
;
10848 bfd_put_8 (input_bfd
, value
, hit_data
);
10849 return bfd_reloc_ok
;
10852 /* PR 16202: Refetch the addend using the correct size. */
10853 if (globals
->use_rel
)
10854 addend
= bfd_get_16 (input_bfd
, hit_data
);
10857 /* See comment for R_ARM_ABS8. */
10858 if ((long) value
> 0xffff || (long) value
< -0x8000)
10859 return bfd_reloc_overflow
;
10861 bfd_put_16 (input_bfd
, value
, hit_data
);
10862 return bfd_reloc_ok
;
10864 case R_ARM_THM_ABS5
:
10865 /* Support ldr and str instructions for the thumb. */
10866 if (globals
->use_rel
)
10868 /* Need to refetch addend. */
10869 addend
= bfd_get_16 (input_bfd
, hit_data
) & howto
->src_mask
;
10870 /* ??? Need to determine shift amount from operand size. */
10871 addend
>>= howto
->rightshift
;
10875 /* ??? Isn't value unsigned? */
10876 if ((long) value
> 0x1f || (long) value
< -0x10)
10877 return bfd_reloc_overflow
;
10879 /* ??? Value needs to be properly shifted into place first. */
10880 value
|= bfd_get_16 (input_bfd
, hit_data
) & 0xf83f;
10881 bfd_put_16 (input_bfd
, value
, hit_data
);
10882 return bfd_reloc_ok
;
10884 case R_ARM_THM_ALU_PREL_11_0
:
10885 /* Corresponds to: addw.w reg, pc, #offset (and similarly for subw). */
10888 bfd_signed_vma relocation
;
10890 insn
= (bfd_get_16 (input_bfd
, hit_data
) << 16)
10891 | bfd_get_16 (input_bfd
, hit_data
+ 2);
10893 if (globals
->use_rel
)
10895 signed_addend
= (insn
& 0xff) | ((insn
& 0x7000) >> 4)
10896 | ((insn
& (1 << 26)) >> 15);
10897 if (insn
& 0xf00000)
10898 signed_addend
= -signed_addend
;
10901 relocation
= value
+ signed_addend
;
10902 relocation
-= Pa (input_section
->output_section
->vma
10903 + input_section
->output_offset
10906 /* PR 21523: Use an absolute value. The user of this reloc will
10907 have already selected an ADD or SUB insn appropriately. */
10908 value
= labs (relocation
);
10910 if (value
>= 0x1000)
10911 return bfd_reloc_overflow
;
10913 /* Destination is Thumb. Force bit 0 to 1 to reflect this. */
10914 if (branch_type
== ST_BRANCH_TO_THUMB
)
10917 insn
= (insn
& 0xfb0f8f00) | (value
& 0xff)
10918 | ((value
& 0x700) << 4)
10919 | ((value
& 0x800) << 15);
10920 if (relocation
< 0)
10923 bfd_put_16 (input_bfd
, insn
>> 16, hit_data
);
10924 bfd_put_16 (input_bfd
, insn
& 0xffff, hit_data
+ 2);
10926 return bfd_reloc_ok
;
10929 case R_ARM_THM_PC8
:
10930 /* PR 10073: This reloc is not generated by the GNU toolchain,
10931 but it is supported for compatibility with third party libraries
10932 generated by other compilers, specifically the ARM/IAR. */
10935 bfd_signed_vma relocation
;
10937 insn
= bfd_get_16 (input_bfd
, hit_data
);
10939 if (globals
->use_rel
)
10940 addend
= ((((insn
& 0x00ff) << 2) + 4) & 0x3ff) -4;
10942 relocation
= value
+ addend
;
10943 relocation
-= Pa (input_section
->output_section
->vma
10944 + input_section
->output_offset
10947 value
= relocation
;
10949 /* We do not check for overflow of this reloc. Although strictly
10950 speaking this is incorrect, it appears to be necessary in order
10951 to work with IAR generated relocs. Since GCC and GAS do not
10952 generate R_ARM_THM_PC8 relocs, the lack of a check should not be
10953 a problem for them. */
10956 insn
= (insn
& 0xff00) | (value
>> 2);
10958 bfd_put_16 (input_bfd
, insn
, hit_data
);
10960 return bfd_reloc_ok
;
10963 case R_ARM_THM_PC12
:
10964 /* Corresponds to: ldr.w reg, [pc, #offset]. */
10967 bfd_signed_vma relocation
;
10969 insn
= (bfd_get_16 (input_bfd
, hit_data
) << 16)
10970 | bfd_get_16 (input_bfd
, hit_data
+ 2);
10972 if (globals
->use_rel
)
10974 signed_addend
= insn
& 0xfff;
10975 if (!(insn
& (1 << 23)))
10976 signed_addend
= -signed_addend
;
10979 relocation
= value
+ signed_addend
;
10980 relocation
-= Pa (input_section
->output_section
->vma
10981 + input_section
->output_offset
10984 value
= relocation
;
10986 if (value
>= 0x1000)
10987 return bfd_reloc_overflow
;
10989 insn
= (insn
& 0xff7ff000) | value
;
10990 if (relocation
>= 0)
10993 bfd_put_16 (input_bfd
, insn
>> 16, hit_data
);
10994 bfd_put_16 (input_bfd
, insn
& 0xffff, hit_data
+ 2);
10996 return bfd_reloc_ok
;
10999 case R_ARM_THM_XPC22
:
11000 case R_ARM_THM_CALL
:
11001 case R_ARM_THM_JUMP24
:
11002 /* Thumb BL (branch long instruction). */
11004 bfd_vma relocation
;
11005 bfd_vma reloc_sign
;
11006 bfd_boolean overflow
= FALSE
;
11007 bfd_vma upper_insn
= bfd_get_16 (input_bfd
, hit_data
);
11008 bfd_vma lower_insn
= bfd_get_16 (input_bfd
, hit_data
+ 2);
11009 bfd_signed_vma reloc_signed_max
;
11010 bfd_signed_vma reloc_signed_min
;
11012 bfd_signed_vma signed_check
;
11014 const int thumb2
= using_thumb2 (globals
);
11015 const int thumb2_bl
= using_thumb2_bl (globals
);
11017 /* A branch to an undefined weak symbol is turned into a jump to
11018 the next instruction unless a PLT entry will be created.
11019 The jump to the next instruction is optimized as a NOP.W for
11020 Thumb-2 enabled architectures. */
11021 if (h
&& h
->root
.type
== bfd_link_hash_undefweak
11022 && plt_offset
== (bfd_vma
) -1)
11026 bfd_put_16 (input_bfd
, 0xf3af, hit_data
);
11027 bfd_put_16 (input_bfd
, 0x8000, hit_data
+ 2);
11031 bfd_put_16 (input_bfd
, 0xe000, hit_data
);
11032 bfd_put_16 (input_bfd
, 0xbf00, hit_data
+ 2);
11034 return bfd_reloc_ok
;
11037 /* Fetch the addend. We use the Thumb-2 encoding (backwards compatible
11038 with Thumb-1) involving the J1 and J2 bits. */
11039 if (globals
->use_rel
)
11041 bfd_vma s
= (upper_insn
& (1 << 10)) >> 10;
11042 bfd_vma upper
= upper_insn
& 0x3ff;
11043 bfd_vma lower
= lower_insn
& 0x7ff;
11044 bfd_vma j1
= (lower_insn
& (1 << 13)) >> 13;
11045 bfd_vma j2
= (lower_insn
& (1 << 11)) >> 11;
11046 bfd_vma i1
= j1
^ s
? 0 : 1;
11047 bfd_vma i2
= j2
^ s
? 0 : 1;
11049 addend
= (i1
<< 23) | (i2
<< 22) | (upper
<< 12) | (lower
<< 1);
11051 addend
= (addend
| ((s
? 0 : 1) << 24)) - (1 << 24);
11053 signed_addend
= addend
;
11056 if (r_type
== R_ARM_THM_XPC22
)
11058 /* Check for Thumb to Thumb call. */
11059 /* FIXME: Should we translate the instruction into a BL
11060 instruction instead ? */
11061 if (branch_type
== ST_BRANCH_TO_THUMB
)
11063 (_("%pB: warning: %s BLX instruction targets"
11064 " %s function '%s'"),
11065 input_bfd
, "Thumb",
11066 "Thumb", h
? h
->root
.root
.string
: "(local)");
11070 /* If it is not a call to Thumb, assume call to Arm.
11071 If it is a call relative to a section name, then it is not a
11072 function call at all, but rather a long jump. Calls through
11073 the PLT do not require stubs. */
11074 if (branch_type
== ST_BRANCH_TO_ARM
&& plt_offset
== (bfd_vma
) -1)
11076 if (globals
->use_blx
&& r_type
== R_ARM_THM_CALL
)
11078 /* Convert BL to BLX. */
11079 lower_insn
= (lower_insn
& ~0x1000) | 0x0800;
11081 else if (( r_type
!= R_ARM_THM_CALL
)
11082 && (r_type
!= R_ARM_THM_JUMP24
))
11084 if (elf32_thumb_to_arm_stub
11085 (info
, sym_name
, input_bfd
, output_bfd
, input_section
,
11086 hit_data
, sym_sec
, rel
->r_offset
, signed_addend
, value
,
11088 return bfd_reloc_ok
;
11090 return bfd_reloc_dangerous
;
11093 else if (branch_type
== ST_BRANCH_TO_THUMB
11094 && globals
->use_blx
11095 && r_type
== R_ARM_THM_CALL
)
11097 /* Make sure this is a BL. */
11098 lower_insn
|= 0x1800;
11102 enum elf32_arm_stub_type stub_type
= arm_stub_none
;
11103 if (r_type
== R_ARM_THM_CALL
|| r_type
== R_ARM_THM_JUMP24
)
11105 /* Check if a stub has to be inserted because the destination
11107 struct elf32_arm_stub_hash_entry
*stub_entry
;
11108 struct elf32_arm_link_hash_entry
*hash
;
11110 hash
= (struct elf32_arm_link_hash_entry
*) h
;
11112 stub_type
= arm_type_of_stub (info
, input_section
, rel
,
11113 st_type
, &branch_type
,
11114 hash
, value
, sym_sec
,
11115 input_bfd
, sym_name
);
11117 if (stub_type
!= arm_stub_none
)
11119 /* The target is out of reach or we are changing modes, so
11120 redirect the branch to the local stub for this
11122 stub_entry
= elf32_arm_get_stub_entry (input_section
,
11126 if (stub_entry
!= NULL
)
11128 value
= (stub_entry
->stub_offset
11129 + stub_entry
->stub_sec
->output_offset
11130 + stub_entry
->stub_sec
->output_section
->vma
);
11132 if (plt_offset
!= (bfd_vma
) -1)
11133 *unresolved_reloc_p
= FALSE
;
11136 /* If this call becomes a call to Arm, force BLX. */
11137 if (globals
->use_blx
&& (r_type
== R_ARM_THM_CALL
))
11140 && !arm_stub_is_thumb (stub_entry
->stub_type
))
11141 || branch_type
!= ST_BRANCH_TO_THUMB
)
11142 lower_insn
= (lower_insn
& ~0x1000) | 0x0800;
11147 /* Handle calls via the PLT. */
11148 if (stub_type
== arm_stub_none
&& plt_offset
!= (bfd_vma
) -1)
11150 value
= (splt
->output_section
->vma
11151 + splt
->output_offset
11154 if (globals
->use_blx
11155 && r_type
== R_ARM_THM_CALL
11156 && ! using_thumb_only (globals
))
11158 /* If the Thumb BLX instruction is available, convert
11159 the BL to a BLX instruction to call the ARM-mode
11161 lower_insn
= (lower_insn
& ~0x1000) | 0x0800;
11162 branch_type
= ST_BRANCH_TO_ARM
;
11166 if (! using_thumb_only (globals
))
11167 /* Target the Thumb stub before the ARM PLT entry. */
11168 value
-= PLT_THUMB_STUB_SIZE
;
11169 branch_type
= ST_BRANCH_TO_THUMB
;
11171 *unresolved_reloc_p
= FALSE
;
11174 relocation
= value
+ signed_addend
;
11176 relocation
-= (input_section
->output_section
->vma
11177 + input_section
->output_offset
11180 check
= relocation
>> howto
->rightshift
;
11182 /* If this is a signed value, the rightshift just dropped
11183 leading 1 bits (assuming twos complement). */
11184 if ((bfd_signed_vma
) relocation
>= 0)
11185 signed_check
= check
;
11187 signed_check
= check
| ~((bfd_vma
) -1 >> howto
->rightshift
);
11189 /* Calculate the permissible maximum and minimum values for
11190 this relocation according to whether we're relocating for
11192 bitsize
= howto
->bitsize
;
11195 reloc_signed_max
= (1 << (bitsize
- 1)) - 1;
11196 reloc_signed_min
= ~reloc_signed_max
;
11198 /* Assumes two's complement. */
11199 if (signed_check
> reloc_signed_max
|| signed_check
< reloc_signed_min
)
11202 if ((lower_insn
& 0x5000) == 0x4000)
11203 /* For a BLX instruction, make sure that the relocation is rounded up
11204 to a word boundary. This follows the semantics of the instruction
11205 which specifies that bit 1 of the target address will come from bit
11206 1 of the base address. */
11207 relocation
= (relocation
+ 2) & ~ 3;
11209 /* Put RELOCATION back into the insn. Assumes two's complement.
11210 We use the Thumb-2 encoding, which is safe even if dealing with
11211 a Thumb-1 instruction by virtue of our overflow check above. */
11212 reloc_sign
= (signed_check
< 0) ? 1 : 0;
11213 upper_insn
= (upper_insn
& ~(bfd_vma
) 0x7ff)
11214 | ((relocation
>> 12) & 0x3ff)
11215 | (reloc_sign
<< 10);
11216 lower_insn
= (lower_insn
& ~(bfd_vma
) 0x2fff)
11217 | (((!((relocation
>> 23) & 1)) ^ reloc_sign
) << 13)
11218 | (((!((relocation
>> 22) & 1)) ^ reloc_sign
) << 11)
11219 | ((relocation
>> 1) & 0x7ff);
11221 /* Put the relocated value back in the object file: */
11222 bfd_put_16 (input_bfd
, upper_insn
, hit_data
);
11223 bfd_put_16 (input_bfd
, lower_insn
, hit_data
+ 2);
11225 return (overflow
? bfd_reloc_overflow
: bfd_reloc_ok
);
11229 case R_ARM_THM_JUMP19
:
11230 /* Thumb32 conditional branch instruction. */
11232 bfd_vma relocation
;
11233 bfd_boolean overflow
= FALSE
;
11234 bfd_vma upper_insn
= bfd_get_16 (input_bfd
, hit_data
);
11235 bfd_vma lower_insn
= bfd_get_16 (input_bfd
, hit_data
+ 2);
11236 bfd_signed_vma reloc_signed_max
= 0xffffe;
11237 bfd_signed_vma reloc_signed_min
= -0x100000;
11238 bfd_signed_vma signed_check
;
11239 enum elf32_arm_stub_type stub_type
= arm_stub_none
;
11240 struct elf32_arm_stub_hash_entry
*stub_entry
;
11241 struct elf32_arm_link_hash_entry
*hash
;
11243 /* Need to refetch the addend, reconstruct the top three bits,
11244 and squish the two 11 bit pieces together. */
11245 if (globals
->use_rel
)
11247 bfd_vma S
= (upper_insn
& 0x0400) >> 10;
11248 bfd_vma upper
= (upper_insn
& 0x003f);
11249 bfd_vma J1
= (lower_insn
& 0x2000) >> 13;
11250 bfd_vma J2
= (lower_insn
& 0x0800) >> 11;
11251 bfd_vma lower
= (lower_insn
& 0x07ff);
11255 upper
|= (!S
) << 8;
11256 upper
-= 0x0100; /* Sign extend. */
11258 addend
= (upper
<< 12) | (lower
<< 1);
11259 signed_addend
= addend
;
11262 /* Handle calls via the PLT. */
11263 if (plt_offset
!= (bfd_vma
) -1)
11265 value
= (splt
->output_section
->vma
11266 + splt
->output_offset
11268 /* Target the Thumb stub before the ARM PLT entry. */
11269 value
-= PLT_THUMB_STUB_SIZE
;
11270 *unresolved_reloc_p
= FALSE
;
11273 hash
= (struct elf32_arm_link_hash_entry
*)h
;
11275 stub_type
= arm_type_of_stub (info
, input_section
, rel
,
11276 st_type
, &branch_type
,
11277 hash
, value
, sym_sec
,
11278 input_bfd
, sym_name
);
11279 if (stub_type
!= arm_stub_none
)
11281 stub_entry
= elf32_arm_get_stub_entry (input_section
,
11285 if (stub_entry
!= NULL
)
11287 value
= (stub_entry
->stub_offset
11288 + stub_entry
->stub_sec
->output_offset
11289 + stub_entry
->stub_sec
->output_section
->vma
);
11293 relocation
= value
+ signed_addend
;
11294 relocation
-= (input_section
->output_section
->vma
11295 + input_section
->output_offset
11297 signed_check
= (bfd_signed_vma
) relocation
;
11299 if (signed_check
> reloc_signed_max
|| signed_check
< reloc_signed_min
)
11302 /* Put RELOCATION back into the insn. */
11304 bfd_vma S
= (relocation
& 0x00100000) >> 20;
11305 bfd_vma J2
= (relocation
& 0x00080000) >> 19;
11306 bfd_vma J1
= (relocation
& 0x00040000) >> 18;
11307 bfd_vma hi
= (relocation
& 0x0003f000) >> 12;
11308 bfd_vma lo
= (relocation
& 0x00000ffe) >> 1;
11310 upper_insn
= (upper_insn
& 0xfbc0) | (S
<< 10) | hi
;
11311 lower_insn
= (lower_insn
& 0xd000) | (J1
<< 13) | (J2
<< 11) | lo
;
11314 /* Put the relocated value back in the object file: */
11315 bfd_put_16 (input_bfd
, upper_insn
, hit_data
);
11316 bfd_put_16 (input_bfd
, lower_insn
, hit_data
+ 2);
11318 return (overflow
? bfd_reloc_overflow
: bfd_reloc_ok
);
11321 case R_ARM_THM_JUMP11
:
11322 case R_ARM_THM_JUMP8
:
11323 case R_ARM_THM_JUMP6
:
11324 /* Thumb B (branch) instruction). */
11326 bfd_signed_vma relocation
;
11327 bfd_signed_vma reloc_signed_max
= (1 << (howto
->bitsize
- 1)) - 1;
11328 bfd_signed_vma reloc_signed_min
= ~ reloc_signed_max
;
11329 bfd_signed_vma signed_check
;
11331 /* CZB cannot jump backward. */
11332 if (r_type
== R_ARM_THM_JUMP6
)
11333 reloc_signed_min
= 0;
11335 if (globals
->use_rel
)
11337 /* Need to refetch addend. */
11338 addend
= bfd_get_16 (input_bfd
, hit_data
) & howto
->src_mask
;
11339 if (addend
& ((howto
->src_mask
+ 1) >> 1))
11341 signed_addend
= -1;
11342 signed_addend
&= ~ howto
->src_mask
;
11343 signed_addend
|= addend
;
11346 signed_addend
= addend
;
11347 /* The value in the insn has been right shifted. We need to
11348 undo this, so that we can perform the address calculation
11349 in terms of bytes. */
11350 signed_addend
<<= howto
->rightshift
;
11352 relocation
= value
+ signed_addend
;
11354 relocation
-= (input_section
->output_section
->vma
11355 + input_section
->output_offset
11358 relocation
>>= howto
->rightshift
;
11359 signed_check
= relocation
;
11361 if (r_type
== R_ARM_THM_JUMP6
)
11362 relocation
= ((relocation
& 0x0020) << 4) | ((relocation
& 0x001f) << 3);
11364 relocation
&= howto
->dst_mask
;
11365 relocation
|= (bfd_get_16 (input_bfd
, hit_data
) & (~ howto
->dst_mask
));
11367 bfd_put_16 (input_bfd
, relocation
, hit_data
);
11369 /* Assumes two's complement. */
11370 if (signed_check
> reloc_signed_max
|| signed_check
< reloc_signed_min
)
11371 return bfd_reloc_overflow
;
11373 return bfd_reloc_ok
;
11376 case R_ARM_ALU_PCREL7_0
:
11377 case R_ARM_ALU_PCREL15_8
:
11378 case R_ARM_ALU_PCREL23_15
:
11381 bfd_vma relocation
;
11383 insn
= bfd_get_32 (input_bfd
, hit_data
);
11384 if (globals
->use_rel
)
11386 /* Extract the addend. */
11387 addend
= (insn
& 0xff) << ((insn
& 0xf00) >> 7);
11388 signed_addend
= addend
;
11390 relocation
= value
+ signed_addend
;
11392 relocation
-= (input_section
->output_section
->vma
11393 + input_section
->output_offset
11395 insn
= (insn
& ~0xfff)
11396 | ((howto
->bitpos
<< 7) & 0xf00)
11397 | ((relocation
>> howto
->bitpos
) & 0xff);
11398 bfd_put_32 (input_bfd
, value
, hit_data
);
11400 return bfd_reloc_ok
;
11402 case R_ARM_GNU_VTINHERIT
:
11403 case R_ARM_GNU_VTENTRY
:
11404 return bfd_reloc_ok
;
11406 case R_ARM_GOTOFF32
:
11407 /* Relocation is relative to the start of the
11408 global offset table. */
11410 BFD_ASSERT (sgot
!= NULL
);
11412 return bfd_reloc_notsupported
;
11414 /* If we are addressing a Thumb function, we need to adjust the
11415 address by one, so that attempts to call the function pointer will
11416 correctly interpret it as Thumb code. */
11417 if (branch_type
== ST_BRANCH_TO_THUMB
)
11420 /* Note that sgot->output_offset is not involved in this
11421 calculation. We always want the start of .got. If we
11422 define _GLOBAL_OFFSET_TABLE in a different way, as is
11423 permitted by the ABI, we might have to change this
11425 value
-= sgot
->output_section
->vma
;
11426 return _bfd_final_link_relocate (howto
, input_bfd
, input_section
,
11427 contents
, rel
->r_offset
, value
,
11431 /* Use global offset table as symbol value. */
11432 BFD_ASSERT (sgot
!= NULL
);
11435 return bfd_reloc_notsupported
;
11437 *unresolved_reloc_p
= FALSE
;
11438 value
= sgot
->output_section
->vma
;
11439 return _bfd_final_link_relocate (howto
, input_bfd
, input_section
,
11440 contents
, rel
->r_offset
, value
,
11444 case R_ARM_GOT_PREL
:
11445 /* Relocation is to the entry for this symbol in the
11446 global offset table. */
11448 return bfd_reloc_notsupported
;
11450 if (dynreloc_st_type
== STT_GNU_IFUNC
11451 && plt_offset
!= (bfd_vma
) -1
11452 && (h
== NULL
|| SYMBOL_REFERENCES_LOCAL (info
, h
)))
11454 /* We have a relocation against a locally-binding STT_GNU_IFUNC
11455 symbol, and the relocation resolves directly to the runtime
11456 target rather than to the .iplt entry. This means that any
11457 .got entry would be the same value as the .igot.plt entry,
11458 so there's no point creating both. */
11459 sgot
= globals
->root
.igotplt
;
11460 value
= sgot
->output_offset
+ gotplt_offset
;
11462 else if (h
!= NULL
)
11466 off
= h
->got
.offset
;
11467 BFD_ASSERT (off
!= (bfd_vma
) -1);
11468 if ((off
& 1) != 0)
11470 /* We have already processed one GOT relocation against
11473 if (globals
->root
.dynamic_sections_created
11474 && !SYMBOL_REFERENCES_LOCAL (info
, h
))
11475 *unresolved_reloc_p
= FALSE
;
11479 Elf_Internal_Rela outrel
;
11482 if (((h
->dynindx
!= -1) || globals
->fdpic_p
)
11483 && !SYMBOL_REFERENCES_LOCAL (info
, h
))
11485 /* If the symbol doesn't resolve locally in a static
11486 object, we have an undefined reference. If the
11487 symbol doesn't resolve locally in a dynamic object,
11488 it should be resolved by the dynamic linker. */
11489 if (globals
->root
.dynamic_sections_created
)
11491 outrel
.r_info
= ELF32_R_INFO (h
->dynindx
, R_ARM_GLOB_DAT
);
11492 *unresolved_reloc_p
= FALSE
;
11496 outrel
.r_addend
= 0;
11500 if (dynreloc_st_type
== STT_GNU_IFUNC
)
11501 outrel
.r_info
= ELF32_R_INFO (0, R_ARM_IRELATIVE
);
11502 else if (bfd_link_pic (info
)
11503 && (ELF_ST_VISIBILITY (h
->other
) == STV_DEFAULT
11504 || h
->root
.type
!= bfd_link_hash_undefweak
))
11505 outrel
.r_info
= ELF32_R_INFO (0, R_ARM_RELATIVE
);
11509 if (globals
->fdpic_p
)
11512 outrel
.r_addend
= dynreloc_value
;
11515 /* The GOT entry is initialized to zero by default.
11516 See if we should install a different value. */
11517 if (outrel
.r_addend
!= 0
11518 && (globals
->use_rel
|| outrel
.r_info
== 0))
11520 bfd_put_32 (output_bfd
, outrel
.r_addend
,
11521 sgot
->contents
+ off
);
11522 outrel
.r_addend
= 0;
11526 arm_elf_add_rofixup (output_bfd
,
11527 elf32_arm_hash_table(info
)->srofixup
,
11528 sgot
->output_section
->vma
11529 + sgot
->output_offset
+ off
);
11531 else if (outrel
.r_info
!= 0)
11533 outrel
.r_offset
= (sgot
->output_section
->vma
11534 + sgot
->output_offset
11536 elf32_arm_add_dynreloc (output_bfd
, info
, srelgot
, &outrel
);
11539 h
->got
.offset
|= 1;
11541 value
= sgot
->output_offset
+ off
;
11547 BFD_ASSERT (local_got_offsets
!= NULL
11548 && local_got_offsets
[r_symndx
] != (bfd_vma
) -1);
11550 off
= local_got_offsets
[r_symndx
];
11552 /* The offset must always be a multiple of 4. We use the
11553 least significant bit to record whether we have already
11554 generated the necessary reloc. */
11555 if ((off
& 1) != 0)
11559 Elf_Internal_Rela outrel
;
11562 if (dynreloc_st_type
== STT_GNU_IFUNC
)
11563 outrel
.r_info
= ELF32_R_INFO (0, R_ARM_IRELATIVE
);
11564 else if (bfd_link_pic (info
))
11565 outrel
.r_info
= ELF32_R_INFO (0, R_ARM_RELATIVE
);
11569 if (globals
->fdpic_p
)
11573 /* The GOT entry is initialized to zero by default.
11574 See if we should install a different value. */
11575 if (globals
->use_rel
|| outrel
.r_info
== 0)
11576 bfd_put_32 (output_bfd
, dynreloc_value
, sgot
->contents
+ off
);
11579 arm_elf_add_rofixup (output_bfd
,
11581 sgot
->output_section
->vma
11582 + sgot
->output_offset
+ off
);
11584 else if (outrel
.r_info
!= 0)
11586 outrel
.r_addend
= addend
+ dynreloc_value
;
11587 outrel
.r_offset
= (sgot
->output_section
->vma
11588 + sgot
->output_offset
11590 elf32_arm_add_dynreloc (output_bfd
, info
, srelgot
, &outrel
);
11593 local_got_offsets
[r_symndx
] |= 1;
11596 value
= sgot
->output_offset
+ off
;
11598 if (r_type
!= R_ARM_GOT32
)
11599 value
+= sgot
->output_section
->vma
;
11601 return _bfd_final_link_relocate (howto
, input_bfd
, input_section
,
11602 contents
, rel
->r_offset
, value
,
11605 case R_ARM_TLS_LDO32
:
11606 value
= value
- dtpoff_base (info
);
11608 return _bfd_final_link_relocate (howto
, input_bfd
, input_section
,
11609 contents
, rel
->r_offset
, value
,
11612 case R_ARM_TLS_LDM32
:
11613 case R_ARM_TLS_LDM32_FDPIC
:
11620 off
= globals
->tls_ldm_got
.offset
;
11622 if ((off
& 1) != 0)
11626 /* If we don't know the module number, create a relocation
11628 if (bfd_link_pic (info
))
11630 Elf_Internal_Rela outrel
;
11632 if (srelgot
== NULL
)
11635 outrel
.r_addend
= 0;
11636 outrel
.r_offset
= (sgot
->output_section
->vma
11637 + sgot
->output_offset
+ off
);
11638 outrel
.r_info
= ELF32_R_INFO (0, R_ARM_TLS_DTPMOD32
);
11640 if (globals
->use_rel
)
11641 bfd_put_32 (output_bfd
, outrel
.r_addend
,
11642 sgot
->contents
+ off
);
11644 elf32_arm_add_dynreloc (output_bfd
, info
, srelgot
, &outrel
);
11647 bfd_put_32 (output_bfd
, 1, sgot
->contents
+ off
);
11649 globals
->tls_ldm_got
.offset
|= 1;
11652 if (r_type
== R_ARM_TLS_LDM32_FDPIC
)
11654 bfd_put_32(output_bfd
,
11655 globals
->root
.sgot
->output_offset
+ off
,
11656 contents
+ rel
->r_offset
);
11658 return bfd_reloc_ok
;
11662 value
= sgot
->output_section
->vma
+ sgot
->output_offset
+ off
11663 - (input_section
->output_section
->vma
11664 + input_section
->output_offset
+ rel
->r_offset
);
11666 return _bfd_final_link_relocate (howto
, input_bfd
, input_section
,
11667 contents
, rel
->r_offset
, value
,
11672 case R_ARM_TLS_CALL
:
11673 case R_ARM_THM_TLS_CALL
:
11674 case R_ARM_TLS_GD32
:
11675 case R_ARM_TLS_GD32_FDPIC
:
11676 case R_ARM_TLS_IE32
:
11677 case R_ARM_TLS_IE32_FDPIC
:
11678 case R_ARM_TLS_GOTDESC
:
11679 case R_ARM_TLS_DESCSEQ
:
11680 case R_ARM_THM_TLS_DESCSEQ
:
11682 bfd_vma off
, offplt
;
11686 BFD_ASSERT (sgot
!= NULL
);
11691 dyn
= globals
->root
.dynamic_sections_created
;
11692 if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn
,
11693 bfd_link_pic (info
),
11695 && (!bfd_link_pic (info
)
11696 || !SYMBOL_REFERENCES_LOCAL (info
, h
)))
11698 *unresolved_reloc_p
= FALSE
;
11701 off
= h
->got
.offset
;
11702 offplt
= elf32_arm_hash_entry (h
)->tlsdesc_got
;
11703 tls_type
= ((struct elf32_arm_link_hash_entry
*) h
)->tls_type
;
11707 BFD_ASSERT (local_got_offsets
!= NULL
);
11708 off
= local_got_offsets
[r_symndx
];
11709 offplt
= local_tlsdesc_gotents
[r_symndx
];
11710 tls_type
= elf32_arm_local_got_tls_type (input_bfd
)[r_symndx
];
11713	      /* Linker relaxation happens from one of the
11714	 R_ARM_{GOTDESC,CALL,DESCSEQ} relocations to IE or LE. */
11715 if (ELF32_R_TYPE(rel
->r_info
) != r_type
)
11716 tls_type
= GOT_TLS_IE
;
11718 BFD_ASSERT (tls_type
!= GOT_UNKNOWN
);
11720 if ((off
& 1) != 0)
11724 bfd_boolean need_relocs
= FALSE
;
11725 Elf_Internal_Rela outrel
;
11728 /* The GOT entries have not been initialized yet. Do it
11729 now, and emit any relocations. If both an IE GOT and a
11730 GD GOT are necessary, we emit the GD first. */
11732 if ((bfd_link_pic (info
) || indx
!= 0)
11734 || (ELF_ST_VISIBILITY (h
->other
) == STV_DEFAULT
11735 && !resolved_to_zero
)
11736 || h
->root
.type
!= bfd_link_hash_undefweak
))
11738 need_relocs
= TRUE
;
11739 BFD_ASSERT (srelgot
!= NULL
);
11742 if (tls_type
& GOT_TLS_GDESC
)
11746 /* We should have relaxed, unless this is an undefined
11748 BFD_ASSERT ((h
&& (h
->root
.type
== bfd_link_hash_undefweak
))
11749 || bfd_link_pic (info
));
11750 BFD_ASSERT (globals
->sgotplt_jump_table_size
+ offplt
+ 8
11751 <= globals
->root
.sgotplt
->size
);
11753 outrel
.r_addend
= 0;
11754 outrel
.r_offset
= (globals
->root
.sgotplt
->output_section
->vma
11755 + globals
->root
.sgotplt
->output_offset
11757 + globals
->sgotplt_jump_table_size
);
11759 outrel
.r_info
= ELF32_R_INFO (indx
, R_ARM_TLS_DESC
);
11760 sreloc
= globals
->root
.srelplt
;
11761 loc
= sreloc
->contents
;
11762 loc
+= globals
->next_tls_desc_index
++ * RELOC_SIZE (globals
);
11763 BFD_ASSERT (loc
+ RELOC_SIZE (globals
)
11764 <= sreloc
->contents
+ sreloc
->size
);
11766 SWAP_RELOC_OUT (globals
) (output_bfd
, &outrel
, loc
);
11768 /* For globals, the first word in the relocation gets
11769 the relocation index and the top bit set, or zero,
11770 if we're binding now. For locals, it gets the
11771 symbol's offset in the tls section. */
11772 bfd_put_32 (output_bfd
,
11773 !h
? value
- elf_hash_table (info
)->tls_sec
->vma
11774 : info
->flags
& DF_BIND_NOW
? 0
11775 : 0x80000000 | ELF32_R_SYM (outrel
.r_info
),
11776 globals
->root
.sgotplt
->contents
+ offplt
11777 + globals
->sgotplt_jump_table_size
);
11779 /* Second word in the relocation is always zero. */
11780 bfd_put_32 (output_bfd
, 0,
11781 globals
->root
.sgotplt
->contents
+ offplt
11782 + globals
->sgotplt_jump_table_size
+ 4);
11784 if (tls_type
& GOT_TLS_GD
)
11788 outrel
.r_addend
= 0;
11789 outrel
.r_offset
= (sgot
->output_section
->vma
11790 + sgot
->output_offset
11792 outrel
.r_info
= ELF32_R_INFO (indx
, R_ARM_TLS_DTPMOD32
);
11794 if (globals
->use_rel
)
11795 bfd_put_32 (output_bfd
, outrel
.r_addend
,
11796 sgot
->contents
+ cur_off
);
11798 elf32_arm_add_dynreloc (output_bfd
, info
, srelgot
, &outrel
);
11801 bfd_put_32 (output_bfd
, value
- dtpoff_base (info
),
11802 sgot
->contents
+ cur_off
+ 4);
11805 outrel
.r_addend
= 0;
11806 outrel
.r_info
= ELF32_R_INFO (indx
,
11807 R_ARM_TLS_DTPOFF32
);
11808 outrel
.r_offset
+= 4;
11810 if (globals
->use_rel
)
11811 bfd_put_32 (output_bfd
, outrel
.r_addend
,
11812 sgot
->contents
+ cur_off
+ 4);
11814 elf32_arm_add_dynreloc (output_bfd
, info
,
11820 /* If we are not emitting relocations for a
11821 general dynamic reference, then we must be in a
11822 static link or an executable link with the
11823 symbol binding locally. Mark it as belonging
11824 to module 1, the executable. */
11825 bfd_put_32 (output_bfd
, 1,
11826 sgot
->contents
+ cur_off
);
11827 bfd_put_32 (output_bfd
, value
- dtpoff_base (info
),
11828 sgot
->contents
+ cur_off
+ 4);
11834 if (tls_type
& GOT_TLS_IE
)
11839 outrel
.r_addend
= value
- dtpoff_base (info
);
11841 outrel
.r_addend
= 0;
11842 outrel
.r_offset
= (sgot
->output_section
->vma
11843 + sgot
->output_offset
11845 outrel
.r_info
= ELF32_R_INFO (indx
, R_ARM_TLS_TPOFF32
);
11847 if (globals
->use_rel
)
11848 bfd_put_32 (output_bfd
, outrel
.r_addend
,
11849 sgot
->contents
+ cur_off
);
11851 elf32_arm_add_dynreloc (output_bfd
, info
, srelgot
, &outrel
);
11854 bfd_put_32 (output_bfd
, tpoff (info
, value
),
11855 sgot
->contents
+ cur_off
);
11860 h
->got
.offset
|= 1;
11862 local_got_offsets
[r_symndx
] |= 1;
11865 if ((tls_type
& GOT_TLS_GD
) && r_type
!= R_ARM_TLS_GD32
&& r_type
!= R_ARM_TLS_GD32_FDPIC
)
11867 else if (tls_type
& GOT_TLS_GDESC
)
11870 if (ELF32_R_TYPE(rel
->r_info
) == R_ARM_TLS_CALL
11871 || ELF32_R_TYPE(rel
->r_info
) == R_ARM_THM_TLS_CALL
)
11873 bfd_signed_vma offset
;
11874 /* TLS stubs are arm mode. The original symbol is a
11875 data object, so branch_type is bogus. */
11876 branch_type
= ST_BRANCH_TO_ARM
;
11877 enum elf32_arm_stub_type stub_type
11878 = arm_type_of_stub (info
, input_section
, rel
,
11879 st_type
, &branch_type
,
11880 (struct elf32_arm_link_hash_entry
*)h
,
11881 globals
->tls_trampoline
, globals
->root
.splt
,
11882 input_bfd
, sym_name
);
11884 if (stub_type
!= arm_stub_none
)
11886 struct elf32_arm_stub_hash_entry
*stub_entry
11887 = elf32_arm_get_stub_entry
11888 (input_section
, globals
->root
.splt
, 0, rel
,
11889 globals
, stub_type
);
11890 offset
= (stub_entry
->stub_offset
11891 + stub_entry
->stub_sec
->output_offset
11892 + stub_entry
->stub_sec
->output_section
->vma
);
11895 offset
= (globals
->root
.splt
->output_section
->vma
11896 + globals
->root
.splt
->output_offset
11897 + globals
->tls_trampoline
);
11899 if (ELF32_R_TYPE(rel
->r_info
) == R_ARM_TLS_CALL
)
11901 unsigned long inst
;
11903 offset
-= (input_section
->output_section
->vma
11904 + input_section
->output_offset
11905 + rel
->r_offset
+ 8);
11907 inst
= offset
>> 2;
11908 inst
&= 0x00ffffff;
11909 value
= inst
| (globals
->use_blx
? 0xfa000000 : 0xeb000000);
11913 /* Thumb blx encodes the offset in a complicated
11915 unsigned upper_insn
, lower_insn
;
11918 offset
-= (input_section
->output_section
->vma
11919 + input_section
->output_offset
11920 + rel
->r_offset
+ 4);
11922 if (stub_type
!= arm_stub_none
11923 && arm_stub_is_thumb (stub_type
))
11925 lower_insn
= 0xd000;
11929 lower_insn
= 0xc000;
11930 /* Round up the offset to a word boundary. */
11931 offset
= (offset
+ 2) & ~2;
11935 upper_insn
= (0xf000
11936 | ((offset
>> 12) & 0x3ff)
11938 lower_insn
|= (((!((offset
>> 23) & 1)) ^ neg
) << 13)
11939 | (((!((offset
>> 22) & 1)) ^ neg
) << 11)
11940 | ((offset
>> 1) & 0x7ff);
11941 bfd_put_16 (input_bfd
, upper_insn
, hit_data
);
11942 bfd_put_16 (input_bfd
, lower_insn
, hit_data
+ 2);
11943 return bfd_reloc_ok
;
11946	  /* These relocations need special care, as besides the fact
11947 they point somewhere in .gotplt, the addend must be
11948 adjusted accordingly depending on the type of instruction
11950 else if ((r_type
== R_ARM_TLS_GOTDESC
) && (tls_type
& GOT_TLS_GDESC
))
11952 unsigned long data
, insn
;
11955 data
= bfd_get_32 (input_bfd
, hit_data
);
11961 insn
= bfd_get_16 (input_bfd
, contents
+ rel
->r_offset
- data
);
11962 if ((insn
& 0xf000) == 0xf000 || (insn
& 0xf800) == 0xe800)
11963 insn
= (insn
<< 16)
11964 | bfd_get_16 (input_bfd
,
11965 contents
+ rel
->r_offset
- data
+ 2);
11966 if ((insn
& 0xf800c000) == 0xf000c000)
11969 else if ((insn
& 0xffffff00) == 0x4400)
11975 /* xgettext:c-format */
11976 (_("%pB(%pA+%#" PRIx64
"): "
11977 "unexpected %s instruction '%#lx' "
11978 "referenced by TLS_GOTDESC"),
11979 input_bfd
, input_section
, (uint64_t) rel
->r_offset
,
11981 return bfd_reloc_notsupported
;
11986 insn
= bfd_get_32 (input_bfd
, contents
+ rel
->r_offset
- data
);
11988 switch (insn
>> 24)
11990 case 0xeb: /* bl */
11991 case 0xfa: /* blx */
11995 case 0xe0: /* add */
12001 /* xgettext:c-format */
12002 (_("%pB(%pA+%#" PRIx64
"): "
12003 "unexpected %s instruction '%#lx' "
12004 "referenced by TLS_GOTDESC"),
12005 input_bfd
, input_section
, (uint64_t) rel
->r_offset
,
12007 return bfd_reloc_notsupported
;
12011 value
+= ((globals
->root
.sgotplt
->output_section
->vma
12012 + globals
->root
.sgotplt
->output_offset
+ off
)
12013 - (input_section
->output_section
->vma
12014 + input_section
->output_offset
12016 + globals
->sgotplt_jump_table_size
);
12019 value
= ((globals
->root
.sgot
->output_section
->vma
12020 + globals
->root
.sgot
->output_offset
+ off
)
12021 - (input_section
->output_section
->vma
12022 + input_section
->output_offset
+ rel
->r_offset
));
12024 if (globals
->fdpic_p
&& (r_type
== R_ARM_TLS_GD32_FDPIC
||
12025 r_type
== R_ARM_TLS_IE32_FDPIC
))
12027 /* For FDPIC relocations, resolve to the offset of the GOT
12028 entry from the start of GOT. */
12029 bfd_put_32(output_bfd
,
12030 globals
->root
.sgot
->output_offset
+ off
,
12031 contents
+ rel
->r_offset
);
12033 return bfd_reloc_ok
;
12037 return _bfd_final_link_relocate (howto
, input_bfd
, input_section
,
12038 contents
, rel
->r_offset
, value
,
12043 case R_ARM_TLS_LE32
:
12044 if (bfd_link_dll (info
))
12047 /* xgettext:c-format */
12048 (_("%pB(%pA+%#" PRIx64
"): %s relocation not permitted "
12049 "in shared object"),
12050 input_bfd
, input_section
, (uint64_t) rel
->r_offset
, howto
->name
);
12051 return bfd_reloc_notsupported
;
12054 value
= tpoff (info
, value
);
12056 return _bfd_final_link_relocate (howto
, input_bfd
, input_section
,
12057 contents
, rel
->r_offset
, value
,
12061 if (globals
->fix_v4bx
)
12063 bfd_vma insn
= bfd_get_32 (input_bfd
, hit_data
);
12065 /* Ensure that we have a BX instruction. */
12066 BFD_ASSERT ((insn
& 0x0ffffff0) == 0x012fff10);
12068 if (globals
->fix_v4bx
== 2 && (insn
& 0xf) != 0xf)
12070 /* Branch to veneer. */
12072 glue_addr
= elf32_arm_bx_glue (info
, insn
& 0xf);
12073 glue_addr
-= input_section
->output_section
->vma
12074 + input_section
->output_offset
12075 + rel
->r_offset
+ 8;
12076 insn
= (insn
& 0xf0000000) | 0x0a000000
12077 | ((glue_addr
>> 2) & 0x00ffffff);
12081 /* Preserve Rm (lowest four bits) and the condition code
12082 (highest four bits). Other bits encode MOV PC,Rm. */
12083 insn
= (insn
& 0xf000000f) | 0x01a0f000;
12086 bfd_put_32 (input_bfd
, insn
, hit_data
);
12088 return bfd_reloc_ok
;
12090 case R_ARM_MOVW_ABS_NC
:
12091 case R_ARM_MOVT_ABS
:
12092 case R_ARM_MOVW_PREL_NC
:
12093 case R_ARM_MOVT_PREL
:
12094 /* Until we properly support segment-base-relative addressing then
12095 we assume the segment base to be zero, as for the group relocations.
12096 Thus R_ARM_MOVW_BREL_NC has the same semantics as R_ARM_MOVW_ABS_NC
12097 and R_ARM_MOVT_BREL has the same semantics as R_ARM_MOVT_ABS. */
12098 case R_ARM_MOVW_BREL_NC
:
12099 case R_ARM_MOVW_BREL
:
12100 case R_ARM_MOVT_BREL
:
12102 bfd_vma insn
= bfd_get_32 (input_bfd
, hit_data
);
12104 if (globals
->use_rel
)
12106 addend
= ((insn
>> 4) & 0xf000) | (insn
& 0xfff);
12107 signed_addend
= (addend
^ 0x8000) - 0x8000;
12110 value
+= signed_addend
;
12112 if (r_type
== R_ARM_MOVW_PREL_NC
|| r_type
== R_ARM_MOVT_PREL
)
12113 value
-= (input_section
->output_section
->vma
12114 + input_section
->output_offset
+ rel
->r_offset
);
12116 if (r_type
== R_ARM_MOVW_BREL
&& value
>= 0x10000)
12117 return bfd_reloc_overflow
;
12119 if (branch_type
== ST_BRANCH_TO_THUMB
)
12122 if (r_type
== R_ARM_MOVT_ABS
|| r_type
== R_ARM_MOVT_PREL
12123 || r_type
== R_ARM_MOVT_BREL
)
12126 insn
&= 0xfff0f000;
12127 insn
|= value
& 0xfff;
12128 insn
|= (value
& 0xf000) << 4;
12129 bfd_put_32 (input_bfd
, insn
, hit_data
);
12131 return bfd_reloc_ok
;
12133 case R_ARM_THM_MOVW_ABS_NC
:
12134 case R_ARM_THM_MOVT_ABS
:
12135 case R_ARM_THM_MOVW_PREL_NC
:
12136 case R_ARM_THM_MOVT_PREL
:
12137 /* Until we properly support segment-base-relative addressing then
12138 we assume the segment base to be zero, as for the above relocations.
12139 Thus R_ARM_THM_MOVW_BREL_NC has the same semantics as
12140 R_ARM_THM_MOVW_ABS_NC and R_ARM_THM_MOVT_BREL has the same semantics
12141 as R_ARM_THM_MOVT_ABS. */
12142 case R_ARM_THM_MOVW_BREL_NC
:
12143 case R_ARM_THM_MOVW_BREL
:
12144 case R_ARM_THM_MOVT_BREL
:
12148 insn
= bfd_get_16 (input_bfd
, hit_data
) << 16;
12149 insn
|= bfd_get_16 (input_bfd
, hit_data
+ 2);
12151 if (globals
->use_rel
)
12153 addend
= ((insn
>> 4) & 0xf000)
12154 | ((insn
>> 15) & 0x0800)
12155 | ((insn
>> 4) & 0x0700)
12157 signed_addend
= (addend
^ 0x8000) - 0x8000;
12160 value
+= signed_addend
;
12162 if (r_type
== R_ARM_THM_MOVW_PREL_NC
|| r_type
== R_ARM_THM_MOVT_PREL
)
12163 value
-= (input_section
->output_section
->vma
12164 + input_section
->output_offset
+ rel
->r_offset
);
12166 if (r_type
== R_ARM_THM_MOVW_BREL
&& value
>= 0x10000)
12167 return bfd_reloc_overflow
;
12169 if (branch_type
== ST_BRANCH_TO_THUMB
)
12172 if (r_type
== R_ARM_THM_MOVT_ABS
|| r_type
== R_ARM_THM_MOVT_PREL
12173 || r_type
== R_ARM_THM_MOVT_BREL
)
12176 insn
&= 0xfbf08f00;
12177 insn
|= (value
& 0xf000) << 4;
12178 insn
|= (value
& 0x0800) << 15;
12179 insn
|= (value
& 0x0700) << 4;
12180 insn
|= (value
& 0x00ff);
12182 bfd_put_16 (input_bfd
, insn
>> 16, hit_data
);
12183 bfd_put_16 (input_bfd
, insn
& 0xffff, hit_data
+ 2);
12185 return bfd_reloc_ok
;
12187 case R_ARM_ALU_PC_G0_NC
:
12188 case R_ARM_ALU_PC_G1_NC
:
12189 case R_ARM_ALU_PC_G0
:
12190 case R_ARM_ALU_PC_G1
:
12191 case R_ARM_ALU_PC_G2
:
12192 case R_ARM_ALU_SB_G0_NC
:
12193 case R_ARM_ALU_SB_G1_NC
:
12194 case R_ARM_ALU_SB_G0
:
12195 case R_ARM_ALU_SB_G1
:
12196 case R_ARM_ALU_SB_G2
:
12198 bfd_vma insn
= bfd_get_32 (input_bfd
, hit_data
);
12199 bfd_vma pc
= input_section
->output_section
->vma
12200 + input_section
->output_offset
+ rel
->r_offset
;
12201 /* sb is the origin of the *segment* containing the symbol. */
12202 bfd_vma sb
= sym_sec
? sym_sec
->output_section
->vma
: 0;
12205 bfd_signed_vma signed_value
;
12208 /* Determine which group of bits to select. */
12211 case R_ARM_ALU_PC_G0_NC
:
12212 case R_ARM_ALU_PC_G0
:
12213 case R_ARM_ALU_SB_G0_NC
:
12214 case R_ARM_ALU_SB_G0
:
12218 case R_ARM_ALU_PC_G1_NC
:
12219 case R_ARM_ALU_PC_G1
:
12220 case R_ARM_ALU_SB_G1_NC
:
12221 case R_ARM_ALU_SB_G1
:
12225 case R_ARM_ALU_PC_G2
:
12226 case R_ARM_ALU_SB_G2
:
12234 /* If REL, extract the addend from the insn. If RELA, it will
12235 have already been fetched for us. */
12236 if (globals
->use_rel
)
12239 bfd_vma constant
= insn
& 0xff;
12240 bfd_vma rotation
= (insn
& 0xf00) >> 8;
12243 signed_addend
= constant
;
12246 /* Compensate for the fact that in the instruction, the
12247 rotation is stored in multiples of 2 bits. */
12250 /* Rotate "constant" right by "rotation" bits. */
12251 signed_addend
= (constant
>> rotation
) |
12252 (constant
<< (8 * sizeof (bfd_vma
) - rotation
));
12255 /* Determine if the instruction is an ADD or a SUB.
12256 (For REL, this determines the sign of the addend.) */
12257 negative
= identify_add_or_sub (insn
);
12261 /* xgettext:c-format */
12262 (_("%pB(%pA+%#" PRIx64
"): only ADD or SUB instructions "
12263 "are allowed for ALU group relocations"),
12264 input_bfd
, input_section
, (uint64_t) rel
->r_offset
);
12265 return bfd_reloc_overflow
;
12268 signed_addend
*= negative
;
12271 /* Compute the value (X) to go in the place. */
12272 if (r_type
== R_ARM_ALU_PC_G0_NC
12273 || r_type
== R_ARM_ALU_PC_G1_NC
12274 || r_type
== R_ARM_ALU_PC_G0
12275 || r_type
== R_ARM_ALU_PC_G1
12276 || r_type
== R_ARM_ALU_PC_G2
)
12278 signed_value
= value
- pc
+ signed_addend
;
12280 /* Section base relative. */
12281 signed_value
= value
- sb
+ signed_addend
;
12283 /* If the target symbol is a Thumb function, then set the
12284 Thumb bit in the address. */
12285 if (branch_type
== ST_BRANCH_TO_THUMB
)
12288 /* Calculate the value of the relevant G_n, in encoded
12289 constant-with-rotation format. */
12290 g_n
= calculate_group_reloc_mask (signed_value
< 0 ? - signed_value
: signed_value
,
12293 /* Check for overflow if required. */
12294 if ((r_type
== R_ARM_ALU_PC_G0
12295 || r_type
== R_ARM_ALU_PC_G1
12296 || r_type
== R_ARM_ALU_PC_G2
12297 || r_type
== R_ARM_ALU_SB_G0
12298 || r_type
== R_ARM_ALU_SB_G1
12299 || r_type
== R_ARM_ALU_SB_G2
) && residual
!= 0)
12302 /* xgettext:c-format */
12303 (_("%pB(%pA+%#" PRIx64
"): overflow whilst "
12304 "splitting %#" PRIx64
" for group relocation %s"),
12305 input_bfd
, input_section
, (uint64_t) rel
->r_offset
,
12306 (uint64_t) (signed_value
< 0 ? -signed_value
: signed_value
),
12308 return bfd_reloc_overflow
;
12311 /* Mask out the value and the ADD/SUB part of the opcode; take care
12312 not to destroy the S bit. */
12313 insn
&= 0xff1ff000;
12315 /* Set the opcode according to whether the value to go in the
12316 place is negative. */
12317 if (signed_value
< 0)
12322 /* Encode the offset. */
12325 bfd_put_32 (input_bfd
, insn
, hit_data
);
12327 return bfd_reloc_ok
;
12329 case R_ARM_LDR_PC_G0
:
12330 case R_ARM_LDR_PC_G1
:
12331 case R_ARM_LDR_PC_G2
:
12332 case R_ARM_LDR_SB_G0
:
12333 case R_ARM_LDR_SB_G1
:
12334 case R_ARM_LDR_SB_G2
:
12336 bfd_vma insn
= bfd_get_32 (input_bfd
, hit_data
);
12337 bfd_vma pc
= input_section
->output_section
->vma
12338 + input_section
->output_offset
+ rel
->r_offset
;
12339 /* sb is the origin of the *segment* containing the symbol. */
12340 bfd_vma sb
= sym_sec
? sym_sec
->output_section
->vma
: 0;
12342 bfd_signed_vma signed_value
;
12345 /* Determine which groups of bits to calculate. */
12348 case R_ARM_LDR_PC_G0
:
12349 case R_ARM_LDR_SB_G0
:
12353 case R_ARM_LDR_PC_G1
:
12354 case R_ARM_LDR_SB_G1
:
12358 case R_ARM_LDR_PC_G2
:
12359 case R_ARM_LDR_SB_G2
:
12367 /* If REL, extract the addend from the insn. If RELA, it will
12368 have already been fetched for us. */
12369 if (globals
->use_rel
)
12371 int negative
= (insn
& (1 << 23)) ? 1 : -1;
12372 signed_addend
= negative
* (insn
& 0xfff);
12375 /* Compute the value (X) to go in the place. */
12376 if (r_type
== R_ARM_LDR_PC_G0
12377 || r_type
== R_ARM_LDR_PC_G1
12378 || r_type
== R_ARM_LDR_PC_G2
)
12380 signed_value
= value
- pc
+ signed_addend
;
12382 /* Section base relative. */
12383 signed_value
= value
- sb
+ signed_addend
;
12385 /* Calculate the value of the relevant G_{n-1} to obtain
12386 the residual at that stage. */
12387 calculate_group_reloc_mask (signed_value
< 0 ? - signed_value
: signed_value
,
12388 group
- 1, &residual
);
12390 /* Check for overflow. */
12391 if (residual
>= 0x1000)
12394 /* xgettext:c-format */
12395 (_("%pB(%pA+%#" PRIx64
"): overflow whilst "
12396 "splitting %#" PRIx64
" for group relocation %s"),
12397 input_bfd
, input_section
, (uint64_t) rel
->r_offset
,
12398 (uint64_t) (signed_value
< 0 ? -signed_value
: signed_value
),
12400 return bfd_reloc_overflow
;
12403 /* Mask out the value and U bit. */
12404 insn
&= 0xff7ff000;
12406 /* Set the U bit if the value to go in the place is non-negative. */
12407 if (signed_value
>= 0)
12410 /* Encode the offset. */
12413 bfd_put_32 (input_bfd
, insn
, hit_data
);
12415 return bfd_reloc_ok
;
12417 case R_ARM_LDRS_PC_G0
:
12418 case R_ARM_LDRS_PC_G1
:
12419 case R_ARM_LDRS_PC_G2
:
12420 case R_ARM_LDRS_SB_G0
:
12421 case R_ARM_LDRS_SB_G1
:
12422 case R_ARM_LDRS_SB_G2
:
12424 bfd_vma insn
= bfd_get_32 (input_bfd
, hit_data
);
12425 bfd_vma pc
= input_section
->output_section
->vma
12426 + input_section
->output_offset
+ rel
->r_offset
;
12427 /* sb is the origin of the *segment* containing the symbol. */
12428 bfd_vma sb
= sym_sec
? sym_sec
->output_section
->vma
: 0;
12430 bfd_signed_vma signed_value
;
12433 /* Determine which groups of bits to calculate. */
12436 case R_ARM_LDRS_PC_G0
:
12437 case R_ARM_LDRS_SB_G0
:
12441 case R_ARM_LDRS_PC_G1
:
12442 case R_ARM_LDRS_SB_G1
:
12446 case R_ARM_LDRS_PC_G2
:
12447 case R_ARM_LDRS_SB_G2
:
12455 /* If REL, extract the addend from the insn. If RELA, it will
12456 have already been fetched for us. */
12457 if (globals
->use_rel
)
12459 int negative
= (insn
& (1 << 23)) ? 1 : -1;
12460 signed_addend
= negative
* (((insn
& 0xf00) >> 4) + (insn
& 0xf));
12463 /* Compute the value (X) to go in the place. */
12464 if (r_type
== R_ARM_LDRS_PC_G0
12465 || r_type
== R_ARM_LDRS_PC_G1
12466 || r_type
== R_ARM_LDRS_PC_G2
)
12468 signed_value
= value
- pc
+ signed_addend
;
12470 /* Section base relative. */
12471 signed_value
= value
- sb
+ signed_addend
;
12473 /* Calculate the value of the relevant G_{n-1} to obtain
12474 the residual at that stage. */
12475 calculate_group_reloc_mask (signed_value
< 0 ? - signed_value
: signed_value
,
12476 group
- 1, &residual
);
12478 /* Check for overflow. */
12479 if (residual
>= 0x100)
12482 /* xgettext:c-format */
12483 (_("%pB(%pA+%#" PRIx64
"): overflow whilst "
12484 "splitting %#" PRIx64
" for group relocation %s"),
12485 input_bfd
, input_section
, (uint64_t) rel
->r_offset
,
12486 (uint64_t) (signed_value
< 0 ? -signed_value
: signed_value
),
12488 return bfd_reloc_overflow
;
12491 /* Mask out the value and U bit. */
12492 insn
&= 0xff7ff0f0;
12494 /* Set the U bit if the value to go in the place is non-negative. */
12495 if (signed_value
>= 0)
12498 /* Encode the offset. */
12499 insn
|= ((residual
& 0xf0) << 4) | (residual
& 0xf);
12501 bfd_put_32 (input_bfd
, insn
, hit_data
);
12503 return bfd_reloc_ok
;
12505 case R_ARM_LDC_PC_G0
:
12506 case R_ARM_LDC_PC_G1
:
12507 case R_ARM_LDC_PC_G2
:
12508 case R_ARM_LDC_SB_G0
:
12509 case R_ARM_LDC_SB_G1
:
12510 case R_ARM_LDC_SB_G2
:
12512 bfd_vma insn
= bfd_get_32 (input_bfd
, hit_data
);
12513 bfd_vma pc
= input_section
->output_section
->vma
12514 + input_section
->output_offset
+ rel
->r_offset
;
12515 /* sb is the origin of the *segment* containing the symbol. */
12516 bfd_vma sb
= sym_sec
? sym_sec
->output_section
->vma
: 0;
12518 bfd_signed_vma signed_value
;
12521 /* Determine which groups of bits to calculate. */
12524 case R_ARM_LDC_PC_G0
:
12525 case R_ARM_LDC_SB_G0
:
12529 case R_ARM_LDC_PC_G1
:
12530 case R_ARM_LDC_SB_G1
:
12534 case R_ARM_LDC_PC_G2
:
12535 case R_ARM_LDC_SB_G2
:
12543 /* If REL, extract the addend from the insn. If RELA, it will
12544 have already been fetched for us. */
12545 if (globals
->use_rel
)
12547 int negative
= (insn
& (1 << 23)) ? 1 : -1;
12548 signed_addend
= negative
* ((insn
& 0xff) << 2);
12551 /* Compute the value (X) to go in the place. */
12552 if (r_type
== R_ARM_LDC_PC_G0
12553 || r_type
== R_ARM_LDC_PC_G1
12554 || r_type
== R_ARM_LDC_PC_G2
)
12556 signed_value
= value
- pc
+ signed_addend
;
12558 /* Section base relative. */
12559 signed_value
= value
- sb
+ signed_addend
;
12561 /* Calculate the value of the relevant G_{n-1} to obtain
12562 the residual at that stage. */
12563 calculate_group_reloc_mask (signed_value
< 0 ? - signed_value
: signed_value
,
12564 group
- 1, &residual
);
12566 /* Check for overflow. (The absolute value to go in the place must be
12567 divisible by four and, after having been divided by four, must
12568 fit in eight bits.) */
12569 if ((residual
& 0x3) != 0 || residual
>= 0x400)
12572 /* xgettext:c-format */
12573 (_("%pB(%pA+%#" PRIx64
"): overflow whilst "
12574 "splitting %#" PRIx64
" for group relocation %s"),
12575 input_bfd
, input_section
, (uint64_t) rel
->r_offset
,
12576 (uint64_t) (signed_value
< 0 ? -signed_value
: signed_value
),
12578 return bfd_reloc_overflow
;
12581 /* Mask out the value and U bit. */
12582 insn
&= 0xff7fff00;
12584 /* Set the U bit if the value to go in the place is non-negative. */
12585 if (signed_value
>= 0)
12588 /* Encode the offset. */
12589 insn
|= residual
>> 2;
12591 bfd_put_32 (input_bfd
, insn
, hit_data
);
12593 return bfd_reloc_ok
;
12595 case R_ARM_THM_ALU_ABS_G0_NC
:
12596 case R_ARM_THM_ALU_ABS_G1_NC
:
12597 case R_ARM_THM_ALU_ABS_G2_NC
:
12598 case R_ARM_THM_ALU_ABS_G3_NC
:
12600 const int shift_array
[4] = {0, 8, 16, 24};
12601 bfd_vma insn
= bfd_get_16 (input_bfd
, hit_data
);
12602 bfd_vma addr
= value
;
12603 int shift
= shift_array
[r_type
- R_ARM_THM_ALU_ABS_G0_NC
];
12605 /* Compute address. */
12606 if (globals
->use_rel
)
12607 signed_addend
= insn
& 0xff;
12608 addr
+= signed_addend
;
12609 if (branch_type
== ST_BRANCH_TO_THUMB
)
12611 /* Clean imm8 insn. */
12613 /* And update with correct part of address. */
12614 insn
|= (addr
>> shift
) & 0xff;
12616 bfd_put_16 (input_bfd
, insn
, hit_data
);
12619 *unresolved_reloc_p
= FALSE
;
12620 return bfd_reloc_ok
;
12622 case R_ARM_GOTOFFFUNCDESC
:
12626 struct fdpic_local
*local_fdpic_cnts
= elf32_arm_local_fdpic_cnts(input_bfd
);
12627 int dynindx
= elf_section_data (sym_sec
->output_section
)->dynindx
;
12628 int offset
= local_fdpic_cnts
[r_symndx
].funcdesc_offset
& ~1;
12629 bfd_vma addr
= dynreloc_value
- sym_sec
->output_section
->vma
;
12632 if (bfd_link_pic(info
) && dynindx
== 0)
12635 /* Resolve relocation. */
12636 bfd_put_32(output_bfd
, (offset
+ sgot
->output_offset
)
12637 , contents
+ rel
->r_offset
);
12638 /* Emit R_ARM_FUNCDESC_VALUE or two fixups on funcdesc if
12640 arm_elf_fill_funcdesc(output_bfd
, info
,
12641 &local_fdpic_cnts
[r_symndx
].funcdesc_offset
,
12642 dynindx
, offset
, addr
, dynreloc_value
, seg
);
12647 int offset
= eh
->fdpic_cnts
.funcdesc_offset
& ~1;
12651 /* For static binaries, sym_sec can be null. */
12654 dynindx
= elf_section_data (sym_sec
->output_section
)->dynindx
;
12655 addr
= dynreloc_value
- sym_sec
->output_section
->vma
;
12663 if (bfd_link_pic(info
) && dynindx
== 0)
12666 /* This case cannot occur since funcdesc is allocated by
12667 the dynamic loader so we cannot resolve the relocation. */
12668 if (h
->dynindx
!= -1)
12671 /* Resolve relocation. */
12672 bfd_put_32(output_bfd
, (offset
+ sgot
->output_offset
),
12673 contents
+ rel
->r_offset
);
12674 /* Emit R_ARM_FUNCDESC_VALUE on funcdesc if not done yet. */
12675 arm_elf_fill_funcdesc(output_bfd
, info
,
12676 &eh
->fdpic_cnts
.funcdesc_offset
,
12677 dynindx
, offset
, addr
, dynreloc_value
, seg
);
12680 *unresolved_reloc_p
= FALSE
;
12681 return bfd_reloc_ok
;
12683 case R_ARM_GOTFUNCDESC
:
12687 Elf_Internal_Rela outrel
;
12689 /* Resolve relocation. */
12690 bfd_put_32(output_bfd
, ((eh
->fdpic_cnts
.gotfuncdesc_offset
& ~1)
12691 + sgot
->output_offset
),
12692 contents
+ rel
->r_offset
);
12693 /* Add funcdesc and associated R_ARM_FUNCDESC_VALUE. */
12694 if(h
->dynindx
== -1)
12697 int offset
= eh
->fdpic_cnts
.funcdesc_offset
& ~1;
12701 /* For static binaries sym_sec can be null. */
12704 dynindx
= elf_section_data (sym_sec
->output_section
)->dynindx
;
12705 addr
= dynreloc_value
- sym_sec
->output_section
->vma
;
12713 /* Emit R_ARM_FUNCDESC_VALUE on funcdesc if not done yet. */
12714 arm_elf_fill_funcdesc(output_bfd
, info
,
12715 &eh
->fdpic_cnts
.funcdesc_offset
,
12716 dynindx
, offset
, addr
, dynreloc_value
, seg
);
12719 /* Add a dynamic relocation on GOT entry if not already done. */
12720 if ((eh
->fdpic_cnts
.gotfuncdesc_offset
& 1) == 0)
12722 if (h
->dynindx
== -1)
12724 outrel
.r_info
= ELF32_R_INFO (0, R_ARM_RELATIVE
);
12725 if (h
->root
.type
== bfd_link_hash_undefweak
)
12726 bfd_put_32(output_bfd
, 0, sgot
->contents
12727 + (eh
->fdpic_cnts
.gotfuncdesc_offset
& ~1));
12729 bfd_put_32(output_bfd
, sgot
->output_section
->vma
12730 + sgot
->output_offset
12731 + (eh
->fdpic_cnts
.funcdesc_offset
& ~1),
12733 + (eh
->fdpic_cnts
.gotfuncdesc_offset
& ~1));
12737 outrel
.r_info
= ELF32_R_INFO (h
->dynindx
, R_ARM_FUNCDESC
);
12739 outrel
.r_offset
= sgot
->output_section
->vma
12740 + sgot
->output_offset
12741 + (eh
->fdpic_cnts
.gotfuncdesc_offset
& ~1);
12742 outrel
.r_addend
= 0;
12743 if (h
->dynindx
== -1 && !bfd_link_pic(info
))
12744 if (h
->root
.type
== bfd_link_hash_undefweak
)
12745 arm_elf_add_rofixup(output_bfd
, globals
->srofixup
, -1);
12747 arm_elf_add_rofixup(output_bfd
, globals
->srofixup
, outrel
.r_offset
);
12749 elf32_arm_add_dynreloc (output_bfd
, info
, srelgot
, &outrel
);
12750 eh
->fdpic_cnts
.gotfuncdesc_offset
|= 1;
12755 /* Such relocation on static function should not have been
12756 emitted by the compiler. */
12760 *unresolved_reloc_p
= FALSE
;
12761 return bfd_reloc_ok
;
12763 case R_ARM_FUNCDESC
:
12767 struct fdpic_local
*local_fdpic_cnts
= elf32_arm_local_fdpic_cnts(input_bfd
);
12768 Elf_Internal_Rela outrel
;
12769 int dynindx
= elf_section_data (sym_sec
->output_section
)->dynindx
;
12770 int offset
= local_fdpic_cnts
[r_symndx
].funcdesc_offset
& ~1;
12771 bfd_vma addr
= dynreloc_value
- sym_sec
->output_section
->vma
;
12774 if (bfd_link_pic(info
) && dynindx
== 0)
12777 /* Replace static FUNCDESC relocation with a
12778 R_ARM_RELATIVE dynamic relocation or with a rofixup for
12780 outrel
.r_info
= ELF32_R_INFO (0, R_ARM_RELATIVE
);
12781 outrel
.r_offset
= input_section
->output_section
->vma
12782 + input_section
->output_offset
+ rel
->r_offset
;
12783 outrel
.r_addend
= 0;
12784 if (bfd_link_pic(info
))
12785 elf32_arm_add_dynreloc (output_bfd
, info
, srelgot
, &outrel
);
12787 arm_elf_add_rofixup(output_bfd
, globals
->srofixup
, outrel
.r_offset
);
12789 bfd_put_32 (input_bfd
, sgot
->output_section
->vma
12790 + sgot
->output_offset
+ offset
, hit_data
);
12792 /* Emit R_ARM_FUNCDESC_VALUE on funcdesc if not done yet. */
12793 arm_elf_fill_funcdesc(output_bfd
, info
,
12794 &local_fdpic_cnts
[r_symndx
].funcdesc_offset
,
12795 dynindx
, offset
, addr
, dynreloc_value
, seg
);
12799 if (h
->dynindx
== -1)
12802 int offset
= eh
->fdpic_cnts
.funcdesc_offset
& ~1;
12805 Elf_Internal_Rela outrel
;
12807 /* For static binaries sym_sec can be null. */
12810 dynindx
= elf_section_data (sym_sec
->output_section
)->dynindx
;
12811 addr
= dynreloc_value
- sym_sec
->output_section
->vma
;
12819 if (bfd_link_pic(info
) && dynindx
== 0)
12822 /* Replace static FUNCDESC relocation with a
12823 R_ARM_RELATIVE dynamic relocation. */
12824 outrel
.r_info
= ELF32_R_INFO (0, R_ARM_RELATIVE
);
12825 outrel
.r_offset
= input_section
->output_section
->vma
12826 + input_section
->output_offset
+ rel
->r_offset
;
12827 outrel
.r_addend
= 0;
12828 if (bfd_link_pic(info
))
12829 elf32_arm_add_dynreloc (output_bfd
, info
, srelgot
, &outrel
);
12831 arm_elf_add_rofixup(output_bfd
, globals
->srofixup
, outrel
.r_offset
);
12833 bfd_put_32 (input_bfd
, sgot
->output_section
->vma
12834 + sgot
->output_offset
+ offset
, hit_data
);
12836 /* Emit R_ARM_FUNCDESC_VALUE on funcdesc if not done yet. */
12837 arm_elf_fill_funcdesc(output_bfd
, info
,
12838 &eh
->fdpic_cnts
.funcdesc_offset
,
12839 dynindx
, offset
, addr
, dynreloc_value
, seg
);
12843 Elf_Internal_Rela outrel
;
12845 /* Add a dynamic relocation. */
12846 outrel
.r_info
= ELF32_R_INFO (h
->dynindx
, R_ARM_FUNCDESC
);
12847 outrel
.r_offset
= input_section
->output_section
->vma
12848 + input_section
->output_offset
+ rel
->r_offset
;
12849 outrel
.r_addend
= 0;
12850 elf32_arm_add_dynreloc (output_bfd
, info
, srelgot
, &outrel
);
12854 *unresolved_reloc_p
= FALSE
;
12855 return bfd_reloc_ok
;
12858 return bfd_reloc_notsupported
;
12862 /* Add INCREMENT to the reloc (of type HOWTO) at ADDRESS. */
12864 arm_add_to_rel (bfd
* abfd
,
12865 bfd_byte
* address
,
12866 reloc_howto_type
* howto
,
12867 bfd_signed_vma increment
)
12869 bfd_signed_vma addend
;
12871 if (howto
->type
== R_ARM_THM_CALL
12872 || howto
->type
== R_ARM_THM_JUMP24
)
12874 int upper_insn
, lower_insn
;
12877 upper_insn
= bfd_get_16 (abfd
, address
);
12878 lower_insn
= bfd_get_16 (abfd
, address
+ 2);
12879 upper
= upper_insn
& 0x7ff;
12880 lower
= lower_insn
& 0x7ff;
12882 addend
= (upper
<< 12) | (lower
<< 1);
12883 addend
+= increment
;
12886 upper_insn
= (upper_insn
& 0xf800) | ((addend
>> 11) & 0x7ff);
12887 lower_insn
= (lower_insn
& 0xf800) | (addend
& 0x7ff);
12889 bfd_put_16 (abfd
, (bfd_vma
) upper_insn
, address
);
12890 bfd_put_16 (abfd
, (bfd_vma
) lower_insn
, address
+ 2);
12896 contents
= bfd_get_32 (abfd
, address
);
12898 /* Get the (signed) value from the instruction. */
12899 addend
= contents
& howto
->src_mask
;
12900 if (addend
& ((howto
->src_mask
+ 1) >> 1))
12902 bfd_signed_vma mask
;
12905 mask
&= ~ howto
->src_mask
;
12909 /* Add in the increment, (which is a byte value). */
12910 switch (howto
->type
)
12913 addend
+= increment
;
12920 addend
<<= howto
->size
;
12921 addend
+= increment
;
12923 /* Should we check for overflow here ? */
12925 /* Drop any undesired bits. */
12926 addend
>>= howto
->rightshift
;
12930 contents
= (contents
& ~ howto
->dst_mask
) | (addend
& howto
->dst_mask
);
12932 bfd_put_32 (abfd
, contents
, address
);
12936 #define IS_ARM_TLS_RELOC(R_TYPE) \
12937 ((R_TYPE) == R_ARM_TLS_GD32 \
12938 || (R_TYPE) == R_ARM_TLS_GD32_FDPIC \
12939 || (R_TYPE) == R_ARM_TLS_LDO32 \
12940 || (R_TYPE) == R_ARM_TLS_LDM32 \
12941 || (R_TYPE) == R_ARM_TLS_LDM32_FDPIC \
12942 || (R_TYPE) == R_ARM_TLS_DTPOFF32 \
12943 || (R_TYPE) == R_ARM_TLS_DTPMOD32 \
12944 || (R_TYPE) == R_ARM_TLS_TPOFF32 \
12945 || (R_TYPE) == R_ARM_TLS_LE32 \
12946 || (R_TYPE) == R_ARM_TLS_IE32 \
12947 || (R_TYPE) == R_ARM_TLS_IE32_FDPIC \
12948 || IS_ARM_TLS_GNU_RELOC (R_TYPE))
12950 /* Specific set of relocations for the gnu tls dialect. */
12951 #define IS_ARM_TLS_GNU_RELOC(R_TYPE) \
12952 ((R_TYPE) == R_ARM_TLS_GOTDESC \
12953 || (R_TYPE) == R_ARM_TLS_CALL \
12954 || (R_TYPE) == R_ARM_THM_TLS_CALL \
12955 || (R_TYPE) == R_ARM_TLS_DESCSEQ \
12956 || (R_TYPE) == R_ARM_THM_TLS_DESCSEQ)
/* NOTE(review): this region is a garbled extraction of
   elf32_arm_relocate_section: statements are split across lines, the
   original file's line numbers are fused into the text, and some lines
   (braces, a few declarations) are missing.  The text below is kept
   byte-identical; comments only orient the reader.  Verify against the
   upstream bfd/elf32-arm.c before editing.  */
12958 /* Relocate an ARM ELF section. */
12961 elf32_arm_relocate_section (bfd
* output_bfd
,
12962 struct bfd_link_info
* info
,
12964 asection
* input_section
,
12965 bfd_byte
* contents
,
12966 Elf_Internal_Rela
* relocs
,
12967 Elf_Internal_Sym
* local_syms
,
12968 asection
** local_sections
)
12970 Elf_Internal_Shdr
*symtab_hdr
;
12971 struct elf_link_hash_entry
**sym_hashes
;
12972 Elf_Internal_Rela
*rel
;
12973 Elf_Internal_Rela
*relend
;
12975 struct elf32_arm_link_hash_table
* globals
;
/* Look up the ARM-specific hash table; bail out if unavailable.  */
12977 globals
= elf32_arm_hash_table (info
);
12978 if (globals
== NULL
)
12981 symtab_hdr
= & elf_symtab_hdr (input_bfd
);
12982 sym_hashes
= elf_sym_hashes (input_bfd
);
/* Walk every relocation applied to this input section.  */
12985 relend
= relocs
+ input_section
->reloc_count
;
12986 for (; rel
< relend
; rel
++)
12989 reloc_howto_type
* howto
;
12990 unsigned long r_symndx
;
12991 Elf_Internal_Sym
* sym
;
12993 struct elf_link_hash_entry
* h
;
12994 bfd_vma relocation
;
12995 bfd_reloc_status_type r
;
12998 bfd_boolean unresolved_reloc
= FALSE
;
12999 char *error_message
= NULL
;
13001 r_symndx
= ELF32_R_SYM (rel
->r_info
);
13002 r_type
= ELF32_R_TYPE (rel
->r_info
);
13003 r_type
= arm_real_reloc_type (globals
, r_type
);
13005 if ( r_type
== R_ARM_GNU_VTENTRY
13006 || r_type
== R_ARM_GNU_VTINHERIT
)
13009 howto
= bfd_reloc
.howto
= elf32_arm_howto_from_type (r_type
);
13012 return _bfd_unrecognized_reloc (input_bfd
, input_section
, r_type
);
/* Symbol lookup: indices below sh_info are local symbols.  */
13018 if (r_symndx
< symtab_hdr
->sh_info
)
13020 sym
= local_syms
+ r_symndx
;
13021 sym_type
= ELF32_ST_TYPE (sym
->st_info
);
13022 sec
= local_sections
[r_symndx
];
13024 /* An object file might have a reference to a local
13025 undefined symbol. This is a daft object file, but we
13026 should at least do something about it. V4BX & NONE
13027 relocations do not use the symbol and are explicitly
13028 allowed to use the undefined symbol, so allow those.
13029 Likewise for relocations against STN_UNDEF. */
13030 if (r_type
!= R_ARM_V4BX
13031 && r_type
!= R_ARM_NONE
13032 && r_symndx
!= STN_UNDEF
13033 && bfd_is_und_section (sec
)
13034 && ELF_ST_BIND (sym
->st_info
) != STB_WEAK
)
13035 (*info
->callbacks
->undefined_symbol
)
13036 (info
, bfd_elf_string_from_elf_section
13037 (input_bfd
, symtab_hdr
->sh_link
, sym
->st_name
),
13038 input_bfd
, input_section
,
13039 rel
->r_offset
, TRUE
);
/* REL-style objects keep the addend in the section contents; for
   SEC_MERGE sections it must be extracted, adjusted and rewritten.  */
13041 if (globals
->use_rel
)
13043 relocation
= (sec
->output_section
->vma
13044 + sec
->output_offset
13046 if (!bfd_link_relocatable (info
)
13047 && (sec
->flags
& SEC_MERGE
)
13048 && ELF_ST_TYPE (sym
->st_info
) == STT_SECTION
)
13051 bfd_vma addend
, value
;
13055 case R_ARM_MOVW_ABS_NC
:
13056 case R_ARM_MOVT_ABS
:
13057 value
= bfd_get_32 (input_bfd
, contents
+ rel
->r_offset
);
13058 addend
= ((value
& 0xf0000) >> 4) | (value
& 0xfff);
13059 addend
= (addend
^ 0x8000) - 0x8000;
13062 case R_ARM_THM_MOVW_ABS_NC
:
13063 case R_ARM_THM_MOVT_ABS
:
13064 value
= bfd_get_16 (input_bfd
, contents
+ rel
->r_offset
)
13066 value
|= bfd_get_16 (input_bfd
,
13067 contents
+ rel
->r_offset
+ 2);
13068 addend
= ((value
& 0xf7000) >> 4) | (value
& 0xff)
13069 | ((value
& 0x04000000) >> 15);
13070 addend
= (addend
^ 0x8000) - 0x8000;
13074 if (howto
->rightshift
13075 || (howto
->src_mask
& (howto
->src_mask
+ 1)))
13078 /* xgettext:c-format */
13079 (_("%pB(%pA+%#" PRIx64
"): "
13080 "%s relocation against SEC_MERGE section"),
13081 input_bfd
, input_section
,
13082 (uint64_t) rel
->r_offset
, howto
->name
);
13086 value
= bfd_get_32 (input_bfd
, contents
+ rel
->r_offset
);
13088 /* Get the (signed) value from the instruction. */
13089 addend
= value
& howto
->src_mask
;
13090 if (addend
& ((howto
->src_mask
+ 1) >> 1))
13092 bfd_signed_vma mask
;
13095 mask
&= ~ howto
->src_mask
;
13103 _bfd_elf_rel_local_sym (output_bfd
, sym
, &msec
, addend
)
13105 addend
+= msec
->output_section
->vma
+ msec
->output_offset
;
13107 /* Cases here must match those in the preceding
13108 switch statement. */
13111 case R_ARM_MOVW_ABS_NC
:
13112 case R_ARM_MOVT_ABS
:
13113 value
= (value
& 0xfff0f000) | ((addend
& 0xf000) << 4)
13114 | (addend
& 0xfff);
13115 bfd_put_32 (input_bfd
, value
, contents
+ rel
->r_offset
);
13118 case R_ARM_THM_MOVW_ABS_NC
:
13119 case R_ARM_THM_MOVT_ABS
:
13120 value
= (value
& 0xfbf08f00) | ((addend
& 0xf700) << 4)
13121 | (addend
& 0xff) | ((addend
& 0x0800) << 15);
13122 bfd_put_16 (input_bfd
, value
>> 16,
13123 contents
+ rel
->r_offset
);
13124 bfd_put_16 (input_bfd
, value
,
13125 contents
+ rel
->r_offset
+ 2);
13129 value
= (value
& ~ howto
->dst_mask
)
13130 | (addend
& howto
->dst_mask
);
13131 bfd_put_32 (input_bfd
, value
, contents
+ rel
->r_offset
);
13137 relocation
= _bfd_elf_rela_local_sym (output_bfd
, sym
, &sec
, rel
);
/* Global symbol: resolve through the ELF linker's standard macro.  */
13141 bfd_boolean warned
, ignored
;
13143 RELOC_FOR_GLOBAL_SYMBOL (info
, input_bfd
, input_section
, rel
,
13144 r_symndx
, symtab_hdr
, sym_hashes
,
13145 h
, sec
, relocation
,
13146 unresolved_reloc
, warned
, ignored
);
13148 sym_type
= h
->type
;
13151 if (sec
!= NULL
&& discarded_section (sec
))
13152 RELOC_AGAINST_DISCARDED_SECTION (info
, input_bfd
, input_section
,
13153 rel
, 1, relend
, howto
, 0, contents
);
/* For relocatable output only section-symbol relocs need adjusting.  */
13155 if (bfd_link_relocatable (info
))
13157 /* This is a relocatable link. We don't have to change
13158 anything, unless the reloc is against a section symbol,
13159 in which case we have to adjust according to where the
13160 section symbol winds up in the output section. */
13161 if (sym
!= NULL
&& ELF_ST_TYPE (sym
->st_info
) == STT_SECTION
)
13163 if (globals
->use_rel
)
13164 arm_add_to_rel (input_bfd
, contents
+ rel
->r_offset
,
13165 howto
, (bfd_signed_vma
) sec
->output_offset
);
13167 rel
->r_addend
+= sec
->output_offset
;
13173 name
= h
->root
.root
.string
;
13176 name
= (bfd_elf_string_from_elf_section
13177 (input_bfd
, symtab_hdr
->sh_link
, sym
->st_name
));
13178 if (name
== NULL
|| *name
== '\0')
13179 name
= bfd_section_name (input_bfd
, sec
);
/* Diagnose TLS relocs used with non-TLS symbols and vice versa.  */
13182 if (r_symndx
!= STN_UNDEF
13183 && r_type
!= R_ARM_NONE
13185 || h
->root
.type
== bfd_link_hash_defined
13186 || h
->root
.type
== bfd_link_hash_defweak
)
13187 && IS_ARM_TLS_RELOC (r_type
) != (sym_type
== STT_TLS
))
13190 ((sym_type
== STT_TLS
13191 /* xgettext:c-format */
13192 ? _("%pB(%pA+%#" PRIx64
"): %s used with TLS symbol %s")
13193 /* xgettext:c-format */
13194 : _("%pB(%pA+%#" PRIx64
"): %s used with non-TLS symbol %s")),
13197 (uint64_t) rel
->r_offset
,
13202 /* We call elf32_arm_final_link_relocate unless we're completely
13203 done, i.e., the relaxation produced the final output we want,
13204 and we won't let anybody mess with it. Also, we have to do
13205 addend adjustments in case of a R_ARM_TLS_GOTDESC relocation
13206 both in relaxed and non-relaxed cases. */
13207 if ((elf32_arm_tls_transition (info
, r_type
, h
) != (unsigned)r_type
)
13208 || (IS_ARM_TLS_GNU_RELOC (r_type
)
13209 && !((h
? elf32_arm_hash_entry (h
)->tls_type
:
13210 elf32_arm_local_got_tls_type (input_bfd
)[r_symndx
])
13213 r
= elf32_arm_tls_relax (globals
, input_bfd
, input_section
,
13214 contents
, rel
, h
== NULL
);
13215 /* This may have been marked unresolved because it came from
13216 a shared library. But we've just dealt with that. */
13217 unresolved_reloc
= 0;
13220 r
= bfd_reloc_continue
;
13222 if (r
== bfd_reloc_continue
)
13224 unsigned char branch_type
=
13225 h
? ARM_GET_SYM_BRANCH_TYPE (h
->target_internal
)
13226 : ARM_GET_SYM_BRANCH_TYPE (sym
->st_target_internal
);
13228 r
= elf32_arm_final_link_relocate (howto
, input_bfd
, output_bfd
,
13229 input_section
, contents
, rel
,
13230 relocation
, info
, sec
, name
,
13231 sym_type
, branch_type
, h
,
13236 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
13237 because such sections are not SEC_ALLOC and thus ld.so will
13238 not process them. */
13239 if (unresolved_reloc
13240 && !((input_section
->flags
& SEC_DEBUGGING
) != 0
13242 && _bfd_elf_section_offset (output_bfd
, info
, input_section
,
13243 rel
->r_offset
) != (bfd_vma
) -1)
13246 /* xgettext:c-format */
13247 (_("%pB(%pA+%#" PRIx64
"): "
13248 "unresolvable %s relocation against symbol `%s'"),
13251 (uint64_t) rel
->r_offset
,
13253 h
->root
.root
.string
);
/* Report any failure status returned by the relocation engine.  */
13257 if (r
!= bfd_reloc_ok
)
13261 case bfd_reloc_overflow
:
13262 /* If the overflowing reloc was to an undefined symbol,
13263 we have already printed one error message and there
13264 is no point complaining again. */
13265 if (!h
|| h
->root
.type
!= bfd_link_hash_undefined
)
13266 (*info
->callbacks
->reloc_overflow
)
13267 (info
, (h
? &h
->root
: NULL
), name
, howto
->name
,
13268 (bfd_vma
) 0, input_bfd
, input_section
, rel
->r_offset
);
13271 case bfd_reloc_undefined
:
13272 (*info
->callbacks
->undefined_symbol
)
13273 (info
, name
, input_bfd
, input_section
, rel
->r_offset
, TRUE
);
13276 case bfd_reloc_outofrange
:
13277 error_message
= _("out of range");
13280 case bfd_reloc_notsupported
:
13281 error_message
= _("unsupported relocation");
13284 case bfd_reloc_dangerous
:
13285 /* error_message should already be set. */
13289 error_message
= _("unknown error");
13290 /* Fall through. */
13293 BFD_ASSERT (error_message
!= NULL
);
13294 (*info
->callbacks
->reloc_dangerous
)
13295 (info
, error_message
, input_bfd
, input_section
, rel
->r_offset
);
13304 /* Add a new unwind edit to the list described by HEAD, TAIL. If TINDEX is zero,
13305 adds the edit to the start of the list. (The list must be built in order of
13306 ascending TINDEX: the function's callers are primarily responsible for
13307 maintaining that condition). */
13310 add_unwind_table_edit (arm_unwind_table_edit
**head
,
13311 arm_unwind_table_edit
**tail
,
13312 arm_unwind_edit_type type
,
13313 asection
*linked_section
,
13314 unsigned int tindex
)
13316 arm_unwind_table_edit
*new_edit
= (arm_unwind_table_edit
*)
13317 xmalloc (sizeof (arm_unwind_table_edit
));
13319 new_edit
->type
= type
;
13320 new_edit
->linked_section
= linked_section
;
13321 new_edit
->index
= tindex
;
13325 new_edit
->next
= NULL
;
13328 (*tail
)->next
= new_edit
;
13330 (*tail
) = new_edit
;
13333 (*head
) = new_edit
;
13337 new_edit
->next
= *head
;
13346 static _arm_elf_section_data
*get_arm_elf_section_data (asection
*);
13348 /* Increase the size of EXIDX_SEC by ADJUST bytes. ADJUST mau be negative. */
13350 adjust_exidx_size(asection
*exidx_sec
, int adjust
)
13354 if (!exidx_sec
->rawsize
)
13355 exidx_sec
->rawsize
= exidx_sec
->size
;
13357 bfd_set_section_size (exidx_sec
->owner
, exidx_sec
, exidx_sec
->size
+ adjust
);
13358 out_sec
= exidx_sec
->output_section
;
13359 /* Adjust size of output section. */
13360 bfd_set_section_size (out_sec
->owner
, out_sec
, out_sec
->size
+adjust
);
13363 /* Insert an EXIDX_CANTUNWIND marker at the end of a section. */
13365 insert_cantunwind_after(asection
*text_sec
, asection
*exidx_sec
)
13367 struct _arm_elf_section_data
*exidx_arm_data
;
13369 exidx_arm_data
= get_arm_elf_section_data (exidx_sec
);
13370 add_unwind_table_edit (
13371 &exidx_arm_data
->u
.exidx
.unwind_edit_list
,
13372 &exidx_arm_data
->u
.exidx
.unwind_edit_tail
,
13373 INSERT_EXIDX_CANTUNWIND_AT_END
, text_sec
, UINT_MAX
);
13375 exidx_arm_data
->additional_reloc_count
++;
13377 adjust_exidx_size(exidx_sec
, 8);
/* NOTE(review): garbled extraction of elf32_arm_fix_exidx_coverage —
   statements are split across lines and several lines (braces, loop
   variables, the unwind_type/elide computations) are missing.  Text kept
   byte-identical; comments only orient.  Verify against upstream
   bfd/elf32-arm.c before editing.  */
13380 /* Scan .ARM.exidx tables, and create a list describing edits which should be
13381 made to those tables, such that:
13383 1. Regions without unwind data are marked with EXIDX_CANTUNWIND entries.
13384 2. Duplicate entries are merged together (EXIDX_CANTUNWIND, or unwind
13385 codes which have been inlined into the index).
13387 If MERGE_EXIDX_ENTRIES is false, duplicate entries are not merged.
13389 The edits are applied when the tables are written
13390 (in elf32_arm_write_section). */
13393 elf32_arm_fix_exidx_coverage (asection
**text_section_order
,
13394 unsigned int num_text_sections
,
13395 struct bfd_link_info
*info
,
13396 bfd_boolean merge_exidx_entries
)
13399 unsigned int last_second_word
= 0, i
;
13400 asection
*last_exidx_sec
= NULL
;
13401 asection
*last_text_sec
= NULL
;
13402 int last_unwind_type
= -1;
13404 /* Walk over all EXIDX sections, and create backlinks from the corresponding
13406 for (inp
= info
->input_bfds
; inp
!= NULL
; inp
= inp
->link
.next
)
13410 for (sec
= inp
->sections
; sec
!= NULL
; sec
= sec
->next
)
13412 struct bfd_elf_section_data
*elf_sec
= elf_section_data (sec
);
13413 Elf_Internal_Shdr
*hdr
= &elf_sec
->this_hdr
;
13415 if (!hdr
|| hdr
->sh_type
!= SHT_ARM_EXIDX
)
13418 if (elf_sec
->linked_to
)
13420 Elf_Internal_Shdr
*linked_hdr
13421 = &elf_section_data (elf_sec
->linked_to
)->this_hdr
;
13422 struct _arm_elf_section_data
*linked_sec_arm_data
13423 = get_arm_elf_section_data (linked_hdr
->bfd_section
);
13425 if (linked_sec_arm_data
== NULL
)
13428 /* Link this .ARM.exidx section back from the text section it
13430 linked_sec_arm_data
->u
.text
.arm_exidx_sec
= sec
;
13435 /* Walk all text sections in order of increasing VMA. Eliminate duplicate
13436 index table entries (EXIDX_CANTUNWIND and inlined unwind opcodes),
13437 and add EXIDX_CANTUNWIND entries for sections with no unwind table data. */
13439 for (i
= 0; i
< num_text_sections
; i
++)
13441 asection
*sec
= text_section_order
[i
];
13442 asection
*exidx_sec
;
13443 struct _arm_elf_section_data
*arm_data
= get_arm_elf_section_data (sec
);
13444 struct _arm_elf_section_data
*exidx_arm_data
;
13445 bfd_byte
*contents
= NULL
;
13446 int deleted_exidx_bytes
= 0;
13448 arm_unwind_table_edit
*unwind_edit_head
= NULL
;
13449 arm_unwind_table_edit
*unwind_edit_tail
= NULL
;
13450 Elf_Internal_Shdr
*hdr
;
13453 if (arm_data
== NULL
)
13456 exidx_sec
= arm_data
->u
.text
.arm_exidx_sec
;
13457 if (exidx_sec
== NULL
)
13459 /* Section has no unwind data. */
13460 if (last_unwind_type
== 0 || !last_exidx_sec
)
13463 /* Ignore zero sized sections. */
13464 if (sec
->size
== 0)
13467 insert_cantunwind_after(last_text_sec
, last_exidx_sec
);
13468 last_unwind_type
= 0;
13472 /* Skip /DISCARD/ sections. */
13473 if (bfd_is_abs_section (exidx_sec
->output_section
))
13476 hdr
= &elf_section_data (exidx_sec
)->this_hdr
;
13477 if (hdr
->sh_type
!= SHT_ARM_EXIDX
)
13480 exidx_arm_data
= get_arm_elf_section_data (exidx_sec
);
13481 if (exidx_arm_data
== NULL
)
13484 ibfd
= exidx_sec
->owner
;
/* Use cached contents when available, otherwise read the section.  */
13486 if (hdr
->contents
!= NULL
)
13487 contents
= hdr
->contents
;
13488 else if (! bfd_malloc_and_get_section (ibfd
, exidx_sec
, &contents
))
13492 if (last_unwind_type
> 0)
13494 unsigned int first_word
= bfd_get_32 (ibfd
, contents
);
13495 /* Add cantunwind if first unwind item does not match section
13497 if (first_word
!= sec
->vma
)
13499 insert_cantunwind_after (last_text_sec
, last_exidx_sec
);
13500 last_unwind_type
= 0;
/* Scan the index: each entry is 8 bytes (two 32-bit words).  */
13504 for (j
= 0; j
< hdr
->sh_size
; j
+= 8)
13506 unsigned int second_word
= bfd_get_32 (ibfd
, contents
+ j
+ 4);
13510 /* An EXIDX_CANTUNWIND entry. */
13511 if (second_word
== 1)
13513 if (last_unwind_type
== 0)
13517 /* Inlined unwinding data. Merge if equal to previous. */
13518 else if ((second_word
& 0x80000000) != 0)
13520 if (merge_exidx_entries
13521 && last_second_word
== second_word
&& last_unwind_type
== 1)
13524 last_second_word
= second_word
;
13526 /* Normal table entry. In theory we could merge these too,
13527 but duplicate entries are likely to be much less common. */
13531 if (elide
&& !bfd_link_relocatable (info
))
13533 add_unwind_table_edit (&unwind_edit_head
, &unwind_edit_tail
,
13534 DELETE_EXIDX_ENTRY
, NULL
, j
/ 8);
13536 deleted_exidx_bytes
+= 8;
13539 last_unwind_type
= unwind_type
;
13542 /* Free contents if we allocated it ourselves. */
13543 if (contents
!= hdr
->contents
)
13546 /* Record edits to be applied later (in elf32_arm_write_section). */
13547 exidx_arm_data
->u
.exidx
.unwind_edit_list
= unwind_edit_head
;
13548 exidx_arm_data
->u
.exidx
.unwind_edit_tail
= unwind_edit_tail
;
13550 if (deleted_exidx_bytes
> 0)
13551 adjust_exidx_size(exidx_sec
, -deleted_exidx_bytes
);
13553 last_exidx_sec
= exidx_sec
;
13554 last_text_sec
= sec
;
13557 /* Add terminating CANTUNWIND entry. */
13558 if (!bfd_link_relocatable (info
) && last_exidx_sec
13559 && last_unwind_type
!= 0)
13560 insert_cantunwind_after(last_text_sec
, last_exidx_sec
);
13566 elf32_arm_output_glue_section (struct bfd_link_info
*info
, bfd
*obfd
,
13567 bfd
*ibfd
, const char *name
)
13569 asection
*sec
, *osec
;
13571 sec
= bfd_get_linker_section (ibfd
, name
);
13572 if (sec
== NULL
|| (sec
->flags
& SEC_EXCLUDE
) != 0)
13575 osec
= sec
->output_section
;
13576 if (elf32_arm_write_section (obfd
, info
, sec
, sec
->contents
))
13579 if (! bfd_set_section_contents (obfd
, osec
, sec
->contents
,
13580 sec
->output_offset
, sec
->size
))
/* NOTE(review): garbled extraction of elf32_arm_final_link (the ARM
   backend's bfd_final_link hook): invokes the generic ELF final link,
   then post-processes stub sections and writes the glue sections.
   Return statements and braces were dropped by the extraction; text kept
   byte-identical.  Verify against upstream bfd/elf32-arm.c.  */
13587 elf32_arm_final_link (bfd
*abfd
, struct bfd_link_info
*info
)
13589 struct elf32_arm_link_hash_table
*globals
= elf32_arm_hash_table (info
);
13590 asection
*sec
, *osec
;
13592 if (globals
== NULL
)
13595 /* Invoke the regular ELF backend linker to do all the work. */
13596 if (!bfd_elf_final_link (abfd
, info
))
13599 /* Process stub sections (eg BE8 encoding, ...). */
13600 struct elf32_arm_link_hash_table
*htab
= elf32_arm_hash_table (info
);
/* Walk every stub group by section id.  */
13602 for (i
=0; i
<htab
->top_id
; i
++)
13604 sec
= htab
->stub_group
[i
].stub_sec
;
13605 /* Only process it once, in its link_sec slot. */
13606 if (sec
&& i
== htab
->stub_group
[i
].link_sec
->id
)
13608 osec
= sec
->output_section
;
13609 elf32_arm_write_section (abfd
, info
, sec
, sec
->contents
);
13610 if (! bfd_set_section_contents (abfd
, osec
, sec
->contents
,
13611 sec
->output_offset
, sec
->size
))
13616 /* Write out any glue sections now that we have created all the
13618 if (globals
->bfd_of_glue_owner
!= NULL
)
13620 if (! elf32_arm_output_glue_section (info
, abfd
,
13621 globals
->bfd_of_glue_owner
,
13622 ARM2THUMB_GLUE_SECTION_NAME
))
13625 if (! elf32_arm_output_glue_section (info
, abfd
,
13626 globals
->bfd_of_glue_owner
,
13627 THUMB2ARM_GLUE_SECTION_NAME
))
13630 if (! elf32_arm_output_glue_section (info
, abfd
,
13631 globals
->bfd_of_glue_owner
,
13632 VFP11_ERRATUM_VENEER_SECTION_NAME
))
13635 if (! elf32_arm_output_glue_section (info
, abfd
,
13636 globals
->bfd_of_glue_owner
,
13637 STM32L4XX_ERRATUM_VENEER_SECTION_NAME
))
13640 if (! elf32_arm_output_glue_section (info
, abfd
,
13641 globals
->bfd_of_glue_owner
,
13642 ARM_BX_GLUE_SECTION_NAME
))
13649 /* Return a best guess for the machine number based on the attributes. */
13651 static unsigned int
13652 bfd_arm_get_mach_from_attributes (bfd
* abfd
)
13654 int arch
= bfd_elf_get_obj_attr_int (abfd
, OBJ_ATTR_PROC
, Tag_CPU_arch
);
13658 case TAG_CPU_ARCH_PRE_V4
: return bfd_mach_arm_3M
;
13659 case TAG_CPU_ARCH_V4
: return bfd_mach_arm_4
;
13660 case TAG_CPU_ARCH_V4T
: return bfd_mach_arm_4T
;
13661 case TAG_CPU_ARCH_V5T
: return bfd_mach_arm_5T
;
13663 case TAG_CPU_ARCH_V5TE
:
13667 BFD_ASSERT (Tag_CPU_name
< NUM_KNOWN_OBJ_ATTRIBUTES
);
13668 name
= elf_known_obj_attributes (abfd
) [OBJ_ATTR_PROC
][Tag_CPU_name
].s
;
13672 if (strcmp (name
, "IWMMXT2") == 0)
13673 return bfd_mach_arm_iWMMXt2
;
13675 if (strcmp (name
, "IWMMXT") == 0)
13676 return bfd_mach_arm_iWMMXt
;
13678 if (strcmp (name
, "XSCALE") == 0)
13682 BFD_ASSERT (Tag_WMMX_arch
< NUM_KNOWN_OBJ_ATTRIBUTES
);
13683 wmmx
= elf_known_obj_attributes (abfd
) [OBJ_ATTR_PROC
][Tag_WMMX_arch
].i
;
13686 case 1: return bfd_mach_arm_iWMMXt
;
13687 case 2: return bfd_mach_arm_iWMMXt2
;
13688 default: return bfd_mach_arm_XScale
;
13693 return bfd_mach_arm_5TE
;
13696 case TAG_CPU_ARCH_V5TEJ
:
13697 return bfd_mach_arm_5TEJ
;
13698 case TAG_CPU_ARCH_V6
:
13699 return bfd_mach_arm_6
;
13700 case TAG_CPU_ARCH_V6KZ
:
13701 return bfd_mach_arm_6KZ
;
13702 case TAG_CPU_ARCH_V6T2
:
13703 return bfd_mach_arm_6T2
;
13704 case TAG_CPU_ARCH_V6K
:
13705 return bfd_mach_arm_6K
;
13706 case TAG_CPU_ARCH_V7
:
13707 return bfd_mach_arm_7
;
13708 case TAG_CPU_ARCH_V6_M
:
13709 return bfd_mach_arm_6M
;
13710 case TAG_CPU_ARCH_V6S_M
:
13711 return bfd_mach_arm_6SM
;
13712 case TAG_CPU_ARCH_V7E_M
:
13713 return bfd_mach_arm_7EM
;
13714 case TAG_CPU_ARCH_V8
:
13715 return bfd_mach_arm_8
;
13716 case TAG_CPU_ARCH_V8R
:
13717 return bfd_mach_arm_8R
;
13718 case TAG_CPU_ARCH_V8M_BASE
:
13719 return bfd_mach_arm_8M_BASE
;
13720 case TAG_CPU_ARCH_V8M_MAIN
:
13721 return bfd_mach_arm_8M_MAIN
;
13724 /* Force entry to be added for any new known Tag_CPU_arch value. */
13725 BFD_ASSERT (arch
> MAX_TAG_CPU_ARCH
);
13727 /* Unknown Tag_CPU_arch value. */
13728 return bfd_mach_arm_unknown
;
13732 /* Set the right machine number. */
13735 elf32_arm_object_p (bfd
*abfd
)
13739 mach
= bfd_arm_get_mach_from_notes (abfd
, ARM_NOTE_SECTION
);
13741 if (mach
== bfd_mach_arm_unknown
)
13743 if (elf_elfheader (abfd
)->e_flags
& EF_ARM_MAVERICK_FLOAT
)
13744 mach
= bfd_mach_arm_ep9312
;
13746 mach
= bfd_arm_get_mach_from_attributes (abfd
);
13749 bfd_default_set_arch_mach (abfd
, bfd_arch_arm
, mach
);
13753 /* Function to keep ARM specific flags in the ELF header. */
13756 elf32_arm_set_private_flags (bfd
*abfd
, flagword flags
)
13758 if (elf_flags_init (abfd
)
13759 && elf_elfheader (abfd
)->e_flags
!= flags
)
13761 if (EF_ARM_EABI_VERSION (flags
) == EF_ARM_EABI_UNKNOWN
)
13763 if (flags
& EF_ARM_INTERWORK
)
13765 (_("warning: not setting interworking flag of %pB since it has already been specified as non-interworking"),
13769 (_("warning: clearing the interworking flag of %pB due to outside request"),
13775 elf_elfheader (abfd
)->e_flags
= flags
;
13776 elf_flags_init (abfd
) = TRUE
;
13782 /* Copy backend specific data from one object module to another. */
13785 elf32_arm_copy_private_bfd_data (bfd
*ibfd
, bfd
*obfd
)
13788 flagword out_flags
;
13790 if (! is_arm_elf (ibfd
) || ! is_arm_elf (obfd
))
13793 in_flags
= elf_elfheader (ibfd
)->e_flags
;
13794 out_flags
= elf_elfheader (obfd
)->e_flags
;
13796 if (elf_flags_init (obfd
)
13797 && EF_ARM_EABI_VERSION (out_flags
) == EF_ARM_EABI_UNKNOWN
13798 && in_flags
!= out_flags
)
13800 /* Cannot mix APCS26 and APCS32 code. */
13801 if ((in_flags
& EF_ARM_APCS_26
) != (out_flags
& EF_ARM_APCS_26
))
13804 /* Cannot mix float APCS and non-float APCS code. */
13805 if ((in_flags
& EF_ARM_APCS_FLOAT
) != (out_flags
& EF_ARM_APCS_FLOAT
))
13808 /* If the src and dest have different interworking flags
13809 then turn off the interworking bit. */
13810 if ((in_flags
& EF_ARM_INTERWORK
) != (out_flags
& EF_ARM_INTERWORK
))
13812 if (out_flags
& EF_ARM_INTERWORK
)
13814 (_("warning: clearing the interworking flag of %pB because non-interworking code in %pB has been linked with it"),
13817 in_flags
&= ~EF_ARM_INTERWORK
;
13820 /* Likewise for PIC, though don't warn for this case. */
13821 if ((in_flags
& EF_ARM_PIC
) != (out_flags
& EF_ARM_PIC
))
13822 in_flags
&= ~EF_ARM_PIC
;
13825 elf_elfheader (obfd
)->e_flags
= in_flags
;
13826 elf_flags_init (obfd
) = TRUE
;
13828 return _bfd_elf_copy_private_bfd_data (ibfd
, obfd
);
13831 /* Values for Tag_ABI_PCS_R9_use. */
13840 /* Values for Tag_ABI_PCS_RW_data. */
13843 AEABI_PCS_RW_data_absolute
,
13844 AEABI_PCS_RW_data_PCrel
,
13845 AEABI_PCS_RW_data_SBrel
,
13846 AEABI_PCS_RW_data_unused
13849 /* Values for Tag_ABI_enum_size. */
13855 AEABI_enum_forced_wide
13858 /* Determine whether an object attribute tag takes an integer, a
13862 elf32_arm_obj_attrs_arg_type (int tag
)
13864 if (tag
== Tag_compatibility
)
13865 return ATTR_TYPE_FLAG_INT_VAL
| ATTR_TYPE_FLAG_STR_VAL
;
13866 else if (tag
== Tag_nodefaults
)
13867 return ATTR_TYPE_FLAG_INT_VAL
| ATTR_TYPE_FLAG_NO_DEFAULT
;
13868 else if (tag
== Tag_CPU_raw_name
|| tag
== Tag_CPU_name
)
13869 return ATTR_TYPE_FLAG_STR_VAL
;
13871 return ATTR_TYPE_FLAG_INT_VAL
;
13873 return (tag
& 1) != 0 ? ATTR_TYPE_FLAG_STR_VAL
: ATTR_TYPE_FLAG_INT_VAL
;
13876 /* The ABI defines that Tag_conformance should be emitted first, and that
13877 Tag_nodefaults should be second (if either is defined). This sets those
13878 two positions, and bumps up the position of all the remaining tags to
13881 elf32_arm_obj_attrs_order (int num
)
13883 if (num
== LEAST_KNOWN_OBJ_ATTRIBUTE
)
13884 return Tag_conformance
;
13885 if (num
== LEAST_KNOWN_OBJ_ATTRIBUTE
+ 1)
13886 return Tag_nodefaults
;
13887 if ((num
- 2) < Tag_nodefaults
)
13889 if ((num
- 1) < Tag_conformance
)
13894 /* Attribute numbers >=64 (mod 128) can be safely ignored. */
13896 elf32_arm_obj_attrs_handle_unknown (bfd
*abfd
, int tag
)
13898 if ((tag
& 127) < 64)
13901 (_("%pB: unknown mandatory EABI object attribute %d"),
13903 bfd_set_error (bfd_error_bad_value
);
13909 (_("warning: %pB: unknown EABI object attribute %d"),
13915 /* Read the architecture from the Tag_also_compatible_with attribute, if any.
13916 Returns -1 if no architecture could be read. */
13919 get_secondary_compatible_arch (bfd
*abfd
)
13921 obj_attribute
*attr
=
13922 &elf_known_obj_attributes_proc (abfd
)[Tag_also_compatible_with
];
13924 /* Note: the tag and its argument below are uleb128 values, though
13925 currently-defined values fit in one byte for each. */
13927 && attr
->s
[0] == Tag_CPU_arch
13928 && (attr
->s
[1] & 128) != 128
13929 && attr
->s
[2] == 0)
13932 /* This tag is "safely ignorable", so don't complain if it looks funny. */
13936 /* Set, or unset, the architecture of the Tag_also_compatible_with attribute.
13937 The tag is removed if ARCH is -1. */
13940 set_secondary_compatible_arch (bfd
*abfd
, int arch
)
13942 obj_attribute
*attr
=
13943 &elf_known_obj_attributes_proc (abfd
)[Tag_also_compatible_with
];
13951 /* Note: the tag and its argument below are uleb128 values, though
13952 currently-defined values fit in one byte for each. */
13954 attr
->s
= (char *) bfd_alloc (abfd
, 3);
13955 attr
->s
[0] = Tag_CPU_arch
;
/* NOTE(review): garbled extraction of tag_cpu_arch_combine.  Each
   const-int array below (v6t2, v6k, v7, v6_m, v6s_m, v7e_m, v8, v8r,
   v8m_baseline, v8m_mainline, v4t_plus_v6_m) is a merge matrix indexed
   by the lower architecture tag; the extraction dropped most rows and
   all braces.  Text kept byte-identical; verify against upstream
   bfd/elf32-arm.c before editing.  */
13960 /* Combine two values for Tag_CPU_arch, taking secondary compatibility tags
13964 tag_cpu_arch_combine (bfd
*ibfd
, int oldtag
, int *secondary_compat_out
,
13965 int newtag
, int secondary_compat
)
13967 #define T(X) TAG_CPU_ARCH_##X
13968 int tagl
, tagh
, result
;
13971 T(V6T2
), /* PRE_V4. */
13973 T(V6T2
), /* V4T. */
13974 T(V6T2
), /* V5T. */
13975 T(V6T2
), /* V5TE. */
13976 T(V6T2
), /* V5TEJ. */
13979 T(V6T2
) /* V6T2. */
13983 T(V6K
), /* PRE_V4. */
13987 T(V6K
), /* V5TE. */
13988 T(V6K
), /* V5TEJ. */
13990 T(V6KZ
), /* V6KZ. */
13996 T(V7
), /* PRE_V4. */
14001 T(V7
), /* V5TEJ. */
14014 T(V6K
), /* V5TE. */
14015 T(V6K
), /* V5TEJ. */
14017 T(V6KZ
), /* V6KZ. */
14021 T(V6_M
) /* V6_M. */
14023 const int v6s_m
[] =
14029 T(V6K
), /* V5TE. */
14030 T(V6K
), /* V5TEJ. */
14032 T(V6KZ
), /* V6KZ. */
14036 T(V6S_M
), /* V6_M. */
14037 T(V6S_M
) /* V6S_M. */
14039 const int v7e_m
[] =
14043 T(V7E_M
), /* V4T. */
14044 T(V7E_M
), /* V5T. */
14045 T(V7E_M
), /* V5TE. */
14046 T(V7E_M
), /* V5TEJ. */
14047 T(V7E_M
), /* V6. */
14048 T(V7E_M
), /* V6KZ. */
14049 T(V7E_M
), /* V6T2. */
14050 T(V7E_M
), /* V6K. */
14051 T(V7E_M
), /* V7. */
14052 T(V7E_M
), /* V6_M. */
14053 T(V7E_M
), /* V6S_M. */
14054 T(V7E_M
) /* V7E_M. */
14058 T(V8
), /* PRE_V4. */
14063 T(V8
), /* V5TEJ. */
14070 T(V8
), /* V6S_M. */
14071 T(V8
), /* V7E_M. */
14076 T(V8R
), /* PRE_V4. */
14080 T(V8R
), /* V5TE. */
14081 T(V8R
), /* V5TEJ. */
14083 T(V8R
), /* V6KZ. */
14084 T(V8R
), /* V6T2. */
14087 T(V8R
), /* V6_M. */
14088 T(V8R
), /* V6S_M. */
14089 T(V8R
), /* V7E_M. */
14093 const int v8m_baseline
[] =
14106 T(V8M_BASE
), /* V6_M. */
14107 T(V8M_BASE
), /* V6S_M. */
14111 T(V8M_BASE
) /* V8-M BASELINE. */
14113 const int v8m_mainline
[] =
14125 T(V8M_MAIN
), /* V7. */
14126 T(V8M_MAIN
), /* V6_M. */
14127 T(V8M_MAIN
), /* V6S_M. */
14128 T(V8M_MAIN
), /* V7E_M. */
14131 T(V8M_MAIN
), /* V8-M BASELINE. */
14132 T(V8M_MAIN
) /* V8-M MAINLINE. */
14134 const int v4t_plus_v6_m
[] =
14140 T(V5TE
), /* V5TE. */
14141 T(V5TEJ
), /* V5TEJ. */
14143 T(V6KZ
), /* V6KZ. */
14144 T(V6T2
), /* V6T2. */
14147 T(V6_M
), /* V6_M. */
14148 T(V6S_M
), /* V6S_M. */
14149 T(V7E_M
), /* V7E_M. */
14152 T(V8M_BASE
), /* V8-M BASELINE. */
14153 T(V8M_MAIN
), /* V8-M MAINLINE. */
14154 T(V4T_PLUS_V6_M
) /* V4T plus V6_M. */
/* comb[] indexes the matrices above by (tagh - T(V6T2)).  */
14156 const int *comb
[] =
14168 /* Pseudo-architecture. */
14172 /* Check we've not got a higher architecture than we know about. */
14174 if (oldtag
> MAX_TAG_CPU_ARCH
|| newtag
> MAX_TAG_CPU_ARCH
)
14176 _bfd_error_handler (_("error: %pB: unknown CPU architecture"), ibfd
);
14180 /* Override old tag if we have a Tag_also_compatible_with on the output. */
14182 if ((oldtag
== T(V6_M
) && *secondary_compat_out
== T(V4T
))
14183 || (oldtag
== T(V4T
) && *secondary_compat_out
== T(V6_M
)))
14184 oldtag
= T(V4T_PLUS_V6_M
);
14186 /* And override the new tag if we have a Tag_also_compatible_with on the
14189 if ((newtag
== T(V6_M
) && secondary_compat
== T(V4T
))
14190 || (newtag
== T(V4T
) && secondary_compat
== T(V6_M
)))
14191 newtag
= T(V4T_PLUS_V6_M
);
14193 tagl
= (oldtag
< newtag
) ? oldtag
: newtag
;
14194 result
= tagh
= (oldtag
> newtag
) ? oldtag
: newtag
;
14196 /* Architectures before V6KZ add features monotonically. */
14197 if (tagh
<= TAG_CPU_ARCH_V6KZ
)
14200 result
= comb
[tagh
- T(V6T2
)] ? comb
[tagh
- T(V6T2
)][tagl
] : -1;
14202 /* Use Tag_CPU_arch == V4T and Tag_also_compatible_with (Tag_CPU_arch V6_M)
14203 as the canonical version. */
14204 if (result
== T(V4T_PLUS_V6_M
))
14207 *secondary_compat_out
= T(V6_M
);
14210 *secondary_compat_out
= -1;
14214 _bfd_error_handler (_("error: %pB: conflicting CPU architectures %d/%d"),
14215 ibfd
, oldtag
, newtag
);
14223 /* Query attributes object to see if integer divide instructions may be
14224 present in an object. */
14226 elf32_arm_attributes_accept_div (const obj_attribute
*attr
)
14228 int arch
= attr
[Tag_CPU_arch
].i
;
14229 int profile
= attr
[Tag_CPU_arch_profile
].i
;
14231 switch (attr
[Tag_DIV_use
].i
)
14234 /* Integer divide allowed if instruction contained in archetecture. */
14235 if (arch
== TAG_CPU_ARCH_V7
&& (profile
== 'R' || profile
== 'M'))
14237 else if (arch
>= TAG_CPU_ARCH_V7E_M
)
14243 /* Integer divide explicitly prohibited. */
14247 /* Unrecognised case - treat as allowing divide everywhere. */
14249 /* Integer divide allowed in ARM state. */
14254 /* Query attributes object to see if integer divide instructions are
14255 forbidden to be in the object. This is not the inverse of
14256 elf32_arm_attributes_accept_div. */
14258 elf32_arm_attributes_forbid_div (const obj_attribute
*attr
)
14260 return attr
[Tag_DIV_use
].i
== 1;
14263 /* Merge EABI object attributes from IBFD into OBFD. Raise an error if there
14264 are conflicting attributes. */
14267 elf32_arm_merge_eabi_attributes (bfd
*ibfd
, struct bfd_link_info
*info
)
14269 bfd
*obfd
= info
->output_bfd
;
14270 obj_attribute
*in_attr
;
14271 obj_attribute
*out_attr
;
14272 /* Some tags have 0 = don't care, 1 = strong requirement,
14273 2 = weak requirement. */
14274 static const int order_021
[3] = {0, 2, 1};
14276 bfd_boolean result
= TRUE
;
14277 const char *sec_name
= get_elf_backend_data (ibfd
)->obj_attrs_section
;
14279 /* Skip the linker stubs file. This preserves previous behavior
14280 of accepting unknown attributes in the first input file - but
14282 if (ibfd
->flags
& BFD_LINKER_CREATED
)
14285 /* Skip any input that hasn't attribute section.
14286 This enables to link object files without attribute section with
14288 if (bfd_get_section_by_name (ibfd
, sec_name
) == NULL
)
14291 if (!elf_known_obj_attributes_proc (obfd
)[0].i
)
14293 /* This is the first object. Copy the attributes. */
14294 _bfd_elf_copy_obj_attributes (ibfd
, obfd
);
14296 out_attr
= elf_known_obj_attributes_proc (obfd
);
14298 /* Use the Tag_null value to indicate the attributes have been
14302 /* We do not output objects with Tag_MPextension_use_legacy - we move
14303 the attribute's value to Tag_MPextension_use. */
14304 if (out_attr
[Tag_MPextension_use_legacy
].i
!= 0)
14306 if (out_attr
[Tag_MPextension_use
].i
!= 0
14307 && out_attr
[Tag_MPextension_use_legacy
].i
14308 != out_attr
[Tag_MPextension_use
].i
)
14311 (_("Error: %pB has both the current and legacy "
14312 "Tag_MPextension_use attributes"), ibfd
);
14316 out_attr
[Tag_MPextension_use
] =
14317 out_attr
[Tag_MPextension_use_legacy
];
14318 out_attr
[Tag_MPextension_use_legacy
].type
= 0;
14319 out_attr
[Tag_MPextension_use_legacy
].i
= 0;
14325 in_attr
= elf_known_obj_attributes_proc (ibfd
);
14326 out_attr
= elf_known_obj_attributes_proc (obfd
);
14327 /* This needs to happen before Tag_ABI_FP_number_model is merged. */
14328 if (in_attr
[Tag_ABI_VFP_args
].i
!= out_attr
[Tag_ABI_VFP_args
].i
)
14330 /* Ignore mismatches if the object doesn't use floating point or is
14331 floating point ABI independent. */
14332 if (out_attr
[Tag_ABI_FP_number_model
].i
== AEABI_FP_number_model_none
14333 || (in_attr
[Tag_ABI_FP_number_model
].i
!= AEABI_FP_number_model_none
14334 && out_attr
[Tag_ABI_VFP_args
].i
== AEABI_VFP_args_compatible
))
14335 out_attr
[Tag_ABI_VFP_args
].i
= in_attr
[Tag_ABI_VFP_args
].i
;
14336 else if (in_attr
[Tag_ABI_FP_number_model
].i
!= AEABI_FP_number_model_none
14337 && in_attr
[Tag_ABI_VFP_args
].i
!= AEABI_VFP_args_compatible
)
14340 (_("error: %pB uses VFP register arguments, %pB does not"),
14341 in_attr
[Tag_ABI_VFP_args
].i
? ibfd
: obfd
,
14342 in_attr
[Tag_ABI_VFP_args
].i
? obfd
: ibfd
);
14347 for (i
= LEAST_KNOWN_OBJ_ATTRIBUTE
; i
< NUM_KNOWN_OBJ_ATTRIBUTES
; i
++)
14349 /* Merge this attribute with existing attributes. */
14352 case Tag_CPU_raw_name
:
14354 /* These are merged after Tag_CPU_arch. */
14357 case Tag_ABI_optimization_goals
:
14358 case Tag_ABI_FP_optimization_goals
:
14359 /* Use the first value seen. */
14364 int secondary_compat
= -1, secondary_compat_out
= -1;
14365 unsigned int saved_out_attr
= out_attr
[i
].i
;
14367 static const char *name_table
[] =
14369 /* These aren't real CPU names, but we can't guess
14370 that from the architecture version alone. */
14386 "ARM v8-M.baseline",
14387 "ARM v8-M.mainline",
14390 /* Merge Tag_CPU_arch and Tag_also_compatible_with. */
14391 secondary_compat
= get_secondary_compatible_arch (ibfd
);
14392 secondary_compat_out
= get_secondary_compatible_arch (obfd
);
14393 arch_attr
= tag_cpu_arch_combine (ibfd
, out_attr
[i
].i
,
14394 &secondary_compat_out
,
14398 /* Return with error if failed to merge. */
14399 if (arch_attr
== -1)
14402 out_attr
[i
].i
= arch_attr
;
14404 set_secondary_compatible_arch (obfd
, secondary_compat_out
);
14406 /* Merge Tag_CPU_name and Tag_CPU_raw_name. */
14407 if (out_attr
[i
].i
== saved_out_attr
)
14408 ; /* Leave the names alone. */
14409 else if (out_attr
[i
].i
== in_attr
[i
].i
)
14411 /* The output architecture has been changed to match the
14412 input architecture. Use the input names. */
14413 out_attr
[Tag_CPU_name
].s
= in_attr
[Tag_CPU_name
].s
14414 ? _bfd_elf_attr_strdup (obfd
, in_attr
[Tag_CPU_name
].s
)
14416 out_attr
[Tag_CPU_raw_name
].s
= in_attr
[Tag_CPU_raw_name
].s
14417 ? _bfd_elf_attr_strdup (obfd
, in_attr
[Tag_CPU_raw_name
].s
)
14422 out_attr
[Tag_CPU_name
].s
= NULL
;
14423 out_attr
[Tag_CPU_raw_name
].s
= NULL
;
14426 /* If we still don't have a value for Tag_CPU_name,
14427 make one up now. Tag_CPU_raw_name remains blank. */
14428 if (out_attr
[Tag_CPU_name
].s
== NULL
14429 && out_attr
[i
].i
< ARRAY_SIZE (name_table
))
14430 out_attr
[Tag_CPU_name
].s
=
14431 _bfd_elf_attr_strdup (obfd
, name_table
[out_attr
[i
].i
]);
14435 case Tag_ARM_ISA_use
:
14436 case Tag_THUMB_ISA_use
:
14437 case Tag_WMMX_arch
:
14438 case Tag_Advanced_SIMD_arch
:
14439 /* ??? Do Advanced_SIMD (NEON) and WMMX conflict? */
14440 case Tag_ABI_FP_rounding
:
14441 case Tag_ABI_FP_exceptions
:
14442 case Tag_ABI_FP_user_exceptions
:
14443 case Tag_ABI_FP_number_model
:
14444 case Tag_FP_HP_extension
:
14445 case Tag_CPU_unaligned_access
:
14447 case Tag_MPextension_use
:
14448 /* Use the largest value specified. */
14449 if (in_attr
[i
].i
> out_attr
[i
].i
)
14450 out_attr
[i
].i
= in_attr
[i
].i
;
14453 case Tag_ABI_align_preserved
:
14454 case Tag_ABI_PCS_RO_data
:
14455 /* Use the smallest value specified. */
14456 if (in_attr
[i
].i
< out_attr
[i
].i
)
14457 out_attr
[i
].i
= in_attr
[i
].i
;
14460 case Tag_ABI_align_needed
:
14461 if ((in_attr
[i
].i
> 0 || out_attr
[i
].i
> 0)
14462 && (in_attr
[Tag_ABI_align_preserved
].i
== 0
14463 || out_attr
[Tag_ABI_align_preserved
].i
== 0))
14465 /* This error message should be enabled once all non-conformant
14466 binaries in the toolchain have had the attributes set
14469 (_("error: %pB: 8-byte data alignment conflicts with %pB"),
14473 /* Fall through. */
14474 case Tag_ABI_FP_denormal
:
14475 case Tag_ABI_PCS_GOT_use
:
14476 /* Use the "greatest" from the sequence 0, 2, 1, or the largest
14477 value if greater than 2 (for future-proofing). */
14478 if ((in_attr
[i
].i
> 2 && in_attr
[i
].i
> out_attr
[i
].i
)
14479 || (in_attr
[i
].i
<= 2 && out_attr
[i
].i
<= 2
14480 && order_021
[in_attr
[i
].i
] > order_021
[out_attr
[i
].i
]))
14481 out_attr
[i
].i
= in_attr
[i
].i
;
14484 case Tag_Virtualization_use
:
14485 /* The virtualization tag effectively stores two bits of
14486 information: the intended use of TrustZone (in bit 0), and the
14487 intended use of Virtualization (in bit 1). */
14488 if (out_attr
[i
].i
== 0)
14489 out_attr
[i
].i
= in_attr
[i
].i
;
14490 else if (in_attr
[i
].i
!= 0
14491 && in_attr
[i
].i
!= out_attr
[i
].i
)
14493 if (in_attr
[i
].i
<= 3 && out_attr
[i
].i
<= 3)
14498 (_("error: %pB: unable to merge virtualization attributes "
14506 case Tag_CPU_arch_profile
:
14507 if (out_attr
[i
].i
!= in_attr
[i
].i
)
14509 /* 0 will merge with anything.
14510 'A' and 'S' merge to 'A'.
14511 'R' and 'S' merge to 'R'.
14512 'M' and 'A|R|S' is an error. */
14513 if (out_attr
[i
].i
== 0
14514 || (out_attr
[i
].i
== 'S'
14515 && (in_attr
[i
].i
== 'A' || in_attr
[i
].i
== 'R')))
14516 out_attr
[i
].i
= in_attr
[i
].i
;
14517 else if (in_attr
[i
].i
== 0
14518 || (in_attr
[i
].i
== 'S'
14519 && (out_attr
[i
].i
== 'A' || out_attr
[i
].i
== 'R')))
14520 ; /* Do nothing. */
14524 (_("error: %pB: conflicting architecture profiles %c/%c"),
14526 in_attr
[i
].i
? in_attr
[i
].i
: '0',
14527 out_attr
[i
].i
? out_attr
[i
].i
: '0');
14533 case Tag_DSP_extension
:
14534 /* No need to change output value if any of:
14535 - pre (<=) ARMv5T input architecture (do not have DSP)
14536 - M input profile not ARMv7E-M and do not have DSP. */
14537 if (in_attr
[Tag_CPU_arch
].i
<= 3
14538 || (in_attr
[Tag_CPU_arch_profile
].i
== 'M'
14539 && in_attr
[Tag_CPU_arch
].i
!= 13
14540 && in_attr
[i
].i
== 0))
14541 ; /* Do nothing. */
14542 /* Output value should be 0 if DSP part of architecture, ie.
14543 - post (>=) ARMv5te architecture output
14544 - A, R or S profile output or ARMv7E-M output architecture. */
14545 else if (out_attr
[Tag_CPU_arch
].i
>= 4
14546 && (out_attr
[Tag_CPU_arch_profile
].i
== 'A'
14547 || out_attr
[Tag_CPU_arch_profile
].i
== 'R'
14548 || out_attr
[Tag_CPU_arch_profile
].i
== 'S'
14549 || out_attr
[Tag_CPU_arch
].i
== 13))
14551 /* Otherwise, DSP instructions are added and not part of output
14559 /* Tag_ABI_HardFP_use is handled along with Tag_FP_arch since
14560 the meaning of Tag_ABI_HardFP_use depends on Tag_FP_arch
14561 when it's 0. It might mean absence of FP hardware if
14562 Tag_FP_arch is zero. */
14564 #define VFP_VERSION_COUNT 9
14565 static const struct
14569 } vfp_versions
[VFP_VERSION_COUNT
] =
14585 /* If the output has no requirement about FP hardware,
14586 follow the requirement of the input. */
14587 if (out_attr
[i
].i
== 0)
14589 /* This assert is still reasonable, we shouldn't
14590 produce the suspicious build attribute
14591 combination (See below for in_attr). */
14592 BFD_ASSERT (out_attr
[Tag_ABI_HardFP_use
].i
== 0);
14593 out_attr
[i
].i
= in_attr
[i
].i
;
14594 out_attr
[Tag_ABI_HardFP_use
].i
14595 = in_attr
[Tag_ABI_HardFP_use
].i
;
14598 /* If the input has no requirement about FP hardware, do
14600 else if (in_attr
[i
].i
== 0)
14602 /* We used to assert that Tag_ABI_HardFP_use was
14603 zero here, but we should never assert when
14604 consuming an object file that has suspicious
14605 build attributes. The single precision variant
14606 of 'no FP architecture' is still 'no FP
14607 architecture', so we just ignore the tag in this
14612 /* Both the input and the output have nonzero Tag_FP_arch.
14613 So Tag_ABI_HardFP_use is implied by Tag_FP_arch when it's zero. */
14615 /* If both the input and the output have zero Tag_ABI_HardFP_use,
14617 if (in_attr
[Tag_ABI_HardFP_use
].i
== 0
14618 && out_attr
[Tag_ABI_HardFP_use
].i
== 0)
14620 /* If the input and the output have different Tag_ABI_HardFP_use,
14621 the combination of them is 0 (implied by Tag_FP_arch). */
14622 else if (in_attr
[Tag_ABI_HardFP_use
].i
14623 != out_attr
[Tag_ABI_HardFP_use
].i
)
14624 out_attr
[Tag_ABI_HardFP_use
].i
= 0;
14626 /* Now we can handle Tag_FP_arch. */
14628 /* Values of VFP_VERSION_COUNT or more aren't defined, so just
14629 pick the biggest. */
14630 if (in_attr
[i
].i
>= VFP_VERSION_COUNT
14631 && in_attr
[i
].i
> out_attr
[i
].i
)
14633 out_attr
[i
] = in_attr
[i
];
14636 /* The output uses the superset of input features
14637 (ISA version) and registers. */
14638 ver
= vfp_versions
[in_attr
[i
].i
].ver
;
14639 if (ver
< vfp_versions
[out_attr
[i
].i
].ver
)
14640 ver
= vfp_versions
[out_attr
[i
].i
].ver
;
14641 regs
= vfp_versions
[in_attr
[i
].i
].regs
;
14642 if (regs
< vfp_versions
[out_attr
[i
].i
].regs
)
14643 regs
= vfp_versions
[out_attr
[i
].i
].regs
;
14644 /* This assumes all possible supersets are also a valid
14646 for (newval
= VFP_VERSION_COUNT
- 1; newval
> 0; newval
--)
14648 if (regs
== vfp_versions
[newval
].regs
14649 && ver
== vfp_versions
[newval
].ver
)
14652 out_attr
[i
].i
= newval
;
14655 case Tag_PCS_config
:
14656 if (out_attr
[i
].i
== 0)
14657 out_attr
[i
].i
= in_attr
[i
].i
;
14658 else if (in_attr
[i
].i
!= 0 && out_attr
[i
].i
!= in_attr
[i
].i
)
14660 /* It's sometimes ok to mix different configs, so this is only
14663 (_("warning: %pB: conflicting platform configuration"), ibfd
);
14666 case Tag_ABI_PCS_R9_use
:
14667 if (in_attr
[i
].i
!= out_attr
[i
].i
14668 && out_attr
[i
].i
!= AEABI_R9_unused
14669 && in_attr
[i
].i
!= AEABI_R9_unused
)
14672 (_("error: %pB: conflicting use of R9"), ibfd
);
14675 if (out_attr
[i
].i
== AEABI_R9_unused
)
14676 out_attr
[i
].i
= in_attr
[i
].i
;
14678 case Tag_ABI_PCS_RW_data
:
14679 if (in_attr
[i
].i
== AEABI_PCS_RW_data_SBrel
14680 && out_attr
[Tag_ABI_PCS_R9_use
].i
!= AEABI_R9_SB
14681 && out_attr
[Tag_ABI_PCS_R9_use
].i
!= AEABI_R9_unused
)
14684 (_("error: %pB: SB relative addressing conflicts with use of R9"),
14688 /* Use the smallest value specified. */
14689 if (in_attr
[i
].i
< out_attr
[i
].i
)
14690 out_attr
[i
].i
= in_attr
[i
].i
;
14692 case Tag_ABI_PCS_wchar_t
:
14693 if (out_attr
[i
].i
&& in_attr
[i
].i
&& out_attr
[i
].i
!= in_attr
[i
].i
14694 && !elf_arm_tdata (obfd
)->no_wchar_size_warning
)
14697 (_("warning: %pB uses %u-byte wchar_t yet the output is to use %u-byte wchar_t; use of wchar_t values across objects may fail"),
14698 ibfd
, in_attr
[i
].i
, out_attr
[i
].i
);
14700 else if (in_attr
[i
].i
&& !out_attr
[i
].i
)
14701 out_attr
[i
].i
= in_attr
[i
].i
;
14703 case Tag_ABI_enum_size
:
14704 if (in_attr
[i
].i
!= AEABI_enum_unused
)
14706 if (out_attr
[i
].i
== AEABI_enum_unused
14707 || out_attr
[i
].i
== AEABI_enum_forced_wide
)
14709 /* The existing object is compatible with anything.
14710 Use whatever requirements the new object has. */
14711 out_attr
[i
].i
= in_attr
[i
].i
;
14713 else if (in_attr
[i
].i
!= AEABI_enum_forced_wide
14714 && out_attr
[i
].i
!= in_attr
[i
].i
14715 && !elf_arm_tdata (obfd
)->no_enum_size_warning
)
14717 static const char *aeabi_enum_names
[] =
14718 { "", "variable-size", "32-bit", "" };
14719 const char *in_name
=
14720 in_attr
[i
].i
< ARRAY_SIZE(aeabi_enum_names
)
14721 ? aeabi_enum_names
[in_attr
[i
].i
]
14723 const char *out_name
=
14724 out_attr
[i
].i
< ARRAY_SIZE(aeabi_enum_names
)
14725 ? aeabi_enum_names
[out_attr
[i
].i
]
14728 (_("warning: %pB uses %s enums yet the output is to use %s enums; use of enum values across objects may fail"),
14729 ibfd
, in_name
, out_name
);
14733 case Tag_ABI_VFP_args
:
14736 case Tag_ABI_WMMX_args
:
14737 if (in_attr
[i
].i
!= out_attr
[i
].i
)
14740 (_("error: %pB uses iWMMXt register arguments, %pB does not"),
14745 case Tag_compatibility
:
14746 /* Merged in target-independent code. */
14748 case Tag_ABI_HardFP_use
:
14749 /* This is handled along with Tag_FP_arch. */
14751 case Tag_ABI_FP_16bit_format
:
14752 if (in_attr
[i
].i
!= 0 && out_attr
[i
].i
!= 0)
14754 if (in_attr
[i
].i
!= out_attr
[i
].i
)
14757 (_("error: fp16 format mismatch between %pB and %pB"),
14762 if (in_attr
[i
].i
!= 0)
14763 out_attr
[i
].i
= in_attr
[i
].i
;
14767 /* A value of zero on input means that the divide instruction may
14768 be used if available in the base architecture as specified via
14769 Tag_CPU_arch and Tag_CPU_arch_profile. A value of 1 means that
14770 the user did not want divide instructions. A value of 2
14771 explicitly means that divide instructions were allowed in ARM
14772 and Thumb state. */
14773 if (in_attr
[i
].i
== out_attr
[i
].i
)
14774 /* Do nothing. */ ;
14775 else if (elf32_arm_attributes_forbid_div (in_attr
)
14776 && !elf32_arm_attributes_accept_div (out_attr
))
14778 else if (elf32_arm_attributes_forbid_div (out_attr
)
14779 && elf32_arm_attributes_accept_div (in_attr
))
14780 out_attr
[i
].i
= in_attr
[i
].i
;
14781 else if (in_attr
[i
].i
== 2)
14782 out_attr
[i
].i
= in_attr
[i
].i
;
14785 case Tag_MPextension_use_legacy
:
14786 /* We don't output objects with Tag_MPextension_use_legacy - we
14787 move the value to Tag_MPextension_use. */
14788 if (in_attr
[i
].i
!= 0 && in_attr
[Tag_MPextension_use
].i
!= 0)
14790 if (in_attr
[Tag_MPextension_use
].i
!= in_attr
[i
].i
)
14793 (_("%pB has both the current and legacy "
14794 "Tag_MPextension_use attributes"),
14800 if (in_attr
[i
].i
> out_attr
[Tag_MPextension_use
].i
)
14801 out_attr
[Tag_MPextension_use
] = in_attr
[i
];
14805 case Tag_nodefaults
:
14806 /* This tag is set if it exists, but the value is unused (and is
14807 typically zero). We don't actually need to do anything here -
14808 the merge happens automatically when the type flags are merged
14811 case Tag_also_compatible_with
:
14812 /* Already done in Tag_CPU_arch. */
14814 case Tag_conformance
:
14815 /* Keep the attribute if it matches. Throw it away otherwise.
14816 No attribute means no claim to conform. */
14817 if (!in_attr
[i
].s
|| !out_attr
[i
].s
14818 || strcmp (in_attr
[i
].s
, out_attr
[i
].s
) != 0)
14819 out_attr
[i
].s
= NULL
;
14824 = result
&& _bfd_elf_merge_unknown_attribute_low (ibfd
, obfd
, i
);
14827 /* If out_attr was copied from in_attr then it won't have a type yet. */
14828 if (in_attr
[i
].type
&& !out_attr
[i
].type
)
14829 out_attr
[i
].type
= in_attr
[i
].type
;
14832 /* Merge Tag_compatibility attributes and any common GNU ones. */
14833 if (!_bfd_elf_merge_object_attributes (ibfd
, info
))
14836 /* Check for any attributes not known on ARM. */
14837 result
&= _bfd_elf_merge_unknown_attribute_list (ibfd
, obfd
);
14843 /* Return TRUE if the two EABI versions are incompatible. */
14846 elf32_arm_versions_compatible (unsigned iver
, unsigned over
)
14848 /* v4 and v5 are the same spec before and after it was released,
14849 so allow mixing them. */
14850 if ((iver
== EF_ARM_EABI_VER4
&& over
== EF_ARM_EABI_VER5
)
14851 || (iver
== EF_ARM_EABI_VER5
&& over
== EF_ARM_EABI_VER4
))
14854 return (iver
== over
);
14857 /* Merge backend specific data from an object file to the output
14858 object file when linking. */
14861 elf32_arm_merge_private_bfd_data (bfd
*, struct bfd_link_info
*);
14863 /* Display the flags field. */
14866 elf32_arm_print_private_bfd_data (bfd
*abfd
, void * ptr
)
14868 FILE * file
= (FILE *) ptr
;
14869 unsigned long flags
;
14871 BFD_ASSERT (abfd
!= NULL
&& ptr
!= NULL
);
14873 /* Print normal ELF private data. */
14874 _bfd_elf_print_private_bfd_data (abfd
, ptr
);
14876 flags
= elf_elfheader (abfd
)->e_flags
;
14877 /* Ignore init flag - it may not be set, despite the flags field
14878 containing valid data. */
14880 fprintf (file
, _("private flags = %lx:"), elf_elfheader (abfd
)->e_flags
);
14882 switch (EF_ARM_EABI_VERSION (flags
))
14884 case EF_ARM_EABI_UNKNOWN
:
14885 /* The following flag bits are GNU extensions and not part of the
14886 official ARM ELF extended ABI. Hence they are only decoded if
14887 the EABI version is not set. */
14888 if (flags
& EF_ARM_INTERWORK
)
14889 fprintf (file
, _(" [interworking enabled]"));
14891 if (flags
& EF_ARM_APCS_26
)
14892 fprintf (file
, " [APCS-26]");
14894 fprintf (file
, " [APCS-32]");
14896 if (flags
& EF_ARM_VFP_FLOAT
)
14897 fprintf (file
, _(" [VFP float format]"));
14898 else if (flags
& EF_ARM_MAVERICK_FLOAT
)
14899 fprintf (file
, _(" [Maverick float format]"));
14901 fprintf (file
, _(" [FPA float format]"));
14903 if (flags
& EF_ARM_APCS_FLOAT
)
14904 fprintf (file
, _(" [floats passed in float registers]"));
14906 if (flags
& EF_ARM_PIC
)
14907 fprintf (file
, _(" [position independent]"));
14909 if (flags
& EF_ARM_NEW_ABI
)
14910 fprintf (file
, _(" [new ABI]"));
14912 if (flags
& EF_ARM_OLD_ABI
)
14913 fprintf (file
, _(" [old ABI]"));
14915 if (flags
& EF_ARM_SOFT_FLOAT
)
14916 fprintf (file
, _(" [software FP]"));
14918 flags
&= ~(EF_ARM_INTERWORK
| EF_ARM_APCS_26
| EF_ARM_APCS_FLOAT
14919 | EF_ARM_PIC
| EF_ARM_NEW_ABI
| EF_ARM_OLD_ABI
14920 | EF_ARM_SOFT_FLOAT
| EF_ARM_VFP_FLOAT
14921 | EF_ARM_MAVERICK_FLOAT
);
14924 case EF_ARM_EABI_VER1
:
14925 fprintf (file
, _(" [Version1 EABI]"));
14927 if (flags
& EF_ARM_SYMSARESORTED
)
14928 fprintf (file
, _(" [sorted symbol table]"));
14930 fprintf (file
, _(" [unsorted symbol table]"));
14932 flags
&= ~ EF_ARM_SYMSARESORTED
;
14935 case EF_ARM_EABI_VER2
:
14936 fprintf (file
, _(" [Version2 EABI]"));
14938 if (flags
& EF_ARM_SYMSARESORTED
)
14939 fprintf (file
, _(" [sorted symbol table]"));
14941 fprintf (file
, _(" [unsorted symbol table]"));
14943 if (flags
& EF_ARM_DYNSYMSUSESEGIDX
)
14944 fprintf (file
, _(" [dynamic symbols use segment index]"));
14946 if (flags
& EF_ARM_MAPSYMSFIRST
)
14947 fprintf (file
, _(" [mapping symbols precede others]"));
14949 flags
&= ~(EF_ARM_SYMSARESORTED
| EF_ARM_DYNSYMSUSESEGIDX
14950 | EF_ARM_MAPSYMSFIRST
);
14953 case EF_ARM_EABI_VER3
:
14954 fprintf (file
, _(" [Version3 EABI]"));
14957 case EF_ARM_EABI_VER4
:
14958 fprintf (file
, _(" [Version4 EABI]"));
14961 case EF_ARM_EABI_VER5
:
14962 fprintf (file
, _(" [Version5 EABI]"));
14964 if (flags
& EF_ARM_ABI_FLOAT_SOFT
)
14965 fprintf (file
, _(" [soft-float ABI]"));
14967 if (flags
& EF_ARM_ABI_FLOAT_HARD
)
14968 fprintf (file
, _(" [hard-float ABI]"));
14970 flags
&= ~(EF_ARM_ABI_FLOAT_SOFT
| EF_ARM_ABI_FLOAT_HARD
);
14973 if (flags
& EF_ARM_BE8
)
14974 fprintf (file
, _(" [BE8]"));
14976 if (flags
& EF_ARM_LE8
)
14977 fprintf (file
, _(" [LE8]"));
14979 flags
&= ~(EF_ARM_LE8
| EF_ARM_BE8
);
14983 fprintf (file
, _(" <EABI version unrecognised>"));
14987 flags
&= ~ EF_ARM_EABIMASK
;
14989 if (flags
& EF_ARM_RELEXEC
)
14990 fprintf (file
, _(" [relocatable executable]"));
14992 if (flags
& EF_ARM_PIC
)
14993 fprintf (file
, _(" [position independent]"));
14995 if (elf_elfheader (abfd
)->e_ident
[EI_OSABI
] == ELFOSABI_ARM_FDPIC
)
14996 fprintf (file
, _(" [FDPIC ABI supplement]"));
14998 flags
&= ~ (EF_ARM_RELEXEC
| EF_ARM_PIC
);
15001 fprintf (file
, _("<Unrecognised flag bits set>"));
15003 fputc ('\n', file
);
15009 elf32_arm_get_symbol_type (Elf_Internal_Sym
* elf_sym
, int type
)
15011 switch (ELF_ST_TYPE (elf_sym
->st_info
))
15013 case STT_ARM_TFUNC
:
15014 return ELF_ST_TYPE (elf_sym
->st_info
);
15016 case STT_ARM_16BIT
:
15017 /* If the symbol is not an object, return the STT_ARM_16BIT flag.
15018 This allows us to distinguish between data used by Thumb instructions
15019 and non-data (which is probably code) inside Thumb regions of an
15021 if (type
!= STT_OBJECT
&& type
!= STT_TLS
)
15022 return ELF_ST_TYPE (elf_sym
->st_info
);
15033 elf32_arm_gc_mark_hook (asection
*sec
,
15034 struct bfd_link_info
*info
,
15035 Elf_Internal_Rela
*rel
,
15036 struct elf_link_hash_entry
*h
,
15037 Elf_Internal_Sym
*sym
)
15040 switch (ELF32_R_TYPE (rel
->r_info
))
15042 case R_ARM_GNU_VTINHERIT
:
15043 case R_ARM_GNU_VTENTRY
:
15047 return _bfd_elf_gc_mark_hook (sec
, info
, rel
, h
, sym
);
15050 /* Look through the relocs for a section during the first phase. */
15053 elf32_arm_check_relocs (bfd
*abfd
, struct bfd_link_info
*info
,
15054 asection
*sec
, const Elf_Internal_Rela
*relocs
)
15056 Elf_Internal_Shdr
*symtab_hdr
;
15057 struct elf_link_hash_entry
**sym_hashes
;
15058 const Elf_Internal_Rela
*rel
;
15059 const Elf_Internal_Rela
*rel_end
;
15062 struct elf32_arm_link_hash_table
*htab
;
15063 bfd_boolean call_reloc_p
;
15064 bfd_boolean may_become_dynamic_p
;
15065 bfd_boolean may_need_local_target_p
;
15066 unsigned long nsyms
;
15068 if (bfd_link_relocatable (info
))
15071 BFD_ASSERT (is_arm_elf (abfd
));
15073 htab
= elf32_arm_hash_table (info
);
15079 /* Create dynamic sections for relocatable executables so that we can
15080 copy relocations. */
15081 if (htab
->root
.is_relocatable_executable
15082 && ! htab
->root
.dynamic_sections_created
)
15084 if (! _bfd_elf_link_create_dynamic_sections (abfd
, info
))
15088 if (htab
->root
.dynobj
== NULL
)
15089 htab
->root
.dynobj
= abfd
;
15090 if (!create_ifunc_sections (info
))
15093 dynobj
= htab
->root
.dynobj
;
15095 symtab_hdr
= & elf_symtab_hdr (abfd
);
15096 sym_hashes
= elf_sym_hashes (abfd
);
15097 nsyms
= NUM_SHDR_ENTRIES (symtab_hdr
);
15099 rel_end
= relocs
+ sec
->reloc_count
;
15100 for (rel
= relocs
; rel
< rel_end
; rel
++)
15102 Elf_Internal_Sym
*isym
;
15103 struct elf_link_hash_entry
*h
;
15104 struct elf32_arm_link_hash_entry
*eh
;
15105 unsigned int r_symndx
;
15108 r_symndx
= ELF32_R_SYM (rel
->r_info
);
15109 r_type
= ELF32_R_TYPE (rel
->r_info
);
15110 r_type
= arm_real_reloc_type (htab
, r_type
);
15112 if (r_symndx
>= nsyms
15113 /* PR 9934: It is possible to have relocations that do not
15114 refer to symbols, thus it is also possible to have an
15115 object file containing relocations but no symbol table. */
15116 && (r_symndx
> STN_UNDEF
|| nsyms
> 0))
15118 _bfd_error_handler (_("%pB: bad symbol index: %d"), abfd
,
15127 if (r_symndx
< symtab_hdr
->sh_info
)
15129 /* A local symbol. */
15130 isym
= bfd_sym_from_r_symndx (&htab
->sym_cache
,
15137 h
= sym_hashes
[r_symndx
- symtab_hdr
->sh_info
];
15138 while (h
->root
.type
== bfd_link_hash_indirect
15139 || h
->root
.type
== bfd_link_hash_warning
)
15140 h
= (struct elf_link_hash_entry
*) h
->root
.u
.i
.link
;
15144 eh
= (struct elf32_arm_link_hash_entry
*) h
;
15146 call_reloc_p
= FALSE
;
15147 may_become_dynamic_p
= FALSE
;
15148 may_need_local_target_p
= FALSE
;
15150 /* Could be done earlier, if h were already available. */
15151 r_type
= elf32_arm_tls_transition (info
, r_type
, h
);
15154 case R_ARM_GOTOFFFUNCDESC
:
15158 if (!elf32_arm_allocate_local_sym_info (abfd
))
15160 elf32_arm_local_fdpic_cnts(abfd
)[r_symndx
].gotofffuncdesc_cnt
+= 1;
15161 elf32_arm_local_fdpic_cnts(abfd
)[r_symndx
].funcdesc_offset
= -1;
15165 eh
->fdpic_cnts
.gotofffuncdesc_cnt
++;
15170 case R_ARM_GOTFUNCDESC
:
15174 /* Such a relocation is not supposed to be generated
15175 by gcc on a static function. */
15176 /* Anyway if needed it could be handled. */
15181 eh
->fdpic_cnts
.gotfuncdesc_cnt
++;
15186 case R_ARM_FUNCDESC
:
15190 if (!elf32_arm_allocate_local_sym_info (abfd
))
15192 elf32_arm_local_fdpic_cnts(abfd
)[r_symndx
].funcdesc_cnt
+= 1;
15193 elf32_arm_local_fdpic_cnts(abfd
)[r_symndx
].funcdesc_offset
= -1;
15197 eh
->fdpic_cnts
.funcdesc_cnt
++;
15203 case R_ARM_GOT_PREL
:
15204 case R_ARM_TLS_GD32
:
15205 case R_ARM_TLS_GD32_FDPIC
:
15206 case R_ARM_TLS_IE32
:
15207 case R_ARM_TLS_IE32_FDPIC
:
15208 case R_ARM_TLS_GOTDESC
:
15209 case R_ARM_TLS_DESCSEQ
:
15210 case R_ARM_THM_TLS_DESCSEQ
:
15211 case R_ARM_TLS_CALL
:
15212 case R_ARM_THM_TLS_CALL
:
15213 /* This symbol requires a global offset table entry. */
15215 int tls_type
, old_tls_type
;
15219 case R_ARM_TLS_GD32
: tls_type
= GOT_TLS_GD
; break;
15220 case R_ARM_TLS_GD32_FDPIC
: tls_type
= GOT_TLS_GD
; break;
15222 case R_ARM_TLS_IE32
: tls_type
= GOT_TLS_IE
; break;
15223 case R_ARM_TLS_IE32_FDPIC
: tls_type
= GOT_TLS_IE
; break;
15225 case R_ARM_TLS_GOTDESC
:
15226 case R_ARM_TLS_CALL
: case R_ARM_THM_TLS_CALL
:
15227 case R_ARM_TLS_DESCSEQ
: case R_ARM_THM_TLS_DESCSEQ
:
15228 tls_type
= GOT_TLS_GDESC
; break;
15230 default: tls_type
= GOT_NORMAL
; break;
15233 if (!bfd_link_executable (info
) && (tls_type
& GOT_TLS_IE
))
15234 info
->flags
|= DF_STATIC_TLS
;
15239 old_tls_type
= elf32_arm_hash_entry (h
)->tls_type
;
15243 /* This is a global offset table entry for a local symbol. */
15244 if (!elf32_arm_allocate_local_sym_info (abfd
))
15246 elf_local_got_refcounts (abfd
)[r_symndx
] += 1;
15247 old_tls_type
= elf32_arm_local_got_tls_type (abfd
) [r_symndx
];
15250 /* If a variable is accessed with both tls methods, two
15251 slots may be created. */
15252 if (GOT_TLS_GD_ANY_P (old_tls_type
)
15253 && GOT_TLS_GD_ANY_P (tls_type
))
15254 tls_type
|= old_tls_type
;
15256 /* We will already have issued an error message if there
15257 is a TLS/non-TLS mismatch, based on the symbol
15258 type. So just combine any TLS types needed. */
15259 if (old_tls_type
!= GOT_UNKNOWN
&& old_tls_type
!= GOT_NORMAL
15260 && tls_type
!= GOT_NORMAL
)
15261 tls_type
|= old_tls_type
;
15263 /* If the symbol is accessed in both IE and GDESC
15264 method, we're able to relax. Turn off the GDESC flag,
15265 without messing up with any other kind of tls types
15266 that may be involved. */
15267 if ((tls_type
& GOT_TLS_IE
) && (tls_type
& GOT_TLS_GDESC
))
15268 tls_type
&= ~GOT_TLS_GDESC
;
15270 if (old_tls_type
!= tls_type
)
15273 elf32_arm_hash_entry (h
)->tls_type
= tls_type
;
15275 elf32_arm_local_got_tls_type (abfd
) [r_symndx
] = tls_type
;
15278 /* Fall through. */
15280 case R_ARM_TLS_LDM32
:
15281 case R_ARM_TLS_LDM32_FDPIC
:
15282 if (r_type
== R_ARM_TLS_LDM32
|| r_type
== R_ARM_TLS_LDM32_FDPIC
)
15283 htab
->tls_ldm_got
.refcount
++;
15284 /* Fall through. */
15286 case R_ARM_GOTOFF32
:
15288 if (htab
->root
.sgot
== NULL
15289 && !create_got_section (htab
->root
.dynobj
, info
))
15298 case R_ARM_THM_CALL
:
15299 case R_ARM_THM_JUMP24
:
15300 case R_ARM_THM_JUMP19
:
15301 call_reloc_p
= TRUE
;
15302 may_need_local_target_p
= TRUE
;
15306 /* VxWorks uses dynamic R_ARM_ABS12 relocations for
15307 ldr __GOTT_INDEX__ offsets. */
15308 if (!htab
->vxworks_p
)
15310 may_need_local_target_p
= TRUE
;
15313 else goto jump_over
;
15315 /* Fall through. */
15317 case R_ARM_MOVW_ABS_NC
:
15318 case R_ARM_MOVT_ABS
:
15319 case R_ARM_THM_MOVW_ABS_NC
:
15320 case R_ARM_THM_MOVT_ABS
:
15321 if (bfd_link_pic (info
))
15324 (_("%pB: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"),
15325 abfd
, elf32_arm_howto_table_1
[r_type
].name
,
15326 (h
) ? h
->root
.root
.string
: "a local symbol");
15327 bfd_set_error (bfd_error_bad_value
);
15331 /* Fall through. */
15333 case R_ARM_ABS32_NOI
:
15335 if (h
!= NULL
&& bfd_link_executable (info
))
15337 h
->pointer_equality_needed
= 1;
15339 /* Fall through. */
15341 case R_ARM_REL32_NOI
:
15342 case R_ARM_MOVW_PREL_NC
:
15343 case R_ARM_MOVT_PREL
:
15344 case R_ARM_THM_MOVW_PREL_NC
:
15345 case R_ARM_THM_MOVT_PREL
:
15347 /* Should the interworking branches be listed here? */
15348 if ((bfd_link_pic (info
) || htab
->root
.is_relocatable_executable
15350 && (sec
->flags
& SEC_ALLOC
) != 0)
15353 && elf32_arm_howto_from_type (r_type
)->pc_relative
)
15355 /* In shared libraries and relocatable executables,
15356 we treat local relative references as calls;
15357 see the related SYMBOL_CALLS_LOCAL code in
15358 allocate_dynrelocs. */
15359 call_reloc_p
= TRUE
;
15360 may_need_local_target_p
= TRUE
;
15363 /* We are creating a shared library or relocatable
15364 executable, and this is a reloc against a global symbol,
15365 or a non-PC-relative reloc against a local symbol.
15366 We may need to copy the reloc into the output. */
15367 may_become_dynamic_p
= TRUE
;
15370 may_need_local_target_p
= TRUE
;
15373 /* This relocation describes the C++ object vtable hierarchy.
15374 Reconstruct it for later use during GC. */
15375 case R_ARM_GNU_VTINHERIT
:
15376 if (!bfd_elf_gc_record_vtinherit (abfd
, sec
, h
, rel
->r_offset
))
15380 /* This relocation describes which C++ vtable entries are actually
15381 used. Record for later use during GC. */
15382 case R_ARM_GNU_VTENTRY
:
15383 BFD_ASSERT (h
!= NULL
);
15385 && !bfd_elf_gc_record_vtentry (abfd
, sec
, h
, rel
->r_offset
))
15393 /* We may need a .plt entry if the function this reloc
15394 refers to is in a different object, regardless of the
15395 symbol's type. We can't tell for sure yet, because
15396 something later might force the symbol local. */
15398 else if (may_need_local_target_p
)
15399 /* If this reloc is in a read-only section, we might
15400 need a copy reloc. We can't check reliably at this
15401 stage whether the section is read-only, as input
15402 sections have not yet been mapped to output sections.
15403 Tentatively set the flag for now, and correct in
15404 adjust_dynamic_symbol. */
15405 h
->non_got_ref
= 1;
15408 if (may_need_local_target_p
15409 && (h
!= NULL
|| ELF32_ST_TYPE (isym
->st_info
) == STT_GNU_IFUNC
))
15411 union gotplt_union
*root_plt
;
15412 struct arm_plt_info
*arm_plt
;
15413 struct arm_local_iplt_info
*local_iplt
;
15417 root_plt
= &h
->plt
;
15418 arm_plt
= &eh
->plt
;
15422 local_iplt
= elf32_arm_create_local_iplt (abfd
, r_symndx
);
15423 if (local_iplt
== NULL
)
15425 root_plt
= &local_iplt
->root
;
15426 arm_plt
= &local_iplt
->arm
;
15429 /* If the symbol is a function that doesn't bind locally,
15430 this relocation will need a PLT entry. */
15431 if (root_plt
->refcount
!= -1)
15432 root_plt
->refcount
+= 1;
15435 arm_plt
->noncall_refcount
++;
15437 /* It's too early to use htab->use_blx here, so we have to
15438 record possible blx references separately from
15439 relocs that definitely need a thumb stub. */
15441 if (r_type
== R_ARM_THM_CALL
)
15442 arm_plt
->maybe_thumb_refcount
+= 1;
15444 if (r_type
== R_ARM_THM_JUMP24
15445 || r_type
== R_ARM_THM_JUMP19
)
15446 arm_plt
->thumb_refcount
+= 1;
15449 if (may_become_dynamic_p
)
15451 struct elf_dyn_relocs
*p
, **head
;
15453 /* Create a reloc section in dynobj. */
15454 if (sreloc
== NULL
)
15456 sreloc
= _bfd_elf_make_dynamic_reloc_section
15457 (sec
, dynobj
, 2, abfd
, ! htab
->use_rel
);
15459 if (sreloc
== NULL
)
15462 /* BPABI objects never have dynamic relocations mapped. */
15463 if (htab
->symbian_p
)
15467 flags
= bfd_get_section_flags (dynobj
, sreloc
);
15468 flags
&= ~(SEC_LOAD
| SEC_ALLOC
);
15469 bfd_set_section_flags (dynobj
, sreloc
, flags
);
15473 /* If this is a global symbol, count the number of
15474 relocations we need for this symbol. */
15476 head
= &((struct elf32_arm_link_hash_entry
*) h
)->dyn_relocs
;
15479 head
= elf32_arm_get_local_dynreloc_list (abfd
, r_symndx
, isym
);
15485 if (p
== NULL
|| p
->sec
!= sec
)
15487 bfd_size_type amt
= sizeof *p
;
15489 p
= (struct elf_dyn_relocs
*) bfd_alloc (htab
->root
.dynobj
, amt
);
15499 if (elf32_arm_howto_from_type (r_type
)->pc_relative
)
15502 if (h
== NULL
&& htab
->fdpic_p
&& !bfd_link_pic(info
)
15503 && r_type
!= R_ARM_ABS32
&& r_type
!= R_ARM_ABS32_NOI
) {
15504 /* Here we only support R_ARM_ABS32 and R_ARM_ABS32_NOI
15505 that will become rofixup. */
15506 /* This is due to the fact that we suppose all will become rofixup. */
15507 fprintf(stderr
, "FDPIC does not yet support %d relocation to become dynamic for executable\n", r_type
);
15509 (_("FDPIC does not yet support %s relocation"
15510 " to become dynamic for executable"),
15511 elf32_arm_howto_table_1
[r_type
].name
);
15521 elf32_arm_update_relocs (asection
*o
,
15522 struct bfd_elf_section_reloc_data
*reldata
)
15524 void (*swap_in
) (bfd
*, const bfd_byte
*, Elf_Internal_Rela
*);
15525 void (*swap_out
) (bfd
*, const Elf_Internal_Rela
*, bfd_byte
*);
15526 const struct elf_backend_data
*bed
;
15527 _arm_elf_section_data
*eado
;
15528 struct bfd_link_order
*p
;
15529 bfd_byte
*erela_head
, *erela
;
15530 Elf_Internal_Rela
*irela_head
, *irela
;
15531 Elf_Internal_Shdr
*rel_hdr
;
15533 unsigned int count
;
15535 eado
= get_arm_elf_section_data (o
);
15537 if (!eado
|| eado
->elf
.this_hdr
.sh_type
!= SHT_ARM_EXIDX
)
15541 bed
= get_elf_backend_data (abfd
);
15542 rel_hdr
= reldata
->hdr
;
15544 if (rel_hdr
->sh_entsize
== bed
->s
->sizeof_rel
)
15546 swap_in
= bed
->s
->swap_reloc_in
;
15547 swap_out
= bed
->s
->swap_reloc_out
;
15549 else if (rel_hdr
->sh_entsize
== bed
->s
->sizeof_rela
)
15551 swap_in
= bed
->s
->swap_reloca_in
;
15552 swap_out
= bed
->s
->swap_reloca_out
;
15557 erela_head
= rel_hdr
->contents
;
15558 irela_head
= (Elf_Internal_Rela
*) bfd_zmalloc
15559 ((NUM_SHDR_ENTRIES (rel_hdr
) + 1) * sizeof (*irela_head
));
15561 erela
= erela_head
;
15562 irela
= irela_head
;
15565 for (p
= o
->map_head
.link_order
; p
; p
= p
->next
)
15567 if (p
->type
== bfd_section_reloc_link_order
15568 || p
->type
== bfd_symbol_reloc_link_order
)
15570 (*swap_in
) (abfd
, erela
, irela
);
15571 erela
+= rel_hdr
->sh_entsize
;
15575 else if (p
->type
== bfd_indirect_link_order
)
15577 struct bfd_elf_section_reloc_data
*input_reldata
;
15578 arm_unwind_table_edit
*edit_list
, *edit_tail
;
15579 _arm_elf_section_data
*eadi
;
15584 i
= p
->u
.indirect
.section
;
15586 eadi
= get_arm_elf_section_data (i
);
15587 edit_list
= eadi
->u
.exidx
.unwind_edit_list
;
15588 edit_tail
= eadi
->u
.exidx
.unwind_edit_tail
;
15589 offset
= o
->vma
+ i
->output_offset
;
15591 if (eadi
->elf
.rel
.hdr
&&
15592 eadi
->elf
.rel
.hdr
->sh_entsize
== rel_hdr
->sh_entsize
)
15593 input_reldata
= &eadi
->elf
.rel
;
15594 else if (eadi
->elf
.rela
.hdr
&&
15595 eadi
->elf
.rela
.hdr
->sh_entsize
== rel_hdr
->sh_entsize
)
15596 input_reldata
= &eadi
->elf
.rela
;
15602 for (j
= 0; j
< NUM_SHDR_ENTRIES (input_reldata
->hdr
); j
++)
15604 arm_unwind_table_edit
*edit_node
, *edit_next
;
15606 bfd_vma reloc_index
;
15608 (*swap_in
) (abfd
, erela
, irela
);
15609 reloc_index
= (irela
->r_offset
- offset
) / 8;
15612 edit_node
= edit_list
;
15613 for (edit_next
= edit_list
;
15614 edit_next
&& edit_next
->index
<= reloc_index
;
15615 edit_next
= edit_node
->next
)
15618 edit_node
= edit_next
;
15621 if (edit_node
->type
!= DELETE_EXIDX_ENTRY
15622 || edit_node
->index
!= reloc_index
)
15624 irela
->r_offset
-= bias
* 8;
15629 erela
+= rel_hdr
->sh_entsize
;
15632 if (edit_tail
->type
== INSERT_EXIDX_CANTUNWIND_AT_END
)
15634 /* New relocation entity. */
15635 asection
*text_sec
= edit_tail
->linked_section
;
15636 asection
*text_out
= text_sec
->output_section
;
15637 bfd_vma exidx_offset
= offset
+ i
->size
- 8;
15639 irela
->r_addend
= 0;
15640 irela
->r_offset
= exidx_offset
;
15641 irela
->r_info
= ELF32_R_INFO
15642 (text_out
->target_index
, R_ARM_PREL31
);
15649 for (j
= 0; j
< NUM_SHDR_ENTRIES (input_reldata
->hdr
); j
++)
15651 (*swap_in
) (abfd
, erela
, irela
);
15652 erela
+= rel_hdr
->sh_entsize
;
15656 count
+= NUM_SHDR_ENTRIES (input_reldata
->hdr
);
15661 reldata
->count
= count
;
15662 rel_hdr
->sh_size
= count
* rel_hdr
->sh_entsize
;
15664 erela
= erela_head
;
15665 irela
= irela_head
;
15668 (*swap_out
) (abfd
, irela
, erela
);
15669 erela
+= rel_hdr
->sh_entsize
;
15676 /* Hashes are no longer valid. */
15677 free (reldata
->hashes
);
15678 reldata
->hashes
= NULL
;
15681 /* Unwinding tables are not referenced directly. This pass marks them as
15682 required if the corresponding code section is marked. Similarly, ARMv8-M
15683 secure entry functions can only be referenced by SG veneers which are
15684 created after the GC process. They need to be marked in case they reside in
15685 their own section (as would be the case if code was compiled with
15686 -ffunction-sections). */
15689 elf32_arm_gc_mark_extra_sections (struct bfd_link_info
*info
,
15690 elf_gc_mark_hook_fn gc_mark_hook
)
15693 Elf_Internal_Shdr
**elf_shdrp
;
15694 asection
*cmse_sec
;
15695 obj_attribute
*out_attr
;
15696 Elf_Internal_Shdr
*symtab_hdr
;
15697 unsigned i
, sym_count
, ext_start
;
15698 const struct elf_backend_data
*bed
;
15699 struct elf_link_hash_entry
**sym_hashes
;
15700 struct elf32_arm_link_hash_entry
*cmse_hash
;
15701 bfd_boolean again
, is_v8m
, first_bfd_browse
= TRUE
;
15703 _bfd_elf_gc_mark_extra_sections (info
, gc_mark_hook
);
15705 out_attr
= elf_known_obj_attributes_proc (info
->output_bfd
);
15706 is_v8m
= out_attr
[Tag_CPU_arch
].i
>= TAG_CPU_ARCH_V8M_BASE
15707 && out_attr
[Tag_CPU_arch_profile
].i
== 'M';
15709 /* Marking EH data may cause additional code sections to be marked,
15710 requiring multiple passes. */
15715 for (sub
= info
->input_bfds
; sub
!= NULL
; sub
= sub
->link
.next
)
15719 if (! is_arm_elf (sub
))
15722 elf_shdrp
= elf_elfsections (sub
);
15723 for (o
= sub
->sections
; o
!= NULL
; o
= o
->next
)
15725 Elf_Internal_Shdr
*hdr
;
15727 hdr
= &elf_section_data (o
)->this_hdr
;
15728 if (hdr
->sh_type
== SHT_ARM_EXIDX
15730 && hdr
->sh_link
< elf_numsections (sub
)
15732 && elf_shdrp
[hdr
->sh_link
]->bfd_section
->gc_mark
)
15735 if (!_bfd_elf_gc_mark (info
, o
, gc_mark_hook
))
15740 /* Mark section holding ARMv8-M secure entry functions. We mark all
15741 of them so no need for a second browsing. */
15742 if (is_v8m
&& first_bfd_browse
)
15744 sym_hashes
= elf_sym_hashes (sub
);
15745 bed
= get_elf_backend_data (sub
);
15746 symtab_hdr
= &elf_tdata (sub
)->symtab_hdr
;
15747 sym_count
= symtab_hdr
->sh_size
/ bed
->s
->sizeof_sym
;
15748 ext_start
= symtab_hdr
->sh_info
;
15750 /* Scan symbols. */
15751 for (i
= ext_start
; i
< sym_count
; i
++)
15753 cmse_hash
= elf32_arm_hash_entry (sym_hashes
[i
- ext_start
]);
15755 /* Assume it is a special symbol. If not, cmse_scan will
15756 warn about it and user can do something about it. */
15757 if (ARM_GET_SYM_CMSE_SPCL (cmse_hash
->root
.target_internal
))
15759 cmse_sec
= cmse_hash
->root
.root
.u
.def
.section
;
15760 if (!cmse_sec
->gc_mark
15761 && !_bfd_elf_gc_mark (info
, cmse_sec
, gc_mark_hook
))
15767 first_bfd_browse
= FALSE
;
15773 /* Treat mapping symbols as special target symbols. */
15776 elf32_arm_is_target_special_symbol (bfd
* abfd ATTRIBUTE_UNUSED
, asymbol
* sym
)
15778 return bfd_is_arm_special_symbol_name (sym
->name
,
15779 BFD_ARM_SPECIAL_SYM_TYPE_ANY
);
15782 /* This is a copy of elf_find_function() from elf.c except that
15783 ARM mapping symbols are ignored when looking for function names
15784 and STT_ARM_TFUNC is considered to a function type. */
15787 arm_elf_find_function (bfd
* abfd ATTRIBUTE_UNUSED
,
15788 asymbol
** symbols
,
15789 asection
* section
,
15791 const char ** filename_ptr
,
15792 const char ** functionname_ptr
)
15794 const char * filename
= NULL
;
15795 asymbol
* func
= NULL
;
15796 bfd_vma low_func
= 0;
15799 for (p
= symbols
; *p
!= NULL
; p
++)
15801 elf_symbol_type
*q
;
15803 q
= (elf_symbol_type
*) *p
;
15805 switch (ELF_ST_TYPE (q
->internal_elf_sym
.st_info
))
15810 filename
= bfd_asymbol_name (&q
->symbol
);
15813 case STT_ARM_TFUNC
:
15815 /* Skip mapping symbols. */
15816 if ((q
->symbol
.flags
& BSF_LOCAL
)
15817 && bfd_is_arm_special_symbol_name (q
->symbol
.name
,
15818 BFD_ARM_SPECIAL_SYM_TYPE_ANY
))
15820 /* Fall through. */
15821 if (bfd_get_section (&q
->symbol
) == section
15822 && q
->symbol
.value
>= low_func
15823 && q
->symbol
.value
<= offset
)
15825 func
= (asymbol
*) q
;
15826 low_func
= q
->symbol
.value
;
15836 *filename_ptr
= filename
;
15837 if (functionname_ptr
)
15838 *functionname_ptr
= bfd_asymbol_name (func
);
15844 /* Find the nearest line to a particular section and offset, for error
15845 reporting. This code is a duplicate of the code in elf.c, except
15846 that it uses arm_elf_find_function. */
15849 elf32_arm_find_nearest_line (bfd
* abfd
,
15850 asymbol
** symbols
,
15851 asection
* section
,
15853 const char ** filename_ptr
,
15854 const char ** functionname_ptr
,
15855 unsigned int * line_ptr
,
15856 unsigned int * discriminator_ptr
)
15858 bfd_boolean found
= FALSE
;
15860 if (_bfd_dwarf2_find_nearest_line (abfd
, symbols
, NULL
, section
, offset
,
15861 filename_ptr
, functionname_ptr
,
15862 line_ptr
, discriminator_ptr
,
15863 dwarf_debug_sections
, 0,
15864 & elf_tdata (abfd
)->dwarf2_find_line_info
))
15866 if (!*functionname_ptr
)
15867 arm_elf_find_function (abfd
, symbols
, section
, offset
,
15868 *filename_ptr
? NULL
: filename_ptr
,
15874 /* Skip _bfd_dwarf1_find_nearest_line since no known ARM toolchain
15877 if (! _bfd_stab_section_find_nearest_line (abfd
, symbols
, section
, offset
,
15878 & found
, filename_ptr
,
15879 functionname_ptr
, line_ptr
,
15880 & elf_tdata (abfd
)->line_info
))
15883 if (found
&& (*functionname_ptr
|| *line_ptr
))
15886 if (symbols
== NULL
)
15889 if (! arm_elf_find_function (abfd
, symbols
, section
, offset
,
15890 filename_ptr
, functionname_ptr
))
15898 elf32_arm_find_inliner_info (bfd
* abfd
,
15899 const char ** filename_ptr
,
15900 const char ** functionname_ptr
,
15901 unsigned int * line_ptr
)
15904 found
= _bfd_dwarf2_find_inliner_info (abfd
, filename_ptr
,
15905 functionname_ptr
, line_ptr
,
15906 & elf_tdata (abfd
)->dwarf2_find_line_info
);
15910 /* Find dynamic relocs for H that apply to read-only sections. */
15913 readonly_dynrelocs (struct elf_link_hash_entry
*h
)
15915 struct elf_dyn_relocs
*p
;
15917 for (p
= elf32_arm_hash_entry (h
)->dyn_relocs
; p
!= NULL
; p
= p
->next
)
15919 asection
*s
= p
->sec
->output_section
;
15921 if (s
!= NULL
&& (s
->flags
& SEC_READONLY
) != 0)
15927 /* Adjust a symbol defined by a dynamic object and referenced by a
15928 regular object. The current definition is in some section of the
15929 dynamic object, but we're not including those sections. We have to
15930 change the definition to something the rest of the link can
15934 elf32_arm_adjust_dynamic_symbol (struct bfd_link_info
* info
,
15935 struct elf_link_hash_entry
* h
)
15938 asection
*s
, *srel
;
15939 struct elf32_arm_link_hash_entry
* eh
;
15940 struct elf32_arm_link_hash_table
*globals
;
15942 globals
= elf32_arm_hash_table (info
);
15943 if (globals
== NULL
)
15946 dynobj
= elf_hash_table (info
)->dynobj
;
15948 /* Make sure we know what is going on here. */
15949 BFD_ASSERT (dynobj
!= NULL
15951 || h
->type
== STT_GNU_IFUNC
15955 && !h
->def_regular
)));
15957 eh
= (struct elf32_arm_link_hash_entry
*) h
;
15959 /* If this is a function, put it in the procedure linkage table. We
15960 will fill in the contents of the procedure linkage table later,
15961 when we know the address of the .got section. */
15962 if (h
->type
== STT_FUNC
|| h
->type
== STT_GNU_IFUNC
|| h
->needs_plt
)
15964 /* Calls to STT_GNU_IFUNC symbols always use a PLT, even if the
15965 symbol binds locally. */
15966 if (h
->plt
.refcount
<= 0
15967 || (h
->type
!= STT_GNU_IFUNC
15968 && (SYMBOL_CALLS_LOCAL (info
, h
)
15969 || (ELF_ST_VISIBILITY (h
->other
) != STV_DEFAULT
15970 && h
->root
.type
== bfd_link_hash_undefweak
))))
15972 /* This case can occur if we saw a PLT32 reloc in an input
15973 file, but the symbol was never referred to by a dynamic
15974 object, or if all references were garbage collected. In
15975 such a case, we don't actually need to build a procedure
15976 linkage table, and we can just do a PC24 reloc instead. */
15977 h
->plt
.offset
= (bfd_vma
) -1;
15978 eh
->plt
.thumb_refcount
= 0;
15979 eh
->plt
.maybe_thumb_refcount
= 0;
15980 eh
->plt
.noncall_refcount
= 0;
15988 /* It's possible that we incorrectly decided a .plt reloc was
15989 needed for an R_ARM_PC24 or similar reloc to a non-function sym
15990 in check_relocs. We can't decide accurately between function
15991 and non-function syms in check-relocs; Objects loaded later in
15992 the link may change h->type. So fix it now. */
15993 h
->plt
.offset
= (bfd_vma
) -1;
15994 eh
->plt
.thumb_refcount
= 0;
15995 eh
->plt
.maybe_thumb_refcount
= 0;
15996 eh
->plt
.noncall_refcount
= 0;
15999 /* If this is a weak symbol, and there is a real definition, the
16000 processor independent code will have arranged for us to see the
16001 real definition first, and we can just use the same value. */
16002 if (h
->is_weakalias
)
16004 struct elf_link_hash_entry
*def
= weakdef (h
);
16005 BFD_ASSERT (def
->root
.type
== bfd_link_hash_defined
);
16006 h
->root
.u
.def
.section
= def
->root
.u
.def
.section
;
16007 h
->root
.u
.def
.value
= def
->root
.u
.def
.value
;
16011 /* If there are no non-GOT references, we do not need a copy
16013 if (!h
->non_got_ref
)
16016 /* This is a reference to a symbol defined by a dynamic object which
16017 is not a function. */
16019 /* If we are creating a shared library, we must presume that the
16020 only references to the symbol are via the global offset table.
16021 For such cases we need not do anything here; the relocations will
16022 be handled correctly by relocate_section. Relocatable executables
16023 can reference data in shared objects directly, so we don't need to
16024 do anything here. */
16025 if (bfd_link_pic (info
) || globals
->root
.is_relocatable_executable
)
16028 /* We must allocate the symbol in our .dynbss section, which will
16029 become part of the .bss section of the executable. There will be
16030 an entry for this symbol in the .dynsym section. The dynamic
16031 object will contain position independent code, so all references
16032 from the dynamic object to this symbol will go through the global
16033 offset table. The dynamic linker will use the .dynsym entry to
16034 determine the address it must put in the global offset table, so
16035 both the dynamic object and the regular object will refer to the
16036 same memory location for the variable. */
16037 /* If allowed, we must generate a R_ARM_COPY reloc to tell the dynamic
16038 linker to copy the initial value out of the dynamic object and into
16039 the runtime process image. We need to remember the offset into the
16040 .rel(a).bss section we are going to use. */
16041 if ((h
->root
.u
.def
.section
->flags
& SEC_READONLY
) != 0)
16043 s
= globals
->root
.sdynrelro
;
16044 srel
= globals
->root
.sreldynrelro
;
16048 s
= globals
->root
.sdynbss
;
16049 srel
= globals
->root
.srelbss
;
16051 if (info
->nocopyreloc
== 0
16052 && (h
->root
.u
.def
.section
->flags
& SEC_ALLOC
) != 0
16055 elf32_arm_allocate_dynrelocs (info
, srel
, 1);
16059 return _bfd_elf_adjust_dynamic_copy (info
, h
, s
);
16062 /* Allocate space in .plt, .got and associated reloc sections for
16066 allocate_dynrelocs_for_symbol (struct elf_link_hash_entry
*h
, void * inf
)
16068 struct bfd_link_info
*info
;
16069 struct elf32_arm_link_hash_table
*htab
;
16070 struct elf32_arm_link_hash_entry
*eh
;
16071 struct elf_dyn_relocs
*p
;
16073 if (h
->root
.type
== bfd_link_hash_indirect
)
16076 eh
= (struct elf32_arm_link_hash_entry
*) h
;
16078 info
= (struct bfd_link_info
*) inf
;
16079 htab
= elf32_arm_hash_table (info
);
16083 if ((htab
->root
.dynamic_sections_created
|| h
->type
== STT_GNU_IFUNC
)
16084 && h
->plt
.refcount
> 0)
16086 /* Make sure this symbol is output as a dynamic symbol.
16087 Undefined weak syms won't yet be marked as dynamic. */
16088 if (h
->dynindx
== -1 && !h
->forced_local
16089 && h
->root
.type
== bfd_link_hash_undefweak
)
16091 if (! bfd_elf_link_record_dynamic_symbol (info
, h
))
16095 /* If the call in the PLT entry binds locally, the associated
16096 GOT entry should use an R_ARM_IRELATIVE relocation instead of
16097 the usual R_ARM_JUMP_SLOT. Put it in the .iplt section rather
16098 than the .plt section. */
16099 if (h
->type
== STT_GNU_IFUNC
&& SYMBOL_CALLS_LOCAL (info
, h
))
16102 if (eh
->plt
.noncall_refcount
== 0
16103 && SYMBOL_REFERENCES_LOCAL (info
, h
))
16104 /* All non-call references can be resolved directly.
16105 This means that they can (and in some cases, must)
16106 resolve directly to the run-time target, rather than
16107 to the PLT. That in turns means that any .got entry
16108 would be equal to the .igot.plt entry, so there's
16109 no point having both. */
16110 h
->got
.refcount
= 0;
16113 if (bfd_link_pic (info
)
16115 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h
))
16117 elf32_arm_allocate_plt_entry (info
, eh
->is_iplt
, &h
->plt
, &eh
->plt
);
16119 /* If this symbol is not defined in a regular file, and we are
16120 not generating a shared library, then set the symbol to this
16121 location in the .plt. This is required to make function
16122 pointers compare as equal between the normal executable and
16123 the shared library. */
16124 if (! bfd_link_pic (info
)
16125 && !h
->def_regular
)
16127 h
->root
.u
.def
.section
= htab
->root
.splt
;
16128 h
->root
.u
.def
.value
= h
->plt
.offset
;
16130 /* Make sure the function is not marked as Thumb, in case
16131 it is the target of an ABS32 relocation, which will
16132 point to the PLT entry. */
16133 ARM_SET_SYM_BRANCH_TYPE (h
->target_internal
, ST_BRANCH_TO_ARM
);
16136 /* VxWorks executables have a second set of relocations for
16137 each PLT entry. They go in a separate relocation section,
16138 which is processed by the kernel loader. */
16139 if (htab
->vxworks_p
&& !bfd_link_pic (info
))
16141 /* There is a relocation for the initial PLT entry:
16142 an R_ARM_32 relocation for _GLOBAL_OFFSET_TABLE_. */
16143 if (h
->plt
.offset
== htab
->plt_header_size
)
16144 elf32_arm_allocate_dynrelocs (info
, htab
->srelplt2
, 1);
16146 /* There are two extra relocations for each subsequent
16147 PLT entry: an R_ARM_32 relocation for the GOT entry,
16148 and an R_ARM_32 relocation for the PLT entry. */
16149 elf32_arm_allocate_dynrelocs (info
, htab
->srelplt2
, 2);
16154 h
->plt
.offset
= (bfd_vma
) -1;
16160 h
->plt
.offset
= (bfd_vma
) -1;
16164 eh
= (struct elf32_arm_link_hash_entry
*) h
;
16165 eh
->tlsdesc_got
= (bfd_vma
) -1;
16167 if (h
->got
.refcount
> 0)
16171 int tls_type
= elf32_arm_hash_entry (h
)->tls_type
;
16174 /* Make sure this symbol is output as a dynamic symbol.
16175 Undefined weak syms won't yet be marked as dynamic. */
16176 if (htab
->root
.dynamic_sections_created
&& h
->dynindx
== -1 && !h
->forced_local
16177 && h
->root
.type
== bfd_link_hash_undefweak
)
16179 if (! bfd_elf_link_record_dynamic_symbol (info
, h
))
16183 if (!htab
->symbian_p
)
16185 s
= htab
->root
.sgot
;
16186 h
->got
.offset
= s
->size
;
16188 if (tls_type
== GOT_UNKNOWN
)
16191 if (tls_type
== GOT_NORMAL
)
16192 /* Non-TLS symbols need one GOT slot. */
16196 if (tls_type
& GOT_TLS_GDESC
)
16198 /* R_ARM_TLS_DESC needs 2 GOT slots. */
16200 = (htab
->root
.sgotplt
->size
16201 - elf32_arm_compute_jump_table_size (htab
));
16202 htab
->root
.sgotplt
->size
+= 8;
16203 h
->got
.offset
= (bfd_vma
) -2;
16204 /* plt.got_offset needs to know there's a TLS_DESC
16205 reloc in the middle of .got.plt. */
16206 htab
->num_tls_desc
++;
16209 if (tls_type
& GOT_TLS_GD
)
16211 /* R_ARM_TLS_GD32 and R_ARM_TLS_GD32_FDPIC need two
16212 consecutive GOT slots. If the symbol is both GD
16213 and GDESC, got.offset may have been
16215 h
->got
.offset
= s
->size
;
16219 if (tls_type
& GOT_TLS_IE
)
16220 /* R_ARM_TLS_IE32/R_ARM_TLS_IE32_FDPIC need one GOT
16225 dyn
= htab
->root
.dynamic_sections_created
;
16228 if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn
,
16229 bfd_link_pic (info
),
16231 && (!bfd_link_pic (info
)
16232 || !SYMBOL_REFERENCES_LOCAL (info
, h
)))
16235 if (tls_type
!= GOT_NORMAL
16236 && (bfd_link_pic (info
) || indx
!= 0)
16237 && (ELF_ST_VISIBILITY (h
->other
) == STV_DEFAULT
16238 || h
->root
.type
!= bfd_link_hash_undefweak
))
16240 if (tls_type
& GOT_TLS_IE
)
16241 elf32_arm_allocate_dynrelocs (info
, htab
->root
.srelgot
, 1);
16243 if (tls_type
& GOT_TLS_GD
)
16244 elf32_arm_allocate_dynrelocs (info
, htab
->root
.srelgot
, 1);
16246 if (tls_type
& GOT_TLS_GDESC
)
16248 elf32_arm_allocate_dynrelocs (info
, htab
->root
.srelplt
, 1);
16249 /* GDESC needs a trampoline to jump to. */
16250 htab
->tls_trampoline
= -1;
16253 /* Only GD needs it. GDESC just emits one relocation per
16255 if ((tls_type
& GOT_TLS_GD
) && indx
!= 0)
16256 elf32_arm_allocate_dynrelocs (info
, htab
->root
.srelgot
, 1);
16258 else if (((indx
!= -1) || htab
->fdpic_p
)
16259 && !SYMBOL_REFERENCES_LOCAL (info
, h
))
16261 if (htab
->root
.dynamic_sections_created
)
16262 /* Reserve room for the GOT entry's R_ARM_GLOB_DAT relocation. */
16263 elf32_arm_allocate_dynrelocs (info
, htab
->root
.srelgot
, 1);
16265 else if (h
->type
== STT_GNU_IFUNC
16266 && eh
->plt
.noncall_refcount
== 0)
16267 /* No non-call references resolve the STT_GNU_IFUNC's PLT entry;
16268 they all resolve dynamically instead. Reserve room for the
16269 GOT entry's R_ARM_IRELATIVE relocation. */
16270 elf32_arm_allocate_irelocs (info
, htab
->root
.srelgot
, 1);
16271 else if (bfd_link_pic (info
)
16272 && (ELF_ST_VISIBILITY (h
->other
) == STV_DEFAULT
16273 || h
->root
.type
!= bfd_link_hash_undefweak
))
16274 /* Reserve room for the GOT entry's R_ARM_RELATIVE relocation. */
16275 elf32_arm_allocate_dynrelocs (info
, htab
->root
.srelgot
, 1);
16276 else if (htab
->fdpic_p
&& tls_type
== GOT_NORMAL
)
16277 /* Reserve room for rofixup for FDPIC executable. */
16278 /* TLS relocs do not need space since they are completely
16280 htab
->srofixup
->size
+= 4;
16284 h
->got
.offset
= (bfd_vma
) -1;
16286 /* FDPIC support. */
16287 if (eh
->fdpic_cnts
.gotofffuncdesc_cnt
> 0)
16289 /* Symbol musn't be exported. */
16290 if (h
->dynindx
!= -1)
16293 /* We only allocate one function descriptor with its associated relocation. */
16294 if (eh
->fdpic_cnts
.funcdesc_offset
== -1)
16296 asection
*s
= htab
->root
.sgot
;
16298 eh
->fdpic_cnts
.funcdesc_offset
= s
->size
;
16300 /* We will add an R_ARM_FUNCDESC_VALUE relocation or two rofixups. */
16301 if (bfd_link_pic(info
))
16302 elf32_arm_allocate_dynrelocs (info
, htab
->root
.srelgot
, 1);
16304 htab
->srofixup
->size
+= 8;
16308 if (eh
->fdpic_cnts
.gotfuncdesc_cnt
> 0)
16310 asection
*s
= htab
->root
.sgot
;
16312 if (htab
->root
.dynamic_sections_created
&& h
->dynindx
== -1
16313 && !h
->forced_local
)
16314 if (! bfd_elf_link_record_dynamic_symbol (info
, h
))
16317 if (h
->dynindx
== -1)
16319 /* We only allocate one function descriptor with its associated relocation. q */
16320 if (eh
->fdpic_cnts
.funcdesc_offset
== -1)
16323 eh
->fdpic_cnts
.funcdesc_offset
= s
->size
;
16325 /* We will add an R_ARM_FUNCDESC_VALUE relocation or two rofixups. */
16326 if (bfd_link_pic(info
))
16327 elf32_arm_allocate_dynrelocs (info
, htab
->root
.srelgot
, 1);
16329 htab
->srofixup
->size
+= 8;
16333 /* Add one entry into the GOT and a R_ARM_FUNCDESC or
16334 R_ARM_RELATIVE/rofixup relocation on it. */
16335 eh
->fdpic_cnts
.gotfuncdesc_offset
= s
->size
;
16337 if (h
->dynindx
== -1 && !bfd_link_pic(info
))
16338 htab
->srofixup
->size
+= 4;
16340 elf32_arm_allocate_dynrelocs (info
, htab
->root
.srelgot
, 1);
16343 if (eh
->fdpic_cnts
.funcdesc_cnt
> 0)
16345 if (htab
->root
.dynamic_sections_created
&& h
->dynindx
== -1
16346 && !h
->forced_local
)
16347 if (! bfd_elf_link_record_dynamic_symbol (info
, h
))
16350 if (h
->dynindx
== -1)
16352 /* We only allocate one function descriptor with its associated relocation. */
16353 if (eh
->fdpic_cnts
.funcdesc_offset
== -1)
16355 asection
*s
= htab
->root
.sgot
;
16357 eh
->fdpic_cnts
.funcdesc_offset
= s
->size
;
16359 /* We will add an R_ARM_FUNCDESC_VALUE relocation or two rofixups. */
16360 if (bfd_link_pic(info
))
16361 elf32_arm_allocate_dynrelocs (info
, htab
->root
.srelgot
, 1);
16363 htab
->srofixup
->size
+= 8;
16366 if (h
->dynindx
== -1 && !bfd_link_pic(info
))
16368 /* For FDPIC executable we replace R_ARM_RELATIVE with a rofixup. */
16369 htab
->srofixup
->size
+= 4 * eh
->fdpic_cnts
.funcdesc_cnt
;
16373 /* Will need one dynamic reloc per reference. will be either
16374 R_ARM_FUNCDESC or R_ARM_RELATIVE for hidden symbols. */
16375 elf32_arm_allocate_dynrelocs (info
, htab
->root
.srelgot
,
16376 eh
->fdpic_cnts
.funcdesc_cnt
);
16380 /* Allocate stubs for exported Thumb functions on v4t. */
16381 if (!htab
->use_blx
&& h
->dynindx
!= -1
16383 && ARM_GET_SYM_BRANCH_TYPE (h
->target_internal
) == ST_BRANCH_TO_THUMB
16384 && ELF_ST_VISIBILITY (h
->other
) == STV_DEFAULT
)
16386 struct elf_link_hash_entry
* th
;
16387 struct bfd_link_hash_entry
* bh
;
16388 struct elf_link_hash_entry
* myh
;
16392 /* Create a new symbol to regist the real location of the function. */
16393 s
= h
->root
.u
.def
.section
;
16394 sprintf (name
, "__real_%s", h
->root
.root
.string
);
16395 _bfd_generic_link_add_one_symbol (info
, s
->owner
,
16396 name
, BSF_GLOBAL
, s
,
16397 h
->root
.u
.def
.value
,
16398 NULL
, TRUE
, FALSE
, &bh
);
16400 myh
= (struct elf_link_hash_entry
*) bh
;
16401 myh
->type
= ELF_ST_INFO (STB_LOCAL
, STT_FUNC
);
16402 myh
->forced_local
= 1;
16403 ARM_SET_SYM_BRANCH_TYPE (myh
->target_internal
, ST_BRANCH_TO_THUMB
);
16404 eh
->export_glue
= myh
;
16405 th
= record_arm_to_thumb_glue (info
, h
);
16406 /* Point the symbol at the stub. */
16407 h
->type
= ELF_ST_INFO (ELF_ST_BIND (h
->type
), STT_FUNC
);
16408 ARM_SET_SYM_BRANCH_TYPE (h
->target_internal
, ST_BRANCH_TO_ARM
);
16409 h
->root
.u
.def
.section
= th
->root
.u
.def
.section
;
16410 h
->root
.u
.def
.value
= th
->root
.u
.def
.value
& ~1;
16413 if (eh
->dyn_relocs
== NULL
)
16416 /* In the shared -Bsymbolic case, discard space allocated for
16417 dynamic pc-relative relocs against symbols which turn out to be
16418 defined in regular objects. For the normal shared case, discard
16419 space for pc-relative relocs that have become local due to symbol
16420 visibility changes. */
16422 if (bfd_link_pic (info
) || htab
->root
.is_relocatable_executable
|| htab
->fdpic_p
)
16424 /* Relocs that use pc_count are PC-relative forms, which will appear
16425 on something like ".long foo - ." or "movw REG, foo - .". We want
16426 calls to protected symbols to resolve directly to the function
16427 rather than going via the plt. If people want function pointer
16428 comparisons to work as expected then they should avoid writing
16429 assembly like ".long foo - .". */
16430 if (SYMBOL_CALLS_LOCAL (info
, h
))
16432 struct elf_dyn_relocs
**pp
;
16434 for (pp
= &eh
->dyn_relocs
; (p
= *pp
) != NULL
; )
16436 p
->count
-= p
->pc_count
;
16445 if (htab
->vxworks_p
)
16447 struct elf_dyn_relocs
**pp
;
16449 for (pp
= &eh
->dyn_relocs
; (p
= *pp
) != NULL
; )
16451 if (strcmp (p
->sec
->output_section
->name
, ".tls_vars") == 0)
16458 /* Also discard relocs on undefined weak syms with non-default
16460 if (eh
->dyn_relocs
!= NULL
16461 && h
->root
.type
== bfd_link_hash_undefweak
)
16463 if (ELF_ST_VISIBILITY (h
->other
) != STV_DEFAULT
16464 || UNDEFWEAK_NO_DYNAMIC_RELOC (info
, h
))
16465 eh
->dyn_relocs
= NULL
;
16467 /* Make sure undefined weak symbols are output as a dynamic
16469 else if (htab
->root
.dynamic_sections_created
&& h
->dynindx
== -1
16470 && !h
->forced_local
)
16472 if (! bfd_elf_link_record_dynamic_symbol (info
, h
))
16477 else if (htab
->root
.is_relocatable_executable
&& h
->dynindx
== -1
16478 && h
->root
.type
== bfd_link_hash_new
)
16480 /* Output absolute symbols so that we can create relocations
16481 against them. For normal symbols we output a relocation
16482 against the section that contains them. */
16483 if (! bfd_elf_link_record_dynamic_symbol (info
, h
))
16490 /* For the non-shared case, discard space for relocs against
16491 symbols which turn out to need copy relocs or are not
16494 if (!h
->non_got_ref
16495 && ((h
->def_dynamic
16496 && !h
->def_regular
)
16497 || (htab
->root
.dynamic_sections_created
16498 && (h
->root
.type
== bfd_link_hash_undefweak
16499 || h
->root
.type
== bfd_link_hash_undefined
))))
16501 /* Make sure this symbol is output as a dynamic symbol.
16502 Undefined weak syms won't yet be marked as dynamic. */
16503 if (h
->dynindx
== -1 && !h
->forced_local
16504 && h
->root
.type
== bfd_link_hash_undefweak
)
16506 if (! bfd_elf_link_record_dynamic_symbol (info
, h
))
16510 /* If that succeeded, we know we'll be keeping all the
16512 if (h
->dynindx
!= -1)
16516 eh
->dyn_relocs
= NULL
;
16521 /* Finally, allocate space. */
16522 for (p
= eh
->dyn_relocs
; p
!= NULL
; p
= p
->next
)
16524 asection
*sreloc
= elf_section_data (p
->sec
)->sreloc
;
16526 if (h
->type
== STT_GNU_IFUNC
16527 && eh
->plt
.noncall_refcount
== 0
16528 && SYMBOL_REFERENCES_LOCAL (info
, h
))
16529 elf32_arm_allocate_irelocs (info
, sreloc
, p
->count
);
16530 else if (h
->dynindx
!= -1 && (!bfd_link_pic(info
) || !info
->symbolic
|| !h
->def_regular
))
16531 elf32_arm_allocate_dynrelocs (info
, sreloc
, p
->count
);
16532 else if (htab
->fdpic_p
&& !bfd_link_pic(info
))
16533 htab
->srofixup
->size
+= 4 * p
->count
;
16535 elf32_arm_allocate_dynrelocs (info
, sreloc
, p
->count
);
16541 /* Set DF_TEXTREL if we find any dynamic relocs that apply to
16542 read-only sections. */
16545 maybe_set_textrel (struct elf_link_hash_entry
*h
, void *info_p
)
16549 if (h
->root
.type
== bfd_link_hash_indirect
)
16552 sec
= readonly_dynrelocs (h
);
16555 struct bfd_link_info
*info
= (struct bfd_link_info
*) info_p
;
16557 info
->flags
|= DF_TEXTREL
;
16558 info
->callbacks
->minfo
16559 (_("%pB: dynamic relocation against `%pT' in read-only section `%pA'\n"),
16560 sec
->owner
, h
->root
.root
.string
, sec
);
16562 /* Not an error, just cut short the traversal. */
16570 bfd_elf32_arm_set_byteswap_code (struct bfd_link_info
*info
,
16573 struct elf32_arm_link_hash_table
*globals
;
16575 globals
= elf32_arm_hash_table (info
);
16576 if (globals
== NULL
)
16579 globals
->byteswap_code
= byteswap_code
;
16582 /* Set the sizes of the dynamic sections. */
16585 elf32_arm_size_dynamic_sections (bfd
* output_bfd ATTRIBUTE_UNUSED
,
16586 struct bfd_link_info
* info
)
16591 bfd_boolean relocs
;
16593 struct elf32_arm_link_hash_table
*htab
;
16595 htab
= elf32_arm_hash_table (info
);
16599 dynobj
= elf_hash_table (info
)->dynobj
;
16600 BFD_ASSERT (dynobj
!= NULL
);
16601 check_use_blx (htab
);
16603 if (elf_hash_table (info
)->dynamic_sections_created
)
16605 /* Set the contents of the .interp section to the interpreter. */
16606 if (bfd_link_executable (info
) && !info
->nointerp
)
16608 s
= bfd_get_linker_section (dynobj
, ".interp");
16609 BFD_ASSERT (s
!= NULL
);
16610 s
->size
= sizeof ELF_DYNAMIC_INTERPRETER
;
16611 s
->contents
= (unsigned char *) ELF_DYNAMIC_INTERPRETER
;
16615 /* Set up .got offsets for local syms, and space for local dynamic
16617 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link
.next
)
16619 bfd_signed_vma
*local_got
;
16620 bfd_signed_vma
*end_local_got
;
16621 struct arm_local_iplt_info
**local_iplt_ptr
, *local_iplt
;
16622 char *local_tls_type
;
16623 bfd_vma
*local_tlsdesc_gotent
;
16624 bfd_size_type locsymcount
;
16625 Elf_Internal_Shdr
*symtab_hdr
;
16627 bfd_boolean is_vxworks
= htab
->vxworks_p
;
16628 unsigned int symndx
;
16629 struct fdpic_local
*local_fdpic_cnts
;
16631 if (! is_arm_elf (ibfd
))
16634 for (s
= ibfd
->sections
; s
!= NULL
; s
= s
->next
)
16636 struct elf_dyn_relocs
*p
;
16638 for (p
= (struct elf_dyn_relocs
*)
16639 elf_section_data (s
)->local_dynrel
; p
!= NULL
; p
= p
->next
)
16641 if (!bfd_is_abs_section (p
->sec
)
16642 && bfd_is_abs_section (p
->sec
->output_section
))
16644 /* Input section has been discarded, either because
16645 it is a copy of a linkonce section or due to
16646 linker script /DISCARD/, so we'll be discarding
16649 else if (is_vxworks
16650 && strcmp (p
->sec
->output_section
->name
,
16653 /* Relocations in vxworks .tls_vars sections are
16654 handled specially by the loader. */
16656 else if (p
->count
!= 0)
16658 srel
= elf_section_data (p
->sec
)->sreloc
;
16659 if (htab
->fdpic_p
&& !bfd_link_pic(info
))
16660 htab
->srofixup
->size
+= 4 * p
->count
;
16662 elf32_arm_allocate_dynrelocs (info
, srel
, p
->count
);
16663 if ((p
->sec
->output_section
->flags
& SEC_READONLY
) != 0)
16664 info
->flags
|= DF_TEXTREL
;
16669 local_got
= elf_local_got_refcounts (ibfd
);
16673 symtab_hdr
= & elf_symtab_hdr (ibfd
);
16674 locsymcount
= symtab_hdr
->sh_info
;
16675 end_local_got
= local_got
+ locsymcount
;
16676 local_iplt_ptr
= elf32_arm_local_iplt (ibfd
);
16677 local_tls_type
= elf32_arm_local_got_tls_type (ibfd
);
16678 local_tlsdesc_gotent
= elf32_arm_local_tlsdesc_gotent (ibfd
);
16679 local_fdpic_cnts
= elf32_arm_local_fdpic_cnts (ibfd
);
16681 s
= htab
->root
.sgot
;
16682 srel
= htab
->root
.srelgot
;
16683 for (; local_got
< end_local_got
;
16684 ++local_got
, ++local_iplt_ptr
, ++local_tls_type
,
16685 ++local_tlsdesc_gotent
, ++symndx
, ++local_fdpic_cnts
)
16687 *local_tlsdesc_gotent
= (bfd_vma
) -1;
16688 local_iplt
= *local_iplt_ptr
;
16690 /* FDPIC support. */
16691 if (local_fdpic_cnts
->gotofffuncdesc_cnt
> 0)
16693 if (local_fdpic_cnts
->funcdesc_offset
== -1)
16695 local_fdpic_cnts
->funcdesc_offset
= s
->size
;
16698 /* We will add an R_ARM_FUNCDESC_VALUE relocation or two rofixups. */
16699 if (bfd_link_pic(info
))
16700 elf32_arm_allocate_dynrelocs (info
, srel
, 1);
16702 htab
->srofixup
->size
+= 8;
16706 if (local_fdpic_cnts
->funcdesc_cnt
> 0)
16708 if (local_fdpic_cnts
->funcdesc_offset
== -1)
16710 local_fdpic_cnts
->funcdesc_offset
= s
->size
;
16713 /* We will add an R_ARM_FUNCDESC_VALUE relocation or two rofixups. */
16714 if (bfd_link_pic(info
))
16715 elf32_arm_allocate_dynrelocs (info
, srel
, 1);
16717 htab
->srofixup
->size
+= 8;
16720 /* We will add n R_ARM_RELATIVE relocations or n rofixups. */
16721 if (bfd_link_pic(info
))
16722 elf32_arm_allocate_dynrelocs (info
, srel
, local_fdpic_cnts
->funcdesc_cnt
);
16724 htab
->srofixup
->size
+= 4 * local_fdpic_cnts
->funcdesc_cnt
;
16727 if (local_iplt
!= NULL
)
16729 struct elf_dyn_relocs
*p
;
16731 if (local_iplt
->root
.refcount
> 0)
16733 elf32_arm_allocate_plt_entry (info
, TRUE
,
16736 if (local_iplt
->arm
.noncall_refcount
== 0)
16737 /* All references to the PLT are calls, so all
16738 non-call references can resolve directly to the
16739 run-time target. This means that the .got entry
16740 would be the same as the .igot.plt entry, so there's
16741 no point creating both. */
16746 BFD_ASSERT (local_iplt
->arm
.noncall_refcount
== 0);
16747 local_iplt
->root
.offset
= (bfd_vma
) -1;
16750 for (p
= local_iplt
->dyn_relocs
; p
!= NULL
; p
= p
->next
)
16754 psrel
= elf_section_data (p
->sec
)->sreloc
;
16755 if (local_iplt
->arm
.noncall_refcount
== 0)
16756 elf32_arm_allocate_irelocs (info
, psrel
, p
->count
);
16758 elf32_arm_allocate_dynrelocs (info
, psrel
, p
->count
);
16761 if (*local_got
> 0)
16763 Elf_Internal_Sym
*isym
;
16765 *local_got
= s
->size
;
16766 if (*local_tls_type
& GOT_TLS_GD
)
16767 /* TLS_GD relocs need an 8-byte structure in the GOT. */
16769 if (*local_tls_type
& GOT_TLS_GDESC
)
16771 *local_tlsdesc_gotent
= htab
->root
.sgotplt
->size
16772 - elf32_arm_compute_jump_table_size (htab
);
16773 htab
->root
.sgotplt
->size
+= 8;
16774 *local_got
= (bfd_vma
) -2;
16775 /* plt.got_offset needs to know there's a TLS_DESC
16776 reloc in the middle of .got.plt. */
16777 htab
->num_tls_desc
++;
16779 if (*local_tls_type
& GOT_TLS_IE
)
16782 if (*local_tls_type
& GOT_NORMAL
)
16784 /* If the symbol is both GD and GDESC, *local_got
16785 may have been overwritten. */
16786 *local_got
= s
->size
;
16790 isym
= bfd_sym_from_r_symndx (&htab
->sym_cache
, ibfd
, symndx
);
16794 /* If all references to an STT_GNU_IFUNC PLT are calls,
16795 then all non-call references, including this GOT entry,
16796 resolve directly to the run-time target. */
16797 if (ELF32_ST_TYPE (isym
->st_info
) == STT_GNU_IFUNC
16798 && (local_iplt
== NULL
16799 || local_iplt
->arm
.noncall_refcount
== 0))
16800 elf32_arm_allocate_irelocs (info
, srel
, 1);
16801 else if (bfd_link_pic (info
) || output_bfd
->flags
& DYNAMIC
|| htab
->fdpic_p
)
16803 if ((bfd_link_pic (info
) && !(*local_tls_type
& GOT_TLS_GDESC
)))
16804 elf32_arm_allocate_dynrelocs (info
, srel
, 1);
16805 else if (htab
->fdpic_p
&& *local_tls_type
& GOT_NORMAL
)
16806 htab
->srofixup
->size
+= 4;
16808 if ((bfd_link_pic (info
) || htab
->fdpic_p
)
16809 && *local_tls_type
& GOT_TLS_GDESC
)
16811 elf32_arm_allocate_dynrelocs (info
,
16812 htab
->root
.srelplt
, 1);
16813 htab
->tls_trampoline
= -1;
16818 *local_got
= (bfd_vma
) -1;
16822 if (htab
->tls_ldm_got
.refcount
> 0)
16824 /* Allocate two GOT entries and one dynamic relocation (if necessary)
16825 for R_ARM_TLS_LDM32/R_ARM_TLS_LDM32_FDPIC relocations. */
16826 htab
->tls_ldm_got
.offset
= htab
->root
.sgot
->size
;
16827 htab
->root
.sgot
->size
+= 8;
16828 if (bfd_link_pic (info
))
16829 elf32_arm_allocate_dynrelocs (info
, htab
->root
.srelgot
, 1);
16832 htab
->tls_ldm_got
.offset
= -1;
16834 /* At the very end of the .rofixup section is a pointer to the GOT,
16835 reserve space for it. */
16836 if (htab
->fdpic_p
&& htab
->srofixup
!= NULL
)
16837 htab
->srofixup
->size
+= 4;
16839 /* Allocate global sym .plt and .got entries, and space for global
16840 sym dynamic relocs. */
16841 elf_link_hash_traverse (& htab
->root
, allocate_dynrelocs_for_symbol
, info
);
16843 /* Here we rummage through the found bfds to collect glue information. */
16844 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link
.next
)
16846 if (! is_arm_elf (ibfd
))
16849 /* Initialise mapping tables for code/data. */
16850 bfd_elf32_arm_init_maps (ibfd
);
16852 if (!bfd_elf32_arm_process_before_allocation (ibfd
, info
)
16853 || !bfd_elf32_arm_vfp11_erratum_scan (ibfd
, info
)
16854 || !bfd_elf32_arm_stm32l4xx_erratum_scan (ibfd
, info
))
16855 _bfd_error_handler (_("errors encountered processing file %pB"), ibfd
);
16858 /* Allocate space for the glue sections now that we've sized them. */
16859 bfd_elf32_arm_allocate_interworking_sections (info
);
16861 /* For every jump slot reserved in the sgotplt, reloc_count is
16862 incremented. However, when we reserve space for TLS descriptors,
16863 it's not incremented, so in order to compute the space reserved
16864 for them, it suffices to multiply the reloc count by the jump
16866 if (htab
->root
.srelplt
)
16867 htab
->sgotplt_jump_table_size
= elf32_arm_compute_jump_table_size(htab
);
16869 if (htab
->tls_trampoline
)
16871 if (htab
->root
.splt
->size
== 0)
16872 htab
->root
.splt
->size
+= htab
->plt_header_size
;
16874 htab
->tls_trampoline
= htab
->root
.splt
->size
;
16875 htab
->root
.splt
->size
+= htab
->plt_entry_size
;
16877 /* If we're not using lazy TLS relocations, don't generate the
16878 PLT and GOT entries they require. */
16879 if (!(info
->flags
& DF_BIND_NOW
))
16881 htab
->dt_tlsdesc_got
= htab
->root
.sgot
->size
;
16882 htab
->root
.sgot
->size
+= 4;
16884 htab
->dt_tlsdesc_plt
= htab
->root
.splt
->size
;
16885 htab
->root
.splt
->size
+= 4 * ARRAY_SIZE (dl_tlsdesc_lazy_trampoline
);
16889 /* The check_relocs and adjust_dynamic_symbol entry points have
16890 determined the sizes of the various dynamic sections. Allocate
16891 memory for them. */
16894 for (s
= dynobj
->sections
; s
!= NULL
; s
= s
->next
)
16898 if ((s
->flags
& SEC_LINKER_CREATED
) == 0)
16901 /* It's OK to base decisions on the section name, because none
16902 of the dynobj section names depend upon the input files. */
16903 name
= bfd_get_section_name (dynobj
, s
);
16905 if (s
== htab
->root
.splt
)
16907 /* Remember whether there is a PLT. */
16908 plt
= s
->size
!= 0;
16910 else if (CONST_STRNEQ (name
, ".rel"))
16914 /* Remember whether there are any reloc sections other
16915 than .rel(a).plt and .rela.plt.unloaded. */
16916 if (s
!= htab
->root
.srelplt
&& s
!= htab
->srelplt2
)
16919 /* We use the reloc_count field as a counter if we need
16920 to copy relocs into the output file. */
16921 s
->reloc_count
= 0;
16924 else if (s
!= htab
->root
.sgot
16925 && s
!= htab
->root
.sgotplt
16926 && s
!= htab
->root
.iplt
16927 && s
!= htab
->root
.igotplt
16928 && s
!= htab
->root
.sdynbss
16929 && s
!= htab
->root
.sdynrelro
16930 && s
!= htab
->srofixup
)
16932 /* It's not one of our sections, so don't allocate space. */
16938 /* If we don't need this section, strip it from the
16939 output file. This is mostly to handle .rel(a).bss and
16940 .rel(a).plt. We must create both sections in
16941 create_dynamic_sections, because they must be created
16942 before the linker maps input sections to output
16943 sections. The linker does that before
16944 adjust_dynamic_symbol is called, and it is that
16945 function which decides whether anything needs to go
16946 into these sections. */
16947 s
->flags
|= SEC_EXCLUDE
;
16951 if ((s
->flags
& SEC_HAS_CONTENTS
) == 0)
16954 /* Allocate memory for the section contents. */
16955 s
->contents
= (unsigned char *) bfd_zalloc (dynobj
, s
->size
);
16956 if (s
->contents
== NULL
)
16960 if (elf_hash_table (info
)->dynamic_sections_created
)
16962 /* Add some entries to the .dynamic section. We fill in the
16963 values later, in elf32_arm_finish_dynamic_sections, but we
16964 must add the entries now so that we get the correct size for
16965 the .dynamic section. The DT_DEBUG entry is filled in by the
16966 dynamic linker and used by the debugger. */
16967 #define add_dynamic_entry(TAG, VAL) \
16968 _bfd_elf_add_dynamic_entry (info, TAG, VAL)
16970 if (bfd_link_executable (info
))
16972 if (!add_dynamic_entry (DT_DEBUG
, 0))
16978 if ( !add_dynamic_entry (DT_PLTGOT
, 0)
16979 || !add_dynamic_entry (DT_PLTRELSZ
, 0)
16980 || !add_dynamic_entry (DT_PLTREL
,
16981 htab
->use_rel
? DT_REL
: DT_RELA
)
16982 || !add_dynamic_entry (DT_JMPREL
, 0))
16985 if (htab
->dt_tlsdesc_plt
16986 && (!add_dynamic_entry (DT_TLSDESC_PLT
,0)
16987 || !add_dynamic_entry (DT_TLSDESC_GOT
,0)))
16995 if (!add_dynamic_entry (DT_REL
, 0)
16996 || !add_dynamic_entry (DT_RELSZ
, 0)
16997 || !add_dynamic_entry (DT_RELENT
, RELOC_SIZE (htab
)))
17002 if (!add_dynamic_entry (DT_RELA
, 0)
17003 || !add_dynamic_entry (DT_RELASZ
, 0)
17004 || !add_dynamic_entry (DT_RELAENT
, RELOC_SIZE (htab
)))
17009 /* If any dynamic relocs apply to a read-only section,
17010 then we need a DT_TEXTREL entry. */
17011 if ((info
->flags
& DF_TEXTREL
) == 0)
17012 elf_link_hash_traverse (&htab
->root
, maybe_set_textrel
, info
);
17014 if ((info
->flags
& DF_TEXTREL
) != 0)
17016 if (!add_dynamic_entry (DT_TEXTREL
, 0))
17019 if (htab
->vxworks_p
17020 && !elf_vxworks_add_dynamic_entries (output_bfd
, info
))
17023 #undef add_dynamic_entry
17028 /* Size sections even though they're not dynamic. We use it to setup
17029 _TLS_MODULE_BASE_, if needed. */
17032 elf32_arm_always_size_sections (bfd
*output_bfd
,
17033 struct bfd_link_info
*info
)
17036 struct elf32_arm_link_hash_table
*htab
;
17038 htab
= elf32_arm_hash_table (info
);
17040 if (bfd_link_relocatable (info
))
17043 tls_sec
= elf_hash_table (info
)->tls_sec
;
17047 struct elf_link_hash_entry
*tlsbase
;
17049 tlsbase
= elf_link_hash_lookup
17050 (elf_hash_table (info
), "_TLS_MODULE_BASE_", TRUE
, TRUE
, FALSE
);
17054 struct bfd_link_hash_entry
*bh
= NULL
;
17055 const struct elf_backend_data
*bed
17056 = get_elf_backend_data (output_bfd
);
17058 if (!(_bfd_generic_link_add_one_symbol
17059 (info
, output_bfd
, "_TLS_MODULE_BASE_", BSF_LOCAL
,
17060 tls_sec
, 0, NULL
, FALSE
,
17061 bed
->collect
, &bh
)))
17064 tlsbase
->type
= STT_TLS
;
17065 tlsbase
= (struct elf_link_hash_entry
*)bh
;
17066 tlsbase
->def_regular
= 1;
17067 tlsbase
->other
= STV_HIDDEN
;
17068 (*bed
->elf_backend_hide_symbol
) (info
, tlsbase
, TRUE
);
17072 if (htab
->fdpic_p
&& !bfd_link_relocatable (info
)
17073 && !bfd_elf_stack_segment_size (output_bfd
, info
,
17074 "__stacksize", DEFAULT_STACK_SIZE
))
17080 /* Finish up dynamic symbol handling. We set the contents of various
17081 dynamic sections here. */
17084 elf32_arm_finish_dynamic_symbol (bfd
* output_bfd
,
17085 struct bfd_link_info
* info
,
17086 struct elf_link_hash_entry
* h
,
17087 Elf_Internal_Sym
* sym
)
17089 struct elf32_arm_link_hash_table
*htab
;
17090 struct elf32_arm_link_hash_entry
*eh
;
17092 htab
= elf32_arm_hash_table (info
);
17096 eh
= (struct elf32_arm_link_hash_entry
*) h
;
17098 if (h
->plt
.offset
!= (bfd_vma
) -1)
17102 BFD_ASSERT (h
->dynindx
!= -1);
17103 if (! elf32_arm_populate_plt_entry (output_bfd
, info
, &h
->plt
, &eh
->plt
,
17108 if (!h
->def_regular
)
17110 /* Mark the symbol as undefined, rather than as defined in
17111 the .plt section. */
17112 sym
->st_shndx
= SHN_UNDEF
;
17113 /* If the symbol is weak we need to clear the value.
17114 Otherwise, the PLT entry would provide a definition for
17115 the symbol even if the symbol wasn't defined anywhere,
17116 and so the symbol would never be NULL. Leave the value if
17117 there were any relocations where pointer equality matters
17118 (this is a clue for the dynamic linker, to make function
17119 pointer comparisons work between an application and shared
17121 if (!h
->ref_regular_nonweak
|| !h
->pointer_equality_needed
)
17124 else if (eh
->is_iplt
&& eh
->plt
.noncall_refcount
!= 0)
17126 /* At least one non-call relocation references this .iplt entry,
17127 so the .iplt entry is the function's canonical address. */
17128 sym
->st_info
= ELF_ST_INFO (ELF_ST_BIND (sym
->st_info
), STT_FUNC
);
17129 ARM_SET_SYM_BRANCH_TYPE (sym
->st_target_internal
, ST_BRANCH_TO_ARM
);
17130 sym
->st_shndx
= (_bfd_elf_section_from_bfd_section
17131 (output_bfd
, htab
->root
.iplt
->output_section
));
17132 sym
->st_value
= (h
->plt
.offset
17133 + htab
->root
.iplt
->output_section
->vma
17134 + htab
->root
.iplt
->output_offset
);
17141 Elf_Internal_Rela rel
;
17143 /* This symbol needs a copy reloc. Set it up. */
17144 BFD_ASSERT (h
->dynindx
!= -1
17145 && (h
->root
.type
== bfd_link_hash_defined
17146 || h
->root
.type
== bfd_link_hash_defweak
));
17149 rel
.r_offset
= (h
->root
.u
.def
.value
17150 + h
->root
.u
.def
.section
->output_section
->vma
17151 + h
->root
.u
.def
.section
->output_offset
);
17152 rel
.r_info
= ELF32_R_INFO (h
->dynindx
, R_ARM_COPY
);
17153 if (h
->root
.u
.def
.section
== htab
->root
.sdynrelro
)
17154 s
= htab
->root
.sreldynrelro
;
17156 s
= htab
->root
.srelbss
;
17157 elf32_arm_add_dynreloc (output_bfd
, info
, s
, &rel
);
17160 /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute. On VxWorks,
17161 and for FDPIC, the _GLOBAL_OFFSET_TABLE_ symbol is not absolute:
17162 it is relative to the ".got" section. */
17163 if (h
== htab
->root
.hdynamic
17164 || (!htab
->fdpic_p
&& !htab
->vxworks_p
&& h
== htab
->root
.hgot
))
17165 sym
->st_shndx
= SHN_ABS
;
17171 arm_put_trampoline (struct elf32_arm_link_hash_table
*htab
, bfd
*output_bfd
,
17173 const unsigned long *template, unsigned count
)
17177 for (ix
= 0; ix
!= count
; ix
++)
17179 unsigned long insn
= template[ix
];
17181 /* Emit mov pc,rx if bx is not permitted. */
17182 if (htab
->fix_v4bx
== 1 && (insn
& 0x0ffffff0) == 0x012fff10)
17183 insn
= (insn
& 0xf000000f) | 0x01a0f000;
17184 put_arm_insn (htab
, output_bfd
, insn
, (char *)contents
+ ix
*4);
17188 /* Install the special first PLT entry for elf32-arm-nacl. Unlike
17189 other variants, NaCl needs this entry in a static executable's
17190 .iplt too. When we're handling that case, GOT_DISPLACEMENT is
17191 zero. For .iplt really only the last bundle is useful, and .iplt
17192 could have a shorter first entry, with each individual PLT entry's
17193 relative branch calculated differently so it targets the last
17194 bundle instead of the instruction before it (labelled .Lplt_tail
17195 above). But it's simpler to keep the size and layout of PLT0
17196 consistent with the dynamic case, at the cost of some dead code at
17197 the start of .iplt and the one dead store to the stack at the start
17200 arm_nacl_put_plt0 (struct elf32_arm_link_hash_table
*htab
, bfd
*output_bfd
,
17201 asection
*plt
, bfd_vma got_displacement
)
17205 put_arm_insn (htab
, output_bfd
,
17206 elf32_arm_nacl_plt0_entry
[0]
17207 | arm_movw_immediate (got_displacement
),
17208 plt
->contents
+ 0);
17209 put_arm_insn (htab
, output_bfd
,
17210 elf32_arm_nacl_plt0_entry
[1]
17211 | arm_movt_immediate (got_displacement
),
17212 plt
->contents
+ 4);
17214 for (i
= 2; i
< ARRAY_SIZE (elf32_arm_nacl_plt0_entry
); ++i
)
17215 put_arm_insn (htab
, output_bfd
,
17216 elf32_arm_nacl_plt0_entry
[i
],
17217 plt
->contents
+ (i
* 4));
17220 /* Finish up the dynamic sections. */
17223 elf32_arm_finish_dynamic_sections (bfd
* output_bfd
, struct bfd_link_info
* info
)
17228 struct elf32_arm_link_hash_table
*htab
;
17230 htab
= elf32_arm_hash_table (info
);
17234 dynobj
= elf_hash_table (info
)->dynobj
;
17236 sgot
= htab
->root
.sgotplt
;
17237 /* A broken linker script might have discarded the dynamic sections.
17238 Catch this here so that we do not seg-fault later on. */
17239 if (sgot
!= NULL
&& bfd_is_abs_section (sgot
->output_section
))
17241 sdyn
= bfd_get_linker_section (dynobj
, ".dynamic");
17243 if (elf_hash_table (info
)->dynamic_sections_created
)
17246 Elf32_External_Dyn
*dyncon
, *dynconend
;
17248 splt
= htab
->root
.splt
;
17249 BFD_ASSERT (splt
!= NULL
&& sdyn
!= NULL
);
17250 BFD_ASSERT (htab
->symbian_p
|| sgot
!= NULL
);
17252 dyncon
= (Elf32_External_Dyn
*) sdyn
->contents
;
17253 dynconend
= (Elf32_External_Dyn
*) (sdyn
->contents
+ sdyn
->size
);
17255 for (; dyncon
< dynconend
; dyncon
++)
17257 Elf_Internal_Dyn dyn
;
17261 bfd_elf32_swap_dyn_in (dynobj
, dyncon
, &dyn
);
17268 if (htab
->vxworks_p
17269 && elf_vxworks_finish_dynamic_entry (output_bfd
, &dyn
))
17270 bfd_elf32_swap_dyn_out (output_bfd
, &dyn
, dyncon
);
17275 goto get_vma_if_bpabi
;
17278 goto get_vma_if_bpabi
;
17281 goto get_vma_if_bpabi
;
17283 name
= ".gnu.version";
17284 goto get_vma_if_bpabi
;
17286 name
= ".gnu.version_d";
17287 goto get_vma_if_bpabi
;
17289 name
= ".gnu.version_r";
17290 goto get_vma_if_bpabi
;
17293 name
= htab
->symbian_p
? ".got" : ".got.plt";
17296 name
= RELOC_SECTION (htab
, ".plt");
17298 s
= bfd_get_linker_section (dynobj
, name
);
17302 (_("could not find section %s"), name
);
17303 bfd_set_error (bfd_error_invalid_operation
);
17306 if (!htab
->symbian_p
)
17307 dyn
.d_un
.d_ptr
= s
->output_section
->vma
+ s
->output_offset
;
17309 /* In the BPABI, tags in the PT_DYNAMIC section point
17310 at the file offset, not the memory address, for the
17311 convenience of the post linker. */
17312 dyn
.d_un
.d_ptr
= s
->output_section
->filepos
+ s
->output_offset
;
17313 bfd_elf32_swap_dyn_out (output_bfd
, &dyn
, dyncon
);
17317 if (htab
->symbian_p
)
17322 s
= htab
->root
.srelplt
;
17323 BFD_ASSERT (s
!= NULL
);
17324 dyn
.d_un
.d_val
= s
->size
;
17325 bfd_elf32_swap_dyn_out (output_bfd
, &dyn
, dyncon
);
17332 /* In the BPABI, the DT_REL tag must point at the file
17333 offset, not the VMA, of the first relocation
17334 section. So, we use code similar to that in
17335 elflink.c, but do not check for SHF_ALLOC on the
17336 relocation section, since relocation sections are
17337 never allocated under the BPABI. PLT relocs are also
17339 if (htab
->symbian_p
)
17342 type
= ((dyn
.d_tag
== DT_REL
|| dyn
.d_tag
== DT_RELSZ
)
17343 ? SHT_REL
: SHT_RELA
);
17344 dyn
.d_un
.d_val
= 0;
17345 for (i
= 1; i
< elf_numsections (output_bfd
); i
++)
17347 Elf_Internal_Shdr
*hdr
17348 = elf_elfsections (output_bfd
)[i
];
17349 if (hdr
->sh_type
== type
)
17351 if (dyn
.d_tag
== DT_RELSZ
17352 || dyn
.d_tag
== DT_RELASZ
)
17353 dyn
.d_un
.d_val
+= hdr
->sh_size
;
17354 else if ((ufile_ptr
) hdr
->sh_offset
17355 <= dyn
.d_un
.d_val
- 1)
17356 dyn
.d_un
.d_val
= hdr
->sh_offset
;
17359 bfd_elf32_swap_dyn_out (output_bfd
, &dyn
, dyncon
);
17363 case DT_TLSDESC_PLT
:
17364 s
= htab
->root
.splt
;
17365 dyn
.d_un
.d_ptr
= (s
->output_section
->vma
+ s
->output_offset
17366 + htab
->dt_tlsdesc_plt
);
17367 bfd_elf32_swap_dyn_out (output_bfd
, &dyn
, dyncon
);
17370 case DT_TLSDESC_GOT
:
17371 s
= htab
->root
.sgot
;
17372 dyn
.d_un
.d_ptr
= (s
->output_section
->vma
+ s
->output_offset
17373 + htab
->dt_tlsdesc_got
);
17374 bfd_elf32_swap_dyn_out (output_bfd
, &dyn
, dyncon
);
17377 /* Set the bottom bit of DT_INIT/FINI if the
17378 corresponding function is Thumb. */
17380 name
= info
->init_function
;
17383 name
= info
->fini_function
;
17385 /* If it wasn't set by elf_bfd_final_link
17386 then there is nothing to adjust. */
17387 if (dyn
.d_un
.d_val
!= 0)
17389 struct elf_link_hash_entry
* eh
;
17391 eh
= elf_link_hash_lookup (elf_hash_table (info
), name
,
17392 FALSE
, FALSE
, TRUE
);
17394 && ARM_GET_SYM_BRANCH_TYPE (eh
->target_internal
)
17395 == ST_BRANCH_TO_THUMB
)
17397 dyn
.d_un
.d_val
|= 1;
17398 bfd_elf32_swap_dyn_out (output_bfd
, &dyn
, dyncon
);
17405 /* Fill in the first entry in the procedure linkage table. */
17406 if (splt
->size
> 0 && htab
->plt_header_size
)
17408 const bfd_vma
*plt0_entry
;
17409 bfd_vma got_address
, plt_address
, got_displacement
;
17411 /* Calculate the addresses of the GOT and PLT. */
17412 got_address
= sgot
->output_section
->vma
+ sgot
->output_offset
;
17413 plt_address
= splt
->output_section
->vma
+ splt
->output_offset
;
17415 if (htab
->vxworks_p
)
17417 /* The VxWorks GOT is relocated by the dynamic linker.
17418 Therefore, we must emit relocations rather than simply
17419 computing the values now. */
17420 Elf_Internal_Rela rel
;
17422 plt0_entry
= elf32_arm_vxworks_exec_plt0_entry
;
17423 put_arm_insn (htab
, output_bfd
, plt0_entry
[0],
17424 splt
->contents
+ 0);
17425 put_arm_insn (htab
, output_bfd
, plt0_entry
[1],
17426 splt
->contents
+ 4);
17427 put_arm_insn (htab
, output_bfd
, plt0_entry
[2],
17428 splt
->contents
+ 8);
17429 bfd_put_32 (output_bfd
, got_address
, splt
->contents
+ 12);
17431 /* Generate a relocation for _GLOBAL_OFFSET_TABLE_. */
17432 rel
.r_offset
= plt_address
+ 12;
17433 rel
.r_info
= ELF32_R_INFO (htab
->root
.hgot
->indx
, R_ARM_ABS32
);
17435 SWAP_RELOC_OUT (htab
) (output_bfd
, &rel
,
17436 htab
->srelplt2
->contents
);
17438 else if (htab
->nacl_p
)
17439 arm_nacl_put_plt0 (htab
, output_bfd
, splt
,
17440 got_address
+ 8 - (plt_address
+ 16));
17441 else if (using_thumb_only (htab
))
17443 got_displacement
= got_address
- (plt_address
+ 12);
17445 plt0_entry
= elf32_thumb2_plt0_entry
;
17446 put_arm_insn (htab
, output_bfd
, plt0_entry
[0],
17447 splt
->contents
+ 0);
17448 put_arm_insn (htab
, output_bfd
, plt0_entry
[1],
17449 splt
->contents
+ 4);
17450 put_arm_insn (htab
, output_bfd
, plt0_entry
[2],
17451 splt
->contents
+ 8);
17453 bfd_put_32 (output_bfd
, got_displacement
, splt
->contents
+ 12);
17457 got_displacement
= got_address
- (plt_address
+ 16);
17459 plt0_entry
= elf32_arm_plt0_entry
;
17460 put_arm_insn (htab
, output_bfd
, plt0_entry
[0],
17461 splt
->contents
+ 0);
17462 put_arm_insn (htab
, output_bfd
, plt0_entry
[1],
17463 splt
->contents
+ 4);
17464 put_arm_insn (htab
, output_bfd
, plt0_entry
[2],
17465 splt
->contents
+ 8);
17466 put_arm_insn (htab
, output_bfd
, plt0_entry
[3],
17467 splt
->contents
+ 12);
17469 #ifdef FOUR_WORD_PLT
17470 /* The displacement value goes in the otherwise-unused
17471 last word of the second entry. */
17472 bfd_put_32 (output_bfd
, got_displacement
, splt
->contents
+ 28);
17474 bfd_put_32 (output_bfd
, got_displacement
, splt
->contents
+ 16);
17479 /* UnixWare sets the entsize of .plt to 4, although that doesn't
17480 really seem like the right value. */
17481 if (splt
->output_section
->owner
== output_bfd
)
17482 elf_section_data (splt
->output_section
)->this_hdr
.sh_entsize
= 4;
17484 if (htab
->dt_tlsdesc_plt
)
17486 bfd_vma got_address
17487 = sgot
->output_section
->vma
+ sgot
->output_offset
;
17488 bfd_vma gotplt_address
= (htab
->root
.sgot
->output_section
->vma
17489 + htab
->root
.sgot
->output_offset
);
17490 bfd_vma plt_address
17491 = splt
->output_section
->vma
+ splt
->output_offset
;
17493 arm_put_trampoline (htab
, output_bfd
,
17494 splt
->contents
+ htab
->dt_tlsdesc_plt
,
17495 dl_tlsdesc_lazy_trampoline
, 6);
17497 bfd_put_32 (output_bfd
,
17498 gotplt_address
+ htab
->dt_tlsdesc_got
17499 - (plt_address
+ htab
->dt_tlsdesc_plt
)
17500 - dl_tlsdesc_lazy_trampoline
[6],
17501 splt
->contents
+ htab
->dt_tlsdesc_plt
+ 24);
17502 bfd_put_32 (output_bfd
,
17503 got_address
- (plt_address
+ htab
->dt_tlsdesc_plt
)
17504 - dl_tlsdesc_lazy_trampoline
[7],
17505 splt
->contents
+ htab
->dt_tlsdesc_plt
+ 24 + 4);
17508 if (htab
->tls_trampoline
)
17510 arm_put_trampoline (htab
, output_bfd
,
17511 splt
->contents
+ htab
->tls_trampoline
,
17512 tls_trampoline
, 3);
17513 #ifdef FOUR_WORD_PLT
17514 bfd_put_32 (output_bfd
, 0x00000000,
17515 splt
->contents
+ htab
->tls_trampoline
+ 12);
17519 if (htab
->vxworks_p
17520 && !bfd_link_pic (info
)
17521 && htab
->root
.splt
->size
> 0)
17523 /* Correct the .rel(a).plt.unloaded relocations. They will have
17524 incorrect symbol indexes. */
17528 num_plts
= ((htab
->root
.splt
->size
- htab
->plt_header_size
)
17529 / htab
->plt_entry_size
);
17530 p
= htab
->srelplt2
->contents
+ RELOC_SIZE (htab
);
17532 for (; num_plts
; num_plts
--)
17534 Elf_Internal_Rela rel
;
17536 SWAP_RELOC_IN (htab
) (output_bfd
, p
, &rel
);
17537 rel
.r_info
= ELF32_R_INFO (htab
->root
.hgot
->indx
, R_ARM_ABS32
);
17538 SWAP_RELOC_OUT (htab
) (output_bfd
, &rel
, p
);
17539 p
+= RELOC_SIZE (htab
);
17541 SWAP_RELOC_IN (htab
) (output_bfd
, p
, &rel
);
17542 rel
.r_info
= ELF32_R_INFO (htab
->root
.hplt
->indx
, R_ARM_ABS32
);
17543 SWAP_RELOC_OUT (htab
) (output_bfd
, &rel
, p
);
17544 p
+= RELOC_SIZE (htab
);
17549 if (htab
->nacl_p
&& htab
->root
.iplt
!= NULL
&& htab
->root
.iplt
->size
> 0)
17550 /* NaCl uses a special first entry in .iplt too. */
17551 arm_nacl_put_plt0 (htab
, output_bfd
, htab
->root
.iplt
, 0);
17553 /* Fill in the first three entries in the global offset table. */
17556 if (sgot
->size
> 0)
17559 bfd_put_32 (output_bfd
, (bfd_vma
) 0, sgot
->contents
);
17561 bfd_put_32 (output_bfd
,
17562 sdyn
->output_section
->vma
+ sdyn
->output_offset
,
17564 bfd_put_32 (output_bfd
, (bfd_vma
) 0, sgot
->contents
+ 4);
17565 bfd_put_32 (output_bfd
, (bfd_vma
) 0, sgot
->contents
+ 8);
17568 elf_section_data (sgot
->output_section
)->this_hdr
.sh_entsize
= 4;
17571 /* At the very end of the .rofixup section is a pointer to the GOT. */
17572 if (htab
->fdpic_p
&& htab
->srofixup
!= NULL
)
17574 struct elf_link_hash_entry
*hgot
= htab
->root
.hgot
;
17576 bfd_vma got_value
= hgot
->root
.u
.def
.value
17577 + hgot
->root
.u
.def
.section
->output_section
->vma
17578 + hgot
->root
.u
.def
.section
->output_offset
;
17580 arm_elf_add_rofixup(output_bfd
, htab
->srofixup
, got_value
);
17582 /* Make sure we allocated and generated the same number of fixups. */
17583 BFD_ASSERT (htab
->srofixup
->reloc_count
* 4 == htab
->srofixup
->size
);
17590 elf32_arm_post_process_headers (bfd
* abfd
, struct bfd_link_info
* link_info ATTRIBUTE_UNUSED
)
17592 Elf_Internal_Ehdr
* i_ehdrp
; /* ELF file header, internal form. */
17593 struct elf32_arm_link_hash_table
*globals
;
17594 struct elf_segment_map
*m
;
17596 i_ehdrp
= elf_elfheader (abfd
);
17598 if (EF_ARM_EABI_VERSION (i_ehdrp
->e_flags
) == EF_ARM_EABI_UNKNOWN
)
17599 i_ehdrp
->e_ident
[EI_OSABI
] = ELFOSABI_ARM
;
17601 _bfd_elf_post_process_headers (abfd
, link_info
);
17602 i_ehdrp
->e_ident
[EI_ABIVERSION
] = ARM_ELF_ABI_VERSION
;
17606 globals
= elf32_arm_hash_table (link_info
);
17607 if (globals
!= NULL
&& globals
->byteswap_code
)
17608 i_ehdrp
->e_flags
|= EF_ARM_BE8
;
17610 if (globals
->fdpic_p
)
17611 i_ehdrp
->e_ident
[EI_OSABI
] |= ELFOSABI_ARM_FDPIC
;
17614 if (EF_ARM_EABI_VERSION (i_ehdrp
->e_flags
) == EF_ARM_EABI_VER5
17615 && ((i_ehdrp
->e_type
== ET_DYN
) || (i_ehdrp
->e_type
== ET_EXEC
)))
17617 int abi
= bfd_elf_get_obj_attr_int (abfd
, OBJ_ATTR_PROC
, Tag_ABI_VFP_args
);
17618 if (abi
== AEABI_VFP_args_vfp
)
17619 i_ehdrp
->e_flags
|= EF_ARM_ABI_FLOAT_HARD
;
17621 i_ehdrp
->e_flags
|= EF_ARM_ABI_FLOAT_SOFT
;
17624 /* Scan segment to set p_flags attribute if it contains only sections with
17625 SHF_ARM_PURECODE flag. */
17626 for (m
= elf_seg_map (abfd
); m
!= NULL
; m
= m
->next
)
17632 for (j
= 0; j
< m
->count
; j
++)
17634 if (!(elf_section_flags (m
->sections
[j
]) & SHF_ARM_PURECODE
))
17640 m
->p_flags_valid
= 1;
17645 static enum elf_reloc_type_class
17646 elf32_arm_reloc_type_class (const struct bfd_link_info
*info ATTRIBUTE_UNUSED
,
17647 const asection
*rel_sec ATTRIBUTE_UNUSED
,
17648 const Elf_Internal_Rela
*rela
)
17650 switch ((int) ELF32_R_TYPE (rela
->r_info
))
17652 case R_ARM_RELATIVE
:
17653 return reloc_class_relative
;
17654 case R_ARM_JUMP_SLOT
:
17655 return reloc_class_plt
;
17657 return reloc_class_copy
;
17658 case R_ARM_IRELATIVE
:
17659 return reloc_class_ifunc
;
17661 return reloc_class_normal
;
17666 elf32_arm_final_write_processing (bfd
*abfd
, bfd_boolean linker ATTRIBUTE_UNUSED
)
17668 bfd_arm_update_notes (abfd
, ARM_NOTE_SECTION
);
17671 /* Return TRUE if this is an unwinding table entry. */
17674 is_arm_elf_unwind_section_name (bfd
* abfd ATTRIBUTE_UNUSED
, const char * name
)
17676 return (CONST_STRNEQ (name
, ELF_STRING_ARM_unwind
)
17677 || CONST_STRNEQ (name
, ELF_STRING_ARM_unwind_once
));
17681 /* Set the type and flags for an ARM section. We do this by
17682 the section name, which is a hack, but ought to work. */
17685 elf32_arm_fake_sections (bfd
* abfd
, Elf_Internal_Shdr
* hdr
, asection
* sec
)
17689 name
= bfd_get_section_name (abfd
, sec
);
17691 if (is_arm_elf_unwind_section_name (abfd
, name
))
17693 hdr
->sh_type
= SHT_ARM_EXIDX
;
17694 hdr
->sh_flags
|= SHF_LINK_ORDER
;
17697 if (sec
->flags
& SEC_ELF_PURECODE
)
17698 hdr
->sh_flags
|= SHF_ARM_PURECODE
;
17703 /* Handle an ARM specific section when reading an object file. This is
17704 called when bfd_section_from_shdr finds a section with an unknown
17708 elf32_arm_section_from_shdr (bfd
*abfd
,
17709 Elf_Internal_Shdr
* hdr
,
17713 /* There ought to be a place to keep ELF backend specific flags, but
17714 at the moment there isn't one. We just keep track of the
17715 sections by their name, instead. Fortunately, the ABI gives
17716 names for all the ARM specific sections, so we will probably get
17718 switch (hdr
->sh_type
)
17720 case SHT_ARM_EXIDX
:
17721 case SHT_ARM_PREEMPTMAP
:
17722 case SHT_ARM_ATTRIBUTES
:
17729 if (! _bfd_elf_make_section_from_shdr (abfd
, hdr
, name
, shindex
))
17735 static _arm_elf_section_data
*
17736 get_arm_elf_section_data (asection
* sec
)
17738 if (sec
&& sec
->owner
&& is_arm_elf (sec
->owner
))
17739 return elf32_arm_section_data (sec
);
17747 struct bfd_link_info
*info
;
17750 int (*func
) (void *, const char *, Elf_Internal_Sym
*,
17751 asection
*, struct elf_link_hash_entry
*);
17752 } output_arch_syminfo
;
/* Kinds of AAELF mapping symbol: $a (Arm code), $t (Thumb code),
   $d (data).  Order matches the names[] table used when emitting.  */
enum map_symbol_type
{
  ARM_MAP_ARM,
  ARM_MAP_THUMB,
  ARM_MAP_DATA
};
17762 /* Output a single mapping symbol. */
17765 elf32_arm_output_map_sym (output_arch_syminfo
*osi
,
17766 enum map_symbol_type type
,
17769 static const char *names
[3] = {"$a", "$t", "$d"};
17770 Elf_Internal_Sym sym
;
17772 sym
.st_value
= osi
->sec
->output_section
->vma
17773 + osi
->sec
->output_offset
17777 sym
.st_info
= ELF_ST_INFO (STB_LOCAL
, STT_NOTYPE
);
17778 sym
.st_shndx
= osi
->sec_shndx
;
17779 sym
.st_target_internal
= 0;
17780 elf32_arm_section_map_add (osi
->sec
, names
[type
][1], offset
);
17781 return osi
->func (osi
->flaginfo
, names
[type
], &sym
, osi
->sec
, NULL
) == 1;
17784 /* Output mapping symbols for the PLT entry described by ROOT_PLT and ARM_PLT.
17785 IS_IPLT_ENTRY_P says whether the PLT is in .iplt rather than .plt. */
17788 elf32_arm_output_plt_map_1 (output_arch_syminfo
*osi
,
17789 bfd_boolean is_iplt_entry_p
,
17790 union gotplt_union
*root_plt
,
17791 struct arm_plt_info
*arm_plt
)
17793 struct elf32_arm_link_hash_table
*htab
;
17794 bfd_vma addr
, plt_header_size
;
17796 if (root_plt
->offset
== (bfd_vma
) -1)
17799 htab
= elf32_arm_hash_table (osi
->info
);
17803 if (is_iplt_entry_p
)
17805 osi
->sec
= htab
->root
.iplt
;
17806 plt_header_size
= 0;
17810 osi
->sec
= htab
->root
.splt
;
17811 plt_header_size
= htab
->plt_header_size
;
17813 osi
->sec_shndx
= (_bfd_elf_section_from_bfd_section
17814 (osi
->info
->output_bfd
, osi
->sec
->output_section
));
17816 addr
= root_plt
->offset
& -2;
17817 if (htab
->symbian_p
)
17819 if (!elf32_arm_output_map_sym (osi
, ARM_MAP_ARM
, addr
))
17821 if (!elf32_arm_output_map_sym (osi
, ARM_MAP_DATA
, addr
+ 4))
17824 else if (htab
->vxworks_p
)
17826 if (!elf32_arm_output_map_sym (osi
, ARM_MAP_ARM
, addr
))
17828 if (!elf32_arm_output_map_sym (osi
, ARM_MAP_DATA
, addr
+ 8))
17830 if (!elf32_arm_output_map_sym (osi
, ARM_MAP_ARM
, addr
+ 12))
17832 if (!elf32_arm_output_map_sym (osi
, ARM_MAP_DATA
, addr
+ 20))
17835 else if (htab
->nacl_p
)
17837 if (!elf32_arm_output_map_sym (osi
, ARM_MAP_ARM
, addr
))
17840 else if (htab
->fdpic_p
)
17842 enum map_symbol_type type
= using_thumb_only(htab
)
17846 if (elf32_arm_plt_needs_thumb_stub_p (osi
->info
, arm_plt
))
17847 if (!elf32_arm_output_map_sym (osi
, ARM_MAP_THUMB
, addr
- 4))
17849 if (!elf32_arm_output_map_sym (osi
, type
, addr
))
17851 if (!elf32_arm_output_map_sym (osi
, ARM_MAP_DATA
, addr
+ 16))
17853 if (htab
->plt_entry_size
== 4 * ARRAY_SIZE(elf32_arm_fdpic_plt_entry
))
17854 if (!elf32_arm_output_map_sym (osi
, type
, addr
+ 24))
17857 else if (using_thumb_only (htab
))
17859 if (!elf32_arm_output_map_sym (osi
, ARM_MAP_THUMB
, addr
))
17864 bfd_boolean thumb_stub_p
;
17866 thumb_stub_p
= elf32_arm_plt_needs_thumb_stub_p (osi
->info
, arm_plt
);
17869 if (!elf32_arm_output_map_sym (osi
, ARM_MAP_THUMB
, addr
- 4))
17872 #ifdef FOUR_WORD_PLT
17873 if (!elf32_arm_output_map_sym (osi
, ARM_MAP_ARM
, addr
))
17875 if (!elf32_arm_output_map_sym (osi
, ARM_MAP_DATA
, addr
+ 12))
17878 /* A three-word PLT with no Thumb thunk contains only Arm code,
17879 so only need to output a mapping symbol for the first PLT entry and
17880 entries with thumb thunks. */
17881 if (thumb_stub_p
|| addr
== plt_header_size
)
17883 if (!elf32_arm_output_map_sym (osi
, ARM_MAP_ARM
, addr
))
17892 /* Output mapping symbols for PLT entries associated with H. */
17895 elf32_arm_output_plt_map (struct elf_link_hash_entry
*h
, void *inf
)
17897 output_arch_syminfo
*osi
= (output_arch_syminfo
*) inf
;
17898 struct elf32_arm_link_hash_entry
*eh
;
17900 if (h
->root
.type
== bfd_link_hash_indirect
)
17903 if (h
->root
.type
== bfd_link_hash_warning
)
17904 /* When warning symbols are created, they **replace** the "real"
17905 entry in the hash table, thus we never get to see the real
17906 symbol in a hash traversal. So look at it now. */
17907 h
= (struct elf_link_hash_entry
*) h
->root
.u
.i
.link
;
17909 eh
= (struct elf32_arm_link_hash_entry
*) h
;
17910 return elf32_arm_output_plt_map_1 (osi
, SYMBOL_CALLS_LOCAL (osi
->info
, h
),
17911 &h
->plt
, &eh
->plt
);
17914 /* Bind a veneered symbol to its veneer identified by its hash entry
17915 STUB_ENTRY. The veneered location thus loose its symbol. */
17918 arm_stub_claim_sym (struct elf32_arm_stub_hash_entry
*stub_entry
)
17920 struct elf32_arm_link_hash_entry
*hash
= stub_entry
->h
;
17923 hash
->root
.root
.u
.def
.section
= stub_entry
->stub_sec
;
17924 hash
->root
.root
.u
.def
.value
= stub_entry
->stub_offset
;
17925 hash
->root
.size
= stub_entry
->stub_size
;
17928 /* Output a single local symbol for a generated stub. */
17931 elf32_arm_output_stub_sym (output_arch_syminfo
*osi
, const char *name
,
17932 bfd_vma offset
, bfd_vma size
)
17934 Elf_Internal_Sym sym
;
17936 sym
.st_value
= osi
->sec
->output_section
->vma
17937 + osi
->sec
->output_offset
17939 sym
.st_size
= size
;
17941 sym
.st_info
= ELF_ST_INFO (STB_LOCAL
, STT_FUNC
);
17942 sym
.st_shndx
= osi
->sec_shndx
;
17943 sym
.st_target_internal
= 0;
17944 return osi
->func (osi
->flaginfo
, name
, &sym
, osi
->sec
, NULL
) == 1;
17948 arm_map_one_stub (struct bfd_hash_entry
* gen_entry
,
17951 struct elf32_arm_stub_hash_entry
*stub_entry
;
17952 asection
*stub_sec
;
17955 output_arch_syminfo
*osi
;
17956 const insn_sequence
*template_sequence
;
17957 enum stub_insn_type prev_type
;
17960 enum map_symbol_type sym_type
;
17962 /* Massage our args to the form they really have. */
17963 stub_entry
= (struct elf32_arm_stub_hash_entry
*) gen_entry
;
17964 osi
= (output_arch_syminfo
*) in_arg
;
17966 stub_sec
= stub_entry
->stub_sec
;
17968 /* Ensure this stub is attached to the current section being
17970 if (stub_sec
!= osi
->sec
)
17973 addr
= (bfd_vma
) stub_entry
->stub_offset
;
17974 template_sequence
= stub_entry
->stub_template
;
17976 if (arm_stub_sym_claimed (stub_entry
->stub_type
))
17977 arm_stub_claim_sym (stub_entry
);
17980 stub_name
= stub_entry
->output_name
;
17981 switch (template_sequence
[0].type
)
17984 if (!elf32_arm_output_stub_sym (osi
, stub_name
, addr
,
17985 stub_entry
->stub_size
))
17990 if (!elf32_arm_output_stub_sym (osi
, stub_name
, addr
| 1,
17991 stub_entry
->stub_size
))
18000 prev_type
= DATA_TYPE
;
18002 for (i
= 0; i
< stub_entry
->stub_template_size
; i
++)
18004 switch (template_sequence
[i
].type
)
18007 sym_type
= ARM_MAP_ARM
;
18012 sym_type
= ARM_MAP_THUMB
;
18016 sym_type
= ARM_MAP_DATA
;
18024 if (template_sequence
[i
].type
!= prev_type
)
18026 prev_type
= template_sequence
[i
].type
;
18027 if (!elf32_arm_output_map_sym (osi
, sym_type
, addr
+ size
))
18031 switch (template_sequence
[i
].type
)
18055 /* Output mapping symbols for linker generated sections,
18056 and for those data-only sections that do not have a
18060 elf32_arm_output_arch_local_syms (bfd
*output_bfd
,
18061 struct bfd_link_info
*info
,
18063 int (*func
) (void *, const char *,
18064 Elf_Internal_Sym
*,
18066 struct elf_link_hash_entry
*))
18068 output_arch_syminfo osi
;
18069 struct elf32_arm_link_hash_table
*htab
;
18071 bfd_size_type size
;
18074 htab
= elf32_arm_hash_table (info
);
18078 check_use_blx (htab
);
18080 osi
.flaginfo
= flaginfo
;
18084 /* Add a $d mapping symbol to data-only sections that
18085 don't have any mapping symbol. This may result in (harmless) redundant
18086 mapping symbols. */
18087 for (input_bfd
= info
->input_bfds
;
18089 input_bfd
= input_bfd
->link
.next
)
18091 if ((input_bfd
->flags
& (BFD_LINKER_CREATED
| HAS_SYMS
)) == HAS_SYMS
)
18092 for (osi
.sec
= input_bfd
->sections
;
18094 osi
.sec
= osi
.sec
->next
)
18096 if (osi
.sec
->output_section
!= NULL
18097 && ((osi
.sec
->output_section
->flags
& (SEC_ALLOC
| SEC_CODE
))
18099 && (osi
.sec
->flags
& (SEC_HAS_CONTENTS
| SEC_LINKER_CREATED
))
18100 == SEC_HAS_CONTENTS
18101 && get_arm_elf_section_data (osi
.sec
) != NULL
18102 && get_arm_elf_section_data (osi
.sec
)->mapcount
== 0
18103 && osi
.sec
->size
> 0
18104 && (osi
.sec
->flags
& SEC_EXCLUDE
) == 0)
18106 osi
.sec_shndx
= _bfd_elf_section_from_bfd_section
18107 (output_bfd
, osi
.sec
->output_section
);
18108 if (osi
.sec_shndx
!= (int)SHN_BAD
)
18109 elf32_arm_output_map_sym (&osi
, ARM_MAP_DATA
, 0);
18114 /* ARM->Thumb glue. */
18115 if (htab
->arm_glue_size
> 0)
18117 osi
.sec
= bfd_get_linker_section (htab
->bfd_of_glue_owner
,
18118 ARM2THUMB_GLUE_SECTION_NAME
);
18120 osi
.sec_shndx
= _bfd_elf_section_from_bfd_section
18121 (output_bfd
, osi
.sec
->output_section
);
18122 if (bfd_link_pic (info
) || htab
->root
.is_relocatable_executable
18123 || htab
->pic_veneer
)
18124 size
= ARM2THUMB_PIC_GLUE_SIZE
;
18125 else if (htab
->use_blx
)
18126 size
= ARM2THUMB_V5_STATIC_GLUE_SIZE
;
18128 size
= ARM2THUMB_STATIC_GLUE_SIZE
;
18130 for (offset
= 0; offset
< htab
->arm_glue_size
; offset
+= size
)
18132 elf32_arm_output_map_sym (&osi
, ARM_MAP_ARM
, offset
);
18133 elf32_arm_output_map_sym (&osi
, ARM_MAP_DATA
, offset
+ size
- 4);
18137 /* Thumb->ARM glue. */
18138 if (htab
->thumb_glue_size
> 0)
18140 osi
.sec
= bfd_get_linker_section (htab
->bfd_of_glue_owner
,
18141 THUMB2ARM_GLUE_SECTION_NAME
);
18143 osi
.sec_shndx
= _bfd_elf_section_from_bfd_section
18144 (output_bfd
, osi
.sec
->output_section
);
18145 size
= THUMB2ARM_GLUE_SIZE
;
18147 for (offset
= 0; offset
< htab
->thumb_glue_size
; offset
+= size
)
18149 elf32_arm_output_map_sym (&osi
, ARM_MAP_THUMB
, offset
);
18150 elf32_arm_output_map_sym (&osi
, ARM_MAP_ARM
, offset
+ 4);
18154 /* ARMv4 BX veneers. */
18155 if (htab
->bx_glue_size
> 0)
18157 osi
.sec
= bfd_get_linker_section (htab
->bfd_of_glue_owner
,
18158 ARM_BX_GLUE_SECTION_NAME
);
18160 osi
.sec_shndx
= _bfd_elf_section_from_bfd_section
18161 (output_bfd
, osi
.sec
->output_section
);
18163 elf32_arm_output_map_sym (&osi
, ARM_MAP_ARM
, 0);
18166 /* Long calls stubs. */
18167 if (htab
->stub_bfd
&& htab
->stub_bfd
->sections
)
18169 asection
* stub_sec
;
18171 for (stub_sec
= htab
->stub_bfd
->sections
;
18173 stub_sec
= stub_sec
->next
)
18175 /* Ignore non-stub sections. */
18176 if (!strstr (stub_sec
->name
, STUB_SUFFIX
))
18179 osi
.sec
= stub_sec
;
18181 osi
.sec_shndx
= _bfd_elf_section_from_bfd_section
18182 (output_bfd
, osi
.sec
->output_section
);
18184 bfd_hash_traverse (&htab
->stub_hash_table
, arm_map_one_stub
, &osi
);
18188 /* Finally, output mapping symbols for the PLT. */
18189 if (htab
->root
.splt
&& htab
->root
.splt
->size
> 0)
18191 osi
.sec
= htab
->root
.splt
;
18192 osi
.sec_shndx
= (_bfd_elf_section_from_bfd_section
18193 (output_bfd
, osi
.sec
->output_section
));
18195 /* Output mapping symbols for the plt header. SymbianOS does not have a
18197 if (htab
->vxworks_p
)
18199 /* VxWorks shared libraries have no PLT header. */
18200 if (!bfd_link_pic (info
))
18202 if (!elf32_arm_output_map_sym (&osi
, ARM_MAP_ARM
, 0))
18204 if (!elf32_arm_output_map_sym (&osi
, ARM_MAP_DATA
, 12))
18208 else if (htab
->nacl_p
)
18210 if (!elf32_arm_output_map_sym (&osi
, ARM_MAP_ARM
, 0))
18213 else if (using_thumb_only (htab
) && !htab
->fdpic_p
)
18215 if (!elf32_arm_output_map_sym (&osi
, ARM_MAP_THUMB
, 0))
18217 if (!elf32_arm_output_map_sym (&osi
, ARM_MAP_DATA
, 12))
18219 if (!elf32_arm_output_map_sym (&osi
, ARM_MAP_THUMB
, 16))
18222 else if (!htab
->symbian_p
&& !htab
->fdpic_p
)
18224 if (!elf32_arm_output_map_sym (&osi
, ARM_MAP_ARM
, 0))
18226 #ifndef FOUR_WORD_PLT
18227 if (!elf32_arm_output_map_sym (&osi
, ARM_MAP_DATA
, 16))
18232 if (htab
->nacl_p
&& htab
->root
.iplt
&& htab
->root
.iplt
->size
> 0)
18234 /* NaCl uses a special first entry in .iplt too. */
18235 osi
.sec
= htab
->root
.iplt
;
18236 osi
.sec_shndx
= (_bfd_elf_section_from_bfd_section
18237 (output_bfd
, osi
.sec
->output_section
));
18238 if (!elf32_arm_output_map_sym (&osi
, ARM_MAP_ARM
, 0))
18241 if ((htab
->root
.splt
&& htab
->root
.splt
->size
> 0)
18242 || (htab
->root
.iplt
&& htab
->root
.iplt
->size
> 0))
18244 elf_link_hash_traverse (&htab
->root
, elf32_arm_output_plt_map
, &osi
);
18245 for (input_bfd
= info
->input_bfds
;
18247 input_bfd
= input_bfd
->link
.next
)
18249 struct arm_local_iplt_info
**local_iplt
;
18250 unsigned int i
, num_syms
;
18252 local_iplt
= elf32_arm_local_iplt (input_bfd
);
18253 if (local_iplt
!= NULL
)
18255 num_syms
= elf_symtab_hdr (input_bfd
).sh_info
;
18256 for (i
= 0; i
< num_syms
; i
++)
18257 if (local_iplt
[i
] != NULL
18258 && !elf32_arm_output_plt_map_1 (&osi
, TRUE
,
18259 &local_iplt
[i
]->root
,
18260 &local_iplt
[i
]->arm
))
18265 if (htab
->dt_tlsdesc_plt
!= 0)
18267 /* Mapping symbols for the lazy tls trampoline. */
18268 if (!elf32_arm_output_map_sym (&osi
, ARM_MAP_ARM
, htab
->dt_tlsdesc_plt
))
18271 if (!elf32_arm_output_map_sym (&osi
, ARM_MAP_DATA
,
18272 htab
->dt_tlsdesc_plt
+ 24))
18275 if (htab
->tls_trampoline
!= 0)
18277 /* Mapping symbols for the tls trampoline. */
18278 if (!elf32_arm_output_map_sym (&osi
, ARM_MAP_ARM
, htab
->tls_trampoline
))
18280 #ifdef FOUR_WORD_PLT
18281 if (!elf32_arm_output_map_sym (&osi
, ARM_MAP_DATA
,
18282 htab
->tls_trampoline
+ 12))
18290 /* Filter normal symbols of CMSE entry functions of ABFD to include in
18291 the import library. All SYMCOUNT symbols of ABFD can be examined
18292 from their pointers in SYMS. Pointers of symbols to keep should be
18293 stored continuously at the beginning of that array.
18295 Returns the number of symbols to keep. */
18297 static unsigned int
18298 elf32_arm_filter_cmse_symbols (bfd
*abfd ATTRIBUTE_UNUSED
,
18299 struct bfd_link_info
*info
,
18300 asymbol
**syms
, long symcount
)
18304 long src_count
, dst_count
= 0;
18305 struct elf32_arm_link_hash_table
*htab
;
18307 htab
= elf32_arm_hash_table (info
);
18308 if (!htab
->stub_bfd
|| !htab
->stub_bfd
->sections
)
18312 cmse_name
= (char *) bfd_malloc (maxnamelen
);
18313 for (src_count
= 0; src_count
< symcount
; src_count
++)
18315 struct elf32_arm_link_hash_entry
*cmse_hash
;
18321 sym
= syms
[src_count
];
18322 flags
= sym
->flags
;
18323 name
= (char *) bfd_asymbol_name (sym
);
18325 if ((flags
& BSF_FUNCTION
) != BSF_FUNCTION
)
18327 if (!(flags
& (BSF_GLOBAL
| BSF_WEAK
)))
18330 namelen
= strlen (name
) + sizeof (CMSE_PREFIX
) + 1;
18331 if (namelen
> maxnamelen
)
18333 cmse_name
= (char *)
18334 bfd_realloc (cmse_name
, namelen
);
18335 maxnamelen
= namelen
;
18337 snprintf (cmse_name
, maxnamelen
, "%s%s", CMSE_PREFIX
, name
);
18338 cmse_hash
= (struct elf32_arm_link_hash_entry
*)
18339 elf_link_hash_lookup (&(htab
)->root
, cmse_name
, FALSE
, FALSE
, TRUE
);
18342 || (cmse_hash
->root
.root
.type
!= bfd_link_hash_defined
18343 && cmse_hash
->root
.root
.type
!= bfd_link_hash_defweak
)
18344 || cmse_hash
->root
.type
!= STT_FUNC
)
18347 if (!ARM_GET_SYM_CMSE_SPCL (cmse_hash
->root
.target_internal
))
18350 syms
[dst_count
++] = sym
;
18354 syms
[dst_count
] = NULL
;
18359 /* Filter symbols of ABFD to include in the import library. All
18360 SYMCOUNT symbols of ABFD can be examined from their pointers in
18361 SYMS. Pointers of symbols to keep should be stored continuously at
18362 the beginning of that array.
18364 Returns the number of symbols to keep. */
18366 static unsigned int
18367 elf32_arm_filter_implib_symbols (bfd
*abfd ATTRIBUTE_UNUSED
,
18368 struct bfd_link_info
*info
,
18369 asymbol
**syms
, long symcount
)
18371 struct elf32_arm_link_hash_table
*globals
= elf32_arm_hash_table (info
);
18373 /* Requirement 8 of "ARM v8-M Security Extensions: Requirements on
18374 Development Tools" (ARM-ECM-0359818) mandates Secure Gateway import
18375 library to be a relocatable object file. */
18376 BFD_ASSERT (!(bfd_get_file_flags (info
->out_implib_bfd
) & EXEC_P
));
18377 if (globals
->cmse_implib
)
18378 return elf32_arm_filter_cmse_symbols (abfd
, info
, syms
, symcount
);
18380 return _bfd_elf_filter_global_symbols (abfd
, info
, syms
, symcount
);
18383 /* Allocate target specific section data. */
18386 elf32_arm_new_section_hook (bfd
*abfd
, asection
*sec
)
18388 if (!sec
->used_by_bfd
)
18390 _arm_elf_section_data
*sdata
;
18391 bfd_size_type amt
= sizeof (*sdata
);
18393 sdata
= (_arm_elf_section_data
*) bfd_zalloc (abfd
, amt
);
18396 sec
->used_by_bfd
= sdata
;
18399 return _bfd_elf_new_section_hook (abfd
, sec
);
18403 /* Used to order a list of mapping symbols by address. */
18406 elf32_arm_compare_mapping (const void * a
, const void * b
)
18408 const elf32_arm_section_map
*amap
= (const elf32_arm_section_map
*) a
;
18409 const elf32_arm_section_map
*bmap
= (const elf32_arm_section_map
*) b
;
18411 if (amap
->vma
> bmap
->vma
)
18413 else if (amap
->vma
< bmap
->vma
)
18415 else if (amap
->type
> bmap
->type
)
18416 /* Ensure results do not depend on the host qsort for objects with
18417 multiple mapping symbols at the same address by sorting on type
18420 else if (amap
->type
< bmap
->type
)
18426 /* Add OFFSET to lower 31 bits of ADDR, leaving other bits unmodified. */
18428 static unsigned long
18429 offset_prel31 (unsigned long addr
, bfd_vma offset
)
18431 return (addr
& ~0x7ffffffful
) | ((addr
+ offset
) & 0x7ffffffful
);
18434 /* Copy an .ARM.exidx table entry, adding OFFSET to (applied) PREL31
18438 copy_exidx_entry (bfd
*output_bfd
, bfd_byte
*to
, bfd_byte
*from
, bfd_vma offset
)
18440 unsigned long first_word
= bfd_get_32 (output_bfd
, from
);
18441 unsigned long second_word
= bfd_get_32 (output_bfd
, from
+ 4);
18443 /* High bit of first word is supposed to be zero. */
18444 if ((first_word
& 0x80000000ul
) == 0)
18445 first_word
= offset_prel31 (first_word
, offset
);
18447 /* If the high bit of the first word is clear, and the bit pattern is not 0x1
18448 (EXIDX_CANTUNWIND), this is an offset to an .ARM.extab entry. */
18449 if ((second_word
!= 0x1) && ((second_word
& 0x80000000ul
) == 0))
18450 second_word
= offset_prel31 (second_word
, offset
);
18452 bfd_put_32 (output_bfd
, first_word
, to
);
18453 bfd_put_32 (output_bfd
, second_word
, to
+ 4);
18456 /* Data for make_branch_to_a8_stub(). */
18458 struct a8_branch_to_stub_data
18460 asection
*writing_section
;
18461 bfd_byte
*contents
;
18465 /* Helper to insert branches to Cortex-A8 erratum stubs in the right
18466 places for a particular section. */
18469 make_branch_to_a8_stub (struct bfd_hash_entry
*gen_entry
,
18472 struct elf32_arm_stub_hash_entry
*stub_entry
;
18473 struct a8_branch_to_stub_data
*data
;
18474 bfd_byte
*contents
;
18475 unsigned long branch_insn
;
18476 bfd_vma veneered_insn_loc
, veneer_entry_loc
;
18477 bfd_signed_vma branch_offset
;
18481 stub_entry
= (struct elf32_arm_stub_hash_entry
*) gen_entry
;
18482 data
= (struct a8_branch_to_stub_data
*) in_arg
;
18484 if (stub_entry
->target_section
!= data
->writing_section
18485 || stub_entry
->stub_type
< arm_stub_a8_veneer_lwm
)
18488 contents
= data
->contents
;
18490 /* We use target_section as Cortex-A8 erratum workaround stubs are only
18491 generated when both source and target are in the same section. */
18492 veneered_insn_loc
= stub_entry
->target_section
->output_section
->vma
18493 + stub_entry
->target_section
->output_offset
18494 + stub_entry
->source_value
;
18496 veneer_entry_loc
= stub_entry
->stub_sec
->output_section
->vma
18497 + stub_entry
->stub_sec
->output_offset
18498 + stub_entry
->stub_offset
;
18500 if (stub_entry
->stub_type
== arm_stub_a8_veneer_blx
)
18501 veneered_insn_loc
&= ~3u;
18503 branch_offset
= veneer_entry_loc
- veneered_insn_loc
- 4;
18505 abfd
= stub_entry
->target_section
->owner
;
18506 loc
= stub_entry
->source_value
;
18508 /* We attempt to avoid this condition by setting stubs_always_after_branch
18509 in elf32_arm_size_stubs if we've enabled the Cortex-A8 erratum workaround.
18510 This check is just to be on the safe side... */
18511 if ((veneered_insn_loc
& ~0xfff) == (veneer_entry_loc
& ~0xfff))
18513 _bfd_error_handler (_("%pB: error: Cortex-A8 erratum stub is "
18514 "allocated in unsafe location"), abfd
);
18518 switch (stub_entry
->stub_type
)
18520 case arm_stub_a8_veneer_b
:
18521 case arm_stub_a8_veneer_b_cond
:
18522 branch_insn
= 0xf0009000;
18525 case arm_stub_a8_veneer_blx
:
18526 branch_insn
= 0xf000e800;
18529 case arm_stub_a8_veneer_bl
:
18531 unsigned int i1
, j1
, i2
, j2
, s
;
18533 branch_insn
= 0xf000d000;
18536 if (branch_offset
< -16777216 || branch_offset
> 16777214)
18538 /* There's not much we can do apart from complain if this
18540 _bfd_error_handler (_("%pB: error: Cortex-A8 erratum stub out "
18541 "of range (input file too large)"), abfd
);
18545 /* i1 = not(j1 eor s), so:
18547 j1 = (not i1) eor s. */
18549 branch_insn
|= (branch_offset
>> 1) & 0x7ff;
18550 branch_insn
|= ((branch_offset
>> 12) & 0x3ff) << 16;
18551 i2
= (branch_offset
>> 22) & 1;
18552 i1
= (branch_offset
>> 23) & 1;
18553 s
= (branch_offset
>> 24) & 1;
18556 branch_insn
|= j2
<< 11;
18557 branch_insn
|= j1
<< 13;
18558 branch_insn
|= s
<< 26;
18567 bfd_put_16 (abfd
, (branch_insn
>> 16) & 0xffff, &contents
[loc
]);
18568 bfd_put_16 (abfd
, branch_insn
& 0xffff, &contents
[loc
+ 2]);
18573 /* Beginning of stm32l4xx work-around. */
18575 /* Functions encoding instructions necessary for the emission of the
18576 fix-stm32l4xx-629360.
18577 Encoding is extracted from the
18578 ARM (C) Architecture Reference Manual
18579 ARMv7-A and ARMv7-R edition
18580 ARM DDI 0406C.b (ID072512). */
18582 static inline bfd_vma
18583 create_instruction_branch_absolute (int branch_offset
)
18585 /* A8.8.18 B (A8-334)
18586 B target_address (Encoding T4). */
18587 /* 1111 - 0Sii - iiii - iiii - 10J1 - Jiii - iiii - iiii. */
18588 /* jump offset is: S:I1:I2:imm10:imm11:0. */
18589 /* with : I1 = NOT (J1 EOR S) I2 = NOT (J2 EOR S). */
18591 int s
= ((branch_offset
& 0x1000000) >> 24);
18592 int j1
= s
^ !((branch_offset
& 0x800000) >> 23);
18593 int j2
= s
^ !((branch_offset
& 0x400000) >> 22);
18595 if (branch_offset
< -(1 << 24) || branch_offset
>= (1 << 24))
18596 BFD_ASSERT (0 && "Error: branch out of range. Cannot create branch.");
18598 bfd_vma patched_inst
= 0xf0009000
18600 | (((unsigned long) (branch_offset
) >> 12) & 0x3ff) << 16 /* imm10. */
18601 | j1
<< 13 /* J1. */
18602 | j2
<< 11 /* J2. */
18603 | (((unsigned long) (branch_offset
) >> 1) & 0x7ff); /* imm11. */
18605 return patched_inst
;
18608 static inline bfd_vma
18609 create_instruction_ldmia (int base_reg
, int wback
, int reg_mask
)
18611 /* A8.8.57 LDM/LDMIA/LDMFD (A8-396)
18612 LDMIA Rn!, {Ra, Rb, Rc, ...} (Encoding T2). */
18613 bfd_vma patched_inst
= 0xe8900000
18614 | (/*W=*/wback
<< 21)
18616 | (reg_mask
& 0x0000ffff);
18618 return patched_inst
;
18621 static inline bfd_vma
18622 create_instruction_ldmdb (int base_reg
, int wback
, int reg_mask
)
18624 /* A8.8.60 LDMDB/LDMEA (A8-402)
18625 LDMDB Rn!, {Ra, Rb, Rc, ...} (Encoding T1). */
18626 bfd_vma patched_inst
= 0xe9100000
18627 | (/*W=*/wback
<< 21)
18629 | (reg_mask
& 0x0000ffff);
18631 return patched_inst
;
18634 static inline bfd_vma
18635 create_instruction_mov (int target_reg
, int source_reg
)
18637 /* A8.8.103 MOV (register) (A8-486)
18638 MOV Rd, Rm (Encoding T1). */
18639 bfd_vma patched_inst
= 0x4600
18640 | (target_reg
& 0x7)
18641 | ((target_reg
& 0x8) >> 3) << 7
18642 | (source_reg
<< 3);
18644 return patched_inst
;
18647 static inline bfd_vma
18648 create_instruction_sub (int target_reg
, int source_reg
, int value
)
18650 /* A8.8.221 SUB (immediate) (A8-708)
18651 SUB Rd, Rn, #value (Encoding T3). */
18652 bfd_vma patched_inst
= 0xf1a00000
18653 | (target_reg
<< 8)
18654 | (source_reg
<< 16)
18656 | ((value
& 0x800) >> 11) << 26
18657 | ((value
& 0x700) >> 8) << 12
18660 return patched_inst
;
18663 static inline bfd_vma
18664 create_instruction_vldmia (int base_reg
, int is_dp
, int wback
, int num_words
,
18667 /* A8.8.332 VLDM (A8-922)
18668 VLMD{MODE} Rn{!}, {list} (Encoding T1 or T2). */
18669 bfd_vma patched_inst
= (is_dp
? 0xec900b00 : 0xec900a00)
18670 | (/*W=*/wback
<< 21)
18672 | (num_words
& 0x000000ff)
18673 | (((unsigned)first_reg
>> 1) & 0x0000000f) << 12
18674 | (first_reg
& 0x00000001) << 22;
18676 return patched_inst
;
18679 static inline bfd_vma
18680 create_instruction_vldmdb (int base_reg
, int is_dp
, int num_words
,
18683 /* A8.8.332 VLDM (A8-922)
18684 VLMD{MODE} Rn!, {} (Encoding T1 or T2). */
18685 bfd_vma patched_inst
= (is_dp
? 0xed300b00 : 0xed300a00)
18687 | (num_words
& 0x000000ff)
18688 | (((unsigned)first_reg
>>1 ) & 0x0000000f) << 12
18689 | (first_reg
& 0x00000001) << 22;
18691 return patched_inst
;
18694 static inline bfd_vma
18695 create_instruction_udf_w (int value
)
18697 /* A8.8.247 UDF (A8-758)
18698 Undefined (Encoding T2). */
18699 bfd_vma patched_inst
= 0xf7f0a000
18700 | (value
& 0x00000fff)
18701 | (value
& 0x000f0000) << 16;
18703 return patched_inst
;
18706 static inline bfd_vma
18707 create_instruction_udf (int value
)
18709 /* A8.8.247 UDF (A8-758)
18710 Undefined (Encoding T1). */
18711 bfd_vma patched_inst
= 0xde00
18714 return patched_inst
;
18717 /* Functions writing an instruction in memory, returning the next
18718 memory position to write to. */
18720 static inline bfd_byte
*
18721 push_thumb2_insn32 (struct elf32_arm_link_hash_table
* htab
,
18722 bfd
* output_bfd
, bfd_byte
*pt
, insn32 insn
)
18724 put_thumb2_insn (htab
, output_bfd
, insn
, pt
);
18728 static inline bfd_byte
*
18729 push_thumb2_insn16 (struct elf32_arm_link_hash_table
* htab
,
18730 bfd
* output_bfd
, bfd_byte
*pt
, insn32 insn
)
18732 put_thumb_insn (htab
, output_bfd
, insn
, pt
);
18736 /* Function filling up a region in memory with T1 and T2 UDFs taking
18737 care of alignment. */
18740 stm32l4xx_fill_stub_udf (struct elf32_arm_link_hash_table
* htab
,
18742 const bfd_byte
* const base_stub_contents
,
18743 bfd_byte
* const from_stub_contents
,
18744 const bfd_byte
* const end_stub_contents
)
18746 bfd_byte
*current_stub_contents
= from_stub_contents
;
18748 /* Fill the remaining of the stub with deterministic contents : UDF
18750 Check if realignment is needed on modulo 4 frontier using T1, to
18752 if ((current_stub_contents
< end_stub_contents
)
18753 && !((current_stub_contents
- base_stub_contents
) % 2)
18754 && ((current_stub_contents
- base_stub_contents
) % 4))
18755 current_stub_contents
=
18756 push_thumb2_insn16 (htab
, output_bfd
, current_stub_contents
,
18757 create_instruction_udf (0));
18759 for (; current_stub_contents
< end_stub_contents
;)
18760 current_stub_contents
=
18761 push_thumb2_insn32 (htab
, output_bfd
, current_stub_contents
,
18762 create_instruction_udf_w (0));
18764 return current_stub_contents
;
18767 /* Functions writing the stream of instructions equivalent to the
18768 derived sequence for ldmia, ldmdb, vldm respectively. */
18771 stm32l4xx_create_replacing_stub_ldmia (struct elf32_arm_link_hash_table
* htab
,
18773 const insn32 initial_insn
,
18774 const bfd_byte
*const initial_insn_addr
,
18775 bfd_byte
*const base_stub_contents
)
18777 int wback
= (initial_insn
& 0x00200000) >> 21;
18778 int ri
, rn
= (initial_insn
& 0x000F0000) >> 16;
18779 int insn_all_registers
= initial_insn
& 0x0000ffff;
18780 int insn_low_registers
, insn_high_registers
;
18781 int usable_register_mask
;
18782 int nb_registers
= elf32_arm_popcount (insn_all_registers
);
18783 int restore_pc
= (insn_all_registers
& (1 << 15)) ? 1 : 0;
18784 int restore_rn
= (insn_all_registers
& (1 << rn
)) ? 1 : 0;
18785 bfd_byte
*current_stub_contents
= base_stub_contents
;
18787 BFD_ASSERT (is_thumb2_ldmia (initial_insn
));
18789 /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with
18790 smaller than 8 registers load sequences that do not cause the
18792 if (nb_registers
<= 8)
18794 /* UNTOUCHED : LDMIA Rn{!}, {R-all-register-list}. */
18795 current_stub_contents
=
18796 push_thumb2_insn32 (htab
, output_bfd
, current_stub_contents
,
18799 /* B initial_insn_addr+4. */
18801 current_stub_contents
=
18802 push_thumb2_insn32 (htab
, output_bfd
, current_stub_contents
,
18803 create_instruction_branch_absolute
18804 (initial_insn_addr
- current_stub_contents
));
18806 /* Fill the remaining of the stub with deterministic contents. */
18807 current_stub_contents
=
18808 stm32l4xx_fill_stub_udf (htab
, output_bfd
,
18809 base_stub_contents
, current_stub_contents
,
18810 base_stub_contents
+
18811 STM32L4XX_ERRATUM_LDM_VENEER_SIZE
);
18816 /* - reg_list[13] == 0. */
18817 BFD_ASSERT ((insn_all_registers
& (1 << 13))==0);
18819 /* - reg_list[14] & reg_list[15] != 1. */
18820 BFD_ASSERT ((insn_all_registers
& 0xC000) != 0xC000);
18822 /* - if (wback==1) reg_list[rn] == 0. */
18823 BFD_ASSERT (!wback
|| !restore_rn
);
18825 /* - nb_registers > 8. */
18826 BFD_ASSERT (elf32_arm_popcount (insn_all_registers
) > 8);
18828 /* At this point, LDMxx initial insn loads between 9 and 14 registers. */
18830 /* In the following algorithm, we split this wide LDM using 2 LDM insns:
18831 - One with the 7 lowest registers (register mask 0x007F)
18832 This LDM will finally contain between 2 and 7 registers
18833 - One with the 7 highest registers (register mask 0xDF80)
18834 This ldm will finally contain between 2 and 7 registers. */
18835 insn_low_registers
= insn_all_registers
& 0x007F;
18836 insn_high_registers
= insn_all_registers
& 0xDF80;
18838 /* A spare register may be needed during this veneer to temporarily
18839 handle the base register. This register will be restored with the
18840 last LDM operation.
18841 The usable register may be any general purpose register (that
18842 excludes PC, SP, LR : register mask is 0x1FFF). */
18843 usable_register_mask
= 0x1FFF;
18845 /* Generate the stub function. */
18848 /* LDMIA Rn!, {R-low-register-list} : (Encoding T2). */
18849 current_stub_contents
=
18850 push_thumb2_insn32 (htab
, output_bfd
, current_stub_contents
,
18851 create_instruction_ldmia
18852 (rn
, /*wback=*/1, insn_low_registers
));
18854 /* LDMIA Rn!, {R-high-register-list} : (Encoding T2). */
18855 current_stub_contents
=
18856 push_thumb2_insn32 (htab
, output_bfd
, current_stub_contents
,
18857 create_instruction_ldmia
18858 (rn
, /*wback=*/1, insn_high_registers
));
18861 /* B initial_insn_addr+4. */
18862 current_stub_contents
=
18863 push_thumb2_insn32 (htab
, output_bfd
, current_stub_contents
,
18864 create_instruction_branch_absolute
18865 (initial_insn_addr
- current_stub_contents
));
18868 else /* if (!wback). */
18872 /* If Rn is not part of the high-register-list, move it there. */
18873 if (!(insn_high_registers
& (1 << rn
)))
18875 /* Choose a Ri in the high-register-list that will be restored. */
18876 ri
= ctz (insn_high_registers
& usable_register_mask
& ~(1 << rn
));
18879 current_stub_contents
=
18880 push_thumb2_insn16 (htab
, output_bfd
, current_stub_contents
,
18881 create_instruction_mov (ri
, rn
));
18884 /* LDMIA Ri!, {R-low-register-list} : (Encoding T2). */
18885 current_stub_contents
=
18886 push_thumb2_insn32 (htab
, output_bfd
, current_stub_contents
,
18887 create_instruction_ldmia
18888 (ri
, /*wback=*/1, insn_low_registers
));
18890 /* LDMIA Ri, {R-high-register-list} : (Encoding T2). */
18891 current_stub_contents
=
18892 push_thumb2_insn32 (htab
, output_bfd
, current_stub_contents
,
18893 create_instruction_ldmia
18894 (ri
, /*wback=*/0, insn_high_registers
));
18898 /* B initial_insn_addr+4. */
18899 current_stub_contents
=
18900 push_thumb2_insn32 (htab
, output_bfd
, current_stub_contents
,
18901 create_instruction_branch_absolute
18902 (initial_insn_addr
- current_stub_contents
));
18906 /* Fill the remaining of the stub with deterministic contents. */
18907 current_stub_contents
=
18908 stm32l4xx_fill_stub_udf (htab
, output_bfd
,
18909 base_stub_contents
, current_stub_contents
,
18910 base_stub_contents
+
18911 STM32L4XX_ERRATUM_LDM_VENEER_SIZE
);
18915 stm32l4xx_create_replacing_stub_ldmdb (struct elf32_arm_link_hash_table
* htab
,
18917 const insn32 initial_insn
,
18918 const bfd_byte
*const initial_insn_addr
,
18919 bfd_byte
*const base_stub_contents
)
18921 int wback
= (initial_insn
& 0x00200000) >> 21;
18922 int ri
, rn
= (initial_insn
& 0x000f0000) >> 16;
18923 int insn_all_registers
= initial_insn
& 0x0000ffff;
18924 int insn_low_registers
, insn_high_registers
;
18925 int usable_register_mask
;
18926 int restore_pc
= (insn_all_registers
& (1 << 15)) ? 1 : 0;
18927 int restore_rn
= (insn_all_registers
& (1 << rn
)) ? 1 : 0;
18928 int nb_registers
= elf32_arm_popcount (insn_all_registers
);
18929 bfd_byte
*current_stub_contents
= base_stub_contents
;
18931 BFD_ASSERT (is_thumb2_ldmdb (initial_insn
));
18933 /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with
18934 smaller than 8 registers load sequences that do not cause the
18936 if (nb_registers
<= 8)
18938 /* UNTOUCHED : LDMIA Rn{!}, {R-all-register-list}. */
18939 current_stub_contents
=
18940 push_thumb2_insn32 (htab
, output_bfd
, current_stub_contents
,
18943 /* B initial_insn_addr+4. */
18944 current_stub_contents
=
18945 push_thumb2_insn32 (htab
, output_bfd
, current_stub_contents
,
18946 create_instruction_branch_absolute
18947 (initial_insn_addr
- current_stub_contents
));
18949 /* Fill the remaining of the stub with deterministic contents. */
18950 current_stub_contents
=
18951 stm32l4xx_fill_stub_udf (htab
, output_bfd
,
18952 base_stub_contents
, current_stub_contents
,
18953 base_stub_contents
+
18954 STM32L4XX_ERRATUM_LDM_VENEER_SIZE
);
18959 /* - reg_list[13] == 0. */
18960 BFD_ASSERT ((insn_all_registers
& (1 << 13)) == 0);
18962 /* - reg_list[14] & reg_list[15] != 1. */
18963 BFD_ASSERT ((insn_all_registers
& 0xC000) != 0xC000);
18965 /* - if (wback==1) reg_list[rn] == 0. */
18966 BFD_ASSERT (!wback
|| !restore_rn
);
18968 /* - nb_registers > 8. */
18969 BFD_ASSERT (elf32_arm_popcount (insn_all_registers
) > 8);
18971 /* At this point, LDMxx initial insn loads between 9 and 14 registers. */
18973 /* In the following algorithm, we split this wide LDM using 2 LDM insn:
18974 - One with the 7 lowest registers (register mask 0x007F)
18975 This LDM will finally contain between 2 and 7 registers
18976 - One with the 7 highest registers (register mask 0xDF80)
18977 This ldm will finally contain between 2 and 7 registers. */
18978 insn_low_registers
= insn_all_registers
& 0x007F;
18979 insn_high_registers
= insn_all_registers
& 0xDF80;
18981 /* A spare register may be needed during this veneer to temporarily
18982 handle the base register. This register will be restored with
18983 the last LDM operation.
18984 The usable register may be any general purpose register (that excludes
18985 PC, SP, LR : register mask is 0x1FFF). */
18986 usable_register_mask
= 0x1FFF;
18988 /* Generate the stub function. */
18989 if (!wback
&& !restore_pc
&& !restore_rn
)
18991 /* Choose a Ri in the low-register-list that will be restored. */
18992 ri
= ctz (insn_low_registers
& usable_register_mask
& ~(1 << rn
));
18995 current_stub_contents
=
18996 push_thumb2_insn16 (htab
, output_bfd
, current_stub_contents
,
18997 create_instruction_mov (ri
, rn
));
18999 /* LDMDB Ri!, {R-high-register-list}. */
19000 current_stub_contents
=
19001 push_thumb2_insn32 (htab
, output_bfd
, current_stub_contents
,
19002 create_instruction_ldmdb
19003 (ri
, /*wback=*/1, insn_high_registers
));
19005 /* LDMDB Ri, {R-low-register-list}. */
19006 current_stub_contents
=
19007 push_thumb2_insn32 (htab
, output_bfd
, current_stub_contents
,
19008 create_instruction_ldmdb
19009 (ri
, /*wback=*/0, insn_low_registers
));
19011 /* B initial_insn_addr+4. */
19012 current_stub_contents
=
19013 push_thumb2_insn32 (htab
, output_bfd
, current_stub_contents
,
19014 create_instruction_branch_absolute
19015 (initial_insn_addr
- current_stub_contents
));
19017 else if (wback
&& !restore_pc
&& !restore_rn
)
19019 /* LDMDB Rn!, {R-high-register-list}. */
19020 current_stub_contents
=
19021 push_thumb2_insn32 (htab
, output_bfd
, current_stub_contents
,
19022 create_instruction_ldmdb
19023 (rn
, /*wback=*/1, insn_high_registers
));
19025 /* LDMDB Rn!, {R-low-register-list}. */
19026 current_stub_contents
=
19027 push_thumb2_insn32 (htab
, output_bfd
, current_stub_contents
,
19028 create_instruction_ldmdb
19029 (rn
, /*wback=*/1, insn_low_registers
));
19031 /* B initial_insn_addr+4. */
19032 current_stub_contents
=
19033 push_thumb2_insn32 (htab
, output_bfd
, current_stub_contents
,
19034 create_instruction_branch_absolute
19035 (initial_insn_addr
- current_stub_contents
));
19037 else if (!wback
&& restore_pc
&& !restore_rn
)
19039 /* Choose a Ri in the high-register-list that will be restored. */
19040 ri
= ctz (insn_high_registers
& usable_register_mask
& ~(1 << rn
));
19042 /* SUB Ri, Rn, #(4*nb_registers). */
19043 current_stub_contents
=
19044 push_thumb2_insn32 (htab
, output_bfd
, current_stub_contents
,
19045 create_instruction_sub (ri
, rn
, (4 * nb_registers
)));
19047 /* LDMIA Ri!, {R-low-register-list}. */
19048 current_stub_contents
=
19049 push_thumb2_insn32 (htab
, output_bfd
, current_stub_contents
,
19050 create_instruction_ldmia
19051 (ri
, /*wback=*/1, insn_low_registers
));
19053 /* LDMIA Ri, {R-high-register-list}. */
19054 current_stub_contents
=
19055 push_thumb2_insn32 (htab
, output_bfd
, current_stub_contents
,
19056 create_instruction_ldmia
19057 (ri
, /*wback=*/0, insn_high_registers
));
19059 else if (wback
&& restore_pc
&& !restore_rn
)
19061 /* Choose a Ri in the high-register-list that will be restored. */
19062 ri
= ctz (insn_high_registers
& usable_register_mask
& ~(1 << rn
));
19064 /* SUB Rn, Rn, #(4*nb_registers) */
19065 current_stub_contents
=
19066 push_thumb2_insn32 (htab
, output_bfd
, current_stub_contents
,
19067 create_instruction_sub (rn
, rn
, (4 * nb_registers
)));
19070 current_stub_contents
=
19071 push_thumb2_insn16 (htab
, output_bfd
, current_stub_contents
,
19072 create_instruction_mov (ri
, rn
));
19074 /* LDMIA Ri!, {R-low-register-list}. */
19075 current_stub_contents
=
19076 push_thumb2_insn32 (htab
, output_bfd
, current_stub_contents
,
19077 create_instruction_ldmia
19078 (ri
, /*wback=*/1, insn_low_registers
));
19080 /* LDMIA Ri, {R-high-register-list}. */
19081 current_stub_contents
=
19082 push_thumb2_insn32 (htab
, output_bfd
, current_stub_contents
,
19083 create_instruction_ldmia
19084 (ri
, /*wback=*/0, insn_high_registers
));
19086 else if (!wback
&& !restore_pc
&& restore_rn
)
19089 if (!(insn_low_registers
& (1 << rn
)))
19091 /* Choose a Ri in the low-register-list that will be restored. */
19092 ri
= ctz (insn_low_registers
& usable_register_mask
& ~(1 << rn
));
19095 current_stub_contents
=
19096 push_thumb2_insn16 (htab
, output_bfd
, current_stub_contents
,
19097 create_instruction_mov (ri
, rn
));
19100 /* LDMDB Ri!, {R-high-register-list}. */
19101 current_stub_contents
=
19102 push_thumb2_insn32 (htab
, output_bfd
, current_stub_contents
,
19103 create_instruction_ldmdb
19104 (ri
, /*wback=*/1, insn_high_registers
));
19106 /* LDMDB Ri, {R-low-register-list}. */
19107 current_stub_contents
=
19108 push_thumb2_insn32 (htab
, output_bfd
, current_stub_contents
,
19109 create_instruction_ldmdb
19110 (ri
, /*wback=*/0, insn_low_registers
));
19112 /* B initial_insn_addr+4. */
19113 current_stub_contents
=
19114 push_thumb2_insn32 (htab
, output_bfd
, current_stub_contents
,
19115 create_instruction_branch_absolute
19116 (initial_insn_addr
- current_stub_contents
));
19118 else if (!wback
&& restore_pc
&& restore_rn
)
19121 if (!(insn_high_registers
& (1 << rn
)))
19123 /* Choose a Ri in the high-register-list that will be restored. */
19124 ri
= ctz (insn_high_registers
& usable_register_mask
& ~(1 << rn
));
19127 /* SUB Ri, Rn, #(4*nb_registers). */
19128 current_stub_contents
=
19129 push_thumb2_insn32 (htab
, output_bfd
, current_stub_contents
,
19130 create_instruction_sub (ri
, rn
, (4 * nb_registers
)));
19132 /* LDMIA Ri!, {R-low-register-list}. */
19133 current_stub_contents
=
19134 push_thumb2_insn32 (htab
, output_bfd
, current_stub_contents
,
19135 create_instruction_ldmia
19136 (ri
, /*wback=*/1, insn_low_registers
));
19138 /* LDMIA Ri, {R-high-register-list}. */
19139 current_stub_contents
=
19140 push_thumb2_insn32 (htab
, output_bfd
, current_stub_contents
,
19141 create_instruction_ldmia
19142 (ri
, /*wback=*/0, insn_high_registers
));
19144 else if (wback
&& restore_rn
)
19146 /* The assembler should not have accepted to encode this. */
19147 BFD_ASSERT (0 && "Cannot patch an instruction that has an "
19148 "undefined behavior.\n");
19151 /* Fill the remaining of the stub with deterministic contents. */
19152 current_stub_contents
=
19153 stm32l4xx_fill_stub_udf (htab
, output_bfd
,
19154 base_stub_contents
, current_stub_contents
,
19155 base_stub_contents
+
19156 STM32L4XX_ERRATUM_LDM_VENEER_SIZE
);
19161 stm32l4xx_create_replacing_stub_vldm (struct elf32_arm_link_hash_table
* htab
,
19163 const insn32 initial_insn
,
19164 const bfd_byte
*const initial_insn_addr
,
19165 bfd_byte
*const base_stub_contents
)
19167 int num_words
= ((unsigned int) initial_insn
<< 24) >> 24;
19168 bfd_byte
*current_stub_contents
= base_stub_contents
;
19170 BFD_ASSERT (is_thumb2_vldm (initial_insn
));
19172 /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with
19173 smaller than 8 words load sequences that do not cause the
19175 if (num_words
<= 8)
19177 /* Untouched instruction. */
19178 current_stub_contents
=
19179 push_thumb2_insn32 (htab
, output_bfd
, current_stub_contents
,
19182 /* B initial_insn_addr+4. */
19183 current_stub_contents
=
19184 push_thumb2_insn32 (htab
, output_bfd
, current_stub_contents
,
19185 create_instruction_branch_absolute
19186 (initial_insn_addr
- current_stub_contents
));
19190 bfd_boolean is_dp
= /* DP encoding. */
19191 (initial_insn
& 0xfe100f00) == 0xec100b00;
19192 bfd_boolean is_ia_nobang
= /* (IA without !). */
19193 (((initial_insn
<< 7) >> 28) & 0xd) == 0x4;
19194 bfd_boolean is_ia_bang
= /* (IA with !) - includes VPOP. */
19195 (((initial_insn
<< 7) >> 28) & 0xd) == 0x5;
19196 bfd_boolean is_db_bang
= /* (DB with !). */
19197 (((initial_insn
<< 7) >> 28) & 0xd) == 0x9;
19198 int base_reg
= ((unsigned int) initial_insn
<< 12) >> 28;
19199 /* d = UInt (Vd:D);. */
19200 int first_reg
= ((((unsigned int) initial_insn
<< 16) >> 28) << 1)
19201 | (((unsigned int)initial_insn
<< 9) >> 31);
19203 /* Compute the number of 8-words chunks needed to split. */
19204 int chunks
= (num_words
% 8) ? (num_words
/ 8 + 1) : (num_words
/ 8);
19207 /* The test coverage has been done assuming the following
19208 hypothesis that exactly one of the previous is_ predicates is
19210 BFD_ASSERT ( (is_ia_nobang
^ is_ia_bang
^ is_db_bang
)
19211 && !(is_ia_nobang
& is_ia_bang
& is_db_bang
));
19213 /* We treat the cutting of the words in one pass for all
19214 cases, then we emit the adjustments:
19217 -> vldm rx!, {8_words_or_less} for each needed 8_word
19218 -> sub rx, rx, #size (list)
19221 -> vldm rx!, {8_words_or_less} for each needed 8_word
19222 This also handles vpop instruction (when rx is sp)
19225 -> vldmb rx!, {8_words_or_less} for each needed 8_word. */
19226 for (chunk
= 0; chunk
< chunks
; ++chunk
)
19228 bfd_vma new_insn
= 0;
19230 if (is_ia_nobang
|| is_ia_bang
)
19232 new_insn
= create_instruction_vldmia
19236 chunks
- (chunk
+ 1) ?
19237 8 : num_words
- chunk
* 8,
19238 first_reg
+ chunk
* 8);
19240 else if (is_db_bang
)
19242 new_insn
= create_instruction_vldmdb
19245 chunks
- (chunk
+ 1) ?
19246 8 : num_words
- chunk
* 8,
19247 first_reg
+ chunk
* 8);
19251 current_stub_contents
=
19252 push_thumb2_insn32 (htab
, output_bfd
, current_stub_contents
,
19256 /* Only this case requires the base register compensation
19260 current_stub_contents
=
19261 push_thumb2_insn32 (htab
, output_bfd
, current_stub_contents
,
19262 create_instruction_sub
19263 (base_reg
, base_reg
, 4*num_words
));
19266 /* B initial_insn_addr+4. */
19267 current_stub_contents
=
19268 push_thumb2_insn32 (htab
, output_bfd
, current_stub_contents
,
19269 create_instruction_branch_absolute
19270 (initial_insn_addr
- current_stub_contents
));
19273 /* Fill the remaining of the stub with deterministic contents. */
19274 current_stub_contents
=
19275 stm32l4xx_fill_stub_udf (htab
, output_bfd
,
19276 base_stub_contents
, current_stub_contents
,
19277 base_stub_contents
+
19278 STM32L4XX_ERRATUM_VLDM_VENEER_SIZE
);
19282 stm32l4xx_create_replacing_stub (struct elf32_arm_link_hash_table
* htab
,
19284 const insn32 wrong_insn
,
19285 const bfd_byte
*const wrong_insn_addr
,
19286 bfd_byte
*const stub_contents
)
19288 if (is_thumb2_ldmia (wrong_insn
))
19289 stm32l4xx_create_replacing_stub_ldmia (htab
, output_bfd
,
19290 wrong_insn
, wrong_insn_addr
,
19292 else if (is_thumb2_ldmdb (wrong_insn
))
19293 stm32l4xx_create_replacing_stub_ldmdb (htab
, output_bfd
,
19294 wrong_insn
, wrong_insn_addr
,
19296 else if (is_thumb2_vldm (wrong_insn
))
19297 stm32l4xx_create_replacing_stub_vldm (htab
, output_bfd
,
19298 wrong_insn
, wrong_insn_addr
,
19302 /* End of stm32l4xx work-around. */
19305 /* Do code byteswapping. Return FALSE afterwards so that the section is
19306 written out as normal. */
19309 elf32_arm_write_section (bfd
*output_bfd
,
19310 struct bfd_link_info
*link_info
,
19312 bfd_byte
*contents
)
19314 unsigned int mapcount
, errcount
;
19315 _arm_elf_section_data
*arm_data
;
19316 struct elf32_arm_link_hash_table
*globals
= elf32_arm_hash_table (link_info
);
19317 elf32_arm_section_map
*map
;
19318 elf32_vfp11_erratum_list
*errnode
;
19319 elf32_stm32l4xx_erratum_list
*stm32l4xx_errnode
;
19322 bfd_vma offset
= sec
->output_section
->vma
+ sec
->output_offset
;
19326 if (globals
== NULL
)
19329 /* If this section has not been allocated an _arm_elf_section_data
19330 structure then we cannot record anything. */
19331 arm_data
= get_arm_elf_section_data (sec
);
19332 if (arm_data
== NULL
)
19335 mapcount
= arm_data
->mapcount
;
19336 map
= arm_data
->map
;
19337 errcount
= arm_data
->erratumcount
;
19341 unsigned int endianflip
= bfd_big_endian (output_bfd
) ? 3 : 0;
19343 for (errnode
= arm_data
->erratumlist
; errnode
!= 0;
19344 errnode
= errnode
->next
)
19346 bfd_vma target
= errnode
->vma
- offset
;
19348 switch (errnode
->type
)
19350 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER
:
19352 bfd_vma branch_to_veneer
;
19353 /* Original condition code of instruction, plus bit mask for
19354 ARM B instruction. */
19355 unsigned int insn
= (errnode
->u
.b
.vfp_insn
& 0xf0000000)
19358 /* The instruction is before the label. */
19361 /* Above offset included in -4 below. */
19362 branch_to_veneer
= errnode
->u
.b
.veneer
->vma
19363 - errnode
->vma
- 4;
19365 if ((signed) branch_to_veneer
< -(1 << 25)
19366 || (signed) branch_to_veneer
>= (1 << 25))
19367 _bfd_error_handler (_("%pB: error: VFP11 veneer out of "
19368 "range"), output_bfd
);
19370 insn
|= (branch_to_veneer
>> 2) & 0xffffff;
19371 contents
[endianflip
^ target
] = insn
& 0xff;
19372 contents
[endianflip
^ (target
+ 1)] = (insn
>> 8) & 0xff;
19373 contents
[endianflip
^ (target
+ 2)] = (insn
>> 16) & 0xff;
19374 contents
[endianflip
^ (target
+ 3)] = (insn
>> 24) & 0xff;
19378 case VFP11_ERRATUM_ARM_VENEER
:
19380 bfd_vma branch_from_veneer
;
19383 /* Take size of veneer into account. */
19384 branch_from_veneer
= errnode
->u
.v
.branch
->vma
19385 - errnode
->vma
- 12;
19387 if ((signed) branch_from_veneer
< -(1 << 25)
19388 || (signed) branch_from_veneer
>= (1 << 25))
19389 _bfd_error_handler (_("%pB: error: VFP11 veneer out of "
19390 "range"), output_bfd
);
19392 /* Original instruction. */
19393 insn
= errnode
->u
.v
.branch
->u
.b
.vfp_insn
;
19394 contents
[endianflip
^ target
] = insn
& 0xff;
19395 contents
[endianflip
^ (target
+ 1)] = (insn
>> 8) & 0xff;
19396 contents
[endianflip
^ (target
+ 2)] = (insn
>> 16) & 0xff;
19397 contents
[endianflip
^ (target
+ 3)] = (insn
>> 24) & 0xff;
19399 /* Branch back to insn after original insn. */
19400 insn
= 0xea000000 | ((branch_from_veneer
>> 2) & 0xffffff);
19401 contents
[endianflip
^ (target
+ 4)] = insn
& 0xff;
19402 contents
[endianflip
^ (target
+ 5)] = (insn
>> 8) & 0xff;
19403 contents
[endianflip
^ (target
+ 6)] = (insn
>> 16) & 0xff;
19404 contents
[endianflip
^ (target
+ 7)] = (insn
>> 24) & 0xff;
19414 if (arm_data
->stm32l4xx_erratumcount
!= 0)
19416 for (stm32l4xx_errnode
= arm_data
->stm32l4xx_erratumlist
;
19417 stm32l4xx_errnode
!= 0;
19418 stm32l4xx_errnode
= stm32l4xx_errnode
->next
)
19420 bfd_vma target
= stm32l4xx_errnode
->vma
- offset
;
19422 switch (stm32l4xx_errnode
->type
)
19424 case STM32L4XX_ERRATUM_BRANCH_TO_VENEER
:
19427 bfd_vma branch_to_veneer
=
19428 stm32l4xx_errnode
->u
.b
.veneer
->vma
- stm32l4xx_errnode
->vma
;
19430 if ((signed) branch_to_veneer
< -(1 << 24)
19431 || (signed) branch_to_veneer
>= (1 << 24))
19433 bfd_vma out_of_range
=
19434 ((signed) branch_to_veneer
< -(1 << 24)) ?
19435 - branch_to_veneer
- (1 << 24) :
19436 ((signed) branch_to_veneer
>= (1 << 24)) ?
19437 branch_to_veneer
- (1 << 24) : 0;
19440 (_("%pB(%#" PRIx64
"): error: "
19441 "cannot create STM32L4XX veneer; "
19442 "jump out of range by %" PRId64
" bytes; "
19443 "cannot encode branch instruction"),
19445 (uint64_t) (stm32l4xx_errnode
->vma
- 4),
19446 (int64_t) out_of_range
);
19450 insn
= create_instruction_branch_absolute
19451 (stm32l4xx_errnode
->u
.b
.veneer
->vma
- stm32l4xx_errnode
->vma
);
19453 /* The instruction is before the label. */
19456 put_thumb2_insn (globals
, output_bfd
,
19457 (bfd_vma
) insn
, contents
+ target
);
19461 case STM32L4XX_ERRATUM_VENEER
:
19464 bfd_byte
* veneer_r
;
19467 veneer
= contents
+ target
;
19469 + stm32l4xx_errnode
->u
.b
.veneer
->vma
19470 - stm32l4xx_errnode
->vma
- 4;
19472 if ((signed) (veneer_r
- veneer
-
19473 STM32L4XX_ERRATUM_VLDM_VENEER_SIZE
>
19474 STM32L4XX_ERRATUM_LDM_VENEER_SIZE
?
19475 STM32L4XX_ERRATUM_VLDM_VENEER_SIZE
:
19476 STM32L4XX_ERRATUM_LDM_VENEER_SIZE
) < -(1 << 24)
19477 || (signed) (veneer_r
- veneer
) >= (1 << 24))
19479 _bfd_error_handler (_("%pB: error: cannot create STM32L4XX "
19480 "veneer"), output_bfd
);
19484 /* Original instruction. */
19485 insn
= stm32l4xx_errnode
->u
.v
.branch
->u
.b
.insn
;
19487 stm32l4xx_create_replacing_stub
19488 (globals
, output_bfd
, insn
, (void*)veneer_r
, (void*)veneer
);
19498 if (arm_data
->elf
.this_hdr
.sh_type
== SHT_ARM_EXIDX
)
19500 arm_unwind_table_edit
*edit_node
19501 = arm_data
->u
.exidx
.unwind_edit_list
;
19502 /* Now, sec->size is the size of the section we will write. The original
19503 size (before we merged duplicate entries and inserted EXIDX_CANTUNWIND
19504 markers) was sec->rawsize. (This isn't the case if we perform no
19505 edits, then rawsize will be zero and we should use size). */
19506 bfd_byte
*edited_contents
= (bfd_byte
*) bfd_malloc (sec
->size
);
19507 unsigned int input_size
= sec
->rawsize
? sec
->rawsize
: sec
->size
;
19508 unsigned int in_index
, out_index
;
19509 bfd_vma add_to_offsets
= 0;
19511 for (in_index
= 0, out_index
= 0; in_index
* 8 < input_size
|| edit_node
;)
19515 unsigned int edit_index
= edit_node
->index
;
19517 if (in_index
< edit_index
&& in_index
* 8 < input_size
)
19519 copy_exidx_entry (output_bfd
, edited_contents
+ out_index
* 8,
19520 contents
+ in_index
* 8, add_to_offsets
);
19524 else if (in_index
== edit_index
19525 || (in_index
* 8 >= input_size
19526 && edit_index
== UINT_MAX
))
19528 switch (edit_node
->type
)
19530 case DELETE_EXIDX_ENTRY
:
19532 add_to_offsets
+= 8;
19535 case INSERT_EXIDX_CANTUNWIND_AT_END
:
19537 asection
*text_sec
= edit_node
->linked_section
;
19538 bfd_vma text_offset
= text_sec
->output_section
->vma
19539 + text_sec
->output_offset
19541 bfd_vma exidx_offset
= offset
+ out_index
* 8;
19542 unsigned long prel31_offset
;
19544 /* Note: this is meant to be equivalent to an
19545 R_ARM_PREL31 relocation. These synthetic
19546 EXIDX_CANTUNWIND markers are not relocated by the
19547 usual BFD method. */
19548 prel31_offset
= (text_offset
- exidx_offset
)
19550 if (bfd_link_relocatable (link_info
))
19552 /* Here relocation for new EXIDX_CANTUNWIND is
19553 created, so there is no need to
19554 adjust offset by hand. */
19555 prel31_offset
= text_sec
->output_offset
19559 /* First address we can't unwind. */
19560 bfd_put_32 (output_bfd
, prel31_offset
,
19561 &edited_contents
[out_index
* 8]);
19563 /* Code for EXIDX_CANTUNWIND. */
19564 bfd_put_32 (output_bfd
, 0x1,
19565 &edited_contents
[out_index
* 8 + 4]);
19568 add_to_offsets
-= 8;
19573 edit_node
= edit_node
->next
;
19578 /* No more edits, copy remaining entries verbatim. */
19579 copy_exidx_entry (output_bfd
, edited_contents
+ out_index
* 8,
19580 contents
+ in_index
* 8, add_to_offsets
);
19586 if (!(sec
->flags
& SEC_EXCLUDE
) && !(sec
->flags
& SEC_NEVER_LOAD
))
19587 bfd_set_section_contents (output_bfd
, sec
->output_section
,
19589 (file_ptr
) sec
->output_offset
, sec
->size
);
19594 /* Fix code to point to Cortex-A8 erratum stubs. */
19595 if (globals
->fix_cortex_a8
)
19597 struct a8_branch_to_stub_data data
;
19599 data
.writing_section
= sec
;
19600 data
.contents
= contents
;
19602 bfd_hash_traverse (& globals
->stub_hash_table
, make_branch_to_a8_stub
,
19609 if (globals
->byteswap_code
)
19611 qsort (map
, mapcount
, sizeof (* map
), elf32_arm_compare_mapping
);
19614 for (i
= 0; i
< mapcount
; i
++)
19616 if (i
== mapcount
- 1)
19619 end
= map
[i
+ 1].vma
;
19621 switch (map
[i
].type
)
19624 /* Byte swap code words. */
19625 while (ptr
+ 3 < end
)
19627 tmp
= contents
[ptr
];
19628 contents
[ptr
] = contents
[ptr
+ 3];
19629 contents
[ptr
+ 3] = tmp
;
19630 tmp
= contents
[ptr
+ 1];
19631 contents
[ptr
+ 1] = contents
[ptr
+ 2];
19632 contents
[ptr
+ 2] = tmp
;
19638 /* Byte swap code halfwords. */
19639 while (ptr
+ 1 < end
)
19641 tmp
= contents
[ptr
];
19642 contents
[ptr
] = contents
[ptr
+ 1];
19643 contents
[ptr
+ 1] = tmp
;
19649 /* Leave data alone. */
19657 arm_data
->mapcount
= -1;
19658 arm_data
->mapsize
= 0;
19659 arm_data
->map
= NULL
;
19664 /* Mangle thumb function symbols as we read them in. */
19667 elf32_arm_swap_symbol_in (bfd
* abfd
,
19670 Elf_Internal_Sym
*dst
)
19672 Elf_Internal_Shdr
*symtab_hdr
;
19673 const char *name
= NULL
;
19675 if (!bfd_elf32_swap_symbol_in (abfd
, psrc
, pshn
, dst
))
19677 dst
->st_target_internal
= 0;
19679 /* New EABI objects mark thumb function symbols by setting the low bit of
19681 if (ELF_ST_TYPE (dst
->st_info
) == STT_FUNC
19682 || ELF_ST_TYPE (dst
->st_info
) == STT_GNU_IFUNC
)
19684 if (dst
->st_value
& 1)
19686 dst
->st_value
&= ~(bfd_vma
) 1;
19687 ARM_SET_SYM_BRANCH_TYPE (dst
->st_target_internal
,
19688 ST_BRANCH_TO_THUMB
);
19691 ARM_SET_SYM_BRANCH_TYPE (dst
->st_target_internal
, ST_BRANCH_TO_ARM
);
19693 else if (ELF_ST_TYPE (dst
->st_info
) == STT_ARM_TFUNC
)
19695 dst
->st_info
= ELF_ST_INFO (ELF_ST_BIND (dst
->st_info
), STT_FUNC
);
19696 ARM_SET_SYM_BRANCH_TYPE (dst
->st_target_internal
, ST_BRANCH_TO_THUMB
);
19698 else if (ELF_ST_TYPE (dst
->st_info
) == STT_SECTION
)
19699 ARM_SET_SYM_BRANCH_TYPE (dst
->st_target_internal
, ST_BRANCH_LONG
);
19701 ARM_SET_SYM_BRANCH_TYPE (dst
->st_target_internal
, ST_BRANCH_UNKNOWN
);
19703 /* Mark CMSE special symbols. */
19704 symtab_hdr
= & elf_symtab_hdr (abfd
);
19705 if (symtab_hdr
->sh_size
)
19706 name
= bfd_elf_sym_name (abfd
, symtab_hdr
, dst
, NULL
);
19707 if (name
&& CONST_STRNEQ (name
, CMSE_PREFIX
))
19708 ARM_SET_SYM_CMSE_SPCL (dst
->st_target_internal
);
19714 /* Mangle thumb function symbols as we write them out. */
19717 elf32_arm_swap_symbol_out (bfd
*abfd
,
19718 const Elf_Internal_Sym
*src
,
19722 Elf_Internal_Sym newsym
;
19724 /* We convert STT_ARM_TFUNC symbols into STT_FUNC with the low bit
19725 of the address set, as per the new EABI. We do this unconditionally
19726 because objcopy does not set the elf header flags until after
19727 it writes out the symbol table. */
19728 if (ARM_GET_SYM_BRANCH_TYPE (src
->st_target_internal
) == ST_BRANCH_TO_THUMB
)
19731 if (ELF_ST_TYPE (src
->st_info
) != STT_GNU_IFUNC
)
19732 newsym
.st_info
= ELF_ST_INFO (ELF_ST_BIND (src
->st_info
), STT_FUNC
);
19733 if (newsym
.st_shndx
!= SHN_UNDEF
)
19735 /* Do this only for defined symbols. At link type, the static
19736 linker will simulate the work of dynamic linker of resolving
19737 symbols and will carry over the thumbness of found symbols to
19738 the output symbol table. It's not clear how it happens, but
19739 the thumbness of undefined symbols can well be different at
19740 runtime, and writing '1' for them will be confusing for users
19741 and possibly for dynamic linker itself.
19743 newsym
.st_value
|= 1;
19748 bfd_elf32_swap_symbol_out (abfd
, src
, cdst
, shndx
);
19751 /* Add the PT_ARM_EXIDX program header. */
19754 elf32_arm_modify_segment_map (bfd
*abfd
,
19755 struct bfd_link_info
*info ATTRIBUTE_UNUSED
)
19757 struct elf_segment_map
*m
;
19760 sec
= bfd_get_section_by_name (abfd
, ".ARM.exidx");
19761 if (sec
!= NULL
&& (sec
->flags
& SEC_LOAD
) != 0)
19763 /* If there is already a PT_ARM_EXIDX header, then we do not
19764 want to add another one. This situation arises when running
19765 "strip"; the input binary already has the header. */
19766 m
= elf_seg_map (abfd
);
19767 while (m
&& m
->p_type
!= PT_ARM_EXIDX
)
19771 m
= (struct elf_segment_map
*)
19772 bfd_zalloc (abfd
, sizeof (struct elf_segment_map
));
19775 m
->p_type
= PT_ARM_EXIDX
;
19777 m
->sections
[0] = sec
;
19779 m
->next
= elf_seg_map (abfd
);
19780 elf_seg_map (abfd
) = m
;
19787 /* We may add a PT_ARM_EXIDX program header. */
19790 elf32_arm_additional_program_headers (bfd
*abfd
,
19791 struct bfd_link_info
*info ATTRIBUTE_UNUSED
)
19795 sec
= bfd_get_section_by_name (abfd
, ".ARM.exidx");
19796 if (sec
!= NULL
&& (sec
->flags
& SEC_LOAD
) != 0)
19802 /* Hook called by the linker routine which adds symbols from an object
19806 elf32_arm_add_symbol_hook (bfd
*abfd
, struct bfd_link_info
*info
,
19807 Elf_Internal_Sym
*sym
, const char **namep
,
19808 flagword
*flagsp
, asection
**secp
, bfd_vma
*valp
)
19810 if (elf32_arm_hash_table (info
) == NULL
)
19813 if (elf32_arm_hash_table (info
)->vxworks_p
19814 && !elf_vxworks_add_symbol_hook (abfd
, info
, sym
, namep
,
19815 flagsp
, secp
, valp
))
19821 /* We use this to override swap_symbol_in and swap_symbol_out. */
19822 const struct elf_size_info elf32_arm_size_info
=
19824 sizeof (Elf32_External_Ehdr
),
19825 sizeof (Elf32_External_Phdr
),
19826 sizeof (Elf32_External_Shdr
),
19827 sizeof (Elf32_External_Rel
),
19828 sizeof (Elf32_External_Rela
),
19829 sizeof (Elf32_External_Sym
),
19830 sizeof (Elf32_External_Dyn
),
19831 sizeof (Elf_External_Note
),
19835 ELFCLASS32
, EV_CURRENT
,
19836 bfd_elf32_write_out_phdrs
,
19837 bfd_elf32_write_shdrs_and_ehdr
,
19838 bfd_elf32_checksum_contents
,
19839 bfd_elf32_write_relocs
,
19840 elf32_arm_swap_symbol_in
,
19841 elf32_arm_swap_symbol_out
,
19842 bfd_elf32_slurp_reloc_table
,
19843 bfd_elf32_slurp_symbol_table
,
19844 bfd_elf32_swap_dyn_in
,
19845 bfd_elf32_swap_dyn_out
,
19846 bfd_elf32_swap_reloc_in
,
19847 bfd_elf32_swap_reloc_out
,
19848 bfd_elf32_swap_reloca_in
,
19849 bfd_elf32_swap_reloca_out
19853 read_code32 (const bfd
*abfd
, const bfd_byte
*addr
)
19855 /* V7 BE8 code is always little endian. */
19856 if ((elf_elfheader (abfd
)->e_flags
& EF_ARM_BE8
) != 0)
19857 return bfd_getl32 (addr
);
19859 return bfd_get_32 (abfd
, addr
);
19863 read_code16 (const bfd
*abfd
, const bfd_byte
*addr
)
19865 /* V7 BE8 code is always little endian. */
19866 if ((elf_elfheader (abfd
)->e_flags
& EF_ARM_BE8
) != 0)
19867 return bfd_getl16 (addr
);
19869 return bfd_get_16 (abfd
, addr
);
19872 /* Return size of plt0 entry starting at ADDR
19873 or (bfd_vma) -1 if size can not be determined. */
19876 elf32_arm_plt0_size (const bfd
*abfd
, const bfd_byte
*addr
)
19878 bfd_vma first_word
;
19881 first_word
= read_code32 (abfd
, addr
);
19883 if (first_word
== elf32_arm_plt0_entry
[0])
19884 plt0_size
= 4 * ARRAY_SIZE (elf32_arm_plt0_entry
);
19885 else if (first_word
== elf32_thumb2_plt0_entry
[0])
19886 plt0_size
= 4 * ARRAY_SIZE (elf32_thumb2_plt0_entry
);
19888 /* We don't yet handle this PLT format. */
19889 return (bfd_vma
) -1;
19894 /* Return size of plt entry starting at offset OFFSET
19895 of plt section located at address START
19896 or (bfd_vma) -1 if size can not be determined. */
19899 elf32_arm_plt_size (const bfd
*abfd
, const bfd_byte
*start
, bfd_vma offset
)
19901 bfd_vma first_insn
;
19902 bfd_vma plt_size
= 0;
19903 const bfd_byte
*addr
= start
+ offset
;
19905 /* PLT entry size if fixed on Thumb-only platforms. */
19906 if (read_code32 (abfd
, start
) == elf32_thumb2_plt0_entry
[0])
19907 return 4 * ARRAY_SIZE (elf32_thumb2_plt_entry
);
19909 /* Respect Thumb stub if necessary. */
19910 if (read_code16 (abfd
, addr
) == elf32_arm_plt_thumb_stub
[0])
19912 plt_size
+= 2 * ARRAY_SIZE(elf32_arm_plt_thumb_stub
);
19915 /* Strip immediate from first add. */
19916 first_insn
= read_code32 (abfd
, addr
+ plt_size
) & 0xffffff00;
19918 #ifdef FOUR_WORD_PLT
19919 if (first_insn
== elf32_arm_plt_entry
[0])
19920 plt_size
+= 4 * ARRAY_SIZE (elf32_arm_plt_entry
);
19922 if (first_insn
== elf32_arm_plt_entry_long
[0])
19923 plt_size
+= 4 * ARRAY_SIZE (elf32_arm_plt_entry_long
);
19924 else if (first_insn
== elf32_arm_plt_entry_short
[0])
19925 plt_size
+= 4 * ARRAY_SIZE (elf32_arm_plt_entry_short
);
19928 /* We don't yet handle this PLT format. */
19929 return (bfd_vma
) -1;
19934 /* Implementation is shamelessly borrowed from _bfd_elf_get_synthetic_symtab. */
19937 elf32_arm_get_synthetic_symtab (bfd
*abfd
,
19938 long symcount ATTRIBUTE_UNUSED
,
19939 asymbol
**syms ATTRIBUTE_UNUSED
,
19949 Elf_Internal_Shdr
*hdr
;
19957 if ((abfd
->flags
& (DYNAMIC
| EXEC_P
)) == 0)
19960 if (dynsymcount
<= 0)
19963 relplt
= bfd_get_section_by_name (abfd
, ".rel.plt");
19964 if (relplt
== NULL
)
19967 hdr
= &elf_section_data (relplt
)->this_hdr
;
19968 if (hdr
->sh_link
!= elf_dynsymtab (abfd
)
19969 || (hdr
->sh_type
!= SHT_REL
&& hdr
->sh_type
!= SHT_RELA
))
19972 plt
= bfd_get_section_by_name (abfd
, ".plt");
19976 if (!elf32_arm_size_info
.slurp_reloc_table (abfd
, relplt
, dynsyms
, TRUE
))
19979 data
= plt
->contents
;
19982 if (!bfd_get_full_section_contents(abfd
, (asection
*) plt
, &data
) || data
== NULL
)
19984 bfd_cache_section_contents((asection
*) plt
, data
);
19987 count
= relplt
->size
/ hdr
->sh_entsize
;
19988 size
= count
* sizeof (asymbol
);
19989 p
= relplt
->relocation
;
19990 for (i
= 0; i
< count
; i
++, p
+= elf32_arm_size_info
.int_rels_per_ext_rel
)
19992 size
+= strlen ((*p
->sym_ptr_ptr
)->name
) + sizeof ("@plt");
19993 if (p
->addend
!= 0)
19994 size
+= sizeof ("+0x") - 1 + 8;
19997 s
= *ret
= (asymbol
*) bfd_malloc (size
);
20001 offset
= elf32_arm_plt0_size (abfd
, data
);
20002 if (offset
== (bfd_vma
) -1)
20005 names
= (char *) (s
+ count
);
20006 p
= relplt
->relocation
;
20008 for (i
= 0; i
< count
; i
++, p
+= elf32_arm_size_info
.int_rels_per_ext_rel
)
20012 bfd_vma plt_size
= elf32_arm_plt_size (abfd
, data
, offset
);
20013 if (plt_size
== (bfd_vma
) -1)
20016 *s
= **p
->sym_ptr_ptr
;
20017 /* Undefined syms won't have BSF_LOCAL or BSF_GLOBAL set. Since
20018 we are defining a symbol, ensure one of them is set. */
20019 if ((s
->flags
& BSF_LOCAL
) == 0)
20020 s
->flags
|= BSF_GLOBAL
;
20021 s
->flags
|= BSF_SYNTHETIC
;
20026 len
= strlen ((*p
->sym_ptr_ptr
)->name
);
20027 memcpy (names
, (*p
->sym_ptr_ptr
)->name
, len
);
20029 if (p
->addend
!= 0)
20033 memcpy (names
, "+0x", sizeof ("+0x") - 1);
20034 names
+= sizeof ("+0x") - 1;
20035 bfd_sprintf_vma (abfd
, buf
, p
->addend
);
20036 for (a
= buf
; *a
== '0'; ++a
)
20039 memcpy (names
, a
, len
);
20042 memcpy (names
, "@plt", sizeof ("@plt"));
20043 names
+= sizeof ("@plt");
20045 offset
+= plt_size
;
20052 elf32_arm_section_flags (flagword
*flags
, const Elf_Internal_Shdr
* hdr
)
20054 if (hdr
->sh_flags
& SHF_ARM_PURECODE
)
20055 *flags
|= SEC_ELF_PURECODE
;
20060 elf32_arm_lookup_section_flags (char *flag_name
)
20062 if (!strcmp (flag_name
, "SHF_ARM_PURECODE"))
20063 return SHF_ARM_PURECODE
;
20065 return SEC_NO_FLAGS
;
20068 static unsigned int
20069 elf32_arm_count_additional_relocs (asection
*sec
)
20071 struct _arm_elf_section_data
*arm_data
;
20072 arm_data
= get_arm_elf_section_data (sec
);
20074 return arm_data
== NULL
? 0 : arm_data
->additional_reloc_count
;
20077 /* Called to set the sh_flags, sh_link and sh_info fields of OSECTION which
20078 has a type >= SHT_LOOS. Returns TRUE if these fields were initialised
20079 FALSE otherwise. ISECTION is the best guess matching section from the
20080 input bfd IBFD, but it might be NULL. */
20083 elf32_arm_copy_special_section_fields (const bfd
*ibfd ATTRIBUTE_UNUSED
,
20084 bfd
*obfd ATTRIBUTE_UNUSED
,
20085 const Elf_Internal_Shdr
*isection ATTRIBUTE_UNUSED
,
20086 Elf_Internal_Shdr
*osection
)
20088 switch (osection
->sh_type
)
20090 case SHT_ARM_EXIDX
:
20092 Elf_Internal_Shdr
**oheaders
= elf_elfsections (obfd
);
20093 Elf_Internal_Shdr
**iheaders
= elf_elfsections (ibfd
);
20096 osection
->sh_flags
= SHF_ALLOC
| SHF_LINK_ORDER
;
20097 osection
->sh_info
= 0;
20099 /* The sh_link field must be set to the text section associated with
20100 this index section. Unfortunately the ARM EHABI does not specify
20101 exactly how to determine this association. Our caller does try
20102 to match up OSECTION with its corresponding input section however
20103 so that is a good first guess. */
20104 if (isection
!= NULL
20105 && osection
->bfd_section
!= NULL
20106 && isection
->bfd_section
!= NULL
20107 && isection
->bfd_section
->output_section
!= NULL
20108 && isection
->bfd_section
->output_section
== osection
->bfd_section
20109 && iheaders
!= NULL
20110 && isection
->sh_link
> 0
20111 && isection
->sh_link
< elf_numsections (ibfd
)
20112 && iheaders
[isection
->sh_link
]->bfd_section
!= NULL
20113 && iheaders
[isection
->sh_link
]->bfd_section
->output_section
!= NULL
20116 for (i
= elf_numsections (obfd
); i
-- > 0;)
20117 if (oheaders
[i
]->bfd_section
20118 == iheaders
[isection
->sh_link
]->bfd_section
->output_section
)
20124 /* Failing that we have to find a matching section ourselves. If
20125 we had the output section name available we could compare that
20126 with input section names. Unfortunately we don't. So instead
20127 we use a simple heuristic and look for the nearest executable
20128 section before this one. */
20129 for (i
= elf_numsections (obfd
); i
-- > 0;)
20130 if (oheaders
[i
] == osection
)
20136 if (oheaders
[i
]->sh_type
== SHT_PROGBITS
20137 && (oheaders
[i
]->sh_flags
& (SHF_ALLOC
| SHF_EXECINSTR
))
20138 == (SHF_ALLOC
| SHF_EXECINSTR
))
20144 osection
->sh_link
= i
;
20145 /* If the text section was part of a group
20146 then the index section should be too. */
20147 if (oheaders
[i
]->sh_flags
& SHF_GROUP
)
20148 osection
->sh_flags
|= SHF_GROUP
;
20154 case SHT_ARM_PREEMPTMAP
:
20155 osection
->sh_flags
= SHF_ALLOC
;
20158 case SHT_ARM_ATTRIBUTES
:
20159 case SHT_ARM_DEBUGOVERLAY
:
20160 case SHT_ARM_OVERLAYSECTION
:
20168 /* Returns TRUE if NAME is an ARM mapping symbol.
20169 Traditionally the symbols $a, $d and $t have been used.
20170 The ARM ELF standard also defines $x (for A64 code). It also allows a
20171 period initiated suffix to be added to the symbol: "$[adtx]\.[:sym_char]+".
20172 Other tools might also produce $b (Thumb BL), $f, $p, $m and $v, but we do
20173 not support them here. $t.x indicates the start of ThumbEE instructions. */
20176 is_arm_mapping_symbol (const char * name
)
20178 return name
!= NULL
/* Paranoia. */
20179 && name
[0] == '$' /* Note: if objcopy --prefix-symbols has been used then
20180 the mapping symbols could have acquired a prefix.
20181 We do not support this here, since such symbols no
20182 longer conform to the ARM ELF ABI. */
20183 && (name
[1] == 'a' || name
[1] == 'd' || name
[1] == 't' || name
[1] == 'x')
20184 && (name
[2] == 0 || name
[2] == '.');
20185 /* FIXME: Strictly speaking the symbol is only a valid mapping symbol if
20186 any characters that follow the period are legal characters for the body
20187 of a symbol's name. For now we just assume that this is the case. */
20190 /* Make sure that mapping symbols in object files are not removed via the
20191 "strip --strip-unneeded" tool. These symbols are needed in order to
20192 correctly generate interworking veneers, and for byte swapping code
20193 regions. Once an object file has been linked, it is safe to remove the
20194 symbols as they will no longer be needed. */
20197 elf32_arm_backend_symbol_processing (bfd
*abfd
, asymbol
*sym
)
20199 if (((abfd
->flags
& (EXEC_P
| DYNAMIC
)) == 0)
20200 && sym
->section
!= bfd_abs_section_ptr
20201 && is_arm_mapping_symbol (sym
->name
))
20202 sym
->flags
|= BSF_KEEP
;
20205 #undef elf_backend_copy_special_section_fields
20206 #define elf_backend_copy_special_section_fields elf32_arm_copy_special_section_fields
20208 #define ELF_ARCH bfd_arch_arm
20209 #define ELF_TARGET_ID ARM_ELF_DATA
20210 #define ELF_MACHINE_CODE EM_ARM
20211 #ifdef __QNXTARGET__
20212 #define ELF_MAXPAGESIZE 0x1000
20214 #define ELF_MAXPAGESIZE 0x10000
20216 #define ELF_MINPAGESIZE 0x1000
20217 #define ELF_COMMONPAGESIZE 0x1000
20219 #define bfd_elf32_mkobject elf32_arm_mkobject
20221 #define bfd_elf32_bfd_copy_private_bfd_data elf32_arm_copy_private_bfd_data
20222 #define bfd_elf32_bfd_merge_private_bfd_data elf32_arm_merge_private_bfd_data
20223 #define bfd_elf32_bfd_set_private_flags elf32_arm_set_private_flags
20224 #define bfd_elf32_bfd_print_private_bfd_data elf32_arm_print_private_bfd_data
20225 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_link_hash_table_create
20226 #define bfd_elf32_bfd_reloc_type_lookup elf32_arm_reloc_type_lookup
20227 #define bfd_elf32_bfd_reloc_name_lookup elf32_arm_reloc_name_lookup
20228 #define bfd_elf32_find_nearest_line elf32_arm_find_nearest_line
20229 #define bfd_elf32_find_inliner_info elf32_arm_find_inliner_info
20230 #define bfd_elf32_new_section_hook elf32_arm_new_section_hook
20231 #define bfd_elf32_bfd_is_target_special_symbol elf32_arm_is_target_special_symbol
20232 #define bfd_elf32_bfd_final_link elf32_arm_final_link
20233 #define bfd_elf32_get_synthetic_symtab elf32_arm_get_synthetic_symtab
20235 #define elf_backend_get_symbol_type elf32_arm_get_symbol_type
20236 #define elf_backend_gc_mark_hook elf32_arm_gc_mark_hook
20237 #define elf_backend_gc_mark_extra_sections elf32_arm_gc_mark_extra_sections
20238 #define elf_backend_check_relocs elf32_arm_check_relocs
20239 #define elf_backend_update_relocs elf32_arm_update_relocs
20240 #define elf_backend_relocate_section elf32_arm_relocate_section
20241 #define elf_backend_write_section elf32_arm_write_section
20242 #define elf_backend_adjust_dynamic_symbol elf32_arm_adjust_dynamic_symbol
20243 #define elf_backend_create_dynamic_sections elf32_arm_create_dynamic_sections
20244 #define elf_backend_finish_dynamic_symbol elf32_arm_finish_dynamic_symbol
20245 #define elf_backend_finish_dynamic_sections elf32_arm_finish_dynamic_sections
20246 #define elf_backend_size_dynamic_sections elf32_arm_size_dynamic_sections
20247 #define elf_backend_always_size_sections elf32_arm_always_size_sections
20248 #define elf_backend_init_index_section _bfd_elf_init_2_index_sections
20249 #define elf_backend_post_process_headers elf32_arm_post_process_headers
20250 #define elf_backend_reloc_type_class elf32_arm_reloc_type_class
20251 #define elf_backend_object_p elf32_arm_object_p
20252 #define elf_backend_fake_sections elf32_arm_fake_sections
20253 #define elf_backend_section_from_shdr elf32_arm_section_from_shdr
20254 #define elf_backend_final_write_processing elf32_arm_final_write_processing
20255 #define elf_backend_copy_indirect_symbol elf32_arm_copy_indirect_symbol
20256 #define elf_backend_size_info elf32_arm_size_info
20257 #define elf_backend_modify_segment_map elf32_arm_modify_segment_map
20258 #define elf_backend_additional_program_headers elf32_arm_additional_program_headers
20259 #define elf_backend_output_arch_local_syms elf32_arm_output_arch_local_syms
20260 #define elf_backend_filter_implib_symbols elf32_arm_filter_implib_symbols
20261 #define elf_backend_begin_write_processing elf32_arm_begin_write_processing
20262 #define elf_backend_add_symbol_hook elf32_arm_add_symbol_hook
20263 #define elf_backend_count_additional_relocs elf32_arm_count_additional_relocs
20264 #define elf_backend_symbol_processing elf32_arm_backend_symbol_processing
20266 #define elf_backend_can_refcount 1
20267 #define elf_backend_can_gc_sections 1
20268 #define elf_backend_plt_readonly 1
20269 #define elf_backend_want_got_plt 1
20270 #define elf_backend_want_plt_sym 0
20271 #define elf_backend_want_dynrelro 1
20272 #define elf_backend_may_use_rel_p 1
20273 #define elf_backend_may_use_rela_p 0
20274 #define elf_backend_default_use_rela_p 0
20275 #define elf_backend_dtrel_excludes_plt 1
20277 #define elf_backend_got_header_size 12
20278 #define elf_backend_extern_protected_data 1
20280 #undef elf_backend_obj_attrs_vendor
20281 #define elf_backend_obj_attrs_vendor "aeabi"
20282 #undef elf_backend_obj_attrs_section
20283 #define elf_backend_obj_attrs_section ".ARM.attributes"
20284 #undef elf_backend_obj_attrs_arg_type
20285 #define elf_backend_obj_attrs_arg_type elf32_arm_obj_attrs_arg_type
20286 #undef elf_backend_obj_attrs_section_type
20287 #define elf_backend_obj_attrs_section_type SHT_ARM_ATTRIBUTES
20288 #define elf_backend_obj_attrs_order elf32_arm_obj_attrs_order
20289 #define elf_backend_obj_attrs_handle_unknown elf32_arm_obj_attrs_handle_unknown
20291 #undef elf_backend_section_flags
20292 #define elf_backend_section_flags elf32_arm_section_flags
20293 #undef elf_backend_lookup_section_flags_hook
20294 #define elf_backend_lookup_section_flags_hook elf32_arm_lookup_section_flags
20296 #define elf_backend_linux_prpsinfo32_ugid16 TRUE
20298 #include "elf32-target.h"
20300 /* Native Client targets. */
20302 #undef TARGET_LITTLE_SYM
20303 #define TARGET_LITTLE_SYM arm_elf32_nacl_le_vec
20304 #undef TARGET_LITTLE_NAME
20305 #define TARGET_LITTLE_NAME "elf32-littlearm-nacl"
20306 #undef TARGET_BIG_SYM
20307 #define TARGET_BIG_SYM arm_elf32_nacl_be_vec
20308 #undef TARGET_BIG_NAME
20309 #define TARGET_BIG_NAME "elf32-bigarm-nacl"
20311 /* Like elf32_arm_link_hash_table_create -- but overrides
20312 appropriately for NaCl. */
20314 static struct bfd_link_hash_table
*
20315 elf32_arm_nacl_link_hash_table_create (bfd
*abfd
)
20317 struct bfd_link_hash_table
*ret
;
20319 ret
= elf32_arm_link_hash_table_create (abfd
);
20322 struct elf32_arm_link_hash_table
*htab
20323 = (struct elf32_arm_link_hash_table
*) ret
;
20327 htab
->plt_header_size
= 4 * ARRAY_SIZE (elf32_arm_nacl_plt0_entry
);
20328 htab
->plt_entry_size
= 4 * ARRAY_SIZE (elf32_arm_nacl_plt_entry
);
20333 /* Since NaCl doesn't use the ARM-specific unwind format, we don't
20334 really need to use elf32_arm_modify_segment_map. But we do it
20335 anyway just to reduce gratuitous differences with the stock ARM backend. */
20338 elf32_arm_nacl_modify_segment_map (bfd
*abfd
, struct bfd_link_info
*info
)
20340 return (elf32_arm_modify_segment_map (abfd
, info
)
20341 && nacl_modify_segment_map (abfd
, info
));
20345 elf32_arm_nacl_final_write_processing (bfd
*abfd
, bfd_boolean linker
)
20347 elf32_arm_final_write_processing (abfd
, linker
);
20348 nacl_final_write_processing (abfd
, linker
);
20352 elf32_arm_nacl_plt_sym_val (bfd_vma i
, const asection
*plt
,
20353 const arelent
*rel ATTRIBUTE_UNUSED
)
20356 + 4 * (ARRAY_SIZE (elf32_arm_nacl_plt0_entry
) +
20357 i
* ARRAY_SIZE (elf32_arm_nacl_plt_entry
));
20361 #define elf32_bed elf32_arm_nacl_bed
20362 #undef bfd_elf32_bfd_link_hash_table_create
20363 #define bfd_elf32_bfd_link_hash_table_create \
20364 elf32_arm_nacl_link_hash_table_create
20365 #undef elf_backend_plt_alignment
20366 #define elf_backend_plt_alignment 4
20367 #undef elf_backend_modify_segment_map
20368 #define elf_backend_modify_segment_map elf32_arm_nacl_modify_segment_map
20369 #undef elf_backend_modify_program_headers
20370 #define elf_backend_modify_program_headers nacl_modify_program_headers
20371 #undef elf_backend_final_write_processing
20372 #define elf_backend_final_write_processing elf32_arm_nacl_final_write_processing
20373 #undef bfd_elf32_get_synthetic_symtab
20374 #undef elf_backend_plt_sym_val
20375 #define elf_backend_plt_sym_val elf32_arm_nacl_plt_sym_val
20376 #undef elf_backend_copy_special_section_fields
20378 #undef ELF_MINPAGESIZE
20379 #undef ELF_COMMONPAGESIZE
20382 #include "elf32-target.h"
20384 /* Reset to defaults. */
20385 #undef elf_backend_plt_alignment
20386 #undef elf_backend_modify_segment_map
20387 #define elf_backend_modify_segment_map elf32_arm_modify_segment_map
20388 #undef elf_backend_modify_program_headers
20389 #undef elf_backend_final_write_processing
20390 #define elf_backend_final_write_processing elf32_arm_final_write_processing
20391 #undef ELF_MINPAGESIZE
20392 #define ELF_MINPAGESIZE 0x1000
20393 #undef ELF_COMMONPAGESIZE
20394 #define ELF_COMMONPAGESIZE 0x1000
20397 /* FDPIC Targets. */
20399 #undef TARGET_LITTLE_SYM
20400 #define TARGET_LITTLE_SYM arm_elf32_fdpic_le_vec
20401 #undef TARGET_LITTLE_NAME
20402 #define TARGET_LITTLE_NAME "elf32-littlearm-fdpic"
20403 #undef TARGET_BIG_SYM
20404 #define TARGET_BIG_SYM arm_elf32_fdpic_be_vec
20405 #undef TARGET_BIG_NAME
20406 #define TARGET_BIG_NAME "elf32-bigarm-fdpic"
20407 #undef elf_match_priority
20408 #define elf_match_priority 128
20410 #define ELF_OSABI ELFOSABI_ARM_FDPIC
20412 /* Like elf32_arm_link_hash_table_create -- but overrides
20413 appropriately for FDPIC. */
20415 static struct bfd_link_hash_table
*
20416 elf32_arm_fdpic_link_hash_table_create (bfd
*abfd
)
20418 struct bfd_link_hash_table
*ret
;
20420 ret
= elf32_arm_link_hash_table_create (abfd
);
20423 struct elf32_arm_link_hash_table
*htab
= (struct elf32_arm_link_hash_table
*) ret
;
20430 /* We need dynamic symbols for every section, since segments can
20431 relocate independently. */
20433 elf32_arm_fdpic_omit_section_dynsym (bfd
*output_bfd ATTRIBUTE_UNUSED
,
20434 struct bfd_link_info
*info
20436 asection
*p ATTRIBUTE_UNUSED
)
20438 switch (elf_section_data (p
)->this_hdr
.sh_type
)
20442 /* If sh_type is yet undecided, assume it could be
20443 SHT_PROGBITS/SHT_NOBITS. */
20447 /* There shouldn't be section relative relocations
20448 against any other section. */
20455 #define elf32_bed elf32_arm_fdpic_bed
20457 #undef bfd_elf32_bfd_link_hash_table_create
20458 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_fdpic_link_hash_table_create
20460 #undef elf_backend_omit_section_dynsym
20461 #define elf_backend_omit_section_dynsym elf32_arm_fdpic_omit_section_dynsym
20463 #include "elf32-target.h"
20465 #undef elf_match_priority
20467 #undef elf_backend_omit_section_dynsym
20469 /* VxWorks Targets. */
20471 #undef TARGET_LITTLE_SYM
20472 #define TARGET_LITTLE_SYM arm_elf32_vxworks_le_vec
20473 #undef TARGET_LITTLE_NAME
20474 #define TARGET_LITTLE_NAME "elf32-littlearm-vxworks"
20475 #undef TARGET_BIG_SYM
20476 #define TARGET_BIG_SYM arm_elf32_vxworks_be_vec
20477 #undef TARGET_BIG_NAME
20478 #define TARGET_BIG_NAME "elf32-bigarm-vxworks"
20480 /* Like elf32_arm_link_hash_table_create -- but overrides
20481 appropriately for VxWorks. */
20483 static struct bfd_link_hash_table
*
20484 elf32_arm_vxworks_link_hash_table_create (bfd
*abfd
)
20486 struct bfd_link_hash_table
*ret
;
20488 ret
= elf32_arm_link_hash_table_create (abfd
);
20491 struct elf32_arm_link_hash_table
*htab
20492 = (struct elf32_arm_link_hash_table
*) ret
;
20494 htab
->vxworks_p
= 1;
20500 elf32_arm_vxworks_final_write_processing (bfd
*abfd
, bfd_boolean linker
)
20502 elf32_arm_final_write_processing (abfd
, linker
);
20503 elf_vxworks_final_write_processing (abfd
, linker
);
20507 #define elf32_bed elf32_arm_vxworks_bed
20509 #undef bfd_elf32_bfd_link_hash_table_create
20510 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_vxworks_link_hash_table_create
20511 #undef elf_backend_final_write_processing
20512 #define elf_backend_final_write_processing elf32_arm_vxworks_final_write_processing
20513 #undef elf_backend_emit_relocs
20514 #define elf_backend_emit_relocs elf_vxworks_emit_relocs
20516 #undef elf_backend_may_use_rel_p
20517 #define elf_backend_may_use_rel_p 0
20518 #undef elf_backend_may_use_rela_p
20519 #define elf_backend_may_use_rela_p 1
20520 #undef elf_backend_default_use_rela_p
20521 #define elf_backend_default_use_rela_p 1
20522 #undef elf_backend_want_plt_sym
20523 #define elf_backend_want_plt_sym 1
20524 #undef ELF_MAXPAGESIZE
20525 #define ELF_MAXPAGESIZE 0x1000
20527 #include "elf32-target.h"
20530 /* Merge backend specific data from an object file to the output
20531 object file when linking. */
20534 elf32_arm_merge_private_bfd_data (bfd
*ibfd
, struct bfd_link_info
*info
)
20536 bfd
*obfd
= info
->output_bfd
;
20537 flagword out_flags
;
20539 bfd_boolean flags_compatible
= TRUE
;
20542 /* Check if we have the same endianness. */
20543 if (! _bfd_generic_verify_endian_match (ibfd
, info
))
20546 if (! is_arm_elf (ibfd
) || ! is_arm_elf (obfd
))
20549 if (!elf32_arm_merge_eabi_attributes (ibfd
, info
))
20552 /* The input BFD must have had its flags initialised. */
20553 /* The following seems bogus to me -- The flags are initialized in
20554 the assembler but I don't think an elf_flags_init field is
20555 written into the object. */
20556 /* BFD_ASSERT (elf_flags_init (ibfd)); */
20558 in_flags
= elf_elfheader (ibfd
)->e_flags
;
20559 out_flags
= elf_elfheader (obfd
)->e_flags
;
20561 /* In theory there is no reason why we couldn't handle this. However
20562 in practice it isn't even close to working and there is no real
20563 reason to want it. */
20564 if (EF_ARM_EABI_VERSION (in_flags
) >= EF_ARM_EABI_VER4
20565 && !(ibfd
->flags
& DYNAMIC
)
20566 && (in_flags
& EF_ARM_BE8
))
20568 _bfd_error_handler (_("error: %pB is already in final BE8 format"),
20573 if (!elf_flags_init (obfd
))
20575 /* If the input is the default architecture and had the default
20576 flags then do not bother setting the flags for the output
20577 architecture, instead allow future merges to do this. If no
20578 future merges ever set these flags then they will retain their
20579 uninitialised values, which surprise surprise, correspond
20580 to the default values. */
20581 if (bfd_get_arch_info (ibfd
)->the_default
20582 && elf_elfheader (ibfd
)->e_flags
== 0)
20585 elf_flags_init (obfd
) = TRUE
;
20586 elf_elfheader (obfd
)->e_flags
= in_flags
;
20588 if (bfd_get_arch (obfd
) == bfd_get_arch (ibfd
)
20589 && bfd_get_arch_info (obfd
)->the_default
)
20590 return bfd_set_arch_mach (obfd
, bfd_get_arch (ibfd
), bfd_get_mach (ibfd
));
20595 /* Determine what should happen if the input ARM architecture
20596 does not match the output ARM architecture. */
20597 if (! bfd_arm_merge_machines (ibfd
, obfd
))
20600 /* Identical flags must be compatible. */
20601 if (in_flags
== out_flags
)
20604 /* Check to see if the input BFD actually contains any sections. If
20605 not, its flags may not have been initialised either, but it
20606 cannot actually cause any incompatiblity. Do not short-circuit
20607 dynamic objects; their section list may be emptied by
20608 elf_link_add_object_symbols.
20610 Also check to see if there are no code sections in the input.
20611 In this case there is no need to check for code specific flags.
20612 XXX - do we need to worry about floating-point format compatability
20613 in data sections ? */
20614 if (!(ibfd
->flags
& DYNAMIC
))
20616 bfd_boolean null_input_bfd
= TRUE
;
20617 bfd_boolean only_data_sections
= TRUE
;
20619 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
20621 /* Ignore synthetic glue sections. */
20622 if (strcmp (sec
->name
, ".glue_7")
20623 && strcmp (sec
->name
, ".glue_7t"))
20625 if ((bfd_get_section_flags (ibfd
, sec
)
20626 & (SEC_LOAD
| SEC_CODE
| SEC_HAS_CONTENTS
))
20627 == (SEC_LOAD
| SEC_CODE
| SEC_HAS_CONTENTS
))
20628 only_data_sections
= FALSE
;
20630 null_input_bfd
= FALSE
;
20635 if (null_input_bfd
|| only_data_sections
)
20639 /* Complain about various flag mismatches. */
20640 if (!elf32_arm_versions_compatible (EF_ARM_EABI_VERSION (in_flags
),
20641 EF_ARM_EABI_VERSION (out_flags
)))
20644 (_("error: source object %pB has EABI version %d, but target %pB has EABI version %d"),
20645 ibfd
, (in_flags
& EF_ARM_EABIMASK
) >> 24,
20646 obfd
, (out_flags
& EF_ARM_EABIMASK
) >> 24);
20650 /* Not sure what needs to be checked for EABI versions >= 1. */
20651 /* VxWorks libraries do not use these flags. */
20652 if (get_elf_backend_data (obfd
) != &elf32_arm_vxworks_bed
20653 && get_elf_backend_data (ibfd
) != &elf32_arm_vxworks_bed
20654 && EF_ARM_EABI_VERSION (in_flags
) == EF_ARM_EABI_UNKNOWN
)
20656 if ((in_flags
& EF_ARM_APCS_26
) != (out_flags
& EF_ARM_APCS_26
))
20659 (_("error: %pB is compiled for APCS-%d, whereas target %pB uses APCS-%d"),
20660 ibfd
, in_flags
& EF_ARM_APCS_26
? 26 : 32,
20661 obfd
, out_flags
& EF_ARM_APCS_26
? 26 : 32);
20662 flags_compatible
= FALSE
;
20665 if ((in_flags
& EF_ARM_APCS_FLOAT
) != (out_flags
& EF_ARM_APCS_FLOAT
))
20667 if (in_flags
& EF_ARM_APCS_FLOAT
)
20669 (_("error: %pB passes floats in float registers, whereas %pB passes them in integer registers"),
20673 (_("error: %pB passes floats in integer registers, whereas %pB passes them in float registers"),
20676 flags_compatible
= FALSE
;
20679 if ((in_flags
& EF_ARM_VFP_FLOAT
) != (out_flags
& EF_ARM_VFP_FLOAT
))
20681 if (in_flags
& EF_ARM_VFP_FLOAT
)
20683 (_("error: %pB uses %s instructions, whereas %pB does not"),
20684 ibfd
, "VFP", obfd
);
20687 (_("error: %pB uses %s instructions, whereas %pB does not"),
20688 ibfd
, "FPA", obfd
);
20690 flags_compatible
= FALSE
;
20693 if ((in_flags
& EF_ARM_MAVERICK_FLOAT
) != (out_flags
& EF_ARM_MAVERICK_FLOAT
))
20695 if (in_flags
& EF_ARM_MAVERICK_FLOAT
)
20697 (_("error: %pB uses %s instructions, whereas %pB does not"),
20698 ibfd
, "Maverick", obfd
);
20701 (_("error: %pB does not use %s instructions, whereas %pB does"),
20702 ibfd
, "Maverick", obfd
);
20704 flags_compatible
= FALSE
;
20707 #ifdef EF_ARM_SOFT_FLOAT
20708 if ((in_flags
& EF_ARM_SOFT_FLOAT
) != (out_flags
& EF_ARM_SOFT_FLOAT
))
20710 /* We can allow interworking between code that is VFP format
20711 layout, and uses either soft float or integer regs for
20712 passing floating point arguments and results. We already
20713 know that the APCS_FLOAT flags match; similarly for VFP
20715 if ((in_flags
& EF_ARM_APCS_FLOAT
) != 0
20716 || (in_flags
& EF_ARM_VFP_FLOAT
) == 0)
20718 if (in_flags
& EF_ARM_SOFT_FLOAT
)
20720 (_("error: %pB uses software FP, whereas %pB uses hardware FP"),
20724 (_("error: %pB uses hardware FP, whereas %pB uses software FP"),
20727 flags_compatible
= FALSE
;
20732 /* Interworking mismatch is only a warning. */
20733 if ((in_flags
& EF_ARM_INTERWORK
) != (out_flags
& EF_ARM_INTERWORK
))
20735 if (in_flags
& EF_ARM_INTERWORK
)
20738 (_("warning: %pB supports interworking, whereas %pB does not"),
20744 (_("warning: %pB does not support interworking, whereas %pB does"),
20750 return flags_compatible
;
20754 /* Symbian OS Targets. */
20756 #undef TARGET_LITTLE_SYM
20757 #define TARGET_LITTLE_SYM arm_elf32_symbian_le_vec
20758 #undef TARGET_LITTLE_NAME
20759 #define TARGET_LITTLE_NAME "elf32-littlearm-symbian"
20760 #undef TARGET_BIG_SYM
20761 #define TARGET_BIG_SYM arm_elf32_symbian_be_vec
20762 #undef TARGET_BIG_NAME
20763 #define TARGET_BIG_NAME "elf32-bigarm-symbian"
20765 /* Like elf32_arm_link_hash_table_create -- but overrides
20766 appropriately for Symbian OS. */
20768 static struct bfd_link_hash_table
*
20769 elf32_arm_symbian_link_hash_table_create (bfd
*abfd
)
20771 struct bfd_link_hash_table
*ret
;
20773 ret
= elf32_arm_link_hash_table_create (abfd
);
20776 struct elf32_arm_link_hash_table
*htab
20777 = (struct elf32_arm_link_hash_table
*)ret
;
20778 /* There is no PLT header for Symbian OS. */
20779 htab
->plt_header_size
= 0;
20780 /* The PLT entries are each one instruction and one word. */
20781 htab
->plt_entry_size
= 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry
);
20782 htab
->symbian_p
= 1;
20783 /* Symbian uses armv5t or above, so use_blx is always true. */
20785 htab
->root
.is_relocatable_executable
= 1;
20790 static const struct bfd_elf_special_section
20791 elf32_arm_symbian_special_sections
[] =
20793 /* In a BPABI executable, the dynamic linking sections do not go in
20794 the loadable read-only segment. The post-linker may wish to
20795 refer to these sections, but they are not part of the final
20797 { STRING_COMMA_LEN (".dynamic"), 0, SHT_DYNAMIC
, 0 },
20798 { STRING_COMMA_LEN (".dynstr"), 0, SHT_STRTAB
, 0 },
20799 { STRING_COMMA_LEN (".dynsym"), 0, SHT_DYNSYM
, 0 },
20800 { STRING_COMMA_LEN (".got"), 0, SHT_PROGBITS
, 0 },
20801 { STRING_COMMA_LEN (".hash"), 0, SHT_HASH
, 0 },
20802 /* These sections do not need to be writable as the SymbianOS
20803 postlinker will arrange things so that no dynamic relocation is
20805 { STRING_COMMA_LEN (".init_array"), 0, SHT_INIT_ARRAY
, SHF_ALLOC
},
20806 { STRING_COMMA_LEN (".fini_array"), 0, SHT_FINI_ARRAY
, SHF_ALLOC
},
20807 { STRING_COMMA_LEN (".preinit_array"), 0, SHT_PREINIT_ARRAY
, SHF_ALLOC
},
20808 { NULL
, 0, 0, 0, 0 }
20812 elf32_arm_symbian_begin_write_processing (bfd
*abfd
,
20813 struct bfd_link_info
*link_info
)
20815 /* BPABI objects are never loaded directly by an OS kernel; they are
20816 processed by a postlinker first, into an OS-specific format. If
20817 the D_PAGED bit is set on the file, BFD will align segments on
20818 page boundaries, so that an OS can directly map the file. With
20819 BPABI objects, that just results in wasted space. In addition,
20820 because we clear the D_PAGED bit, map_sections_to_segments will
20821 recognize that the program headers should not be mapped into any
20822 loadable segment. */
20823 abfd
->flags
&= ~D_PAGED
;
20824 elf32_arm_begin_write_processing (abfd
, link_info
);
20828 elf32_arm_symbian_modify_segment_map (bfd
*abfd
,
20829 struct bfd_link_info
*info
)
20831 struct elf_segment_map
*m
;
20834 /* BPABI shared libraries and executables should have a PT_DYNAMIC
20835 segment. However, because the .dynamic section is not marked
20836 with SEC_LOAD, the generic ELF code will not create such a
20838 dynsec
= bfd_get_section_by_name (abfd
, ".dynamic");
20841 for (m
= elf_seg_map (abfd
); m
!= NULL
; m
= m
->next
)
20842 if (m
->p_type
== PT_DYNAMIC
)
20847 m
= _bfd_elf_make_dynamic_segment (abfd
, dynsec
);
20848 m
->next
= elf_seg_map (abfd
);
20849 elf_seg_map (abfd
) = m
;
20853 /* Also call the generic arm routine. */
20854 return elf32_arm_modify_segment_map (abfd
, info
);
20857 /* Return address for Ith PLT stub in section PLT, for relocation REL
20858 or (bfd_vma) -1 if it should not be included. */
20861 elf32_arm_symbian_plt_sym_val (bfd_vma i
, const asection
*plt
,
20862 const arelent
*rel ATTRIBUTE_UNUSED
)
20864 return plt
->vma
+ 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry
) * i
;
20868 #define elf32_bed elf32_arm_symbian_bed
20870 /* The dynamic sections are not allocated on SymbianOS; the postlinker
20871 will process them and then discard them. */
20872 #undef ELF_DYNAMIC_SEC_FLAGS
20873 #define ELF_DYNAMIC_SEC_FLAGS \
20874 (SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_LINKER_CREATED)
20876 #undef elf_backend_emit_relocs
20878 #undef bfd_elf32_bfd_link_hash_table_create
20879 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_symbian_link_hash_table_create
20880 #undef elf_backend_special_sections
20881 #define elf_backend_special_sections elf32_arm_symbian_special_sections
20882 #undef elf_backend_begin_write_processing
20883 #define elf_backend_begin_write_processing elf32_arm_symbian_begin_write_processing
20884 #undef elf_backend_final_write_processing
20885 #define elf_backend_final_write_processing elf32_arm_final_write_processing
20887 #undef elf_backend_modify_segment_map
20888 #define elf_backend_modify_segment_map elf32_arm_symbian_modify_segment_map
20890 /* There is no .got section for BPABI objects, and hence no header. */
20891 #undef elf_backend_got_header_size
20892 #define elf_backend_got_header_size 0
20894 /* Similarly, there is no .got.plt section. */
20895 #undef elf_backend_want_got_plt
20896 #define elf_backend_want_got_plt 0
20898 #undef elf_backend_plt_sym_val
20899 #define elf_backend_plt_sym_val elf32_arm_symbian_plt_sym_val
20901 #undef elf_backend_may_use_rel_p
20902 #define elf_backend_may_use_rel_p 1
20903 #undef elf_backend_may_use_rela_p
20904 #define elf_backend_may_use_rela_p 0
20905 #undef elf_backend_default_use_rela_p
20906 #define elf_backend_default_use_rela_p 0
20907 #undef elf_backend_want_plt_sym
20908 #define elf_backend_want_plt_sym 0
20909 #undef elf_backend_dtrel_excludes_plt
20910 #define elf_backend_dtrel_excludes_plt 0
20911 #undef ELF_MAXPAGESIZE
20912 #define ELF_MAXPAGESIZE 0x8000
20914 #include "elf32-target.h"