1 // arm.cc -- arm target support for gold.
3 // Copyright 2009 Free Software Foundation, Inc.
4 // Written by Doug Kwan <dougkwan@google.com> based on the i386 code
5 // by Ian Lance Taylor <iant@google.com>.
6 // This file also contains borrowed and adapted code from
9 // This file is part of gold.
11 // This program is free software; you can redistribute it and/or modify
12 // it under the terms of the GNU General Public License as published by
13 // the Free Software Foundation; either version 3 of the License, or
14 // (at your option) any later version.
16 // This program is distributed in the hope that it will be useful,
17 // but WITHOUT ANY WARRANTY; without even the implied warranty of
18 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 // GNU General Public License for more details.
21 // You should have received a copy of the GNU General Public License
22 // along with this program; if not, write to the Free Software
23 // Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
24 // MA 02110-1301, USA.
37 #include "parameters.h"
44 #include "copy-relocs.h"
46 #include "target-reloc.h"
47 #include "target-select.h"
51 #include "attributes.h"
// Forward declarations of the ARM-specific classes defined later in
// this file.
// NOTE(review): several "template<bool big_endian>" headers below have
// no class declaration following them in this chunk -- lines appear to
// be missing; confirm against the complete upstream arm.cc.
58 template<bool big_endian
>
59 class Output_data_plt_arm
;
61 template<bool big_endian
>
64 template<bool big_endian
>
65 class Arm_input_section
;
67 template<bool big_endian
>
68 class Arm_output_section
;
70 template<bool big_endian
>
73 template<bool big_endian
>
77 typedef elfcpp::Elf_types
<32>::Elf_Addr Arm_address
;
// Maximum forward and backward branch offsets, in bytes, for ARM,
// THUMB and THUMB2 branch instructions.  The ARM B/BL encoding holds a
// signed 24-bit word offset shifted left by 2; Thumb B(L) reaches
// roughly +/-2^22 bytes and Thumb-2 roughly +/-2^24 bytes.  The +8/+4
// terms account for the PC reading ahead of the branch instruction
// (8 bytes in ARM state, 4 bytes in Thumb state).
const int32_t ARM_MAX_FWD_BRANCH_OFFSET = ((((1 << 23) - 1) << 2) + 8);
const int32_t ARM_MAX_BWD_BRANCH_OFFSET = ((-((1 << 23) << 2)) + 8);
const int32_t THM_MAX_FWD_BRANCH_OFFSET = ((1 << 22) - 2 + 4);
const int32_t THM_MAX_BWD_BRANCH_OFFSET = (-(1 << 22) + 4);
const int32_t THM2_MAX_FWD_BRANCH_OFFSET = (((1 << 24) - 2) + 4);
const int32_t THM2_MAX_BWD_BRANCH_OFFSET = (-(1 << 24) + 4);
87 // The arm target class.
89 // This is a very simple port of gold for ARM-EABI. It is intended for
90 // supporting Android only for the time being. Only these relocation types
119 // R_ARM_THM_MOVW_ABS_NC
120 // R_ARM_THM_MOVT_ABS
121 // R_ARM_MOVW_PREL_NC
123 // R_ARM_THM_MOVW_PREL_NC
124 // R_ARM_THM_MOVT_PREL
131 // - Support more relocation types as needed.
132 // - Make PLTs more flexible for different architecture features like
134 // There are probably a lot more.
136 // Instruction template class. This class is similar to the insn_sequence
137 // struct in bfd/elf32-arm.c.
// An Insn_template records the bits of one stub instruction, its
// encoding kind (ARM / Thumb16 / Thumb32 / data word), and an optional
// relocation type plus addend to be applied when the stub is written.
142 // Types of instruction templates.
146 // THUMB16_SPECIAL_TYPE is used by sub-classes of Stub for instruction
147 // templates with class-specific semantics. Currently this is used
148 // only by the Cortex_a8_stub class for handling condition codes in
149 // conditional branches.
150 THUMB16_SPECIAL_TYPE
,
156 // Factory methods to create instruction templates in different formats.
// A plain 16-bit Thumb instruction with no relocation.
158 static const Insn_template
159 thumb16_insn(uint32_t data
)
160 { return Insn_template(data
, THUMB16_TYPE
, elfcpp::R_ARM_NONE
, 0); }
162 // A Thumb conditional branch, in which the proper condition is inserted
163 // when we build the stub.
164 static const Insn_template
165 thumb16_bcond_insn(uint32_t data
)
166 { return Insn_template(data
, THUMB16_SPECIAL_TYPE
, elfcpp::R_ARM_NONE
, 1); }
// A plain 32-bit Thumb-2 instruction with no relocation.
168 static const Insn_template
169 thumb32_insn(uint32_t data
)
170 { return Insn_template(data
, THUMB32_TYPE
, elfcpp::R_ARM_NONE
, 0); }
// A 32-bit Thumb-2 branch carrying an R_ARM_THM_JUMP24 relocation.
172 static const Insn_template
173 thumb32_b_insn(uint32_t data
, int reloc_addend
)
175 return Insn_template(data
, THUMB32_TYPE
, elfcpp::R_ARM_THM_JUMP24
,
// A plain 32-bit ARM instruction with no relocation.
179 static const Insn_template
180 arm_insn(uint32_t data
)
181 { return Insn_template(data
, ARM_TYPE
, elfcpp::R_ARM_NONE
, 0); }
// A 32-bit ARM branch carrying an R_ARM_JUMP24 relocation.
183 static const Insn_template
184 arm_rel_insn(unsigned data
, int reloc_addend
)
185 { return Insn_template(data
, ARM_TYPE
, elfcpp::R_ARM_JUMP24
, reloc_addend
); }
// A literal data word carrying an arbitrary relocation R_TYPE.
187 static const Insn_template
188 data_word(unsigned data
, unsigned int r_type
, int reloc_addend
)
189 { return Insn_template(data
, DATA_TYPE
, r_type
, reloc_addend
); }
191 // Accessors. This class is used for read-only objects so no modifiers
196 { return this->data_
; }
198 // Return the instruction sequence type of this.
201 { return this->type_
; }
203 // Return the ARM relocation type of this.
206 { return this->r_type_
; }
210 { return this->reloc_addend_
; }
212 // Return size of instruction template in bytes.
216 // Return byte-alignment of instruction template.
221 // We make the constructor private to ensure that only the factory
224 Insn_template(unsigned data
, Type type
, unsigned int r_type
, int reloc_addend
)
225 : data_(data
), type_(type
), r_type_(r_type
), reloc_addend_(reloc_addend
)
228 // Instruction specific data. This is used to store information like
229 // some of the instruction bits.
231 // Instruction template type.
233 // Relocation type if there is a relocation or R_ARM_NONE otherwise.
234 unsigned int r_type_
;
235 // Relocation addend.
236 int32_t reloc_addend_
;
// NOTE(review): the "#define DEF_STUBS" header these backslash-continued
// entries belong to is not visible in this chunk; do not insert anything
// between the continuation lines below.
239 // Macro for generating code to stub types. One entry per long/short
243 DEF_STUB(long_branch_any_any) \
244 DEF_STUB(long_branch_v4t_arm_thumb) \
245 DEF_STUB(long_branch_thumb_only) \
246 DEF_STUB(long_branch_v4t_thumb_thumb) \
247 DEF_STUB(long_branch_v4t_thumb_arm) \
248 DEF_STUB(short_branch_v4t_thumb_arm) \
249 DEF_STUB(long_branch_any_arm_pic) \
250 DEF_STUB(long_branch_any_thumb_pic) \
251 DEF_STUB(long_branch_v4t_thumb_thumb_pic) \
252 DEF_STUB(long_branch_v4t_arm_thumb_pic) \
253 DEF_STUB(long_branch_v4t_thumb_arm_pic) \
254 DEF_STUB(long_branch_thumb_only_pic) \
255 DEF_STUB(a8_veneer_b_cond) \
256 DEF_STUB(a8_veneer_b) \
257 DEF_STUB(a8_veneer_bl) \
258 DEF_STUB(a8_veneer_blx) \
259 DEF_STUB(v4_veneer_bx)
// DEF_STUB expands each stub name into an "arm_stub_<name>" enumerator;
// the aliases below mark the first/last members of the reloc-stub and
// Cortex-A8 sub-ranges and the overall last stub type.
263 #define DEF_STUB(x) arm_stub_##x,
269 // First reloc stub type.
270 arm_stub_reloc_first
= arm_stub_long_branch_any_any
,
271 // Last reloc stub type.
272 arm_stub_reloc_last
= arm_stub_long_branch_thumb_only_pic
,
274 // First Cortex-A8 stub type.
275 arm_stub_cortex_a8_first
= arm_stub_a8_veneer_b_cond
,
276 // Last Cortex-A8 stub type.
277 arm_stub_cortex_a8_last
= arm_stub_a8_veneer_blx
,
280 arm_stub_type_last
= arm_stub_v4_veneer_bx
284 // Stub template class. Templates are meant to be read-only objects.
285 // A stub template for a stub type contains all read-only attributes
286 // common to all stubs of the same type.
// A Stub_template bundles an Insn_template array with pre-computed
// size, alignment, entry mode and the positions of any relocations.
291 Stub_template(Stub_type
, const Insn_template
*, size_t);
299 { return this->type_
; }
301 // Return an array of instruction templates.
304 { return this->insns_
; }
306 // Return size of template in number of instructions.
309 { return this->insn_count_
; }
311 // Return size of template in bytes.
314 { return this->size_
; }
316 // Return alignment of the stub template.
319 { return this->alignment_
; }
321 // Return whether entry point is in thumb mode.
323 entry_in_thumb_mode() const
324 { return this->entry_in_thumb_mode_
; }
326 // Return number of relocations in this template.
329 { return this->relocs_
.size(); }
331 // Return index of the I-th instruction with relocation.
333 reloc_insn_index(size_t i
) const
335 gold_assert(i
< this->relocs_
.size());
336 return this->relocs_
[i
].first
;
339 // Return the offset of the I-th instruction with relocation from the
340 // beginning of the stub.
342 reloc_offset(size_t i
) const
344 gold_assert(i
< this->relocs_
.size());
345 return this->relocs_
[i
].second
;
349 // This contains information about an instruction template with a relocation
350 // and its offset from start of stub.
351 typedef std::pair
<size_t, section_size_type
> Reloc
;
353 // A Stub_template may not be copied. We want to share templates as much
355 Stub_template(const Stub_template
&);
356 Stub_template
& operator=(const Stub_template
&);
360 // Points to an array of Insn_templates.
361 const Insn_template
* insns_
;
362 // Number of Insn_templates in insns_[].
364 // Size of templated instructions in bytes.
366 // Alignment of templated instructions.
368 // Flag to indicate if entry is in thumb mode.
369 bool entry_in_thumb_mode_
;
370 // A table of reloc instruction indices and offsets. We can find these by
371 // looking at the instruction templates but we pre-compute and then stash
372 // them here for speed.
373 std::vector
<Reloc
> relocs_
;
377 // A class for code stubs. This is a base class for different type of
378 // stubs used in the ARM target.
// A Stub holds a pointer to its shared, read-only Stub_template and its
// offset within the owning stub table; subclasses supply the relocation
// target via do_reloc_target().
384 static const section_offset_type invalid_offset
=
385 static_cast<section_offset_type
>(-1);
388 Stub(const Stub_template
* stub_template
)
389 : stub_template_(stub_template
), offset_(invalid_offset
)
396 // Return the stub template.
398 stub_template() const
399 { return this->stub_template_
; }
401 // Return offset of code stub from beginning of its containing stub table.
405 gold_assert(this->offset_
!= invalid_offset
);
406 return this->offset_
;
409 // Set offset of code stub from beginning of its containing stub table.
411 set_offset(section_offset_type offset
)
412 { this->offset_
= offset
; }
414 // Return the relocation target address of the i-th relocation in the
415 // stub. This must be defined in a child class.
417 reloc_target(size_t i
)
418 { return this->do_reloc_target(i
); }
420 // Write a stub at output VIEW. BIG_ENDIAN selects how a stub is written.
422 write(unsigned char* view
, section_size_type view_size
, bool big_endian
)
423 { this->do_write(view
, view_size
, big_endian
); }
425 // Return the instruction for THUMB16_SPECIAL_TYPE instruction template
426 // for the i-th instruction.
428 thumb16_special(size_t i
)
429 { return this->do_thumb16_special(i
); }
432 // This must be defined in the child class.
434 do_reloc_target(size_t) = 0;
436 // This may be overridden in the child class.
438 do_write(unsigned char* view
, section_size_type view_size
, bool big_endian
)
441 this->do_fixed_endian_write
<true>(view
, view_size
);
443 this->do_fixed_endian_write
<false>(view
, view_size
);
446 // This must be overridden if a child class uses the THUMB16_SPECIAL_TYPE
447 // instruction template.
449 do_thumb16_special(size_t)
450 { gold_unreachable(); }
453 // A template to implement do_write.
454 template<bool big_endian
>
456 do_fixed_endian_write(unsigned char*, section_size_type
);
// The shared, read-only template describing this stub's instructions.
459 const Stub_template
* stub_template_
;
460 // Offset within the section containing this stub.
461 section_offset_type offset_
;
464 // Reloc stub class. These are stubs we use to fix up relocation because
465 // of limited branch ranges.
// A Reloc_stub is identified by a Key (stub type, symbol-or-relobj/index,
// addend) and records the destination address the stub branches to.
467 class Reloc_stub
: public Stub
470 static const unsigned int invalid_index
= static_cast<unsigned int>(-1);
471 // We assume we never jump to this address.
472 static const Arm_address invalid_address
= static_cast<Arm_address
>(-1);
474 // Return destination address.
476 destination_address() const
478 gold_assert(this->destination_address_
!= this->invalid_address
);
479 return this->destination_address_
;
482 // Set destination address.
484 set_destination_address(Arm_address address
)
486 gold_assert(address
!= this->invalid_address
);
487 this->destination_address_
= address
;
490 // Reset destination address.
492 reset_destination_address()
493 { this->destination_address_
= this->invalid_address
; }
495 // Determine stub type for a branch of a relocation of R_TYPE going
496 // from BRANCH_ADDRESS to BRANCH_TARGET. If TARGET_IS_THUMB is set,
497 // the branch target is a thumb instruction. TARGET is used for look
498 // up ARM-specific linker settings.
500 stub_type_for_reloc(unsigned int r_type
, Arm_address branch_address
,
501 Arm_address branch_target
, bool target_is_thumb
);
503 // Reloc_stub key. A key is logically a triplet of a stub type, a symbol
504 // and an addend. Since we treat global and local symbol differently, we
505 // use a Symbol object for a global symbol and an object-index pair for
510 // If SYMBOL is not null, this is a global symbol, we ignore RELOBJ and
511 // R_SYM. Otherwise, this is a local symbol and RELOBJ must be non-NULL
512 // and R_SYM must not be invalid_index.
513 Key(Stub_type stub_type
, const Symbol
* symbol
, const Relobj
* relobj
,
514 unsigned int r_sym
, int32_t addend
)
515 : stub_type_(stub_type
), addend_(addend
)
519 this->r_sym_
= Reloc_stub::invalid_index
;
520 this->u_
.symbol
= symbol
;
524 gold_assert(relobj
!= NULL
&& r_sym
!= invalid_index
);
525 this->r_sym_
= r_sym
;
526 this->u_
.relobj
= relobj
;
533 // Accessors: Keys are meant to be read-only object so no modifiers are
539 { return this->stub_type_
; }
541 // Return the local symbol index or invalid_index.
544 { return this->r_sym_
; }
546 // Return the symbol if there is one.
549 { return this->r_sym_
== invalid_index
? this->u_
.symbol
: NULL
; }
551 // Return the relobj if there is one.
554 { return this->r_sym_
!= invalid_index
? this->u_
.relobj
: NULL
; }
556 // Whether this equals to another key k.
558 eq(const Key
& k
) const
560 return ((this->stub_type_
== k
.stub_type_
)
561 && (this->r_sym_
== k
.r_sym_
)
562 && ((this->r_sym_
!= Reloc_stub::invalid_index
)
563 ? (this->u_
.relobj
== k
.u_
.relobj
)
564 : (this->u_
.symbol
== k
.u_
.symbol
))
565 && (this->addend_
== k
.addend_
));
568 // Return a hash value.
572 return (this->stub_type_
574 ^ gold::string_hash
<char>(
575 (this->r_sym_
!= Reloc_stub::invalid_index
)
576 ? this->u_
.relobj
->name().c_str()
577 : this->u_
.symbol
->name())
581 // Functors for STL associative containers.
585 operator()(const Key
& k
) const
586 { return k
.hash_value(); }
592 operator()(const Key
& k1
, const Key
& k2
) const
593 { return k1
.eq(k2
); }
596 // Name of key. This is mainly for debugging.
602 Stub_type stub_type_
;
603 // If this is a local symbol, this is the index in the defining object.
604 // Otherwise, it is invalid_index for a global symbol.
606 // If r_sym_ is invalid index. This points to a global symbol.
607 // Otherwise, this points a relobj. We used the unsized and target
608 // independent Symbol and Relobj classes instead of Sized_symbol<32> and
609 // Arm_relobj. This is done to avoid making the stub class a template
610 // as most of the stub machinery is endianity-neutral. However, it
611 // may require a bit of casting done by users of this class.
614 const Symbol
* symbol
;
615 const Relobj
* relobj
;
617 // Addend associated with a reloc.
622 // Reloc_stubs are created via a stub factory. So these are protected.
623 Reloc_stub(const Stub_template
* stub_template
)
624 : Stub(stub_template
), destination_address_(invalid_address
)
630 friend class Stub_factory
;
632 // Return the relocation target address of the i-th relocation in the
635 do_reloc_target(size_t i
)
637 // All reloc stubs have only one relocation.
639 return this->destination_address_
;
643 // Address of destination.
644 Arm_address destination_address_
;
647 // Cortex-A8 stub class. We need a Cortex-A8 stub to redirect any 32-bit
648 // THUMB branch that meets the following conditions:
650 // 1. The branch straddles across a page boundary. i.e. lower 12-bit of
651 // branch address is 0xffe.
652 // 2. The branch target address is in the same page as the first word of the
654 // 3. The branch follows a 32-bit instruction which is not a branch.
656 // To do the fix up, we need to store the address of the branch instruction
657 // and its target at least. We also need to store the original branch
658 // instruction bits for the condition code in a conditional branch. The
659 // condition code is used in a special instruction template. We also want
660 // to identify input sections needing Cortex-A8 workaround quickly. We store
661 // extra information about object and section index of the code section
662 // containing a branch being fixed up. The information is used to mark
663 // the code section when we finalize the Cortex-A8 stubs.
666 class Cortex_a8_stub
: public Stub
672 // Return the object of the code section containing the branch being fixed
676 { return this->relobj_
; }
678 // Return the section index of the code section containing the branch being
682 { return this->shndx_
; }
684 // Return the source address of stub. This is the address of the original
685 // branch instruction. LSB is always set to 1 to indicate that it is a THUMB
688 source_address() const
689 { return this->source_address_
; }
691 // Return the destination address of the stub. This is the branch taken
692 // address of the original branch instruction. LSB is 1 if it is a THUMB
693 // instruction address.
695 destination_address() const
696 { return this->destination_address_
; }
698 // Return the instruction being fixed up.
700 original_insn() const
701 { return this->original_insn_
; }
704 // Cortex_a8_stubs are created via a stub factory. So these are protected.
705 Cortex_a8_stub(const Stub_template
* stub_template
, Relobj
* relobj
,
706 unsigned int shndx
, Arm_address source_address
,
707 Arm_address destination_address
, uint32_t original_insn
)
708 : Stub(stub_template
), relobj_(relobj
), shndx_(shndx
),
709 source_address_(source_address
| 1U),
710 destination_address_(destination_address
),
711 original_insn_(original_insn
)
714 friend class Stub_factory
;
716 // Return the relocation target address of the i-th relocation in the
719 do_reloc_target(size_t i
)
721 if (this->stub_template()->type() == arm_stub_a8_veneer_b_cond
)
723 // The conditional branch veneer has two relocations.
725 return i
== 0 ? this->source_address_
+ 4 : this->destination_address_
;
729 // All other Cortex-A8 stubs have only one relocation.
731 return this->destination_address_
;
735 // Return an instruction for the THUMB16_SPECIAL_TYPE instruction template.
737 do_thumb16_special(size_t);
740 // Object of the code section containing the branch being fixed up.
742 // Section index of the code section containing the branch being fixed up.
744 // Source address of original branch.
745 Arm_address source_address_
;
746 // Destination address of the original branch.
747 Arm_address destination_address_
;
748 // Original branch instruction. This is needed for copying the condition
749 // code from a condition branch to its stub.
750 uint32_t original_insn_
;
753 // ARMv4 BX Rx branch relocation stub class.
// An Arm_v4bx_stub is parameterized only by the register operand of the
// original "BX Rx"; the register number is patched into the template
// instructions when the stub is written out.
754 class Arm_v4bx_stub
: public Stub
760 // Return the associated register.
763 { return this->reg_
; }
766 // Arm V4BX stubs are created via a stub factory. So these are protected.
767 Arm_v4bx_stub(const Stub_template
* stub_template
, const uint32_t reg
)
768 : Stub(stub_template
), reg_(reg
)
771 friend class Stub_factory
;
773 // Return the relocation target address of the i-th relocation in the
776 do_reloc_target(size_t)
777 { gold_unreachable(); }
779 // This may be overridden in the child class.
781 do_write(unsigned char* view
, section_size_type view_size
, bool big_endian
)
784 this->do_fixed_endian_v4bx_write
<true>(view
, view_size
);
786 this->do_fixed_endian_v4bx_write
<false>(view
, view_size
);
790 // A template to implement do_write.
791 template<bool big_endian
>
793 do_fixed_endian_v4bx_write(unsigned char* view
, section_size_type
)
795 const Insn_template
* insns
= this->stub_template()->insns();
796 elfcpp::Swap
<32, big_endian
>::writeval(view
,
798 + (this->reg_
<< 16)));
799 view
+= insns
[0].size();
800 elfcpp::Swap
<32, big_endian
>::writeval(view
,
801 (insns
[1].data() + this->reg_
));
802 view
+= insns
[1].size();
803 elfcpp::Swap
<32, big_endian
>::writeval(view
,
804 (insns
[2].data() + this->reg_
));
807 // A register index (r0-r14), which is associated with the stub.
811 // Stub factory class.
// A singleton that owns one immutable Stub_template per stub type and
// hands out newly allocated stubs built from those templates.
816 // Return the unique instance of this class.
817 static const Stub_factory
&
820 static Stub_factory singleton
;
824 // Make a relocation stub.
826 make_reloc_stub(Stub_type stub_type
) const
828 gold_assert(stub_type
>= arm_stub_reloc_first
829 && stub_type
<= arm_stub_reloc_last
);
830 return new Reloc_stub(this->stub_templates_
[stub_type
]);
833 // Make a Cortex-A8 stub.
835 make_cortex_a8_stub(Stub_type stub_type
, Relobj
* relobj
, unsigned int shndx
,
836 Arm_address source
, Arm_address destination
,
837 uint32_t original_insn
) const
839 gold_assert(stub_type
>= arm_stub_cortex_a8_first
840 && stub_type
<= arm_stub_cortex_a8_last
);
841 return new Cortex_a8_stub(this->stub_templates_
[stub_type
], relobj
, shndx
,
842 source
, destination
, original_insn
);
845 // Make an ARM V4BX relocation stub.
846 // This method creates a stub from the arm_stub_v4_veneer_bx template only.
848 make_arm_v4bx_stub(uint32_t reg
) const
850 gold_assert(reg
< 0xf);
851 return new Arm_v4bx_stub(this->stub_templates_
[arm_stub_v4_veneer_bx
],
856 // Constructor and destructor are protected since we only return a single
857 // instance created in Stub_factory::get_instance().
861 // A Stub_factory may not be copied since it is a singleton.
862 Stub_factory(const Stub_factory
&);
863 Stub_factory
& operator=(Stub_factory
&);
865 // Stub templates. These are initialized in the constructor.
866 const Stub_template
* stub_templates_
[arm_stub_type_last
+1];
869 // A class to hold stubs for the ARM target.
// A Stub_table is an Output_data owned by an Arm_input_section; it holds
// reloc stubs (keyed), Cortex-A8 stubs (by branch address) and ARM V4BX
// stubs (by register), and tracks its size/alignment across relaxation
// passes.
871 template<bool big_endian
>
872 class Stub_table
: public Output_data
875 Stub_table(Arm_input_section
<big_endian
>* owner
)
876 : Output_data(), owner_(owner
), reloc_stubs_(), cortex_a8_stubs_(),
877 arm_v4bx_stubs_(0xf), prev_data_size_(0), prev_addralign_(1)
883 // Owner of this stub table.
884 Arm_input_section
<big_endian
>*
886 { return this->owner_
; }
888 // Whether this stub table is empty.
892 return (this->reloc_stubs_
.empty()
893 && this->cortex_a8_stubs_
.empty()
894 && this->arm_v4bx_stubs_
.empty());
897 // Return the current data size.
899 current_data_size() const
900 { return this->current_data_size_for_child(); }
902 // Add a STUB using KEY. The caller is responsible for not adding
903 // a STUB if one with the same key has already been added.
905 add_reloc_stub(Reloc_stub
* stub
, const Reloc_stub::Key
& key
)
907 const Stub_template
* stub_template
= stub
->stub_template();
908 gold_assert(stub_template
->type() == key
.stub_type());
909 this->reloc_stubs_
[key
] = stub
;
912 // Add a Cortex-A8 STUB that fixes up a THUMB branch at ADDRESS.
913 // The caller is responsible for not adding a STUB if one with the same
914 // address has already been added.
916 add_cortex_a8_stub(Arm_address address
, Cortex_a8_stub
* stub
)
918 std::pair
<Arm_address
, Cortex_a8_stub
*> value(address
, stub
);
919 this->cortex_a8_stubs_
.insert(value
);
922 // Add an ARM V4BX relocation stub. A register index will be retrieved
925 add_arm_v4bx_stub(Arm_v4bx_stub
* stub
)
927 gold_assert(stub
!= NULL
&& this->arm_v4bx_stubs_
[stub
->reg()] == NULL
);
928 this->arm_v4bx_stubs_
[stub
->reg()] = stub
;
931 // Remove all Cortex-A8 stubs.
933 remove_all_cortex_a8_stubs();
935 // Look up a relocation stub using KEY. Return NULL if there is none.
937 find_reloc_stub(const Reloc_stub::Key
& key
) const
939 typename
Reloc_stub_map::const_iterator p
= this->reloc_stubs_
.find(key
);
940 return (p
!= this->reloc_stubs_
.end()) ? p
->second
: NULL
;
943 // Look up an arm v4bx relocation stub using the register index.
944 // Return NULL if there is none.
946 find_arm_v4bx_stub(const uint32_t reg
) const
948 gold_assert(reg
< 0xf);
949 return this->arm_v4bx_stubs_
[reg
];
952 // Relocate stubs in this stub table.
954 relocate_stubs(const Relocate_info
<32, big_endian
>*,
955 Target_arm
<big_endian
>*, Output_section
*,
956 unsigned char*, Arm_address
, section_size_type
);
958 // Update data size and alignment at the end of a relaxation pass. Return
959 // true if either data size or alignment is different from that of the
960 // previous relaxation pass.
962 update_data_size_and_addralign();
964 // Finalize stubs. Set the offsets of all stubs and mark input sections
965 // needing the Cortex-A8 workaround.
969 // Apply Cortex-A8 workaround to an address range.
971 apply_cortex_a8_workaround_to_address_range(Target_arm
<big_endian
>*,
972 unsigned char*, Arm_address
,
976 // Write out section contents.
978 do_write(Output_file
*);
980 // Return the required alignment.
983 { return this->prev_addralign_
; }
985 // Reset address and file offset.
987 do_reset_address_and_file_offset()
988 { this->set_current_data_size_for_child(this->prev_data_size_
); }
990 // Set final data size.
992 set_final_data_size()
993 { this->set_data_size(this->current_data_size()); }
996 // Relocate one stub.
998 relocate_stub(Stub
*, const Relocate_info
<32, big_endian
>*,
999 Target_arm
<big_endian
>*, Output_section
*,
1000 unsigned char*, Arm_address
, section_size_type
);
1002 // Unordered map of relocation stubs.
1004 Unordered_map
<Reloc_stub::Key
, Reloc_stub
*, Reloc_stub::Key::hash
,
1005 Reloc_stub::Key::equal_to
>
1008 // List of Cortex-A8 stubs ordered by addresses of branches being
1009 // fixed up in output.
1010 typedef std::map
<Arm_address
, Cortex_a8_stub
*> Cortex_a8_stub_list
;
1011 // List of Arm V4BX relocation stubs ordered by associated registers.
1012 typedef std::vector
<Arm_v4bx_stub
*> Arm_v4bx_stub_list
;
1014 // Owner of this stub table.
1015 Arm_input_section
<big_endian
>* owner_
;
1016 // The relocation stubs.
1017 Reloc_stub_map reloc_stubs_
;
1018 // The cortex_a8_stubs.
1019 Cortex_a8_stub_list cortex_a8_stubs_
;
1020 // The Arm V4BX relocation stubs.
1021 Arm_v4bx_stub_list arm_v4bx_stubs_
;
1022 // Data size of this in the previous relaxation pass.
1023 off_t prev_data_size_
;
1024 // Address alignment of this in the previous relaxation pass.
1025 uint64_t prev_addralign_
;
1028 // A class to wrap an ordinary input section containing executable code.
// An Arm_input_section is a relaxed input section that may own a
// Stub_table appended after the original section contents; its
// alignment is the max of the original and the stub table's.
1030 template<bool big_endian
>
1031 class Arm_input_section
: public Output_relaxed_input_section
1034 Arm_input_section(Relobj
* relobj
, unsigned int shndx
)
1035 : Output_relaxed_input_section(relobj
, shndx
, 1),
1036 original_addralign_(1), original_size_(0), stub_table_(NULL
)
1039 ~Arm_input_section()
1046 // Whether this is a stub table owner.
1048 is_stub_table_owner() const
1049 { return this->stub_table_
!= NULL
&& this->stub_table_
->owner() == this; }
1051 // Return the stub table.
1052 Stub_table
<big_endian
>*
1054 { return this->stub_table_
; }
1056 // Set the stub_table.
1058 set_stub_table(Stub_table
<big_endian
>* stub_table
)
1059 { this->stub_table_
= stub_table
; }
1061 // Downcast a base pointer to an Arm_input_section pointer. This is
1062 // not type-safe but we only use Arm_input_section not the base class.
1063 static Arm_input_section
<big_endian
>*
1064 as_arm_input_section(Output_relaxed_input_section
* poris
)
1065 { return static_cast<Arm_input_section
<big_endian
>*>(poris
); }
1068 // Write data to output file.
1070 do_write(Output_file
*);
1072 // Return required alignment of this.
1074 do_addralign() const
1076 if (this->is_stub_table_owner())
1077 return std::max(this->stub_table_
->addralign(),
1078 this->original_addralign_
);
1080 return this->original_addralign_
;
1083 // Finalize data size.
1085 set_final_data_size();
1087 // Reset address and file offset.
1089 do_reset_address_and_file_offset();
1093 do_output_offset(const Relobj
* object
, unsigned int shndx
,
1094 section_offset_type offset
,
1095 section_offset_type
* poutput
) const
1097 if ((object
== this->relobj())
1098 && (shndx
== this->shndx())
1100 && (convert_types
<uint64_t, section_offset_type
>(offset
)
1101 <= this->original_size_
))
1111 // Copying is not allowed.
1112 Arm_input_section(const Arm_input_section
&);
1113 Arm_input_section
& operator=(const Arm_input_section
&);
1115 // Address alignment of the original input section.
1116 uint64_t original_addralign_
;
1117 // Section size of the original input section.
1118 uint64_t original_size_
;
1120 Stub_table
<big_endian
>* stub_table_
;
1123 // Arm output section class. This is defined mainly to add a number of
1124 // stub generation methods.
1126 template<bool big_endian
>
1127 class Arm_output_section
: public Output_section
1130 Arm_output_section(const char* name
, elfcpp::Elf_Word type
,
1131 elfcpp::Elf_Xword flags
)
1132 : Output_section(name
, type
, flags
)
1135 ~Arm_output_section()
1138 // Group input sections for stub generation.
1140 group_sections(section_size_type
, bool, Target_arm
<big_endian
>*);
1142 // Downcast a base pointer to an Arm_output_section pointer. This is
1143 // not type-safe but we only use Arm_output_section not the base class.
1144 static Arm_output_section
<big_endian
>*
1145 as_arm_output_section(Output_section
* os
)
1146 { return static_cast<Arm_output_section
<big_endian
>*>(os
); }
1150 typedef Output_section::Input_section Input_section
;
1151 typedef Output_section::Input_section_list Input_section_list
;
1153 // Create a stub group.
1154 void create_stub_group(Input_section_list::const_iterator
,
1155 Input_section_list::const_iterator
,
1156 Input_section_list::const_iterator
,
1157 Target_arm
<big_endian
>*,
1158 std::vector
<Output_relaxed_input_section
*>*);
1161 // Arm_relobj class.
// ARM-specific relocatable object: tracks per-section stub tables,
// which local symbols are Thumb functions, mapping symbols, the
// .ARM.attributes data, and which sections carry Cortex-A8 fixes.
1163 template<bool big_endian
>
1164 class Arm_relobj
: public Sized_relobj
<32, big_endian
>
1167 static const Arm_address invalid_address
= static_cast<Arm_address
>(-1);
1169 Arm_relobj(const std::string
& name
, Input_file
* input_file
, off_t offset
,
1170 const typename
elfcpp::Ehdr
<32, big_endian
>& ehdr
)
1171 : Sized_relobj
<32, big_endian
>(name
, input_file
, offset
, ehdr
),
1172 stub_tables_(), local_symbol_is_thumb_function_(),
1173 attributes_section_data_(NULL
), mapping_symbols_info_(),
1174 section_has_cortex_a8_workaround_(NULL
)
1178 { delete this->attributes_section_data_
; }
1180 // Return the stub table of the SHNDX-th section if there is one.
1181 Stub_table
<big_endian
>*
1182 stub_table(unsigned int shndx
) const
1184 gold_assert(shndx
< this->stub_tables_
.size());
1185 return this->stub_tables_
[shndx
];
1188 // Set STUB_TABLE to be the stub_table of the SHNDX-th section.
1190 set_stub_table(unsigned int shndx
, Stub_table
<big_endian
>* stub_table
)
1192 gold_assert(shndx
< this->stub_tables_
.size());
1193 this->stub_tables_
[shndx
] = stub_table
;
1196 // Whether a local symbol is a THUMB function. R_SYM is the symbol table
1197 // index. This is only valid after do_count_local_symbol is called.
1199 local_symbol_is_thumb_function(unsigned int r_sym
) const
1201 gold_assert(r_sym
< this->local_symbol_is_thumb_function_
.size());
1202 return this->local_symbol_is_thumb_function_
[r_sym
];
1205 // Scan all relocation sections for stub generation.
1207 scan_sections_for_stubs(Target_arm
<big_endian
>*, const Symbol_table
*,
1210 // Convert regular input section with index SHNDX to a relaxed section.
1212 convert_input_section_to_relaxed_section(unsigned shndx
)
1214 // The stubs have relocations and we need to process them after writing
1215 // out the stubs. So relocation now must follow section write.
1216 this->invalidate_section_offset(shndx
);
1217 this->set_relocs_must_follow_section_writes();
1220 // Downcast a base pointer to an Arm_relobj pointer. This is
1221 // not type-safe but we only use Arm_relobj not the base class.
1222 static Arm_relobj
<big_endian
>*
1223 as_arm_relobj(Relobj
* relobj
)
1224 { return static_cast<Arm_relobj
<big_endian
>*>(relobj
); }
1226 // Processor-specific flags in ELF file header. This is valid only after
1229 processor_specific_flags() const
1230 { return this->processor_specific_flags_
; }
1232 // Attribute section data. This is the contents of the .ARM.attribute section
1234 const Attributes_section_data
*
1235 attributes_section_data() const
1236 { return this->attributes_section_data_
; }
1238 // Mapping symbol location.
1239 typedef std::pair
<unsigned int, Arm_address
> Mapping_symbol_position
;
1241 // Functor for STL container.
1242 struct Mapping_symbol_position_less
1245 operator()(const Mapping_symbol_position
& p1
,
1246 const Mapping_symbol_position
& p2
) const
1248 return (p1
.first
< p2
.first
1249 || (p1
.first
== p2
.first
&& p1
.second
< p2
.second
));
1253 // We only care about the first character of a mapping symbol, so
1254 // we only store that instead of the whole symbol name.
1255 typedef std::map
<Mapping_symbol_position
, char,
1256 Mapping_symbol_position_less
> Mapping_symbols_info
;
1258 // Whether a section contains any Cortex-A8 workaround.
1260 section_has_cortex_a8_workaround(unsigned int shndx
) const
1262 return (this->section_has_cortex_a8_workaround_
!= NULL
1263 && (*this->section_has_cortex_a8_workaround_
)[shndx
]);
1266 // Mark a section that has Cortex-A8 workaround.
1268 mark_section_for_cortex_a8_workaround(unsigned int shndx
)
1270 if (this->section_has_cortex_a8_workaround_
== NULL
)
1271 this->section_has_cortex_a8_workaround_
=
1272 new std::vector
<bool>(this->shnum(), false);
1273 (*this->section_has_cortex_a8_workaround_
)[shndx
] = true;
1277 // Post constructor setup.
1281 // Call parent's setup method.
1282 Sized_relobj
<32, big_endian
>::do_setup();
1284 // Initialize look-up tables.
1285 Stub_table_list
empty_stub_table_list(this->shnum(), NULL
);
1286 this->stub_tables_
.swap(empty_stub_table_list
);
1289 // Count the local symbols.
1291 do_count_local_symbols(Stringpool_template
<char>*,
1292 Stringpool_template
<char>*);
// NOTE(review): the last parameter below is spelled "pivews" -- likely
// a typo for "pviews"; renaming it is harmless but confirm upstream.
1295 do_relocate_sections(const Symbol_table
* symtab
, const Layout
* layout
,
1296 const unsigned char* pshdrs
,
1297 typename Sized_relobj
<32, big_endian
>::Views
* pivews
);
1299 // Read the symbol information.
1301 do_read_symbols(Read_symbols_data
* sd
);
1303 // Process relocs for garbage collection.
1305 do_gc_process_relocs(Symbol_table
*, Layout
*, Read_relocs_data
*);
1309 // Whether a section needs to be scanned for relocation stubs.
1311 section_needs_reloc_stub_scanning(const elfcpp::Shdr
<32, big_endian
>&,
1312 const Relobj::Output_sections
&,
1313 const Symbol_table
*);
1315 // Whether a section needs to be scanned for the Cortex-A8 erratum.
1317 section_needs_cortex_a8_stub_scanning(const elfcpp::Shdr
<32, big_endian
>&,
1318 unsigned int, Output_section
*,
1319 const Symbol_table
*);
1321 // Scan a section for the Cortex-A8 erratum.
1323 scan_section_for_cortex_a8_erratum(const elfcpp::Shdr
<32, big_endian
>&,
1324 unsigned int, Output_section
*,
1325 Target_arm
<big_endian
>*);
1327 // List of stub tables.
1328 typedef std::vector
<Stub_table
<big_endian
>*> Stub_table_list
;
1329 Stub_table_list stub_tables_
;
1330 // Bit vector to tell if a local symbol is a thumb function or not.
1331 // This is only valid after do_count_local_symbol is called.
1332 std::vector
<bool> local_symbol_is_thumb_function_
;
1333 // processor-specific flags in ELF file header.
1334 elfcpp::Elf_Word processor_specific_flags_
;
1335 // Object attributes if there is an .ARM.attributes section or NULL.
1336 Attributes_section_data
* attributes_section_data_
;
1337 // Mapping symbols information.
1338 Mapping_symbols_info mapping_symbols_info_
;
1339 // Bitmap to indicate sections with Cortex-A8 workaround or NULL.
1340 std::vector
<bool>* section_has_cortex_a8_workaround_
;
1343 // Arm_dynobj class.
1345 template<bool big_endian
>
1346 class Arm_dynobj
: public Sized_dynobj
<32, big_endian
>
1349 Arm_dynobj(const std::string
& name
, Input_file
* input_file
, off_t offset
,
1350 const elfcpp::Ehdr
<32, big_endian
>& ehdr
)
1351 : Sized_dynobj
<32, big_endian
>(name
, input_file
, offset
, ehdr
),
1352 processor_specific_flags_(0), attributes_section_data_(NULL
)
1356 { delete this->attributes_section_data_
; }
1358 // Downcast a base pointer to an Arm_relobj pointer. This is
1359 // not type-safe but we only use Arm_relobj not the base class.
1360 static Arm_dynobj
<big_endian
>*
1361 as_arm_dynobj(Dynobj
* dynobj
)
1362 { return static_cast<Arm_dynobj
<big_endian
>*>(dynobj
); }
1364 // Processor-specific flags in ELF file header. This is valid only after
1367 processor_specific_flags() const
1368 { return this->processor_specific_flags_
; }
1370 // Attributes section data.
1371 const Attributes_section_data
*
1372 attributes_section_data() const
1373 { return this->attributes_section_data_
; }
1376 // Read the symbol information.
1378 do_read_symbols(Read_symbols_data
* sd
);
1381 // processor-specific flags in ELF file header.
1382 elfcpp::Elf_Word processor_specific_flags_
;
1383 // Object attributes if there is an .ARM.attributes section or NULL.
1384 Attributes_section_data
* attributes_section_data_
;
1387 // Functor to read reloc addends during stub generation.
1389 template<int sh_type
, bool big_endian
>
1390 struct Stub_addend_reader
1392 // Return the addend for a relocation of a particular type. Depending
1393 // on whether this is a REL or RELA relocation, read the addend from a
1394 // view or from a Reloc object.
1395 elfcpp::Elf_types
<32>::Elf_Swxword
1397 unsigned int /* r_type */,
1398 const unsigned char* /* view */,
1399 const typename Reloc_types
<sh_type
,
1400 32, big_endian
>::Reloc
& /* reloc */) const;
1403 // Specialized Stub_addend_reader for SHT_REL type relocation sections.
1405 template<bool big_endian
>
1406 struct Stub_addend_reader
<elfcpp::SHT_REL
, big_endian
>
1408 elfcpp::Elf_types
<32>::Elf_Swxword
1411 const unsigned char*,
1412 const typename Reloc_types
<elfcpp::SHT_REL
, 32, big_endian
>::Reloc
&) const;
1415 // Specialized Stub_addend_reader for RELA type relocation sections.
1416 // We currently do not handle RELA type relocation sections but it is trivial
1417 // to implement the addend reader. This is provided for completeness and to
1418 // make it easier to add support for RELA relocation sections in the future.
1420 template<bool big_endian
>
1421 struct Stub_addend_reader
<elfcpp::SHT_RELA
, big_endian
>
1423 elfcpp::Elf_types
<32>::Elf_Swxword
1426 const unsigned char*,
1427 const typename Reloc_types
<elfcpp::SHT_RELA
, 32,
1428 big_endian
>::Reloc
& reloc
) const
1429 { return reloc
.get_r_addend(); }
1432 // Cortex_a8_reloc class. We keep record of relocation that may need
1433 // the Cortex-A8 erratum workaround.
1435 class Cortex_a8_reloc
1438 Cortex_a8_reloc(Reloc_stub
* reloc_stub
, unsigned r_type
,
1439 Arm_address destination
)
1440 : reloc_stub_(reloc_stub
), r_type_(r_type
), destination_(destination
)
1446 // Accessors: This is a read-only class.
1448 // Return the relocation stub associated with this relocation if there is
1452 { return this->reloc_stub_
; }
1454 // Return the relocation type.
1457 { return this->r_type_
; }
1459 // Return the destination address of the relocation. LSB stores the THUMB
1463 { return this->destination_
; }
1466 // Associated relocation stub if there is one, or NULL.
1467 const Reloc_stub
* reloc_stub_
;
1469 unsigned int r_type_
;
1470 // Destination address of this relocation. LSB is used to distinguish
1472 Arm_address destination_
;
1475 // Utilities for manipulating integers of up to 32-bits
1479 // Sign extend an n-bit unsigned integer stored in an uint32_t into
1480 // an int32_t. NO_BITS must be between 1 to 32.
1481 template<int no_bits
>
1482 static inline int32_t
1483 sign_extend(uint32_t bits
)
1485 gold_assert(no_bits
>= 0 && no_bits
<= 32);
1487 return static_cast<int32_t>(bits
);
1488 uint32_t mask
= (~((uint32_t) 0)) >> (32 - no_bits
);
1490 uint32_t top_bit
= 1U << (no_bits
- 1);
1491 int32_t as_signed
= static_cast<int32_t>(bits
);
1492 return (bits
& top_bit
) ? as_signed
+ (-top_bit
* 2) : as_signed
;
1495 // Detects overflow of an NO_BITS integer stored in a uint32_t.
1496 template<int no_bits
>
1498 has_overflow(uint32_t bits
)
1500 gold_assert(no_bits
>= 0 && no_bits
<= 32);
1503 int32_t max
= (1 << (no_bits
- 1)) - 1;
1504 int32_t min
= -(1 << (no_bits
- 1));
1505 int32_t as_signed
= static_cast<int32_t>(bits
);
1506 return as_signed
> max
|| as_signed
< min
;
1509 // Detects overflow of an NO_BITS integer stored in a uint32_t when it
1510 // fits in the given number of bits as either a signed or unsigned value.
1511 // For example, has_signed_unsigned_overflow<8> would check
1512 // -128 <= bits <= 255
1513 template<int no_bits
>
1515 has_signed_unsigned_overflow(uint32_t bits
)
1517 gold_assert(no_bits
>= 2 && no_bits
<= 32);
1520 int32_t max
= static_cast<int32_t>((1U << no_bits
) - 1);
1521 int32_t min
= -(1 << (no_bits
- 1));
1522 int32_t as_signed
= static_cast<int32_t>(bits
);
1523 return as_signed
> max
|| as_signed
< min
;
1526 // Select bits from A and B using bits in MASK. For each n in [0..31],
1527 // the n-th bit in the result is chosen from the n-th bits of A and B.
1528 // A zero selects A and a one selects B.
1529 static inline uint32_t
1530 bit_select(uint32_t a
, uint32_t b
, uint32_t mask
)
1531 { return (a
& ~mask
) | (b
& mask
); }
1534 template<bool big_endian
>
1535 class Target_arm
: public Sized_target
<32, big_endian
>
1538 typedef Output_data_reloc
<elfcpp::SHT_REL
, true, 32, big_endian
>
1541 // When were are relocating a stub, we pass this as the relocation number.
1542 static const size_t fake_relnum_for_stubs
= static_cast<size_t>(-1);
1545 : Sized_target
<32, big_endian
>(&arm_info
),
1546 got_(NULL
), plt_(NULL
), got_plt_(NULL
), rel_dyn_(NULL
),
1547 copy_relocs_(elfcpp::R_ARM_COPY
), dynbss_(NULL
), stub_tables_(),
1548 stub_factory_(Stub_factory::get_instance()), may_use_blx_(false),
1549 should_force_pic_veneer_(false), arm_input_section_map_(),
1550 attributes_section_data_(NULL
), fix_cortex_a8_(false),
1551 cortex_a8_relocs_info_(), fix_v4bx_(0)
1554 // Whether we can use BLX.
1557 { return this->may_use_blx_
; }
1559 // Set use-BLX flag.
1561 set_may_use_blx(bool value
)
1562 { this->may_use_blx_
= value
; }
1564 // Whether we force PCI branch veneers.
1566 should_force_pic_veneer() const
1567 { return this->should_force_pic_veneer_
; }
1569 // Set PIC veneer flag.
1571 set_should_force_pic_veneer(bool value
)
1572 { this->should_force_pic_veneer_
= value
; }
1574 // Whether we use THUMB-2 instructions.
1576 using_thumb2() const
1578 Object_attribute
* attr
=
1579 this->get_aeabi_object_attribute(elfcpp::Tag_CPU_arch
);
1580 int arch
= attr
->int_value();
1581 return arch
== elfcpp::TAG_CPU_ARCH_V6T2
|| arch
>= elfcpp::TAG_CPU_ARCH_V7
;
1584 // Whether we use THUMB/THUMB-2 instructions only.
1586 using_thumb_only() const
1588 Object_attribute
* attr
=
1589 this->get_aeabi_object_attribute(elfcpp::Tag_CPU_arch
);
1590 if (attr
->int_value() != elfcpp::TAG_CPU_ARCH_V7
1591 && attr
->int_value() != elfcpp::TAG_CPU_ARCH_V7E_M
)
1593 attr
= this->get_aeabi_object_attribute(elfcpp::Tag_CPU_arch_profile
);
1594 return attr
->int_value() == 'M';
1597 // Whether we have an NOP instruction. If not, use mov r0, r0 instead.
1599 may_use_arm_nop() const
1601 Object_attribute
* attr
=
1602 this->get_aeabi_object_attribute(elfcpp::Tag_CPU_arch
);
1603 int arch
= attr
->int_value();
1604 return (arch
== elfcpp::TAG_CPU_ARCH_V6T2
1605 || arch
== elfcpp::TAG_CPU_ARCH_V6K
1606 || arch
== elfcpp::TAG_CPU_ARCH_V7
1607 || arch
== elfcpp::TAG_CPU_ARCH_V7E_M
);
1610 // Whether we have THUMB-2 NOP.W instruction.
1612 may_use_thumb2_nop() const
1614 Object_attribute
* attr
=
1615 this->get_aeabi_object_attribute(elfcpp::Tag_CPU_arch
);
1616 int arch
= attr
->int_value();
1617 return (arch
== elfcpp::TAG_CPU_ARCH_V6T2
1618 || arch
== elfcpp::TAG_CPU_ARCH_V7
1619 || arch
== elfcpp::TAG_CPU_ARCH_V7E_M
);
1622 // Process the relocations to determine unreferenced sections for
1623 // garbage collection.
1625 gc_process_relocs(Symbol_table
* symtab
,
1627 Sized_relobj
<32, big_endian
>* object
,
1628 unsigned int data_shndx
,
1629 unsigned int sh_type
,
1630 const unsigned char* prelocs
,
1632 Output_section
* output_section
,
1633 bool needs_special_offset_handling
,
1634 size_t local_symbol_count
,
1635 const unsigned char* plocal_symbols
);
1637 // Scan the relocations to look for symbol adjustments.
1639 scan_relocs(Symbol_table
* symtab
,
1641 Sized_relobj
<32, big_endian
>* object
,
1642 unsigned int data_shndx
,
1643 unsigned int sh_type
,
1644 const unsigned char* prelocs
,
1646 Output_section
* output_section
,
1647 bool needs_special_offset_handling
,
1648 size_t local_symbol_count
,
1649 const unsigned char* plocal_symbols
);
1651 // Finalize the sections.
1653 do_finalize_sections(Layout
*, const Input_objects
*, Symbol_table
*);
1655 // Return the value to use for a dynamic symbol which requires special
1658 do_dynsym_value(const Symbol
*) const;
1660 // Relocate a section.
1662 relocate_section(const Relocate_info
<32, big_endian
>*,
1663 unsigned int sh_type
,
1664 const unsigned char* prelocs
,
1666 Output_section
* output_section
,
1667 bool needs_special_offset_handling
,
1668 unsigned char* view
,
1669 Arm_address view_address
,
1670 section_size_type view_size
,
1671 const Reloc_symbol_changes
*);
1673 // Scan the relocs during a relocatable link.
1675 scan_relocatable_relocs(Symbol_table
* symtab
,
1677 Sized_relobj
<32, big_endian
>* object
,
1678 unsigned int data_shndx
,
1679 unsigned int sh_type
,
1680 const unsigned char* prelocs
,
1682 Output_section
* output_section
,
1683 bool needs_special_offset_handling
,
1684 size_t local_symbol_count
,
1685 const unsigned char* plocal_symbols
,
1686 Relocatable_relocs
*);
1688 // Relocate a section during a relocatable link.
1690 relocate_for_relocatable(const Relocate_info
<32, big_endian
>*,
1691 unsigned int sh_type
,
1692 const unsigned char* prelocs
,
1694 Output_section
* output_section
,
1695 off_t offset_in_output_section
,
1696 const Relocatable_relocs
*,
1697 unsigned char* view
,
1698 Arm_address view_address
,
1699 section_size_type view_size
,
1700 unsigned char* reloc_view
,
1701 section_size_type reloc_view_size
);
1703 // Return whether SYM is defined by the ABI.
1705 do_is_defined_by_abi(Symbol
* sym
) const
1706 { return strcmp(sym
->name(), "__tls_get_addr") == 0; }
1708 // Return the size of the GOT section.
1712 gold_assert(this->got_
!= NULL
);
1713 return this->got_
->data_size();
1716 // Map platform-specific reloc types
1718 get_real_reloc_type (unsigned int r_type
);
1721 // Methods to support stub-generations.
1724 // Return the stub factory
1726 stub_factory() const
1727 { return this->stub_factory_
; }
1729 // Make a new Arm_input_section object.
1730 Arm_input_section
<big_endian
>*
1731 new_arm_input_section(Relobj
*, unsigned int);
1733 // Find the Arm_input_section object corresponding to the SHNDX-th input
1734 // section of RELOBJ.
1735 Arm_input_section
<big_endian
>*
1736 find_arm_input_section(Relobj
* relobj
, unsigned int shndx
) const;
1738 // Make a new Stub_table
1739 Stub_table
<big_endian
>*
1740 new_stub_table(Arm_input_section
<big_endian
>*);
1742 // Scan a section for stub generation.
1744 scan_section_for_stubs(const Relocate_info
<32, big_endian
>*, unsigned int,
1745 const unsigned char*, size_t, Output_section
*,
1746 bool, const unsigned char*, Arm_address
,
1751 relocate_stub(Stub
*, const Relocate_info
<32, big_endian
>*,
1752 Output_section
*, unsigned char*, Arm_address
,
1755 // Get the default ARM target.
1756 static Target_arm
<big_endian
>*
1759 gold_assert(parameters
->target().machine_code() == elfcpp::EM_ARM
1760 && parameters
->target().is_big_endian() == big_endian
);
1761 return static_cast<Target_arm
<big_endian
>*>(
1762 parameters
->sized_target
<32, big_endian
>());
1765 // Whether relocation type uses LSB to distinguish THUMB addresses.
1767 reloc_uses_thumb_bit(unsigned int r_type
);
1769 // Whether NAME belongs to a mapping symbol.
1771 is_mapping_symbol_name(const char* name
)
1775 && (name
[1] == 'a' || name
[1] == 't' || name
[1] == 'd')
1776 && (name
[2] == '\0' || name
[2] == '.'));
1779 // Whether we work around the Cortex-A8 erratum.
1781 fix_cortex_a8() const
1782 { return this->fix_cortex_a8_
; }
1784 // Whether we fix R_ARM_V4BX relocation.
1786 // 1 - replace with MOV instruction (armv4 target)
1787 // 2 - make interworking veneer (>= armv4t targets only)
1790 { return this->fix_v4bx_
; }
1792 // Scan a span of THUMB code section for Cortex-A8 erratum.
1794 scan_span_for_cortex_a8_erratum(Arm_relobj
<big_endian
>*, unsigned int,
1795 section_size_type
, section_size_type
,
1796 const unsigned char*, Arm_address
);
1798 // Apply Cortex-A8 workaround to a branch.
1800 apply_cortex_a8_workaround(const Cortex_a8_stub
*, Arm_address
,
1801 unsigned char*, Arm_address
);
1804 // Make an ELF object.
1806 do_make_elf_object(const std::string
&, Input_file
*, off_t
,
1807 const elfcpp::Ehdr
<32, big_endian
>& ehdr
);
1810 do_make_elf_object(const std::string
&, Input_file
*, off_t
,
1811 const elfcpp::Ehdr
<32, !big_endian
>&)
1812 { gold_unreachable(); }
1815 do_make_elf_object(const std::string
&, Input_file
*, off_t
,
1816 const elfcpp::Ehdr
<64, false>&)
1817 { gold_unreachable(); }
1820 do_make_elf_object(const std::string
&, Input_file
*, off_t
,
1821 const elfcpp::Ehdr
<64, true>&)
1822 { gold_unreachable(); }
1824 // Make an output section.
1826 do_make_output_section(const char* name
, elfcpp::Elf_Word type
,
1827 elfcpp::Elf_Xword flags
)
1828 { return new Arm_output_section
<big_endian
>(name
, type
, flags
); }
1831 do_adjust_elf_header(unsigned char* view
, int len
) const;
1833 // We only need to generate stubs, and hence perform relaxation if we are
1834 // not doing relocatable linking.
1836 do_may_relax() const
1837 { return !parameters
->options().relocatable(); }
1840 do_relax(int, const Input_objects
*, Symbol_table
*, Layout
*);
1842 // Determine whether an object attribute tag takes an integer, a
1845 do_attribute_arg_type(int tag
) const;
1847 // Reorder tags during output.
1849 do_attributes_order(int num
) const;
1852 // The class which scans relocations.
1857 : issued_non_pic_error_(false)
1861 local(Symbol_table
* symtab
, Layout
* layout
, Target_arm
* target
,
1862 Sized_relobj
<32, big_endian
>* object
,
1863 unsigned int data_shndx
,
1864 Output_section
* output_section
,
1865 const elfcpp::Rel
<32, big_endian
>& reloc
, unsigned int r_type
,
1866 const elfcpp::Sym
<32, big_endian
>& lsym
);
1869 global(Symbol_table
* symtab
, Layout
* layout
, Target_arm
* target
,
1870 Sized_relobj
<32, big_endian
>* object
,
1871 unsigned int data_shndx
,
1872 Output_section
* output_section
,
1873 const elfcpp::Rel
<32, big_endian
>& reloc
, unsigned int r_type
,
1878 unsupported_reloc_local(Sized_relobj
<32, big_endian
>*,
1879 unsigned int r_type
);
1882 unsupported_reloc_global(Sized_relobj
<32, big_endian
>*,
1883 unsigned int r_type
, Symbol
*);
1886 check_non_pic(Relobj
*, unsigned int r_type
);
1888 // Almost identical to Symbol::needs_plt_entry except that it also
1889 // handles STT_ARM_TFUNC.
1891 symbol_needs_plt_entry(const Symbol
* sym
)
1893 // An undefined symbol from an executable does not need a PLT entry.
1894 if (sym
->is_undefined() && !parameters
->options().shared())
1897 return (!parameters
->doing_static_link()
1898 && (sym
->type() == elfcpp::STT_FUNC
1899 || sym
->type() == elfcpp::STT_ARM_TFUNC
)
1900 && (sym
->is_from_dynobj()
1901 || sym
->is_undefined()
1902 || sym
->is_preemptible()));
1905 // Whether we have issued an error about a non-PIC compilation.
1906 bool issued_non_pic_error_
;
1909 // The class which implements relocation.
1919 // Return whether the static relocation needs to be applied.
1921 should_apply_static_reloc(const Sized_symbol
<32>* gsym
,
1924 Output_section
* output_section
);
1926 // Do a relocation. Return false if the caller should not issue
1927 // any warnings about this relocation.
1929 relocate(const Relocate_info
<32, big_endian
>*, Target_arm
*,
1930 Output_section
*, size_t relnum
,
1931 const elfcpp::Rel
<32, big_endian
>&,
1932 unsigned int r_type
, const Sized_symbol
<32>*,
1933 const Symbol_value
<32>*,
1934 unsigned char*, Arm_address
,
1937 // Return whether we want to pass flag NON_PIC_REF for this
1938 // reloc. This means the relocation type accesses a symbol not via
1941 reloc_is_non_pic (unsigned int r_type
)
1945 // These relocation types reference GOT or PLT entries explicitly.
1946 case elfcpp::R_ARM_GOT_BREL
:
1947 case elfcpp::R_ARM_GOT_ABS
:
1948 case elfcpp::R_ARM_GOT_PREL
:
1949 case elfcpp::R_ARM_GOT_BREL12
:
1950 case elfcpp::R_ARM_PLT32_ABS
:
1951 case elfcpp::R_ARM_TLS_GD32
:
1952 case elfcpp::R_ARM_TLS_LDM32
:
1953 case elfcpp::R_ARM_TLS_IE32
:
1954 case elfcpp::R_ARM_TLS_IE12GP
:
1956 // These relocate types may use PLT entries.
1957 case elfcpp::R_ARM_CALL
:
1958 case elfcpp::R_ARM_THM_CALL
:
1959 case elfcpp::R_ARM_JUMP24
:
1960 case elfcpp::R_ARM_THM_JUMP24
:
1961 case elfcpp::R_ARM_THM_JUMP19
:
1962 case elfcpp::R_ARM_PLT32
:
1963 case elfcpp::R_ARM_THM_XPC22
:
1972 // A class which returns the size required for a relocation type,
1973 // used while scanning relocs during a relocatable link.
1974 class Relocatable_size_for_reloc
1978 get_size_for_reloc(unsigned int, Relobj
*);
1981 // Get the GOT section, creating it if necessary.
1982 Output_data_got
<32, big_endian
>*
1983 got_section(Symbol_table
*, Layout
*);
1985 // Get the GOT PLT section.
1987 got_plt_section() const
1989 gold_assert(this->got_plt_
!= NULL
);
1990 return this->got_plt_
;
1993 // Create a PLT entry for a global symbol.
1995 make_plt_entry(Symbol_table
*, Layout
*, Symbol
*);
1997 // Get the PLT section.
1998 const Output_data_plt_arm
<big_endian
>*
2001 gold_assert(this->plt_
!= NULL
);
2005 // Get the dynamic reloc section, creating it if necessary.
2007 rel_dyn_section(Layout
*);
2009 // Return true if the symbol may need a COPY relocation.
2010 // References from an executable object to non-function symbols
2011 // defined in a dynamic object may need a COPY relocation.
2013 may_need_copy_reloc(Symbol
* gsym
)
2015 return (gsym
->type() != elfcpp::STT_ARM_TFUNC
2016 && gsym
->may_need_copy_reloc());
2019 // Add a potential copy relocation.
2021 copy_reloc(Symbol_table
* symtab
, Layout
* layout
,
2022 Sized_relobj
<32, big_endian
>* object
,
2023 unsigned int shndx
, Output_section
* output_section
,
2024 Symbol
* sym
, const elfcpp::Rel
<32, big_endian
>& reloc
)
2026 this->copy_relocs_
.copy_reloc(symtab
, layout
,
2027 symtab
->get_sized_symbol
<32>(sym
),
2028 object
, shndx
, output_section
, reloc
,
2029 this->rel_dyn_section(layout
));
2032 // Whether two EABI versions are compatible.
2034 are_eabi_versions_compatible(elfcpp::Elf_Word v1
, elfcpp::Elf_Word v2
);
2036 // Merge processor-specific flags from input object and those in the ELF
2037 // header of the output.
2039 merge_processor_specific_flags(const std::string
&, elfcpp::Elf_Word
);
2041 // Get the secondary compatible architecture.
2043 get_secondary_compatible_arch(const Attributes_section_data
*);
2045 // Set the secondary compatible architecture.
2047 set_secondary_compatible_arch(Attributes_section_data
*, int);
2050 tag_cpu_arch_combine(const char*, int, int*, int, int);
2052 // Helper to print AEABI enum tag value.
2054 aeabi_enum_name(unsigned int);
2056 // Return string value for TAG_CPU_name.
2058 tag_cpu_name_value(unsigned int);
2060 // Merge object attributes from input object and those in the output.
2062 merge_object_attributes(const char*, const Attributes_section_data
*);
2064 // Helper to get an AEABI object attribute
2066 get_aeabi_object_attribute(int tag
) const
2068 Attributes_section_data
* pasd
= this->attributes_section_data_
;
2069 gold_assert(pasd
!= NULL
);
2070 Object_attribute
* attr
=
2071 pasd
->get_attribute(Object_attribute::OBJ_ATTR_PROC
, tag
);
2072 gold_assert(attr
!= NULL
);
2077 // Methods to support stub-generations.
2080 // Group input sections for stub generation.
2082 group_sections(Layout
*, section_size_type
, bool);
2084 // Scan a relocation for stub generation.
2086 scan_reloc_for_stub(const Relocate_info
<32, big_endian
>*, unsigned int,
2087 const Sized_symbol
<32>*, unsigned int,
2088 const Symbol_value
<32>*,
2089 elfcpp::Elf_types
<32>::Elf_Swxword
, Arm_address
);
2091 // Scan a relocation section for stub.
2092 template<int sh_type
>
2094 scan_reloc_section_for_stubs(
2095 const Relocate_info
<32, big_endian
>* relinfo
,
2096 const unsigned char* prelocs
,
2098 Output_section
* output_section
,
2099 bool needs_special_offset_handling
,
2100 const unsigned char* view
,
2101 elfcpp::Elf_types
<32>::Elf_Addr view_address
,
2104 // Information about this specific target which we pass to the
2105 // general Target structure.
2106 static const Target::Target_info arm_info
;
2108 // The types of GOT entries needed for this platform.
2111 GOT_TYPE_STANDARD
= 0 // GOT entry for a regular symbol
2114 typedef typename
std::vector
<Stub_table
<big_endian
>*> Stub_table_list
;
2116 // Map input section to Arm_input_section.
2117 typedef Unordered_map
<Section_id
,
2118 Arm_input_section
<big_endian
>*,
2120 Arm_input_section_map
;
2122 // Map output addresses to relocs for Cortex-A8 erratum.
2123 typedef Unordered_map
<Arm_address
, const Cortex_a8_reloc
*>
2124 Cortex_a8_relocs_info
;
2127 Output_data_got
<32, big_endian
>* got_
;
2129 Output_data_plt_arm
<big_endian
>* plt_
;
2130 // The GOT PLT section.
2131 Output_data_space
* got_plt_
;
2132 // The dynamic reloc section.
2133 Reloc_section
* rel_dyn_
;
2134 // Relocs saved to avoid a COPY reloc.
2135 Copy_relocs
<elfcpp::SHT_REL
, 32, big_endian
> copy_relocs_
;
2136 // Space for variables copied with a COPY reloc.
2137 Output_data_space
* dynbss_
;
2138 // Vector of Stub_tables created.
2139 Stub_table_list stub_tables_
;
2141 const Stub_factory
&stub_factory_
;
2142 // Whether we can use BLX.
2144 // Whether we force PIC branch veneers.
2145 bool should_force_pic_veneer_
;
2146 // Map for locating Arm_input_sections.
2147 Arm_input_section_map arm_input_section_map_
;
2148 // Attributes section data in output.
2149 Attributes_section_data
* attributes_section_data_
;
2150 // Whether we want to fix code for Cortex-A8 erratum.
2151 bool fix_cortex_a8_
;
2152 // Map addresses to relocs for Cortex-A8 erratum.
2153 Cortex_a8_relocs_info cortex_a8_relocs_info_
;
2154 // Whether we need to fix code for V4BX relocations.
2158 template<bool big_endian
>
2159 const Target::Target_info Target_arm
<big_endian
>::arm_info
=
2162 big_endian
, // is_big_endian
2163 elfcpp::EM_ARM
, // machine_code
2164 false, // has_make_symbol
2165 false, // has_resolve
2166 false, // has_code_fill
2167 true, // is_default_stack_executable
2169 "/usr/lib/libc.so.1", // dynamic_linker
2170 0x8000, // default_text_segment_address
2171 0x1000, // abi_pagesize (overridable by -z max-page-size)
2172 0x1000, // common_pagesize (overridable by -z common-page-size)
2173 elfcpp::SHN_UNDEF
, // small_common_shndx
2174 elfcpp::SHN_UNDEF
, // large_common_shndx
2175 0, // small_common_section_flags
2176 0, // large_common_section_flags
2177 ".ARM.attributes", // attributes_section
2178 "aeabi" // attributes_vendor
2181 // Arm relocate functions class
2184 template<bool big_endian
>
2185 class Arm_relocate_functions
: public Relocate_functions
<32, big_endian
>
2190 STATUS_OKAY
, // No error during relocation.
2191 STATUS_OVERFLOW
, // Relocation oveflow.
2192 STATUS_BAD_RELOC
// Relocation cannot be applied.
2196 typedef Relocate_functions
<32, big_endian
> Base
;
2197 typedef Arm_relocate_functions
<big_endian
> This
;
2199 // Encoding of imm16 argument for movt and movw ARM instructions
2202 // imm16 := imm4 | imm12
2204 // f e d c b a 9 8 7 6 5 4 3 2 1 0 f e d c b a 9 8 7 6 5 4 3 2 1 0
2205 // +-------+---------------+-------+-------+-----------------------+
2206 // | | |imm4 | |imm12 |
2207 // +-------+---------------+-------+-------+-----------------------+
2209 // Extract the relocation addend from VAL based on the ARM
2210 // instruction encoding described above.
2211 static inline typename
elfcpp::Swap
<32, big_endian
>::Valtype
2212 extract_arm_movw_movt_addend(
2213 typename
elfcpp::Swap
<32, big_endian
>::Valtype val
)
2215 // According to the Elf ABI for ARM Architecture the immediate
2216 // field is sign-extended to form the addend.
2217 return utils::sign_extend
<16>(((val
>> 4) & 0xf000) | (val
& 0xfff));
2220 // Insert X into VAL based on the ARM instruction encoding described
2222 static inline typename
elfcpp::Swap
<32, big_endian
>::Valtype
2223 insert_val_arm_movw_movt(
2224 typename
elfcpp::Swap
<32, big_endian
>::Valtype val
,
2225 typename
elfcpp::Swap
<32, big_endian
>::Valtype x
)
2229 val
|= (x
& 0xf000) << 4;
2233 // Encoding of imm16 argument for movt and movw Thumb2 instructions
2236 // imm16 := imm4 | i | imm3 | imm8
2238 // f e d c b a 9 8 7 6 5 4 3 2 1 0 f e d c b a 9 8 7 6 5 4 3 2 1 0
2239 // +---------+-+-----------+-------++-+-----+-------+---------------+
2240 // | |i| |imm4 || |imm3 | |imm8 |
2241 // +---------+-+-----------+-------++-+-----+-------+---------------+
2243 // Extract the relocation addend from VAL based on the Thumb2
2244 // instruction encoding described above.
2245 static inline typename
elfcpp::Swap
<32, big_endian
>::Valtype
2246 extract_thumb_movw_movt_addend(
2247 typename
elfcpp::Swap
<32, big_endian
>::Valtype val
)
2249 // According to the Elf ABI for ARM Architecture the immediate
2250 // field is sign-extended to form the addend.
2251 return utils::sign_extend
<16>(((val
>> 4) & 0xf000)
2252 | ((val
>> 15) & 0x0800)
2253 | ((val
>> 4) & 0x0700)
2257 // Insert X into VAL based on the Thumb2 instruction encoding
2259 static inline typename
elfcpp::Swap
<32, big_endian
>::Valtype
2260 insert_val_thumb_movw_movt(
2261 typename
elfcpp::Swap
<32, big_endian
>::Valtype val
,
2262 typename
elfcpp::Swap
<32, big_endian
>::Valtype x
)
2265 val
|= (x
& 0xf000) << 4;
2266 val
|= (x
& 0x0800) << 15;
2267 val
|= (x
& 0x0700) << 4;
2268 val
|= (x
& 0x00ff);
2272 // Handle ARM long branches.
2273 static typename
This::Status
2274 arm_branch_common(unsigned int, const Relocate_info
<32, big_endian
>*,
2275 unsigned char *, const Sized_symbol
<32>*,
2276 const Arm_relobj
<big_endian
>*, unsigned int,
2277 const Symbol_value
<32>*, Arm_address
, Arm_address
, bool);
2279 // Handle THUMB long branches.
2280 static typename
This::Status
2281 thumb_branch_common(unsigned int, const Relocate_info
<32, big_endian
>*,
2282 unsigned char *, const Sized_symbol
<32>*,
2283 const Arm_relobj
<big_endian
>*, unsigned int,
2284 const Symbol_value
<32>*, Arm_address
, Arm_address
, bool);
2288 // Return the branch offset of a 32-bit THUMB branch.
2289 static inline int32_t
2290 thumb32_branch_offset(uint16_t upper_insn
, uint16_t lower_insn
)
2292 // We use the Thumb-2 encoding (backwards compatible with Thumb-1)
2293 // involving the J1 and J2 bits.
2294 uint32_t s
= (upper_insn
& (1U << 10)) >> 10;
2295 uint32_t upper
= upper_insn
& 0x3ffU
;
2296 uint32_t lower
= lower_insn
& 0x7ffU
;
2297 uint32_t j1
= (lower_insn
& (1U << 13)) >> 13;
2298 uint32_t j2
= (lower_insn
& (1U << 11)) >> 11;
2299 uint32_t i1
= j1
^ s
? 0 : 1;
2300 uint32_t i2
= j2
^ s
? 0 : 1;
2302 return utils::sign_extend
<25>((s
<< 24) | (i1
<< 23) | (i2
<< 22)
2303 | (upper
<< 12) | (lower
<< 1));
2306 // Insert OFFSET to a 32-bit THUMB branch and return the upper instruction.
2307 // UPPER_INSN is the original upper instruction of the branch. Caller is
2308 // responsible for overflow checking and BLX offset adjustment.
2309 static inline uint16_t
2310 thumb32_branch_upper(uint16_t upper_insn
, int32_t offset
)
2312 uint32_t s
= offset
< 0 ? 1 : 0;
2313 uint32_t bits
= static_cast<uint32_t>(offset
);
2314 return (upper_insn
& ~0x7ffU
) | ((bits
>> 12) & 0x3ffU
) | (s
<< 10);
2317 // Insert OFFSET to a 32-bit THUMB branch and return the lower instruction.
2318 // LOWER_INSN is the original lower instruction of the branch. Caller is
2319 // responsible for overflow checking and BLX offset adjustment.
2320 static inline uint16_t
2321 thumb32_branch_lower(uint16_t lower_insn
, int32_t offset
)
2323 uint32_t s
= offset
< 0 ? 1 : 0;
2324 uint32_t bits
= static_cast<uint32_t>(offset
);
2325 return ((lower_insn
& ~0x2fffU
)
2326 | ((((bits
>> 23) & 1) ^ !s
) << 13)
2327 | ((((bits
>> 22) & 1) ^ !s
) << 11)
2328 | ((bits
>> 1) & 0x7ffU
));
2331 // Return the branch offset of a 32-bit THUMB conditional branch.
2332 static inline int32_t
2333 thumb32_cond_branch_offset(uint16_t upper_insn
, uint16_t lower_insn
)
2335 uint32_t s
= (upper_insn
& 0x0400U
) >> 10;
2336 uint32_t j1
= (lower_insn
& 0x2000U
) >> 13;
2337 uint32_t j2
= (lower_insn
& 0x0800U
) >> 11;
2338 uint32_t lower
= (lower_insn
& 0x07ffU
);
2339 uint32_t upper
= (s
<< 8) | (j2
<< 7) | (j1
<< 6) | (upper_insn
& 0x003fU
);
2341 return utils::sign_extend
<21>((upper
<< 12) | (lower
<< 1));
2344 // Insert OFFSET to a 32-bit THUMB conditional branch and return the upper
2345 // instruction. UPPER_INSN is the original upper instruction of the branch.
2346 // Caller is responsible for overflow checking.
2347 static inline uint16_t
2348 thumb32_cond_branch_upper(uint16_t upper_insn
, int32_t offset
)
2350 uint32_t s
= offset
< 0 ? 1 : 0;
2351 uint32_t bits
= static_cast<uint32_t>(offset
);
2352 return (upper_insn
& 0xfbc0U
) | (s
<< 10) | ((bits
& 0x0003f000U
) >> 12);
2355 // Insert OFFSET to a 32-bit THUMB conditional branch and return the lower
2356 // instruction. LOWER_INSN is the original lower instruction of the branch.
2357 // Caller is reponsible for overflow checking.
2358 static inline uint16_t
2359 thumb32_cond_branch_lower(uint16_t lower_insn
, int32_t offset
)
2361 uint32_t bits
= static_cast<uint32_t>(offset
);
2362 uint32_t j2
= (bits
& 0x00080000U
) >> 19;
2363 uint32_t j1
= (bits
& 0x00040000U
) >> 18;
2364 uint32_t lo
= (bits
& 0x00000ffeU
) >> 1;
2366 return (lower_insn
& 0xd000U
) | (j1
<< 13) | (j2
<< 11) | lo
;
2369 // R_ARM_ABS8: S + A
2370 static inline typename
This::Status
2371 abs8(unsigned char *view
,
2372 const Sized_relobj
<32, big_endian
>* object
,
2373 const Symbol_value
<32>* psymval
)
2375 typedef typename
elfcpp::Swap
<8, big_endian
>::Valtype Valtype
;
2376 typedef typename
elfcpp::Swap
<32, big_endian
>::Valtype Reltype
;
2377 Valtype
* wv
= reinterpret_cast<Valtype
*>(view
);
2378 Valtype val
= elfcpp::Swap
<8, big_endian
>::readval(wv
);
2379 Reltype addend
= utils::sign_extend
<8>(val
);
2380 Reltype x
= psymval
->value(object
, addend
);
2381 val
= utils::bit_select(val
, x
, 0xffU
);
2382 elfcpp::Swap
<8, big_endian
>::writeval(wv
, val
);
2383 return (utils::has_signed_unsigned_overflow
<8>(x
)
2384 ? This::STATUS_OVERFLOW
2385 : This::STATUS_OKAY
);
2388 // R_ARM_THM_ABS5: S + A
2389 static inline typename
This::Status
2390 thm_abs5(unsigned char *view
,
2391 const Sized_relobj
<32, big_endian
>* object
,
2392 const Symbol_value
<32>* psymval
)
2394 typedef typename
elfcpp::Swap
<16, big_endian
>::Valtype Valtype
;
2395 typedef typename
elfcpp::Swap
<32, big_endian
>::Valtype Reltype
;
2396 Valtype
* wv
= reinterpret_cast<Valtype
*>(view
);
2397 Valtype val
= elfcpp::Swap
<16, big_endian
>::readval(wv
);
2398 Reltype addend
= (val
& 0x7e0U
) >> 6;
2399 Reltype x
= psymval
->value(object
, addend
);
2400 val
= utils::bit_select(val
, x
<< 6, 0x7e0U
);
2401 elfcpp::Swap
<16, big_endian
>::writeval(wv
, val
);
2402 return (utils::has_overflow
<5>(x
)
2403 ? This::STATUS_OVERFLOW
2404 : This::STATUS_OKAY
);
2407 // R_ARM_ABS12: S + A
2408 static inline typename
This::Status
2409 abs12(unsigned char *view
,
2410 const Sized_relobj
<32, big_endian
>* object
,
2411 const Symbol_value
<32>* psymval
)
2413 typedef typename
elfcpp::Swap
<32, big_endian
>::Valtype Valtype
;
2414 typedef typename
elfcpp::Swap
<32, big_endian
>::Valtype Reltype
;
2415 Valtype
* wv
= reinterpret_cast<Valtype
*>(view
);
2416 Valtype val
= elfcpp::Swap
<32, big_endian
>::readval(wv
);
2417 Reltype addend
= val
& 0x0fffU
;
2418 Reltype x
= psymval
->value(object
, addend
);
2419 val
= utils::bit_select(val
, x
, 0x0fffU
);
2420 elfcpp::Swap
<32, big_endian
>::writeval(wv
, val
);
2421 return (utils::has_overflow
<12>(x
)
2422 ? This::STATUS_OVERFLOW
2423 : This::STATUS_OKAY
);
2426 // R_ARM_ABS16: S + A
2427 static inline typename
This::Status
2428 abs16(unsigned char *view
,
2429 const Sized_relobj
<32, big_endian
>* object
,
2430 const Symbol_value
<32>* psymval
)
2432 typedef typename
elfcpp::Swap
<16, big_endian
>::Valtype Valtype
;
2433 typedef typename
elfcpp::Swap
<32, big_endian
>::Valtype Reltype
;
2434 Valtype
* wv
= reinterpret_cast<Valtype
*>(view
);
2435 Valtype val
= elfcpp::Swap
<16, big_endian
>::readval(wv
);
2436 Reltype addend
= utils::sign_extend
<16>(val
);
2437 Reltype x
= psymval
->value(object
, addend
);
2438 val
= utils::bit_select(val
, x
, 0xffffU
);
2439 elfcpp::Swap
<16, big_endian
>::writeval(wv
, val
);
2440 return (utils::has_signed_unsigned_overflow
<16>(x
)
2441 ? This::STATUS_OVERFLOW
2442 : This::STATUS_OKAY
);
2445 // R_ARM_ABS32: (S + A) | T
2446 static inline typename
This::Status
2447 abs32(unsigned char *view
,
2448 const Sized_relobj
<32, big_endian
>* object
,
2449 const Symbol_value
<32>* psymval
,
2450 Arm_address thumb_bit
)
2452 typedef typename
elfcpp::Swap
<32, big_endian
>::Valtype Valtype
;
2453 Valtype
* wv
= reinterpret_cast<Valtype
*>(view
);
2454 Valtype addend
= elfcpp::Swap
<32, big_endian
>::readval(wv
);
2455 Valtype x
= psymval
->value(object
, addend
) | thumb_bit
;
2456 elfcpp::Swap
<32, big_endian
>::writeval(wv
, x
);
2457 return This::STATUS_OKAY
;
2460 // R_ARM_REL32: (S + A) | T - P
2461 static inline typename
This::Status
2462 rel32(unsigned char *view
,
2463 const Sized_relobj
<32, big_endian
>* object
,
2464 const Symbol_value
<32>* psymval
,
2465 Arm_address address
,
2466 Arm_address thumb_bit
)
2468 typedef typename
elfcpp::Swap
<32, big_endian
>::Valtype Valtype
;
2469 Valtype
* wv
= reinterpret_cast<Valtype
*>(view
);
2470 Valtype addend
= elfcpp::Swap
<32, big_endian
>::readval(wv
);
2471 Valtype x
= (psymval
->value(object
, addend
) | thumb_bit
) - address
;
2472 elfcpp::Swap
<32, big_endian
>::writeval(wv
, x
);
2473 return This::STATUS_OKAY
;
2476 // R_ARM_THM_CALL: (S + A) | T - P
2477 static inline typename
This::Status
2478 thm_call(const Relocate_info
<32, big_endian
>* relinfo
, unsigned char *view
,
2479 const Sized_symbol
<32>* gsym
, const Arm_relobj
<big_endian
>* object
,
2480 unsigned int r_sym
, const Symbol_value
<32>* psymval
,
2481 Arm_address address
, Arm_address thumb_bit
,
2482 bool is_weakly_undefined_without_plt
)
2484 return thumb_branch_common(elfcpp::R_ARM_THM_CALL
, relinfo
, view
, gsym
,
2485 object
, r_sym
, psymval
, address
, thumb_bit
,
2486 is_weakly_undefined_without_plt
);
2489 // R_ARM_THM_JUMP24: (S + A) | T - P
2490 static inline typename
This::Status
2491 thm_jump24(const Relocate_info
<32, big_endian
>* relinfo
, unsigned char *view
,
2492 const Sized_symbol
<32>* gsym
, const Arm_relobj
<big_endian
>* object
,
2493 unsigned int r_sym
, const Symbol_value
<32>* psymval
,
2494 Arm_address address
, Arm_address thumb_bit
,
2495 bool is_weakly_undefined_without_plt
)
2497 return thumb_branch_common(elfcpp::R_ARM_THM_JUMP24
, relinfo
, view
, gsym
,
2498 object
, r_sym
, psymval
, address
, thumb_bit
,
2499 is_weakly_undefined_without_plt
);
2502 // R_ARM_THM_JUMP24: (S + A) | T - P
2503 static typename
This::Status
2504 thm_jump19(unsigned char *view
, const Arm_relobj
<big_endian
>* object
,
2505 const Symbol_value
<32>* psymval
, Arm_address address
,
2506 Arm_address thumb_bit
);
2508 // R_ARM_THM_XPC22: (S + A) | T - P
2509 static inline typename
This::Status
2510 thm_xpc22(const Relocate_info
<32, big_endian
>* relinfo
, unsigned char *view
,
2511 const Sized_symbol
<32>* gsym
, const Arm_relobj
<big_endian
>* object
,
2512 unsigned int r_sym
, const Symbol_value
<32>* psymval
,
2513 Arm_address address
, Arm_address thumb_bit
,
2514 bool is_weakly_undefined_without_plt
)
2516 return thumb_branch_common(elfcpp::R_ARM_THM_XPC22
, relinfo
, view
, gsym
,
2517 object
, r_sym
, psymval
, address
, thumb_bit
,
2518 is_weakly_undefined_without_plt
);
2521 // R_ARM_THM_JUMP6: S + A – P
2522 static inline typename
This::Status
2523 thm_jump6(unsigned char *view
,
2524 const Sized_relobj
<32, big_endian
>* object
,
2525 const Symbol_value
<32>* psymval
,
2526 Arm_address address
)
2528 typedef typename
elfcpp::Swap
<16, big_endian
>::Valtype Valtype
;
2529 typedef typename
elfcpp::Swap
<16, big_endian
>::Valtype Reltype
;
2530 Valtype
* wv
= reinterpret_cast<Valtype
*>(view
);
2531 Valtype val
= elfcpp::Swap
<16, big_endian
>::readval(wv
);
2532 // bit[9]:bit[7:3]:’0’ (mask: 0x02f8)
2533 Reltype addend
= (((val
& 0x0200) >> 3) | ((val
& 0x00f8) >> 2));
2534 Reltype x
= (psymval
->value(object
, addend
) - address
);
2535 val
= (val
& 0xfd07) | ((x
& 0x0040) << 3) | ((val
& 0x003e) << 2);
2536 elfcpp::Swap
<16, big_endian
>::writeval(wv
, val
);
2537 // CZB does only forward jumps.
2538 return ((x
> 0x007e)
2539 ? This::STATUS_OVERFLOW
2540 : This::STATUS_OKAY
);
2543 // R_ARM_THM_JUMP8: S + A – P
2544 static inline typename
This::Status
2545 thm_jump8(unsigned char *view
,
2546 const Sized_relobj
<32, big_endian
>* object
,
2547 const Symbol_value
<32>* psymval
,
2548 Arm_address address
)
2550 typedef typename
elfcpp::Swap
<16, big_endian
>::Valtype Valtype
;
2551 typedef typename
elfcpp::Swap
<16, big_endian
>::Valtype Reltype
;
2552 Valtype
* wv
= reinterpret_cast<Valtype
*>(view
);
2553 Valtype val
= elfcpp::Swap
<16, big_endian
>::readval(wv
);
2554 Reltype addend
= utils::sign_extend
<8>((val
& 0x00ff) << 1);
2555 Reltype x
= (psymval
->value(object
, addend
) - address
);
2556 elfcpp::Swap
<16, big_endian
>::writeval(wv
, (val
& 0xff00) | ((x
& 0x01fe) >> 1));
2557 return (utils::has_overflow
<8>(x
)
2558 ? This::STATUS_OVERFLOW
2559 : This::STATUS_OKAY
);
2562 // R_ARM_THM_JUMP11: S + A – P
2563 static inline typename
This::Status
2564 thm_jump11(unsigned char *view
,
2565 const Sized_relobj
<32, big_endian
>* object
,
2566 const Symbol_value
<32>* psymval
,
2567 Arm_address address
)
2569 typedef typename
elfcpp::Swap
<16, big_endian
>::Valtype Valtype
;
2570 typedef typename
elfcpp::Swap
<16, big_endian
>::Valtype Reltype
;
2571 Valtype
* wv
= reinterpret_cast<Valtype
*>(view
);
2572 Valtype val
= elfcpp::Swap
<16, big_endian
>::readval(wv
);
2573 Reltype addend
= utils::sign_extend
<11>((val
& 0x07ff) << 1);
2574 Reltype x
= (psymval
->value(object
, addend
) - address
);
2575 elfcpp::Swap
<16, big_endian
>::writeval(wv
, (val
& 0xf800) | ((x
& 0x0ffe) >> 1));
2576 return (utils::has_overflow
<11>(x
)
2577 ? This::STATUS_OVERFLOW
2578 : This::STATUS_OKAY
);
2581 // R_ARM_BASE_PREL: B(S) + A - P
2582 static inline typename
This::Status
2583 base_prel(unsigned char* view
,
2585 Arm_address address
)
2587 Base::rel32(view
, origin
- address
);
2591 // R_ARM_BASE_ABS: B(S) + A
2592 static inline typename
This::Status
2593 base_abs(unsigned char* view
,
2596 Base::rel32(view
, origin
);
2600 // R_ARM_GOT_BREL: GOT(S) + A - GOT_ORG
2601 static inline typename
This::Status
2602 got_brel(unsigned char* view
,
2603 typename
elfcpp::Swap
<32, big_endian
>::Valtype got_offset
)
2605 Base::rel32(view
, got_offset
);
2606 return This::STATUS_OKAY
;
2609 // R_ARM_GOT_PREL: GOT(S) + A - P
2610 static inline typename
This::Status
2611 got_prel(unsigned char *view
,
2612 Arm_address got_entry
,
2613 Arm_address address
)
2615 Base::rel32(view
, got_entry
- address
);
2616 return This::STATUS_OKAY
;
2619 // R_ARM_PLT32: (S + A) | T - P
2620 static inline typename
This::Status
2621 plt32(const Relocate_info
<32, big_endian
>* relinfo
,
2622 unsigned char *view
,
2623 const Sized_symbol
<32>* gsym
,
2624 const Arm_relobj
<big_endian
>* object
,
2626 const Symbol_value
<32>* psymval
,
2627 Arm_address address
,
2628 Arm_address thumb_bit
,
2629 bool is_weakly_undefined_without_plt
)
2631 return arm_branch_common(elfcpp::R_ARM_PLT32
, relinfo
, view
, gsym
,
2632 object
, r_sym
, psymval
, address
, thumb_bit
,
2633 is_weakly_undefined_without_plt
);
2636 // R_ARM_XPC25: (S + A) | T - P
2637 static inline typename
This::Status
2638 xpc25(const Relocate_info
<32, big_endian
>* relinfo
,
2639 unsigned char *view
,
2640 const Sized_symbol
<32>* gsym
,
2641 const Arm_relobj
<big_endian
>* object
,
2643 const Symbol_value
<32>* psymval
,
2644 Arm_address address
,
2645 Arm_address thumb_bit
,
2646 bool is_weakly_undefined_without_plt
)
2648 return arm_branch_common(elfcpp::R_ARM_XPC25
, relinfo
, view
, gsym
,
2649 object
, r_sym
, psymval
, address
, thumb_bit
,
2650 is_weakly_undefined_without_plt
);
2653 // R_ARM_CALL: (S + A) | T - P
2654 static inline typename
This::Status
2655 call(const Relocate_info
<32, big_endian
>* relinfo
,
2656 unsigned char *view
,
2657 const Sized_symbol
<32>* gsym
,
2658 const Arm_relobj
<big_endian
>* object
,
2660 const Symbol_value
<32>* psymval
,
2661 Arm_address address
,
2662 Arm_address thumb_bit
,
2663 bool is_weakly_undefined_without_plt
)
2665 return arm_branch_common(elfcpp::R_ARM_CALL
, relinfo
, view
, gsym
,
2666 object
, r_sym
, psymval
, address
, thumb_bit
,
2667 is_weakly_undefined_without_plt
);
2670 // R_ARM_JUMP24: (S + A) | T - P
2671 static inline typename
This::Status
2672 jump24(const Relocate_info
<32, big_endian
>* relinfo
,
2673 unsigned char *view
,
2674 const Sized_symbol
<32>* gsym
,
2675 const Arm_relobj
<big_endian
>* object
,
2677 const Symbol_value
<32>* psymval
,
2678 Arm_address address
,
2679 Arm_address thumb_bit
,
2680 bool is_weakly_undefined_without_plt
)
2682 return arm_branch_common(elfcpp::R_ARM_JUMP24
, relinfo
, view
, gsym
,
2683 object
, r_sym
, psymval
, address
, thumb_bit
,
2684 is_weakly_undefined_without_plt
);
2687 // R_ARM_PREL: (S + A) | T - P
2688 static inline typename
This::Status
2689 prel31(unsigned char *view
,
2690 const Sized_relobj
<32, big_endian
>* object
,
2691 const Symbol_value
<32>* psymval
,
2692 Arm_address address
,
2693 Arm_address thumb_bit
)
2695 typedef typename
elfcpp::Swap
<32, big_endian
>::Valtype Valtype
;
2696 Valtype
* wv
= reinterpret_cast<Valtype
*>(view
);
2697 Valtype val
= elfcpp::Swap
<32, big_endian
>::readval(wv
);
2698 Valtype addend
= utils::sign_extend
<31>(val
);
2699 Valtype x
= (psymval
->value(object
, addend
) | thumb_bit
) - address
;
2700 val
= utils::bit_select(val
, x
, 0x7fffffffU
);
2701 elfcpp::Swap
<32, big_endian
>::writeval(wv
, val
);
2702 return (utils::has_overflow
<31>(x
) ?
2703 This::STATUS_OVERFLOW
: This::STATUS_OKAY
);
2706 // R_ARM_MOVW_ABS_NC: (S + A) | T
2707 static inline typename
This::Status
2708 movw_abs_nc(unsigned char *view
,
2709 const Sized_relobj
<32, big_endian
>* object
,
2710 const Symbol_value
<32>* psymval
,
2711 Arm_address thumb_bit
)
2713 typedef typename
elfcpp::Swap
<32, big_endian
>::Valtype Valtype
;
2714 Valtype
* wv
= reinterpret_cast<Valtype
*>(view
);
2715 Valtype val
= elfcpp::Swap
<32, big_endian
>::readval(wv
);
2716 Valtype addend
= This::extract_arm_movw_movt_addend(val
);
2717 Valtype x
= psymval
->value(object
, addend
) | thumb_bit
;
2718 val
= This::insert_val_arm_movw_movt(val
, x
);
2719 elfcpp::Swap
<32, big_endian
>::writeval(wv
, val
);
2720 return This::STATUS_OKAY
;
2723 // R_ARM_MOVT_ABS: S + A
2724 static inline typename
This::Status
2725 movt_abs(unsigned char *view
,
2726 const Sized_relobj
<32, big_endian
>* object
,
2727 const Symbol_value
<32>* psymval
)
2729 typedef typename
elfcpp::Swap
<32, big_endian
>::Valtype Valtype
;
2730 Valtype
* wv
= reinterpret_cast<Valtype
*>(view
);
2731 Valtype val
= elfcpp::Swap
<32, big_endian
>::readval(wv
);
2732 Valtype addend
= This::extract_arm_movw_movt_addend(val
);
2733 Valtype x
= psymval
->value(object
, addend
) >> 16;
2734 val
= This::insert_val_arm_movw_movt(val
, x
);
2735 elfcpp::Swap
<32, big_endian
>::writeval(wv
, val
);
2736 return This::STATUS_OKAY
;
2739 // R_ARM_THM_MOVW_ABS_NC: S + A | T
2740 static inline typename
This::Status
2741 thm_movw_abs_nc(unsigned char *view
,
2742 const Sized_relobj
<32, big_endian
>* object
,
2743 const Symbol_value
<32>* psymval
,
2744 Arm_address thumb_bit
)
2746 typedef typename
elfcpp::Swap
<16, big_endian
>::Valtype Valtype
;
2747 typedef typename
elfcpp::Swap
<32, big_endian
>::Valtype Reltype
;
2748 Valtype
* wv
= reinterpret_cast<Valtype
*>(view
);
2749 Reltype val
= ((elfcpp::Swap
<16, big_endian
>::readval(wv
) << 16)
2750 | elfcpp::Swap
<16, big_endian
>::readval(wv
+ 1));
2751 Reltype addend
= extract_thumb_movw_movt_addend(val
);
2752 Reltype x
= psymval
->value(object
, addend
) | thumb_bit
;
2753 val
= This::insert_val_thumb_movw_movt(val
, x
);
2754 elfcpp::Swap
<16, big_endian
>::writeval(wv
, val
>> 16);
2755 elfcpp::Swap
<16, big_endian
>::writeval(wv
+ 1, val
& 0xffff);
2756 return This::STATUS_OKAY
;
2759 // R_ARM_THM_MOVT_ABS: S + A
2760 static inline typename
This::Status
2761 thm_movt_abs(unsigned char *view
,
2762 const Sized_relobj
<32, big_endian
>* object
,
2763 const Symbol_value
<32>* psymval
)
2765 typedef typename
elfcpp::Swap
<16, big_endian
>::Valtype Valtype
;
2766 typedef typename
elfcpp::Swap
<32, big_endian
>::Valtype Reltype
;
2767 Valtype
* wv
= reinterpret_cast<Valtype
*>(view
);
2768 Reltype val
= ((elfcpp::Swap
<16, big_endian
>::readval(wv
) << 16)
2769 | elfcpp::Swap
<16, big_endian
>::readval(wv
+ 1));
2770 Reltype addend
= This::extract_thumb_movw_movt_addend(val
);
2771 Reltype x
= psymval
->value(object
, addend
) >> 16;
2772 val
= This::insert_val_thumb_movw_movt(val
, x
);
2773 elfcpp::Swap
<16, big_endian
>::writeval(wv
, val
>> 16);
2774 elfcpp::Swap
<16, big_endian
>::writeval(wv
+ 1, val
& 0xffff);
2775 return This::STATUS_OKAY
;
2778 // R_ARM_MOVW_PREL_NC: (S + A) | T - P
2779 static inline typename
This::Status
2780 movw_prel_nc(unsigned char *view
,
2781 const Sized_relobj
<32, big_endian
>* object
,
2782 const Symbol_value
<32>* psymval
,
2783 Arm_address address
,
2784 Arm_address thumb_bit
)
2786 typedef typename
elfcpp::Swap
<32, big_endian
>::Valtype Valtype
;
2787 Valtype
* wv
= reinterpret_cast<Valtype
*>(view
);
2788 Valtype val
= elfcpp::Swap
<32, big_endian
>::readval(wv
);
2789 Valtype addend
= This::extract_arm_movw_movt_addend(val
);
2790 Valtype x
= (psymval
->value(object
, addend
) | thumb_bit
) - address
;
2791 val
= This::insert_val_arm_movw_movt(val
, x
);
2792 elfcpp::Swap
<32, big_endian
>::writeval(wv
, val
);
2793 return This::STATUS_OKAY
;
2796 // R_ARM_MOVT_PREL: S + A - P
2797 static inline typename
This::Status
2798 movt_prel(unsigned char *view
,
2799 const Sized_relobj
<32, big_endian
>* object
,
2800 const Symbol_value
<32>* psymval
,
2801 Arm_address address
)
2803 typedef typename
elfcpp::Swap
<32, big_endian
>::Valtype Valtype
;
2804 Valtype
* wv
= reinterpret_cast<Valtype
*>(view
);
2805 Valtype val
= elfcpp::Swap
<32, big_endian
>::readval(wv
);
2806 Valtype addend
= This::extract_arm_movw_movt_addend(val
);
2807 Valtype x
= (psymval
->value(object
, addend
) - address
) >> 16;
2808 val
= This::insert_val_arm_movw_movt(val
, x
);
2809 elfcpp::Swap
<32, big_endian
>::writeval(wv
, val
);
2810 return This::STATUS_OKAY
;
2813 // R_ARM_THM_MOVW_PREL_NC: (S + A) | T - P
2814 static inline typename
This::Status
2815 thm_movw_prel_nc(unsigned char *view
,
2816 const Sized_relobj
<32, big_endian
>* object
,
2817 const Symbol_value
<32>* psymval
,
2818 Arm_address address
,
2819 Arm_address thumb_bit
)
2821 typedef typename
elfcpp::Swap
<16, big_endian
>::Valtype Valtype
;
2822 typedef typename
elfcpp::Swap
<32, big_endian
>::Valtype Reltype
;
2823 Valtype
* wv
= reinterpret_cast<Valtype
*>(view
);
2824 Reltype val
= (elfcpp::Swap
<16, big_endian
>::readval(wv
) << 16)
2825 | elfcpp::Swap
<16, big_endian
>::readval(wv
+ 1);
2826 Reltype addend
= This::extract_thumb_movw_movt_addend(val
);
2827 Reltype x
= (psymval
->value(object
, addend
) | thumb_bit
) - address
;
2828 val
= This::insert_val_thumb_movw_movt(val
, x
);
2829 elfcpp::Swap
<16, big_endian
>::writeval(wv
, val
>> 16);
2830 elfcpp::Swap
<16, big_endian
>::writeval(wv
+ 1, val
& 0xffff);
2831 return This::STATUS_OKAY
;
2834 // R_ARM_THM_MOVT_PREL: S + A - P
2835 static inline typename
This::Status
2836 thm_movt_prel(unsigned char *view
,
2837 const Sized_relobj
<32, big_endian
>* object
,
2838 const Symbol_value
<32>* psymval
,
2839 Arm_address address
)
2841 typedef typename
elfcpp::Swap
<16, big_endian
>::Valtype Valtype
;
2842 typedef typename
elfcpp::Swap
<32, big_endian
>::Valtype Reltype
;
2843 Valtype
* wv
= reinterpret_cast<Valtype
*>(view
);
2844 Reltype val
= (elfcpp::Swap
<16, big_endian
>::readval(wv
) << 16)
2845 | elfcpp::Swap
<16, big_endian
>::readval(wv
+ 1);
2846 Reltype addend
= This::extract_thumb_movw_movt_addend(val
);
2847 Reltype x
= (psymval
->value(object
, addend
) - address
) >> 16;
2848 val
= This::insert_val_thumb_movw_movt(val
, x
);
2849 elfcpp::Swap
<16, big_endian
>::writeval(wv
, val
>> 16);
2850 elfcpp::Swap
<16, big_endian
>::writeval(wv
+ 1, val
& 0xffff);
2851 return This::STATUS_OKAY
;
2855 static inline typename
This::Status
2856 v4bx(const Relocate_info
<32, big_endian
>* relinfo
,
2857 unsigned char *view
,
2858 const Arm_relobj
<big_endian
>* object
,
2859 const Arm_address address
,
2860 const bool is_interworking
)
2863 typedef typename
elfcpp::Swap
<32, big_endian
>::Valtype Valtype
;
2864 Valtype
* wv
= reinterpret_cast<Valtype
*>(view
);
2865 Valtype val
= elfcpp::Swap
<32, big_endian
>::readval(wv
);
2867 // Ensure that we have a BX instruction.
2868 gold_assert((val
& 0x0ffffff0) == 0x012fff10);
2869 const uint32_t reg
= (val
& 0xf);
2870 if (is_interworking
&& reg
!= 0xf)
2872 Stub_table
<big_endian
>* stub_table
=
2873 object
->stub_table(relinfo
->data_shndx
);
2874 gold_assert(stub_table
!= NULL
);
2876 Arm_v4bx_stub
* stub
= stub_table
->find_arm_v4bx_stub(reg
);
2877 gold_assert(stub
!= NULL
);
2879 int32_t veneer_address
=
2880 stub_table
->address() + stub
->offset() - 8 - address
;
2881 gold_assert((veneer_address
<= ARM_MAX_FWD_BRANCH_OFFSET
)
2882 && (veneer_address
>= ARM_MAX_BWD_BRANCH_OFFSET
));
2883 // Replace with a branch to veneer (B <addr>)
2884 val
= (val
& 0xf0000000) | 0x0a000000
2885 | ((veneer_address
>> 2) & 0x00ffffff);
2889 // Preserve Rm (lowest four bits) and the condition code
2890 // (highest four bits). Other bits encode MOV PC,Rm.
2891 val
= (val
& 0xf000000f) | 0x01a0f000;
2893 elfcpp::Swap
<32, big_endian
>::writeval(wv
, val
);
2894 return This::STATUS_OKAY
;
2898 // Relocate ARM long branches. This handles relocation types
2899 // R_ARM_CALL, R_ARM_JUMP24, R_ARM_PLT32 and R_ARM_XPC25.
2900 // If IS_WEAK_UNDEFINED_WITH_PLT is true. The target symbol is weakly
2901 // undefined and we do not use PLT in this relocation. In such a case,
2902 // the branch is converted into an NOP.
2904 template<bool big_endian
>
2905 typename Arm_relocate_functions
<big_endian
>::Status
2906 Arm_relocate_functions
<big_endian
>::arm_branch_common(
2907 unsigned int r_type
,
2908 const Relocate_info
<32, big_endian
>* relinfo
,
2909 unsigned char *view
,
2910 const Sized_symbol
<32>* gsym
,
2911 const Arm_relobj
<big_endian
>* object
,
2913 const Symbol_value
<32>* psymval
,
2914 Arm_address address
,
2915 Arm_address thumb_bit
,
2916 bool is_weakly_undefined_without_plt
)
2918 typedef typename
elfcpp::Swap
<32, big_endian
>::Valtype Valtype
;
2919 Valtype
* wv
= reinterpret_cast<Valtype
*>(view
);
2920 Valtype val
= elfcpp::Swap
<32, big_endian
>::readval(wv
);
2922 bool insn_is_b
= (((val
>> 28) & 0xf) <= 0xe)
2923 && ((val
& 0x0f000000UL
) == 0x0a000000UL
);
2924 bool insn_is_uncond_bl
= (val
& 0xff000000UL
) == 0xeb000000UL
;
2925 bool insn_is_cond_bl
= (((val
>> 28) & 0xf) < 0xe)
2926 && ((val
& 0x0f000000UL
) == 0x0b000000UL
);
2927 bool insn_is_blx
= (val
& 0xfe000000UL
) == 0xfa000000UL
;
2928 bool insn_is_any_branch
= (val
& 0x0e000000UL
) == 0x0a000000UL
;
2930 // Check that the instruction is valid.
2931 if (r_type
== elfcpp::R_ARM_CALL
)
2933 if (!insn_is_uncond_bl
&& !insn_is_blx
)
2934 return This::STATUS_BAD_RELOC
;
2936 else if (r_type
== elfcpp::R_ARM_JUMP24
)
2938 if (!insn_is_b
&& !insn_is_cond_bl
)
2939 return This::STATUS_BAD_RELOC
;
2941 else if (r_type
== elfcpp::R_ARM_PLT32
)
2943 if (!insn_is_any_branch
)
2944 return This::STATUS_BAD_RELOC
;
2946 else if (r_type
== elfcpp::R_ARM_XPC25
)
2948 // FIXME: AAELF document IH0044C does not say much about it other
2949 // than it being obsolete.
2950 if (!insn_is_any_branch
)
2951 return This::STATUS_BAD_RELOC
;
2956 // A branch to an undefined weak symbol is turned into a jump to
2957 // the next instruction unless a PLT entry will be created.
2958 // Do the same for local undefined symbols.
2959 // The jump to the next instruction is optimized as a NOP depending
2960 // on the architecture.
2961 const Target_arm
<big_endian
>* arm_target
=
2962 Target_arm
<big_endian
>::default_target();
2963 if (is_weakly_undefined_without_plt
)
2965 Valtype cond
= val
& 0xf0000000U
;
2966 if (arm_target
->may_use_arm_nop())
2967 val
= cond
| 0x0320f000;
2969 val
= cond
| 0x01a00000; // Using pre-UAL nop: mov r0, r0.
2970 elfcpp::Swap
<32, big_endian
>::writeval(wv
, val
);
2971 return This::STATUS_OKAY
;
2974 Valtype addend
= utils::sign_extend
<26>(val
<< 2);
2975 Valtype branch_target
= psymval
->value(object
, addend
);
2976 int32_t branch_offset
= branch_target
- address
;
2978 // We need a stub if the branch offset is too large or if we need
2980 bool may_use_blx
= arm_target
->may_use_blx();
2981 Reloc_stub
* stub
= NULL
;
2982 if ((branch_offset
> ARM_MAX_FWD_BRANCH_OFFSET
)
2983 || (branch_offset
< ARM_MAX_BWD_BRANCH_OFFSET
)
2984 || ((thumb_bit
!= 0) && !(may_use_blx
&& r_type
== elfcpp::R_ARM_CALL
)))
2986 Stub_type stub_type
=
2987 Reloc_stub::stub_type_for_reloc(r_type
, address
, branch_target
,
2989 if (stub_type
!= arm_stub_none
)
2991 Stub_table
<big_endian
>* stub_table
=
2992 object
->stub_table(relinfo
->data_shndx
);
2993 gold_assert(stub_table
!= NULL
);
2995 Reloc_stub::Key
stub_key(stub_type
, gsym
, object
, r_sym
, addend
);
2996 stub
= stub_table
->find_reloc_stub(stub_key
);
2997 gold_assert(stub
!= NULL
);
2998 thumb_bit
= stub
->stub_template()->entry_in_thumb_mode() ? 1 : 0;
2999 branch_target
= stub_table
->address() + stub
->offset() + addend
;
3000 branch_offset
= branch_target
- address
;
3001 gold_assert((branch_offset
<= ARM_MAX_FWD_BRANCH_OFFSET
)
3002 && (branch_offset
>= ARM_MAX_BWD_BRANCH_OFFSET
));
3006 // At this point, if we still need to switch mode, the instruction
3007 // must either be a BLX or a BL that can be converted to a BLX.
3011 gold_assert(may_use_blx
&& r_type
== elfcpp::R_ARM_CALL
);
3012 val
= (val
& 0xffffff) | 0xfa000000 | ((branch_offset
& 2) << 23);
3015 val
= utils::bit_select(val
, (branch_offset
>> 2), 0xffffffUL
);
3016 elfcpp::Swap
<32, big_endian
>::writeval(wv
, val
);
3017 return (utils::has_overflow
<26>(branch_offset
)
3018 ? This::STATUS_OVERFLOW
: This::STATUS_OKAY
);
3021 // Relocate THUMB long branches. This handles relocation types
3022 // R_ARM_THM_CALL, R_ARM_THM_JUMP24 and R_ARM_THM_XPC22.
3023 // If IS_WEAK_UNDEFINED_WITH_PLT is true. The target symbol is weakly
3024 // undefined and we do not use PLT in this relocation. In such a case,
3025 // the branch is converted into an NOP.
3027 template<bool big_endian
>
3028 typename Arm_relocate_functions
<big_endian
>::Status
3029 Arm_relocate_functions
<big_endian
>::thumb_branch_common(
3030 unsigned int r_type
,
3031 const Relocate_info
<32, big_endian
>* relinfo
,
3032 unsigned char *view
,
3033 const Sized_symbol
<32>* gsym
,
3034 const Arm_relobj
<big_endian
>* object
,
3036 const Symbol_value
<32>* psymval
,
3037 Arm_address address
,
3038 Arm_address thumb_bit
,
3039 bool is_weakly_undefined_without_plt
)
3041 typedef typename
elfcpp::Swap
<16, big_endian
>::Valtype Valtype
;
3042 Valtype
* wv
= reinterpret_cast<Valtype
*>(view
);
3043 uint32_t upper_insn
= elfcpp::Swap
<16, big_endian
>::readval(wv
);
3044 uint32_t lower_insn
= elfcpp::Swap
<16, big_endian
>::readval(wv
+ 1);
3046 // FIXME: These tests are too loose and do not take THUMB/THUMB-2 difference
3048 bool is_bl_insn
= (lower_insn
& 0x1000U
) == 0x1000U
;
3049 bool is_blx_insn
= (lower_insn
& 0x1000U
) == 0x0000U
;
3051 // Check that the instruction is valid.
3052 if (r_type
== elfcpp::R_ARM_THM_CALL
)
3054 if (!is_bl_insn
&& !is_blx_insn
)
3055 return This::STATUS_BAD_RELOC
;
3057 else if (r_type
== elfcpp::R_ARM_THM_JUMP24
)
3059 // This cannot be a BLX.
3061 return This::STATUS_BAD_RELOC
;
3063 else if (r_type
== elfcpp::R_ARM_THM_XPC22
)
3065 // Check for Thumb to Thumb call.
3067 return This::STATUS_BAD_RELOC
;
3070 gold_warning(_("%s: Thumb BLX instruction targets "
3071 "thumb function '%s'."),
3072 object
->name().c_str(),
3073 (gsym
? gsym
->name() : "(local)"));
3074 // Convert BLX to BL.
3075 lower_insn
|= 0x1000U
;
3081 // A branch to an undefined weak symbol is turned into a jump to
3082 // the next instruction unless a PLT entry will be created.
3083 // The jump to the next instruction is optimized as a NOP.W for
3084 // Thumb-2 enabled architectures.
3085 const Target_arm
<big_endian
>* arm_target
=
3086 Target_arm
<big_endian
>::default_target();
3087 if (is_weakly_undefined_without_plt
)
3089 if (arm_target
->may_use_thumb2_nop())
3091 elfcpp::Swap
<16, big_endian
>::writeval(wv
, 0xf3af);
3092 elfcpp::Swap
<16, big_endian
>::writeval(wv
+ 1, 0x8000);
3096 elfcpp::Swap
<16, big_endian
>::writeval(wv
, 0xe000);
3097 elfcpp::Swap
<16, big_endian
>::writeval(wv
+ 1, 0xbf00);
3099 return This::STATUS_OKAY
;
3102 int32_t addend
= This::thumb32_branch_offset(upper_insn
, lower_insn
);
3103 Arm_address branch_target
= psymval
->value(object
, addend
);
3104 int32_t branch_offset
= branch_target
- address
;
3106 // We need a stub if the branch offset is too large or if we need
3108 bool may_use_blx
= arm_target
->may_use_blx();
3109 bool thumb2
= arm_target
->using_thumb2();
3111 && (branch_offset
> THM_MAX_FWD_BRANCH_OFFSET
3112 || (branch_offset
< THM_MAX_BWD_BRANCH_OFFSET
)))
3114 && (branch_offset
> THM2_MAX_FWD_BRANCH_OFFSET
3115 || (branch_offset
< THM2_MAX_BWD_BRANCH_OFFSET
)))
3116 || ((thumb_bit
== 0)
3117 && (((r_type
== elfcpp::R_ARM_THM_CALL
) && !may_use_blx
)
3118 || r_type
== elfcpp::R_ARM_THM_JUMP24
)))
3120 Stub_type stub_type
=
3121 Reloc_stub::stub_type_for_reloc(r_type
, address
, branch_target
,
3123 if (stub_type
!= arm_stub_none
)
3125 Stub_table
<big_endian
>* stub_table
=
3126 object
->stub_table(relinfo
->data_shndx
);
3127 gold_assert(stub_table
!= NULL
);
3129 Reloc_stub::Key
stub_key(stub_type
, gsym
, object
, r_sym
, addend
);
3130 Reloc_stub
* stub
= stub_table
->find_reloc_stub(stub_key
);
3131 gold_assert(stub
!= NULL
);
3132 thumb_bit
= stub
->stub_template()->entry_in_thumb_mode() ? 1 : 0;
3133 branch_target
= stub_table
->address() + stub
->offset() + addend
;
3134 branch_offset
= branch_target
- address
;
3138 // At this point, if we still need to switch mode, the instruction
3139 // must either be a BLX or a BL that can be converted to a BLX.
3142 gold_assert(may_use_blx
3143 && (r_type
== elfcpp::R_ARM_THM_CALL
3144 || r_type
== elfcpp::R_ARM_THM_XPC22
));
3145 // Make sure this is a BLX.
3146 lower_insn
&= ~0x1000U
;
3150 // Make sure this is a BL.
3151 lower_insn
|= 0x1000U
;
3154 if ((lower_insn
& 0x5000U
) == 0x4000U
)
3155 // For a BLX instruction, make sure that the relocation is rounded up
3156 // to a word boundary. This follows the semantics of the instruction
3157 // which specifies that bit 1 of the target address will come from bit
3158 // 1 of the base address.
3159 branch_offset
= (branch_offset
+ 2) & ~3;
3161 // Put BRANCH_OFFSET back into the insn. Assumes two's complement.
3162 // We use the Thumb-2 encoding, which is safe even if dealing with
3163 // a Thumb-1 instruction by virtue of our overflow check above. */
3164 upper_insn
= This::thumb32_branch_upper(upper_insn
, branch_offset
);
3165 lower_insn
= This::thumb32_branch_lower(lower_insn
, branch_offset
);
3167 elfcpp::Swap
<16, big_endian
>::writeval(wv
, upper_insn
);
3168 elfcpp::Swap
<16, big_endian
>::writeval(wv
+ 1, lower_insn
);
3171 ? utils::has_overflow
<25>(branch_offset
)
3172 : utils::has_overflow
<23>(branch_offset
))
3173 ? This::STATUS_OVERFLOW
3174 : This::STATUS_OKAY
);
3177 // Relocate THUMB-2 long conditional branches.
3178 // If IS_WEAK_UNDEFINED_WITH_PLT is true. The target symbol is weakly
3179 // undefined and we do not use PLT in this relocation. In such a case,
3180 // the branch is converted into an NOP.
3182 template<bool big_endian
>
3183 typename Arm_relocate_functions
<big_endian
>::Status
3184 Arm_relocate_functions
<big_endian
>::thm_jump19(
3185 unsigned char *view
,
3186 const Arm_relobj
<big_endian
>* object
,
3187 const Symbol_value
<32>* psymval
,
3188 Arm_address address
,
3189 Arm_address thumb_bit
)
3191 typedef typename
elfcpp::Swap
<16, big_endian
>::Valtype Valtype
;
3192 Valtype
* wv
= reinterpret_cast<Valtype
*>(view
);
3193 uint32_t upper_insn
= elfcpp::Swap
<16, big_endian
>::readval(wv
);
3194 uint32_t lower_insn
= elfcpp::Swap
<16, big_endian
>::readval(wv
+ 1);
3195 int32_t addend
= This::thumb32_cond_branch_offset(upper_insn
, lower_insn
);
3197 Arm_address branch_target
= psymval
->value(object
, addend
);
3198 int32_t branch_offset
= branch_target
- address
;
3200 // ??? Should handle interworking? GCC might someday try to
3201 // use this for tail calls.
3202 // FIXME: We do support thumb entry to PLT yet.
3205 gold_error(_("conditional branch to PLT in THUMB-2 not supported yet."));
3206 return This::STATUS_BAD_RELOC
;
3209 // Put RELOCATION back into the insn.
3210 upper_insn
= This::thumb32_cond_branch_upper(upper_insn
, branch_offset
);
3211 lower_insn
= This::thumb32_cond_branch_lower(lower_insn
, branch_offset
);
3213 // Put the relocated value back in the object file:
3214 elfcpp::Swap
<16, big_endian
>::writeval(wv
, upper_insn
);
3215 elfcpp::Swap
<16, big_endian
>::writeval(wv
+ 1, lower_insn
);
3217 return (utils::has_overflow
<21>(branch_offset
)
3218 ? This::STATUS_OVERFLOW
3219 : This::STATUS_OKAY
);
3222 // Get the GOT section, creating it if necessary.
3224 template<bool big_endian
>
3225 Output_data_got
<32, big_endian
>*
3226 Target_arm
<big_endian
>::got_section(Symbol_table
* symtab
, Layout
* layout
)
3228 if (this->got_
== NULL
)
3230 gold_assert(symtab
!= NULL
&& layout
!= NULL
);
3232 this->got_
= new Output_data_got
<32, big_endian
>();
3235 os
= layout
->add_output_section_data(".got", elfcpp::SHT_PROGBITS
,
3237 | elfcpp::SHF_WRITE
),
3238 this->got_
, false, true, true,
3241 // The old GNU linker creates a .got.plt section. We just
3242 // create another set of data in the .got section. Note that we
3243 // always create a PLT if we create a GOT, although the PLT
3245 this->got_plt_
= new Output_data_space(4, "** GOT PLT");
3246 os
= layout
->add_output_section_data(".got", elfcpp::SHT_PROGBITS
,
3248 | elfcpp::SHF_WRITE
),
3249 this->got_plt_
, false, false,
3252 // The first three entries are reserved.
3253 this->got_plt_
->set_current_data_size(3 * 4);
3255 // Define _GLOBAL_OFFSET_TABLE_ at the start of the PLT.
3256 symtab
->define_in_output_data("_GLOBAL_OFFSET_TABLE_", NULL
,
3257 Symbol_table::PREDEFINED
,
3259 0, 0, elfcpp::STT_OBJECT
,
3261 elfcpp::STV_HIDDEN
, 0,
3267 // Get the dynamic reloc section, creating it if necessary.
3269 template<bool big_endian
>
3270 typename Target_arm
<big_endian
>::Reloc_section
*
3271 Target_arm
<big_endian
>::rel_dyn_section(Layout
* layout
)
3273 if (this->rel_dyn_
== NULL
)
3275 gold_assert(layout
!= NULL
);
3276 this->rel_dyn_
= new Reloc_section(parameters
->options().combreloc());
3277 layout
->add_output_section_data(".rel.dyn", elfcpp::SHT_REL
,
3278 elfcpp::SHF_ALLOC
, this->rel_dyn_
, true,
3279 false, false, false);
3281 return this->rel_dyn_
;
3284 // Insn_template methods.
3286 // Return byte size of an instruction template.
3289 Insn_template::size() const
3291 switch (this->type())
3294 case THUMB16_SPECIAL_TYPE
:
3305 // Return alignment of an instruction template.
3308 Insn_template::alignment() const
3310 switch (this->type())
3313 case THUMB16_SPECIAL_TYPE
:
3324 // Stub_template methods.
3326 Stub_template::Stub_template(
3327 Stub_type type
, const Insn_template
* insns
,
3329 : type_(type
), insns_(insns
), insn_count_(insn_count
), alignment_(1),
3330 entry_in_thumb_mode_(false), relocs_()
3334 // Compute byte size and alignment of stub template.
3335 for (size_t i
= 0; i
< insn_count
; i
++)
3337 unsigned insn_alignment
= insns
[i
].alignment();
3338 size_t insn_size
= insns
[i
].size();
3339 gold_assert((offset
& (insn_alignment
- 1)) == 0);
3340 this->alignment_
= std::max(this->alignment_
, insn_alignment
);
3341 switch (insns
[i
].type())
3343 case Insn_template::THUMB16_TYPE
:
3344 case Insn_template::THUMB16_SPECIAL_TYPE
:
3346 this->entry_in_thumb_mode_
= true;
3349 case Insn_template::THUMB32_TYPE
:
3350 if (insns
[i
].r_type() != elfcpp::R_ARM_NONE
)
3351 this->relocs_
.push_back(Reloc(i
, offset
));
3353 this->entry_in_thumb_mode_
= true;
3356 case Insn_template::ARM_TYPE
:
3357 // Handle cases where the target is encoded within the
3359 if (insns
[i
].r_type() == elfcpp::R_ARM_JUMP24
)
3360 this->relocs_
.push_back(Reloc(i
, offset
));
3363 case Insn_template::DATA_TYPE
:
3364 // Entry point cannot be data.
3365 gold_assert(i
!= 0);
3366 this->relocs_
.push_back(Reloc(i
, offset
));
3372 offset
+= insn_size
;
3374 this->size_
= offset
;
3379 // Template to implement do_write for a specific target endianity.
3381 template<bool big_endian
>
3383 Stub::do_fixed_endian_write(unsigned char* view
, section_size_type view_size
)
3385 const Stub_template
* stub_template
= this->stub_template();
3386 const Insn_template
* insns
= stub_template
->insns();
3388 // FIXME: We do not handle BE8 encoding yet.
3389 unsigned char* pov
= view
;
3390 for (size_t i
= 0; i
< stub_template
->insn_count(); i
++)
3392 switch (insns
[i
].type())
3394 case Insn_template::THUMB16_TYPE
:
3395 elfcpp::Swap
<16, big_endian
>::writeval(pov
, insns
[i
].data() & 0xffff);
3397 case Insn_template::THUMB16_SPECIAL_TYPE
:
3398 elfcpp::Swap
<16, big_endian
>::writeval(
3400 this->thumb16_special(i
));
3402 case Insn_template::THUMB32_TYPE
:
3404 uint32_t hi
= (insns
[i
].data() >> 16) & 0xffff;
3405 uint32_t lo
= insns
[i
].data() & 0xffff;
3406 elfcpp::Swap
<16, big_endian
>::writeval(pov
, hi
);
3407 elfcpp::Swap
<16, big_endian
>::writeval(pov
+ 2, lo
);
3410 case Insn_template::ARM_TYPE
:
3411 case Insn_template::DATA_TYPE
:
3412 elfcpp::Swap
<32, big_endian
>::writeval(pov
, insns
[i
].data());
3417 pov
+= insns
[i
].size();
3419 gold_assert(static_cast<section_size_type
>(pov
- view
) == view_size
);
3422 // Reloc_stub::Key methods.
3424 // Dump a Key as a string for debugging.
3427 Reloc_stub::Key::name() const
3429 if (this->r_sym_
== invalid_index
)
3431 // Global symbol key name
3432 // <stub-type>:<symbol name>:<addend>.
3433 const std::string sym_name
= this->u_
.symbol
->name();
3434 // We need to print two hex number and two colons. So just add 100 bytes
3435 // to the symbol name size.
3436 size_t len
= sym_name
.size() + 100;
3437 char* buffer
= new char[len
];
3438 int c
= snprintf(buffer
, len
, "%d:%s:%x", this->stub_type_
,
3439 sym_name
.c_str(), this->addend_
);
3440 gold_assert(c
> 0 && c
< static_cast<int>(len
));
3442 return std::string(buffer
);
3446 // local symbol key name
3447 // <stub-type>:<object>:<r_sym>:<addend>.
3448 const size_t len
= 200;
3450 int c
= snprintf(buffer
, len
, "%d:%p:%u:%x", this->stub_type_
,
3451 this->u_
.relobj
, this->r_sym_
, this->addend_
);
3452 gold_assert(c
> 0 && c
< static_cast<int>(len
));
3453 return std::string(buffer
);
3457 // Reloc_stub methods.
3459 // Determine the type of stub needed, if any, for a relocation of R_TYPE at
3460 // LOCATION to DESTINATION.
3461 // This code is based on the arm_type_of_stub function in
3462 // bfd/elf32-arm.c. We have changed the interface a liitle to keep the Stub
3466 Reloc_stub::stub_type_for_reloc(
3467 unsigned int r_type
,
3468 Arm_address location
,
3469 Arm_address destination
,
3470 bool target_is_thumb
)
3472 Stub_type stub_type
= arm_stub_none
;
3474 // This is a bit ugly but we want to avoid using a templated class for
3475 // big and little endianities.
3477 bool should_force_pic_veneer
;
3480 if (parameters
->target().is_big_endian())
3482 const Target_arm
<true>* big_endian_target
=
3483 Target_arm
<true>::default_target();
3484 may_use_blx
= big_endian_target
->may_use_blx();
3485 should_force_pic_veneer
= big_endian_target
->should_force_pic_veneer();
3486 thumb2
= big_endian_target
->using_thumb2();
3487 thumb_only
= big_endian_target
->using_thumb_only();
3491 const Target_arm
<false>* little_endian_target
=
3492 Target_arm
<false>::default_target();
3493 may_use_blx
= little_endian_target
->may_use_blx();
3494 should_force_pic_veneer
= little_endian_target
->should_force_pic_veneer();
3495 thumb2
= little_endian_target
->using_thumb2();
3496 thumb_only
= little_endian_target
->using_thumb_only();
3499 int64_t branch_offset
= (int64_t)destination
- location
;
3501 if (r_type
== elfcpp::R_ARM_THM_CALL
|| r_type
== elfcpp::R_ARM_THM_JUMP24
)
3503 // Handle cases where:
3504 // - this call goes too far (different Thumb/Thumb2 max
3506 // - it's a Thumb->Arm call and blx is not available, or it's a
3507 // Thumb->Arm branch (not bl). A stub is needed in this case.
3509 && (branch_offset
> THM_MAX_FWD_BRANCH_OFFSET
3510 || (branch_offset
< THM_MAX_BWD_BRANCH_OFFSET
)))
3512 && (branch_offset
> THM2_MAX_FWD_BRANCH_OFFSET
3513 || (branch_offset
< THM2_MAX_BWD_BRANCH_OFFSET
)))
3514 || ((!target_is_thumb
)
3515 && (((r_type
== elfcpp::R_ARM_THM_CALL
) && !may_use_blx
)
3516 || (r_type
== elfcpp::R_ARM_THM_JUMP24
))))
3518 if (target_is_thumb
)
3523 stub_type
= (parameters
->options().shared()
3524 || should_force_pic_veneer
)
3527 && (r_type
== elfcpp::R_ARM_THM_CALL
))
3528 // V5T and above. Stub starts with ARM code, so
3529 // we must be able to switch mode before
3530 // reaching it, which is only possible for 'bl'
3531 // (ie R_ARM_THM_CALL relocation).
3532 ? arm_stub_long_branch_any_thumb_pic
3533 // On V4T, use Thumb code only.
3534 : arm_stub_long_branch_v4t_thumb_thumb_pic
)
3538 && (r_type
== elfcpp::R_ARM_THM_CALL
))
3539 ? arm_stub_long_branch_any_any
// V5T and above.
3540 : arm_stub_long_branch_v4t_thumb_thumb
); // V4T.
3544 stub_type
= (parameters
->options().shared()
3545 || should_force_pic_veneer
)
3546 ? arm_stub_long_branch_thumb_only_pic
// PIC stub.
3547 : arm_stub_long_branch_thumb_only
; // non-PIC stub.
3554 // FIXME: We should check that the input section is from an
3555 // object that has interwork enabled.
3557 stub_type
= (parameters
->options().shared()
3558 || should_force_pic_veneer
)
3561 && (r_type
== elfcpp::R_ARM_THM_CALL
))
3562 ? arm_stub_long_branch_any_arm_pic
// V5T and above.
3563 : arm_stub_long_branch_v4t_thumb_arm_pic
) // V4T.
3567 && (r_type
== elfcpp::R_ARM_THM_CALL
))
3568 ? arm_stub_long_branch_any_any
// V5T and above.
3569 : arm_stub_long_branch_v4t_thumb_arm
); // V4T.
3571 // Handle v4t short branches.
3572 if ((stub_type
== arm_stub_long_branch_v4t_thumb_arm
)
3573 && (branch_offset
<= THM_MAX_FWD_BRANCH_OFFSET
)
3574 && (branch_offset
>= THM_MAX_BWD_BRANCH_OFFSET
))
3575 stub_type
= arm_stub_short_branch_v4t_thumb_arm
;
3579 else if (r_type
== elfcpp::R_ARM_CALL
3580 || r_type
== elfcpp::R_ARM_JUMP24
3581 || r_type
== elfcpp::R_ARM_PLT32
)
3583 if (target_is_thumb
)
3587 // FIXME: We should check that the input section is from an
3588 // object that has interwork enabled.
3590 // We have an extra 2-bytes reach because of
3591 // the mode change (bit 24 (H) of BLX encoding).
3592 if (branch_offset
> (ARM_MAX_FWD_BRANCH_OFFSET
+ 2)
3593 || (branch_offset
< ARM_MAX_BWD_BRANCH_OFFSET
)
3594 || ((r_type
== elfcpp::R_ARM_CALL
) && !may_use_blx
)
3595 || (r_type
== elfcpp::R_ARM_JUMP24
)
3596 || (r_type
== elfcpp::R_ARM_PLT32
))
3598 stub_type
= (parameters
->options().shared()
3599 || should_force_pic_veneer
)
3602 ? arm_stub_long_branch_any_thumb_pic
// V5T and above.
3603 : arm_stub_long_branch_v4t_arm_thumb_pic
) // V4T stub.
3607 ? arm_stub_long_branch_any_any
// V5T and above.
3608 : arm_stub_long_branch_v4t_arm_thumb
); // V4T.
3614 if (branch_offset
> ARM_MAX_FWD_BRANCH_OFFSET
3615 || (branch_offset
< ARM_MAX_BWD_BRANCH_OFFSET
))
3617 stub_type
= (parameters
->options().shared()
3618 || should_force_pic_veneer
)
3619 ? arm_stub_long_branch_any_arm_pic
// PIC stubs.
3620 : arm_stub_long_branch_any_any
; /// non-PIC.
3628 // Cortex_a8_stub methods.
3630 // Return the instruction for a THUMB16_SPECIAL_TYPE instruction template.
3631 // I is the position of the instruction template in the stub template.
3634 Cortex_a8_stub::do_thumb16_special(size_t i
)
3636 // The only use of this is to copy condition code from a conditional
3637 // branch being worked around to the corresponding conditional branch in
3639 gold_assert(this->stub_template()->type() == arm_stub_a8_veneer_b_cond
3641 uint16_t data
= this->stub_template()->insns()[i
].data();
3642 gold_assert((data
& 0xff00U
) == 0xd000U
);
3643 data
|= ((this->original_insn_
>> 22) & 0xf) << 8;
3647 // Stub_factory methods.
3649 Stub_factory::Stub_factory()
3651 // The instruction template sequences are declared as static
3652 // objects and initialized first time the constructor runs.
3654 // Arm/Thumb -> Arm/Thumb long branch stub. On V5T and above, use blx
3655 // to reach the stub if necessary.
3656 static const Insn_template elf32_arm_stub_long_branch_any_any
[] =
3658 Insn_template::arm_insn(0xe51ff004), // ldr pc, [pc, #-4]
3659 Insn_template::data_word(0, elfcpp::R_ARM_ABS32
, 0),
3660 // dcd R_ARM_ABS32(X)
3663 // V4T Arm -> Thumb long branch stub. Used on V4T where blx is not
3665 static const Insn_template elf32_arm_stub_long_branch_v4t_arm_thumb
[] =
3667 Insn_template::arm_insn(0xe59fc000), // ldr ip, [pc, #0]
3668 Insn_template::arm_insn(0xe12fff1c), // bx ip
3669 Insn_template::data_word(0, elfcpp::R_ARM_ABS32
, 0),
3670 // dcd R_ARM_ABS32(X)
3673 // Thumb -> Thumb long branch stub. Used on M-profile architectures.
3674 static const Insn_template elf32_arm_stub_long_branch_thumb_only
[] =
3676 Insn_template::thumb16_insn(0xb401), // push {r0}
3677 Insn_template::thumb16_insn(0x4802), // ldr r0, [pc, #8]
3678 Insn_template::thumb16_insn(0x4684), // mov ip, r0
3679 Insn_template::thumb16_insn(0xbc01), // pop {r0}
3680 Insn_template::thumb16_insn(0x4760), // bx ip
3681 Insn_template::thumb16_insn(0xbf00), // nop
3682 Insn_template::data_word(0, elfcpp::R_ARM_ABS32
, 0),
3683 // dcd R_ARM_ABS32(X)
3686 // V4T Thumb -> Thumb long branch stub. Using the stack is not
3688 static const Insn_template elf32_arm_stub_long_branch_v4t_thumb_thumb
[] =
3690 Insn_template::thumb16_insn(0x4778), // bx pc
3691 Insn_template::thumb16_insn(0x46c0), // nop
3692 Insn_template::arm_insn(0xe59fc000), // ldr ip, [pc, #0]
3693 Insn_template::arm_insn(0xe12fff1c), // bx ip
3694 Insn_template::data_word(0, elfcpp::R_ARM_ABS32
, 0),
3695 // dcd R_ARM_ABS32(X)
3698 // V4T Thumb -> ARM long branch stub. Used on V4T where blx is not
3700 static const Insn_template elf32_arm_stub_long_branch_v4t_thumb_arm
[] =
3702 Insn_template::thumb16_insn(0x4778), // bx pc
3703 Insn_template::thumb16_insn(0x46c0), // nop
3704 Insn_template::arm_insn(0xe51ff004), // ldr pc, [pc, #-4]
3705 Insn_template::data_word(0, elfcpp::R_ARM_ABS32
, 0),
3706 // dcd R_ARM_ABS32(X)
3709 // V4T Thumb -> ARM short branch stub. Shorter variant of the above
3710 // one, when the destination is close enough.
3711 static const Insn_template elf32_arm_stub_short_branch_v4t_thumb_arm
[] =
3713 Insn_template::thumb16_insn(0x4778), // bx pc
3714 Insn_template::thumb16_insn(0x46c0), // nop
3715 Insn_template::arm_rel_insn(0xea000000, -8), // b (X-8)
3718 // ARM/Thumb -> ARM long branch stub, PIC. On V5T and above, use
3719 // blx to reach the stub if necessary.
3720 static const Insn_template elf32_arm_stub_long_branch_any_arm_pic
[] =
3722 Insn_template::arm_insn(0xe59fc000), // ldr r12, [pc]
3723 Insn_template::arm_insn(0xe08ff00c), // add pc, pc, ip
3724 Insn_template::data_word(0, elfcpp::R_ARM_REL32
, -4),
3725 // dcd R_ARM_REL32(X-4)
3728 // ARM/Thumb -> Thumb long branch stub, PIC. On V5T and above, use
3729 // blx to reach the stub if necessary. We can not add into pc;
3730 // it is not guaranteed to mode switch (different in ARMv6 and
3732 static const Insn_template elf32_arm_stub_long_branch_any_thumb_pic
[] =
3734 Insn_template::arm_insn(0xe59fc004), // ldr r12, [pc, #4]
3735 Insn_template::arm_insn(0xe08fc00c), // add ip, pc, ip
3736 Insn_template::arm_insn(0xe12fff1c), // bx ip
3737 Insn_template::data_word(0, elfcpp::R_ARM_REL32
, 0),
3738 // dcd R_ARM_REL32(X)
3741 // V4T ARM -> ARM long branch stub, PIC.
3742 static const Insn_template elf32_arm_stub_long_branch_v4t_arm_thumb_pic
[] =
3744 Insn_template::arm_insn(0xe59fc004), // ldr ip, [pc, #4]
3745 Insn_template::arm_insn(0xe08fc00c), // add ip, pc, ip
3746 Insn_template::arm_insn(0xe12fff1c), // bx ip
3747 Insn_template::data_word(0, elfcpp::R_ARM_REL32
, 0),
3748 // dcd R_ARM_REL32(X)
3751 // V4T Thumb -> ARM long branch stub, PIC.
3752 static const Insn_template elf32_arm_stub_long_branch_v4t_thumb_arm_pic
[] =
3754 Insn_template::thumb16_insn(0x4778), // bx pc
3755 Insn_template::thumb16_insn(0x46c0), // nop
3756 Insn_template::arm_insn(0xe59fc000), // ldr ip, [pc, #0]
3757 Insn_template::arm_insn(0xe08cf00f), // add pc, ip, pc
3758 Insn_template::data_word(0, elfcpp::R_ARM_REL32
, -4),
3759 // dcd R_ARM_REL32(X)
3762 // Thumb -> Thumb long branch stub, PIC. Used on M-profile
3764 static const Insn_template elf32_arm_stub_long_branch_thumb_only_pic
[] =
3766 Insn_template::thumb16_insn(0xb401), // push {r0}
3767 Insn_template::thumb16_insn(0x4802), // ldr r0, [pc, #8]
3768 Insn_template::thumb16_insn(0x46fc), // mov ip, pc
3769 Insn_template::thumb16_insn(0x4484), // add ip, r0
3770 Insn_template::thumb16_insn(0xbc01), // pop {r0}
3771 Insn_template::thumb16_insn(0x4760), // bx ip
3772 Insn_template::data_word(0, elfcpp::R_ARM_REL32
, 4),
3773 // dcd R_ARM_REL32(X)
3776 // V4T Thumb -> Thumb long branch stub, PIC. Using the stack is not
3778 static const Insn_template elf32_arm_stub_long_branch_v4t_thumb_thumb_pic
[] =
3780 Insn_template::thumb16_insn(0x4778), // bx pc
3781 Insn_template::thumb16_insn(0x46c0), // nop
3782 Insn_template::arm_insn(0xe59fc004), // ldr ip, [pc, #4]
3783 Insn_template::arm_insn(0xe08fc00c), // add ip, pc, ip
3784 Insn_template::arm_insn(0xe12fff1c), // bx ip
3785 Insn_template::data_word(0, elfcpp::R_ARM_REL32
, 0),
3786 // dcd R_ARM_REL32(X)
3789 // Cortex-A8 erratum-workaround stubs.
3791 // Stub used for conditional branches (which may be beyond +/-1MB away,
3792 // so we can't use a conditional branch to reach this stub).
3799 static const Insn_template elf32_arm_stub_a8_veneer_b_cond
[] =
3801 Insn_template::thumb16_bcond_insn(0xd001), // b<cond>.n true
3802 Insn_template::thumb32_b_insn(0xf000b800, -4), // b.w after
3803 Insn_template::thumb32_b_insn(0xf000b800, -4) // true:
3807 // Stub used for b.w and bl.w instructions.
3809 static const Insn_template elf32_arm_stub_a8_veneer_b
[] =
3811 Insn_template::thumb32_b_insn(0xf000b800, -4) // b.w dest
3814 static const Insn_template elf32_arm_stub_a8_veneer_bl
[] =
3816 Insn_template::thumb32_b_insn(0xf000b800, -4) // b.w dest
3819 // Stub used for Thumb-2 blx.w instructions. We modified the original blx.w
3820 // instruction (which switches to ARM mode) to point to this stub. Jump to
3821 // the real destination using an ARM-mode branch.
3822 static const Insn_template elf32_arm_stub_a8_veneer_blx
[] =
3824 Insn_template::arm_rel_insn(0xea000000, -8) // b dest
3827 // Stub used to provide an interworking for R_ARM_V4BX relocation
3828 // (bx r[n] instruction).
3829 static const Insn_template elf32_arm_stub_v4_veneer_bx
[] =
3831 Insn_template::arm_insn(0xe3100001), // tst r<n>, #1
3832 Insn_template::arm_insn(0x01a0f000), // moveq pc, r<n>
3833 Insn_template::arm_insn(0xe12fff10) // bx r<n>
3836 // Fill in the stub template look-up table. Stub templates are constructed
3837 // per instance of Stub_factory for fast look-up without locking
3838 // in a thread-enabled environment.
3840 this->stub_templates_
[arm_stub_none
] =
3841 new Stub_template(arm_stub_none
, NULL
, 0);
3843 #define DEF_STUB(x) \
3847 = sizeof(elf32_arm_stub_##x) / sizeof(elf32_arm_stub_##x[0]); \
3848 Stub_type type = arm_stub_##x; \
3849 this->stub_templates_[type] = \
3850 new Stub_template(type, elf32_arm_stub_##x, array_size); \
3858 // Stub_table methods.
3860 // Removel all Cortex-A8 stub.
3862 template<bool big_endian
>
3864 Stub_table
<big_endian
>::remove_all_cortex_a8_stubs()
3866 for (Cortex_a8_stub_list::iterator p
= this->cortex_a8_stubs_
.begin();
3867 p
!= this->cortex_a8_stubs_
.end();
3870 this->cortex_a8_stubs_
.clear();
3873 // Relocate one stub. This is a helper for Stub_table::relocate_stubs().
3875 template<bool big_endian
>
3877 Stub_table
<big_endian
>::relocate_stub(
3879 const Relocate_info
<32, big_endian
>* relinfo
,
3880 Target_arm
<big_endian
>* arm_target
,
3881 Output_section
* output_section
,
3882 unsigned char* view
,
3883 Arm_address address
,
3884 section_size_type view_size
)
3886 const Stub_template
* stub_template
= stub
->stub_template();
3887 if (stub_template
->reloc_count() != 0)
3889 // Adjust view to cover the stub only.
3890 section_size_type offset
= stub
->offset();
3891 section_size_type stub_size
= stub_template
->size();
3892 gold_assert(offset
+ stub_size
<= view_size
);
3894 arm_target
->relocate_stub(stub
, relinfo
, output_section
, view
+ offset
,
3895 address
+ offset
, stub_size
);
3899 // Relocate all stubs in this stub table.
3901 template<bool big_endian
>
3903 Stub_table
<big_endian
>::relocate_stubs(
3904 const Relocate_info
<32, big_endian
>* relinfo
,
3905 Target_arm
<big_endian
>* arm_target
,
3906 Output_section
* output_section
,
3907 unsigned char* view
,
3908 Arm_address address
,
3909 section_size_type view_size
)
3911 // If we are passed a view bigger than the stub table's. we need to
3913 gold_assert(address
== this->address()
3915 == static_cast<section_size_type
>(this->data_size())));
3917 // Relocate all relocation stubs.
3918 for (typename
Reloc_stub_map::const_iterator p
= this->reloc_stubs_
.begin();
3919 p
!= this->reloc_stubs_
.end();
3921 this->relocate_stub(p
->second
, relinfo
, arm_target
, output_section
, view
,
3922 address
, view_size
);
3924 // Relocate all Cortex-A8 stubs.
3925 for (Cortex_a8_stub_list::iterator p
= this->cortex_a8_stubs_
.begin();
3926 p
!= this->cortex_a8_stubs_
.end();
3928 this->relocate_stub(p
->second
, relinfo
, arm_target
, output_section
, view
,
3929 address
, view_size
);
3931 // Relocate all ARM V4BX stubs.
3932 for (Arm_v4bx_stub_list::iterator p
= this->arm_v4bx_stubs_
.begin();
3933 p
!= this->arm_v4bx_stubs_
.end();
3937 this->relocate_stub(*p
, relinfo
, arm_target
, output_section
, view
,
3938 address
, view_size
);
3942 // Write out the stubs to file.
3944 template<bool big_endian
>
3946 Stub_table
<big_endian
>::do_write(Output_file
* of
)
3948 off_t offset
= this->offset();
3949 const section_size_type oview_size
=
3950 convert_to_section_size_type(this->data_size());
3951 unsigned char* const oview
= of
->get_output_view(offset
, oview_size
);
3953 // Write relocation stubs.
3954 for (typename
Reloc_stub_map::const_iterator p
= this->reloc_stubs_
.begin();
3955 p
!= this->reloc_stubs_
.end();
3958 Reloc_stub
* stub
= p
->second
;
3959 Arm_address address
= this->address() + stub
->offset();
3961 == align_address(address
,
3962 stub
->stub_template()->alignment()));
3963 stub
->write(oview
+ stub
->offset(), stub
->stub_template()->size(),
3967 // Write Cortex-A8 stubs.
3968 for (Cortex_a8_stub_list::const_iterator p
= this->cortex_a8_stubs_
.begin();
3969 p
!= this->cortex_a8_stubs_
.end();
3972 Cortex_a8_stub
* stub
= p
->second
;
3973 Arm_address address
= this->address() + stub
->offset();
3975 == align_address(address
,
3976 stub
->stub_template()->alignment()));
3977 stub
->write(oview
+ stub
->offset(), stub
->stub_template()->size(),
3981 // Write ARM V4BX relocation stubs.
3982 for (Arm_v4bx_stub_list::const_iterator p
= this->arm_v4bx_stubs_
.begin();
3983 p
!= this->arm_v4bx_stubs_
.end();
3989 Arm_address address
= this->address() + (*p
)->offset();
3991 == align_address(address
,
3992 (*p
)->stub_template()->alignment()));
3993 (*p
)->write(oview
+ (*p
)->offset(), (*p
)->stub_template()->size(),
3997 of
->write_output_view(this->offset(), oview_size
, oview
);
4000 // Update the data size and address alignment of the stub table at the end
4001 // of a relaxation pass. Return true if either the data size or the
4002 // alignment changed in this relaxation pass.
4004 template<bool big_endian
>
4006 Stub_table
<big_endian
>::update_data_size_and_addralign()
4009 unsigned addralign
= 1;
4011 // Go over all stubs in table to compute data size and address alignment.
4013 for (typename
Reloc_stub_map::const_iterator p
= this->reloc_stubs_
.begin();
4014 p
!= this->reloc_stubs_
.end();
4017 const Stub_template
* stub_template
= p
->second
->stub_template();
4018 addralign
= std::max(addralign
, stub_template
->alignment());
4019 size
= (align_address(size
, stub_template
->alignment())
4020 + stub_template
->size());
4023 for (Cortex_a8_stub_list::const_iterator p
= this->cortex_a8_stubs_
.begin();
4024 p
!= this->cortex_a8_stubs_
.end();
4027 const Stub_template
* stub_template
= p
->second
->stub_template();
4028 addralign
= std::max(addralign
, stub_template
->alignment());
4029 size
= (align_address(size
, stub_template
->alignment())
4030 + stub_template
->size());
4033 for (Arm_v4bx_stub_list::const_iterator p
= this->arm_v4bx_stubs_
.begin();
4034 p
!= this->arm_v4bx_stubs_
.end();
4040 const Stub_template
* stub_template
= (*p
)->stub_template();
4041 addralign
= std::max(addralign
, stub_template
->alignment());
4042 size
= (align_address(size
, stub_template
->alignment())
4043 + stub_template
->size());
4046 // Check if either data size or alignment changed in this pass.
4047 // Update prev_data_size_ and prev_addralign_. These will be used
4048 // as the current data size and address alignment for the next pass.
4049 bool changed
= size
!= this->prev_data_size_
;
4050 this->prev_data_size_
= size
;
4052 if (addralign
!= this->prev_addralign_
)
4054 this->prev_addralign_
= addralign
;
4059 // Finalize the stubs. This sets the offsets of the stubs within the stub
4060 // table. It also marks all input sections needing Cortex-A8 workaround.
4062 template<bool big_endian
>
4064 Stub_table
<big_endian
>::finalize_stubs()
4067 for (typename
Reloc_stub_map::const_iterator p
= this->reloc_stubs_
.begin();
4068 p
!= this->reloc_stubs_
.end();
4071 Reloc_stub
* stub
= p
->second
;
4072 const Stub_template
* stub_template
= stub
->stub_template();
4073 uint64_t stub_addralign
= stub_template
->alignment();
4074 off
= align_address(off
, stub_addralign
);
4075 stub
->set_offset(off
);
4076 off
+= stub_template
->size();
4079 for (Cortex_a8_stub_list::const_iterator p
= this->cortex_a8_stubs_
.begin();
4080 p
!= this->cortex_a8_stubs_
.end();
4083 Cortex_a8_stub
* stub
= p
->second
;
4084 const Stub_template
* stub_template
= stub
->stub_template();
4085 uint64_t stub_addralign
= stub_template
->alignment();
4086 off
= align_address(off
, stub_addralign
);
4087 stub
->set_offset(off
);
4088 off
+= stub_template
->size();
4090 // Mark input section so that we can determine later if a code section
4091 // needs the Cortex-A8 workaround quickly.
4092 Arm_relobj
<big_endian
>* arm_relobj
=
4093 Arm_relobj
<big_endian
>::as_arm_relobj(stub
->relobj());
4094 arm_relobj
->mark_section_for_cortex_a8_workaround(stub
->shndx());
4097 for (Arm_v4bx_stub_list::const_iterator p
= this->arm_v4bx_stubs_
.begin();
4098 p
!= this->arm_v4bx_stubs_
.end();
4104 const Stub_template
* stub_template
= (*p
)->stub_template();
4105 uint64_t stub_addralign
= stub_template
->alignment();
4106 off
= align_address(off
, stub_addralign
);
4107 (*p
)->set_offset(off
);
4108 off
+= stub_template
->size();
4111 gold_assert(off
<= this->prev_data_size_
);
4114 // Apply Cortex-A8 workaround to an address range between VIEW_ADDRESS
4115 // and VIEW_ADDRESS + VIEW_SIZE - 1. VIEW points to the mapped address
4116 // of the address range seen by the linker.
4118 template<bool big_endian
>
4120 Stub_table
<big_endian
>::apply_cortex_a8_workaround_to_address_range(
4121 Target_arm
<big_endian
>* arm_target
,
4122 unsigned char* view
,
4123 Arm_address view_address
,
4124 section_size_type view_size
)
4126 // Cortex-A8 stubs are sorted by addresses of branches being fixed up.
4127 for (Cortex_a8_stub_list::const_iterator p
=
4128 this->cortex_a8_stubs_
.lower_bound(view_address
);
4129 ((p
!= this->cortex_a8_stubs_
.end())
4130 && (p
->first
< (view_address
+ view_size
)));
4133 // We do not store the THUMB bit in the LSB of either the branch address
4134 // or the stub offset. There is no need to strip the LSB.
4135 Arm_address branch_address
= p
->first
;
4136 const Cortex_a8_stub
* stub
= p
->second
;
4137 Arm_address stub_address
= this->address() + stub
->offset();
4139 // Offset of the branch instruction relative to this view.
4140 section_size_type offset
=
4141 convert_to_section_size_type(branch_address
- view_address
);
4142 gold_assert((offset
+ 4) <= view_size
);
4144 arm_target
->apply_cortex_a8_workaround(stub
, stub_address
,
4145 view
+ offset
, branch_address
);
4149 // Arm_input_section methods.
4151 // Initialize an Arm_input_section.
4153 template<bool big_endian
>
4155 Arm_input_section
<big_endian
>::init()
4157 Relobj
* relobj
= this->relobj();
4158 unsigned int shndx
= this->shndx();
4160 // Cache these to speed up size and alignment queries. It is too slow
4161 // to call section_addraglin and section_size every time.
4162 this->original_addralign_
= relobj
->section_addralign(shndx
);
4163 this->original_size_
= relobj
->section_size(shndx
);
4165 // We want to make this look like the original input section after
4166 // output sections are finalized.
4167 Output_section
* os
= relobj
->output_section(shndx
);
4168 off_t offset
= relobj
->output_section_offset(shndx
);
4169 gold_assert(os
!= NULL
&& !relobj
->is_output_section_offset_invalid(shndx
));
4170 this->set_address(os
->address() + offset
);
4171 this->set_file_offset(os
->offset() + offset
);
4173 this->set_current_data_size(this->original_size_
);
4174 this->finalize_data_size();
4177 template<bool big_endian
>
4179 Arm_input_section
<big_endian
>::do_write(Output_file
* of
)
4181 // We have to write out the original section content.
4182 section_size_type section_size
;
4183 const unsigned char* section_contents
=
4184 this->relobj()->section_contents(this->shndx(), §ion_size
, false);
4185 of
->write(this->offset(), section_contents
, section_size
);
4187 // If this owns a stub table and it is not empty, write it.
4188 if (this->is_stub_table_owner() && !this->stub_table_
->empty())
4189 this->stub_table_
->write(of
);
4192 // Finalize data size.
4194 template<bool big_endian
>
4196 Arm_input_section
<big_endian
>::set_final_data_size()
4198 // If this owns a stub table, finalize its data size as well.
4199 if (this->is_stub_table_owner())
4201 uint64_t address
= this->address();
4203 // The stub table comes after the original section contents.
4204 address
+= this->original_size_
;
4205 address
= align_address(address
, this->stub_table_
->addralign());
4206 off_t offset
= this->offset() + (address
- this->address());
4207 this->stub_table_
->set_address_and_file_offset(address
, offset
);
4208 address
+= this->stub_table_
->data_size();
4209 gold_assert(address
== this->address() + this->current_data_size());
4212 this->set_data_size(this->current_data_size());
4215 // Reset address and file offset.
4217 template<bool big_endian
>
4219 Arm_input_section
<big_endian
>::do_reset_address_and_file_offset()
4221 // Size of the original input section contents.
4222 off_t off
= convert_types
<off_t
, uint64_t>(this->original_size_
);
4224 // If this is a stub table owner, account for the stub table size.
4225 if (this->is_stub_table_owner())
4227 Stub_table
<big_endian
>* stub_table
= this->stub_table_
;
4229 // Reset the stub table's address and file offset. The
4230 // current data size for child will be updated after that.
4231 stub_table_
->reset_address_and_file_offset();
4232 off
= align_address(off
, stub_table_
->addralign());
4233 off
+= stub_table
->current_data_size();
4236 this->set_current_data_size(off
);
4239 // Arm_output_section methods.
4241 // Create a stub group for input sections from BEGIN to END. OWNER
4242 // points to the input section to be the owner a new stub table.
4244 template<bool big_endian
>
4246 Arm_output_section
<big_endian
>::create_stub_group(
4247 Input_section_list::const_iterator begin
,
4248 Input_section_list::const_iterator end
,
4249 Input_section_list::const_iterator owner
,
4250 Target_arm
<big_endian
>* target
,
4251 std::vector
<Output_relaxed_input_section
*>* new_relaxed_sections
)
4253 // Currently we convert ordinary input sections into relaxed sections only
4254 // at this point but we may want to support creating relaxed input section
4255 // very early. So we check here to see if owner is already a relaxed
4258 Arm_input_section
<big_endian
>* arm_input_section
;
4259 if (owner
->is_relaxed_input_section())
4262 Arm_input_section
<big_endian
>::as_arm_input_section(
4263 owner
->relaxed_input_section());
4267 gold_assert(owner
->is_input_section());
4268 // Create a new relaxed input section.
4270 target
->new_arm_input_section(owner
->relobj(), owner
->shndx());
4271 new_relaxed_sections
->push_back(arm_input_section
);
4274 // Create a stub table.
4275 Stub_table
<big_endian
>* stub_table
=
4276 target
->new_stub_table(arm_input_section
);
4278 arm_input_section
->set_stub_table(stub_table
);
4280 Input_section_list::const_iterator p
= begin
;
4281 Input_section_list::const_iterator prev_p
;
4283 // Look for input sections or relaxed input sections in [begin ... end].
4286 if (p
->is_input_section() || p
->is_relaxed_input_section())
4288 // The stub table information for input sections live
4289 // in their objects.
4290 Arm_relobj
<big_endian
>* arm_relobj
=
4291 Arm_relobj
<big_endian
>::as_arm_relobj(p
->relobj());
4292 arm_relobj
->set_stub_table(p
->shndx(), stub_table
);
4296 while (prev_p
!= end
);
4299 // Group input sections for stub generation. GROUP_SIZE is roughly the limit
4300 // of stub groups. We grow a stub group by adding input section until the
4301 // size is just below GROUP_SIZE. The last input section will be converted
4302 // into a stub table. If STUB_ALWAYS_AFTER_BRANCH is false, we also add
4303 // input section after the stub table, effectively double the group size.
4305 // This is similar to the group_sections() function in elf32-arm.c but is
4306 // implemented differently.
4308 template<bool big_endian
>
4310 Arm_output_section
<big_endian
>::group_sections(
4311 section_size_type group_size
,
4312 bool stubs_always_after_branch
,
4313 Target_arm
<big_endian
>* target
)
4315 // We only care about sections containing code.
4316 if ((this->flags() & elfcpp::SHF_EXECINSTR
) == 0)
4319 // States for grouping.
4322 // No group is being built.
4324 // A group is being built but the stub table is not found yet.
4325 // We keep group a stub group until the size is just under GROUP_SIZE.
4326 // The last input section in the group will be used as the stub table.
4327 FINDING_STUB_SECTION
,
4328 // A group is being built and we have already found a stub table.
4329 // We enter this state to grow a stub group by adding input section
4330 // after the stub table. This effectively doubles the group size.
4334 // Any newly created relaxed sections are stored here.
4335 std::vector
<Output_relaxed_input_section
*> new_relaxed_sections
;
4337 State state
= NO_GROUP
;
4338 section_size_type off
= 0;
4339 section_size_type group_begin_offset
= 0;
4340 section_size_type group_end_offset
= 0;
4341 section_size_type stub_table_end_offset
= 0;
4342 Input_section_list::const_iterator group_begin
=
4343 this->input_sections().end();
4344 Input_section_list::const_iterator stub_table
=
4345 this->input_sections().end();
4346 Input_section_list::const_iterator group_end
= this->input_sections().end();
4347 for (Input_section_list::const_iterator p
= this->input_sections().begin();
4348 p
!= this->input_sections().end();
4351 section_size_type section_begin_offset
=
4352 align_address(off
, p
->addralign());
4353 section_size_type section_end_offset
=
4354 section_begin_offset
+ p
->data_size();
4356 // Check to see if we should group the previously seens sections.
4362 case FINDING_STUB_SECTION
:
4363 // Adding this section makes the group larger than GROUP_SIZE.
4364 if (section_end_offset
- group_begin_offset
>= group_size
)
4366 if (stubs_always_after_branch
)
4368 gold_assert(group_end
!= this->input_sections().end());
4369 this->create_stub_group(group_begin
, group_end
, group_end
,
4370 target
, &new_relaxed_sections
);
4375 // But wait, there's more! Input sections up to
4376 // stub_group_size bytes after the stub table can be
4377 // handled by it too.
4378 state
= HAS_STUB_SECTION
;
4379 stub_table
= group_end
;
4380 stub_table_end_offset
= group_end_offset
;
4385 case HAS_STUB_SECTION
:
4386 // Adding this section makes the post stub-section group larger
4388 if (section_end_offset
- stub_table_end_offset
>= group_size
)
4390 gold_assert(group_end
!= this->input_sections().end());
4391 this->create_stub_group(group_begin
, group_end
, stub_table
,
4392 target
, &new_relaxed_sections
);
4401 // If we see an input section and currently there is no group, start
4402 // a new one. Skip any empty sections.
4403 if ((p
->is_input_section() || p
->is_relaxed_input_section())
4404 && (p
->relobj()->section_size(p
->shndx()) != 0))
4406 if (state
== NO_GROUP
)
4408 state
= FINDING_STUB_SECTION
;
4410 group_begin_offset
= section_begin_offset
;
4413 // Keep track of the last input section seen.
4415 group_end_offset
= section_end_offset
;
4418 off
= section_end_offset
;
4421 // Create a stub group for any ungrouped sections.
4422 if (state
== FINDING_STUB_SECTION
|| state
== HAS_STUB_SECTION
)
4424 gold_assert(group_end
!= this->input_sections().end());
4425 this->create_stub_group(group_begin
, group_end
,
4426 (state
== FINDING_STUB_SECTION
4429 target
, &new_relaxed_sections
);
4432 // Convert input section into relaxed input section in a batch.
4433 if (!new_relaxed_sections
.empty())
4434 this->convert_input_sections_to_relaxed_sections(new_relaxed_sections
);
4436 // Update the section offsets
4437 for (size_t i
= 0; i
< new_relaxed_sections
.size(); ++i
)
4439 Arm_relobj
<big_endian
>* arm_relobj
=
4440 Arm_relobj
<big_endian
>::as_arm_relobj(
4441 new_relaxed_sections
[i
]->relobj());
4442 unsigned int shndx
= new_relaxed_sections
[i
]->shndx();
4443 // Tell Arm_relobj that this input section is converted.
4444 arm_relobj
->convert_input_section_to_relaxed_section(shndx
);
4448 // Arm_relobj methods.
4450 // Determine if we want to scan the SHNDX-th section for relocation stubs.
4451 // This is a helper for Arm_relobj::scan_sections_for_stubs() below.
4453 template<bool big_endian
>
4455 Arm_relobj
<big_endian
>::section_needs_reloc_stub_scanning(
4456 const elfcpp::Shdr
<32, big_endian
>& shdr
,
4457 const Relobj::Output_sections
& out_sections
,
4458 const Symbol_table
*symtab
)
4460 unsigned int sh_type
= shdr
.get_sh_type();
4461 if (sh_type
!= elfcpp::SHT_REL
&& sh_type
!= elfcpp::SHT_RELA
)
4464 // Ignore empty section.
4465 off_t sh_size
= shdr
.get_sh_size();
4469 // Ignore reloc section with bad info. This error will be
4470 // reported in the final link.
4471 unsigned int index
= this->adjust_shndx(shdr
.get_sh_info());
4472 if (index
>= this->shnum())
4475 // This relocation section is against a section which we
4476 // discarded or if the section is folded into another
4477 // section due to ICF.
4478 if (out_sections
[index
] == NULL
|| symtab
->is_section_folded(this, index
))
4481 // Ignore reloc section with unexpected symbol table. The
4482 // error will be reported in the final link.
4483 if (this->adjust_shndx(shdr
.get_sh_link()) != this->symtab_shndx())
4486 unsigned int reloc_size
;
4487 if (sh_type
== elfcpp::SHT_REL
)
4488 reloc_size
= elfcpp::Elf_sizes
<32>::rel_size
;
4490 reloc_size
= elfcpp::Elf_sizes
<32>::rela_size
;
4492 // Ignore reloc section with unexpected entsize or uneven size.
4493 // The error will be reported in the final link.
4494 if (reloc_size
!= shdr
.get_sh_entsize() || sh_size
% reloc_size
!= 0)
4500 // Determine if we want to scan the SHNDX-th section for non-relocation stubs.
4501 // This is a helper for Arm_relobj::scan_sections_for_stubs() below.
4503 template<bool big_endian
>
4505 Arm_relobj
<big_endian
>::section_needs_cortex_a8_stub_scanning(
4506 const elfcpp::Shdr
<32, big_endian
>& shdr
,
4509 const Symbol_table
* symtab
)
4511 // We only scan non-empty code sections.
4512 if ((shdr
.get_sh_flags() & elfcpp::SHF_EXECINSTR
) == 0
4513 || shdr
.get_sh_size() == 0)
4516 // Ignore discarded or ICF'ed sections.
4517 if (os
== NULL
|| symtab
->is_section_folded(this, shndx
))
4520 // Find output address of section.
4521 Arm_address address
= os
->output_address(this, shndx
, 0);
4523 // If the section does not cross any 4K-boundaries, it does not need to
4525 if ((address
& ~0xfffU
) == ((address
+ shdr
.get_sh_size() - 1) & ~0xfffU
))
4531 // Scan a section for Cortex-A8 workaround.
4533 template<bool big_endian
>
4535 Arm_relobj
<big_endian
>::scan_section_for_cortex_a8_erratum(
4536 const elfcpp::Shdr
<32, big_endian
>& shdr
,
4539 Target_arm
<big_endian
>* arm_target
)
4541 Arm_address output_address
= os
->output_address(this, shndx
, 0);
4543 // Get the section contents.
4544 section_size_type input_view_size
= 0;
4545 const unsigned char* input_view
=
4546 this->section_contents(shndx
, &input_view_size
, false);
4548 // We need to go through the mapping symbols to determine what to
4549 // scan. There are two reasons. First, we should look at THUMB code and
4550 // THUMB code only. Second, we only want to look at the 4K-page boundary
4551 // to speed up the scanning.
4553 // Look for the first mapping symbol in this section. It should be
4555 Mapping_symbol_position
section_start(shndx
, 0);
4556 typename
Mapping_symbols_info::const_iterator p
=
4557 this->mapping_symbols_info_
.lower_bound(section_start
);
4559 if (p
== this->mapping_symbols_info_
.end()
4560 || p
->first
!= section_start
)
4562 gold_warning(_("Cortex-A8 erratum scanning failed because there "
4563 "is no mapping symbols for section %u of %s"),
4564 shndx
, this->name().c_str());
4568 while (p
!= this->mapping_symbols_info_
.end()
4569 && p
->first
.first
== shndx
)
4571 typename
Mapping_symbols_info::const_iterator next
=
4572 this->mapping_symbols_info_
.upper_bound(p
->first
);
4574 // Only scan part of a section with THUMB code.
4575 if (p
->second
== 't')
4577 // Determine the end of this range.
4578 section_size_type span_start
=
4579 convert_to_section_size_type(p
->first
.second
);
4580 section_size_type span_end
;
4581 if (next
!= this->mapping_symbols_info_
.end()
4582 && next
->first
.first
== shndx
)
4583 span_end
= convert_to_section_size_type(next
->first
.second
);
4585 span_end
= convert_to_section_size_type(shdr
.get_sh_size());
4587 if (((span_start
+ output_address
) & ~0xfffUL
)
4588 != ((span_end
+ output_address
- 1) & ~0xfffUL
))
4590 arm_target
->scan_span_for_cortex_a8_erratum(this, shndx
,
4591 span_start
, span_end
,
4601 // Scan relocations for stub generation.
4603 template<bool big_endian
>
4605 Arm_relobj
<big_endian
>::scan_sections_for_stubs(
4606 Target_arm
<big_endian
>* arm_target
,
4607 const Symbol_table
* symtab
,
4608 const Layout
* layout
)
4610 unsigned int shnum
= this->shnum();
4611 const unsigned int shdr_size
= elfcpp::Elf_sizes
<32>::shdr_size
;
4613 // Read the section headers.
4614 const unsigned char* pshdrs
= this->get_view(this->elf_file()->shoff(),
4618 // To speed up processing, we set up hash tables for fast lookup of
4619 // input offsets to output addresses.
4620 this->initialize_input_to_output_maps();
4622 const Relobj::Output_sections
& out_sections(this->output_sections());
4624 Relocate_info
<32, big_endian
> relinfo
;
4625 relinfo
.symtab
= symtab
;
4626 relinfo
.layout
= layout
;
4627 relinfo
.object
= this;
4629 // Do relocation stubs scanning.
4630 const unsigned char* p
= pshdrs
+ shdr_size
;
4631 for (unsigned int i
= 1; i
< shnum
; ++i
, p
+= shdr_size
)
4633 const elfcpp::Shdr
<32, big_endian
> shdr(p
);
4634 if (this->section_needs_reloc_stub_scanning(shdr
, out_sections
, symtab
))
4636 unsigned int index
= this->adjust_shndx(shdr
.get_sh_info());
4637 Arm_address output_offset
= this->get_output_section_offset(index
);
4638 Arm_address output_address
;
4639 if(output_offset
!= invalid_address
)
4640 output_address
= out_sections
[index
]->address() + output_offset
;
4643 // Currently this only happens for a relaxed section.
4644 const Output_relaxed_input_section
* poris
=
4645 out_sections
[index
]->find_relaxed_input_section(this, index
);
4646 gold_assert(poris
!= NULL
);
4647 output_address
= poris
->address();
4650 // Get the relocations.
4651 const unsigned char* prelocs
= this->get_view(shdr
.get_sh_offset(),
4655 // Get the section contents. This does work for the case in which
4656 // we modify the contents of an input section. We need to pass the
4657 // output view under such circumstances.
4658 section_size_type input_view_size
= 0;
4659 const unsigned char* input_view
=
4660 this->section_contents(index
, &input_view_size
, false);
4662 relinfo
.reloc_shndx
= i
;
4663 relinfo
.data_shndx
= index
;
4664 unsigned int sh_type
= shdr
.get_sh_type();
4665 unsigned int reloc_size
;
4666 if (sh_type
== elfcpp::SHT_REL
)
4667 reloc_size
= elfcpp::Elf_sizes
<32>::rel_size
;
4669 reloc_size
= elfcpp::Elf_sizes
<32>::rela_size
;
4671 Output_section
* os
= out_sections
[index
];
4672 arm_target
->scan_section_for_stubs(&relinfo
, sh_type
, prelocs
,
4673 shdr
.get_sh_size() / reloc_size
,
4675 output_offset
== invalid_address
,
4676 input_view
, output_address
,
4681 // Do Cortex-A8 erratum stubs scanning. This has to be done for a section
4682 // after its relocation section, if there is one, is processed for
4683 // relocation stubs. Merging this loop with the one above would have been
4684 // complicated since we would have had to make sure that relocation stub
4685 // scanning is done first.
4686 if (arm_target
->fix_cortex_a8())
4688 const unsigned char* p
= pshdrs
+ shdr_size
;
4689 for (unsigned int i
= 1; i
< shnum
; ++i
, p
+= shdr_size
)
4691 const elfcpp::Shdr
<32, big_endian
> shdr(p
);
4692 if (this->section_needs_cortex_a8_stub_scanning(shdr
, i
,
4695 this->scan_section_for_cortex_a8_erratum(shdr
, i
, out_sections
[i
],
4700 // After we've done the relocations, we release the hash tables,
4701 // since we no longer need them.
4702 this->free_input_to_output_maps();
4705 // Count the local symbols. The ARM backend needs to know if a symbol
4706 // is a THUMB function or not. For global symbols, it is easy because
4707 // the Symbol object keeps the ELF symbol type. For local symbol it is
4708 // harder because we cannot access this information. So we override the
4709 // do_count_local_symbol in parent and scan local symbols to mark
4710 // THUMB functions. This is not the most efficient way but I do not want to
4711 // slow down other ports by calling a per symbol targer hook inside
4712 // Sized_relobj<size, big_endian>::do_count_local_symbols.
4714 template<bool big_endian
>
4716 Arm_relobj
<big_endian
>::do_count_local_symbols(
4717 Stringpool_template
<char>* pool
,
4718 Stringpool_template
<char>* dynpool
)
4720 // We need to fix-up the values of any local symbols whose type are
4723 // Ask parent to count the local symbols.
4724 Sized_relobj
<32, big_endian
>::do_count_local_symbols(pool
, dynpool
);
4725 const unsigned int loccount
= this->local_symbol_count();
4729 // Intialize the thumb function bit-vector.
4730 std::vector
<bool> empty_vector(loccount
, false);
4731 this->local_symbol_is_thumb_function_
.swap(empty_vector
);
4733 // Read the symbol table section header.
4734 const unsigned int symtab_shndx
= this->symtab_shndx();
4735 elfcpp::Shdr
<32, big_endian
>
4736 symtabshdr(this, this->elf_file()->section_header(symtab_shndx
));
4737 gold_assert(symtabshdr
.get_sh_type() == elfcpp::SHT_SYMTAB
);
4739 // Read the local symbols.
4740 const int sym_size
=elfcpp::Elf_sizes
<32>::sym_size
;
4741 gold_assert(loccount
== symtabshdr
.get_sh_info());
4742 off_t locsize
= loccount
* sym_size
;
4743 const unsigned char* psyms
= this->get_view(symtabshdr
.get_sh_offset(),
4744 locsize
, true, true);
4746 // For mapping symbol processing, we need to read the symbol names.
4747 unsigned int strtab_shndx
= this->adjust_shndx(symtabshdr
.get_sh_link());
4748 if (strtab_shndx
>= this->shnum())
4750 this->error(_("invalid symbol table name index: %u"), strtab_shndx
);
4754 elfcpp::Shdr
<32, big_endian
>
4755 strtabshdr(this, this->elf_file()->section_header(strtab_shndx
));
4756 if (strtabshdr
.get_sh_type() != elfcpp::SHT_STRTAB
)
4758 this->error(_("symbol table name section has wrong type: %u"),
4759 static_cast<unsigned int>(strtabshdr
.get_sh_type()));
4762 const char* pnames
=
4763 reinterpret_cast<const char*>(this->get_view(strtabshdr
.get_sh_offset(),
4764 strtabshdr
.get_sh_size(),
4767 // Loop over the local symbols and mark any local symbols pointing
4768 // to THUMB functions.
4770 // Skip the first dummy symbol.
4772 typename Sized_relobj
<32, big_endian
>::Local_values
* plocal_values
=
4773 this->local_values();
4774 for (unsigned int i
= 1; i
< loccount
; ++i
, psyms
+= sym_size
)
4776 elfcpp::Sym
<32, big_endian
> sym(psyms
);
4777 elfcpp::STT st_type
= sym
.get_st_type();
4778 Symbol_value
<32>& lv((*plocal_values
)[i
]);
4779 Arm_address input_value
= lv
.input_value();
4781 // Check to see if this is a mapping symbol.
4782 const char* sym_name
= pnames
+ sym
.get_st_name();
4783 if (Target_arm
<big_endian
>::is_mapping_symbol_name(sym_name
))
4785 unsigned int input_shndx
= sym
.get_st_shndx();
4787 // Strip of LSB in case this is a THUMB symbol.
4788 Mapping_symbol_position
msp(input_shndx
, input_value
& ~1U);
4789 this->mapping_symbols_info_
[msp
] = sym_name
[1];
4792 if (st_type
== elfcpp::STT_ARM_TFUNC
4793 || (st_type
== elfcpp::STT_FUNC
&& ((input_value
& 1) != 0)))
4795 // This is a THUMB function. Mark this and canonicalize the
4796 // symbol value by setting LSB.
4797 this->local_symbol_is_thumb_function_
[i
] = true;
4798 if ((input_value
& 1) == 0)
4799 lv
.set_input_value(input_value
| 1);
4804 // Relocate sections.
4805 template<bool big_endian
>
4807 Arm_relobj
<big_endian
>::do_relocate_sections(
4808 const Symbol_table
* symtab
,
4809 const Layout
* layout
,
4810 const unsigned char* pshdrs
,
4811 typename Sized_relobj
<32, big_endian
>::Views
* pviews
)
4813 // Call parent to relocate sections.
4814 Sized_relobj
<32, big_endian
>::do_relocate_sections(symtab
, layout
, pshdrs
,
4817 // We do not generate stubs if doing a relocatable link.
4818 if (parameters
->options().relocatable())
4821 // Relocate stub tables.
4822 unsigned int shnum
= this->shnum();
4824 Target_arm
<big_endian
>* arm_target
=
4825 Target_arm
<big_endian
>::default_target();
4827 Relocate_info
<32, big_endian
> relinfo
;
4828 relinfo
.symtab
= symtab
;
4829 relinfo
.layout
= layout
;
4830 relinfo
.object
= this;
4832 for (unsigned int i
= 1; i
< shnum
; ++i
)
4834 Arm_input_section
<big_endian
>* arm_input_section
=
4835 arm_target
->find_arm_input_section(this, i
);
4837 if (arm_input_section
!= NULL
4838 && arm_input_section
->is_stub_table_owner()
4839 && !arm_input_section
->stub_table()->empty())
4841 // We cannot discard a section if it owns a stub table.
4842 Output_section
* os
= this->output_section(i
);
4843 gold_assert(os
!= NULL
);
4845 relinfo
.reloc_shndx
= elfcpp::SHN_UNDEF
;
4846 relinfo
.reloc_shdr
= NULL
;
4847 relinfo
.data_shndx
= i
;
4848 relinfo
.data_shdr
= pshdrs
+ i
* elfcpp::Elf_sizes
<32>::shdr_size
;
4850 gold_assert((*pviews
)[i
].view
!= NULL
);
4852 // We are passed the output section view. Adjust it to cover the
4854 Stub_table
<big_endian
>* stub_table
= arm_input_section
->stub_table();
4855 gold_assert((stub_table
->address() >= (*pviews
)[i
].address
)
4856 && ((stub_table
->address() + stub_table
->data_size())
4857 <= (*pviews
)[i
].address
+ (*pviews
)[i
].view_size
));
4859 off_t offset
= stub_table
->address() - (*pviews
)[i
].address
;
4860 unsigned char* view
= (*pviews
)[i
].view
+ offset
;
4861 Arm_address address
= stub_table
->address();
4862 section_size_type view_size
= stub_table
->data_size();
4864 stub_table
->relocate_stubs(&relinfo
, arm_target
, os
, view
, address
,
4868 // Apply Cortex A8 workaround if applicable.
4869 if (this->section_has_cortex_a8_workaround(i
))
4871 unsigned char* view
= (*pviews
)[i
].view
;
4872 Arm_address view_address
= (*pviews
)[i
].address
;
4873 section_size_type view_size
= (*pviews
)[i
].view_size
;
4874 Stub_table
<big_endian
>* stub_table
= this->stub_tables_
[i
];
4876 // Adjust view to cover section.
4877 Output_section
* os
= this->output_section(i
);
4878 gold_assert(os
!= NULL
);
4879 Arm_address section_address
= os
->output_address(this, i
, 0);
4880 uint64_t section_size
= this->section_size(i
);
4882 gold_assert(section_address
>= view_address
4883 && ((section_address
+ section_size
)
4884 <= (view_address
+ view_size
)));
4886 unsigned char* section_view
= view
+ (section_address
- view_address
);
4888 // Apply the Cortex-A8 workaround to the output address range
4889 // corresponding to this input section.
4890 stub_table
->apply_cortex_a8_workaround_to_address_range(
4899 // Helper functions for both Arm_relobj and Arm_dynobj to read ARM
4902 template<bool big_endian
>
4903 Attributes_section_data
*
4904 read_arm_attributes_section(
4906 Read_symbols_data
*sd
)
4908 // Read the attributes section if there is one.
4909 // We read from the end because gas seems to put it near the end of
4910 // the section headers.
4911 const size_t shdr_size
= elfcpp::Elf_sizes
<32>::shdr_size
;
4912 const unsigned char *ps
=
4913 sd
->section_headers
->data() + shdr_size
* (object
->shnum() - 1);
4914 for (unsigned int i
= object
->shnum(); i
> 0; --i
, ps
-= shdr_size
)
4916 elfcpp::Shdr
<32, big_endian
> shdr(ps
);
4917 if (shdr
.get_sh_type() == elfcpp::SHT_ARM_ATTRIBUTES
)
4919 section_offset_type section_offset
= shdr
.get_sh_offset();
4920 section_size_type section_size
=
4921 convert_to_section_size_type(shdr
.get_sh_size());
4922 File_view
* view
= object
->get_lasting_view(section_offset
,
4923 section_size
, true, false);
4924 return new Attributes_section_data(view
->data(), section_size
);
4930 // Read the symbol information.
4932 template<bool big_endian
>
4934 Arm_relobj
<big_endian
>::do_read_symbols(Read_symbols_data
* sd
)
4936 // Call parent class to read symbol information.
4937 Sized_relobj
<32, big_endian
>::do_read_symbols(sd
);
4939 // Read processor-specific flags in ELF file header.
4940 const unsigned char* pehdr
= this->get_view(elfcpp::file_header_offset
,
4941 elfcpp::Elf_sizes
<32>::ehdr_size
,
4943 elfcpp::Ehdr
<32, big_endian
> ehdr(pehdr
);
4944 this->processor_specific_flags_
= ehdr
.get_e_flags();
4945 this->attributes_section_data_
=
4946 read_arm_attributes_section
<big_endian
>(this, sd
);
4949 // Process relocations for garbage collection. The ARM target uses .ARM.exidx
4950 // sections for unwinding. These sections are referenced implicitly by
4951 // text sections linked in the section headers. If we ignore these implict
4952 // references, the .ARM.exidx sections and any .ARM.extab sections they use
4953 // will be garbage-collected incorrectly. Hence we override the same function
4954 // in the base class to handle these implicit references.
4956 template<bool big_endian
>
4958 Arm_relobj
<big_endian
>::do_gc_process_relocs(Symbol_table
* symtab
,
4960 Read_relocs_data
* rd
)
4962 // First, call base class method to process relocations in this object.
4963 Sized_relobj
<32, big_endian
>::do_gc_process_relocs(symtab
, layout
, rd
);
4965 unsigned int shnum
= this->shnum();
4966 const unsigned int shdr_size
= elfcpp::Elf_sizes
<32>::shdr_size
;
4967 const unsigned char* pshdrs
= this->get_view(this->elf_file()->shoff(),
4971 // Scan section headers for sections of type SHT_ARM_EXIDX. Add references
4972 // to these from the linked text sections.
4973 const unsigned char* ps
= pshdrs
+ shdr_size
;
4974 for (unsigned int i
= 1; i
< shnum
; ++i
, ps
+= shdr_size
)
4976 elfcpp::Shdr
<32, big_endian
> shdr(ps
);
4977 if (shdr
.get_sh_type() == elfcpp::SHT_ARM_EXIDX
)
4979 // Found an .ARM.exidx section, add it to the set of reachable
4980 // sections from its linked text section.
4981 unsigned int text_shndx
= this->adjust_shndx(shdr
.get_sh_link());
4982 symtab
->gc()->add_reference(this, text_shndx
, this, i
);
4987 // Arm_dynobj methods.
4989 // Read the symbol information.
4991 template<bool big_endian
>
4993 Arm_dynobj
<big_endian
>::do_read_symbols(Read_symbols_data
* sd
)
4995 // Call parent class to read symbol information.
4996 Sized_dynobj
<32, big_endian
>::do_read_symbols(sd
);
4998 // Read processor-specific flags in ELF file header.
4999 const unsigned char* pehdr
= this->get_view(elfcpp::file_header_offset
,
5000 elfcpp::Elf_sizes
<32>::ehdr_size
,
5002 elfcpp::Ehdr
<32, big_endian
> ehdr(pehdr
);
5003 this->processor_specific_flags_
= ehdr
.get_e_flags();
5004 this->attributes_section_data_
=
5005 read_arm_attributes_section
<big_endian
>(this, sd
);
5008 // Stub_addend_reader methods.
5010 // Read the addend of a REL relocation of type R_TYPE at VIEW.
5012 template<bool big_endian
>
5013 elfcpp::Elf_types
<32>::Elf_Swxword
5014 Stub_addend_reader
<elfcpp::SHT_REL
, big_endian
>::operator()(
5015 unsigned int r_type
,
5016 const unsigned char* view
,
5017 const typename Reloc_types
<elfcpp::SHT_REL
, 32, big_endian
>::Reloc
&) const
5019 typedef struct Arm_relocate_functions
<big_endian
> RelocFuncs
;
5023 case elfcpp::R_ARM_CALL
:
5024 case elfcpp::R_ARM_JUMP24
:
5025 case elfcpp::R_ARM_PLT32
:
5027 typedef typename
elfcpp::Swap
<32, big_endian
>::Valtype Valtype
;
5028 const Valtype
* wv
= reinterpret_cast<const Valtype
*>(view
);
5029 Valtype val
= elfcpp::Swap
<32, big_endian
>::readval(wv
);
5030 return utils::sign_extend
<26>(val
<< 2);
5033 case elfcpp::R_ARM_THM_CALL
:
5034 case elfcpp::R_ARM_THM_JUMP24
:
5035 case elfcpp::R_ARM_THM_XPC22
:
5037 typedef typename
elfcpp::Swap
<16, big_endian
>::Valtype Valtype
;
5038 const Valtype
* wv
= reinterpret_cast<const Valtype
*>(view
);
5039 Valtype upper_insn
= elfcpp::Swap
<16, big_endian
>::readval(wv
);
5040 Valtype lower_insn
= elfcpp::Swap
<16, big_endian
>::readval(wv
+ 1);
5041 return RelocFuncs::thumb32_branch_offset(upper_insn
, lower_insn
);
5044 case elfcpp::R_ARM_THM_JUMP19
:
5046 typedef typename
elfcpp::Swap
<16, big_endian
>::Valtype Valtype
;
5047 const Valtype
* wv
= reinterpret_cast<const Valtype
*>(view
);
5048 Valtype upper_insn
= elfcpp::Swap
<16, big_endian
>::readval(wv
);
5049 Valtype lower_insn
= elfcpp::Swap
<16, big_endian
>::readval(wv
+ 1);
5050 return RelocFuncs::thumb32_cond_branch_offset(upper_insn
, lower_insn
);
5058 // A class to handle the PLT data.
5060 template<bool big_endian
>
5061 class Output_data_plt_arm
: public Output_section_data
5064 typedef Output_data_reloc
<elfcpp::SHT_REL
, true, 32, big_endian
>
5067 Output_data_plt_arm(Layout
*, Output_data_space
*);
5069 // Add an entry to the PLT.
5071 add_entry(Symbol
* gsym
);
5073 // Return the .rel.plt section data.
5074 const Reloc_section
*
5076 { return this->rel_
; }
5080 do_adjust_output_section(Output_section
* os
);
5082 // Write to a map file.
5084 do_print_to_mapfile(Mapfile
* mapfile
) const
5085 { mapfile
->print_output_data(this, _("** PLT")); }
5088 // Template for the first PLT entry.
5089 static const uint32_t first_plt_entry
[5];
5091 // Template for subsequent PLT entries.
5092 static const uint32_t plt_entry
[3];
5094 // Set the final size.
5096 set_final_data_size()
5098 this->set_data_size(sizeof(first_plt_entry
)
5099 + this->count_
* sizeof(plt_entry
));
5102 // Write out the PLT data.
5104 do_write(Output_file
*);
5106 // The reloc section.
5107 Reloc_section
* rel_
;
5108 // The .got.plt section.
5109 Output_data_space
* got_plt_
;
5110 // The number of PLT entries.
5111 unsigned int count_
;
5114 // Create the PLT section. The ordinary .got section is an argument,
5115 // since we need to refer to the start. We also create our own .got
5116 // section just for PLT entries.
5118 template<bool big_endian
>
5119 Output_data_plt_arm
<big_endian
>::Output_data_plt_arm(Layout
* layout
,
5120 Output_data_space
* got_plt
)
5121 : Output_section_data(4), got_plt_(got_plt
), count_(0)
5123 this->rel_
= new Reloc_section(false);
5124 layout
->add_output_section_data(".rel.plt", elfcpp::SHT_REL
,
5125 elfcpp::SHF_ALLOC
, this->rel_
, true, false,
5129 template<bool big_endian
>
5131 Output_data_plt_arm
<big_endian
>::do_adjust_output_section(Output_section
* os
)
5136 // Add an entry to the PLT.
5138 template<bool big_endian
>
5140 Output_data_plt_arm
<big_endian
>::add_entry(Symbol
* gsym
)
5142 gold_assert(!gsym
->has_plt_offset());
5144 // Note that when setting the PLT offset we skip the initial
5145 // reserved PLT entry.
5146 gsym
->set_plt_offset((this->count_
) * sizeof(plt_entry
)
5147 + sizeof(first_plt_entry
));
5151 section_offset_type got_offset
= this->got_plt_
->current_data_size();
5153 // Every PLT entry needs a GOT entry which points back to the PLT
5154 // entry (this will be changed by the dynamic linker, normally
5155 // lazily when the function is called).
5156 this->got_plt_
->set_current_data_size(got_offset
+ 4);
5158 // Every PLT entry needs a reloc.
5159 gsym
->set_needs_dynsym_entry();
5160 this->rel_
->add_global(gsym
, elfcpp::R_ARM_JUMP_SLOT
, this->got_plt_
,
5163 // Note that we don't need to save the symbol. The contents of the
5164 // PLT are independent of which symbols are used. The symbols only
5165 // appear in the relocations.
5169 // FIXME: This is not very flexible. Right now this has only been tested
5170 // on armv5te. If we are to support additional architecture features like
5171 // Thumb-2 or BE8, we need to make this more flexible like GNU ld.
5173 // The first entry in the PLT.
5174 template<bool big_endian
>
5175 const uint32_t Output_data_plt_arm
<big_endian
>::first_plt_entry
[5] =
5177 0xe52de004, // str lr, [sp, #-4]!
5178 0xe59fe004, // ldr lr, [pc, #4]
5179 0xe08fe00e, // add lr, pc, lr
5180 0xe5bef008, // ldr pc, [lr, #8]!
5181 0x00000000, // &GOT[0] - .
5184 // Subsequent entries in the PLT.
5186 template<bool big_endian
>
5187 const uint32_t Output_data_plt_arm
<big_endian
>::plt_entry
[3] =
5189 0xe28fc600, // add ip, pc, #0xNN00000
5190 0xe28cca00, // add ip, ip, #0xNN000
5191 0xe5bcf000, // ldr pc, [ip, #0xNNN]!
5194 // Write out the PLT. This uses the hand-coded instructions above,
5195 // and adjusts them as needed. This is all specified by the arm ELF
5196 // Processor Supplement.
5198 template<bool big_endian
>
5200 Output_data_plt_arm
<big_endian
>::do_write(Output_file
* of
)
5202 const off_t offset
= this->offset();
5203 const section_size_type oview_size
=
5204 convert_to_section_size_type(this->data_size());
5205 unsigned char* const oview
= of
->get_output_view(offset
, oview_size
);
5207 const off_t got_file_offset
= this->got_plt_
->offset();
5208 const section_size_type got_size
=
5209 convert_to_section_size_type(this->got_plt_
->data_size());
5210 unsigned char* const got_view
= of
->get_output_view(got_file_offset
,
5212 unsigned char* pov
= oview
;
5214 Arm_address plt_address
= this->address();
5215 Arm_address got_address
= this->got_plt_
->address();
5217 // Write first PLT entry. All but the last word are constants.
5218 const size_t num_first_plt_words
= (sizeof(first_plt_entry
)
5219 / sizeof(plt_entry
[0]));
5220 for (size_t i
= 0; i
< num_first_plt_words
- 1; i
++)
5221 elfcpp::Swap
<32, big_endian
>::writeval(pov
+ i
* 4, first_plt_entry
[i
]);
5222 // Last word in first PLT entry is &GOT[0] - .
5223 elfcpp::Swap
<32, big_endian
>::writeval(pov
+ 16,
5224 got_address
- (plt_address
+ 16));
5225 pov
+= sizeof(first_plt_entry
);
5227 unsigned char* got_pov
= got_view
;
5229 memset(got_pov
, 0, 12);
5232 const int rel_size
= elfcpp::Elf_sizes
<32>::rel_size
;
5233 unsigned int plt_offset
= sizeof(first_plt_entry
);
5234 unsigned int plt_rel_offset
= 0;
5235 unsigned int got_offset
= 12;
5236 const unsigned int count
= this->count_
;
5237 for (unsigned int i
= 0;
5240 pov
+= sizeof(plt_entry
),
5242 plt_offset
+= sizeof(plt_entry
),
5243 plt_rel_offset
+= rel_size
,
5246 // Set and adjust the PLT entry itself.
5247 int32_t offset
= ((got_address
+ got_offset
)
5248 - (plt_address
+ plt_offset
+ 8));
5250 gold_assert(offset
>= 0 && offset
< 0x0fffffff);
5251 uint32_t plt_insn0
= plt_entry
[0] | ((offset
>> 20) & 0xff);
5252 elfcpp::Swap
<32, big_endian
>::writeval(pov
, plt_insn0
);
5253 uint32_t plt_insn1
= plt_entry
[1] | ((offset
>> 12) & 0xff);
5254 elfcpp::Swap
<32, big_endian
>::writeval(pov
+ 4, plt_insn1
);
5255 uint32_t plt_insn2
= plt_entry
[2] | (offset
& 0xfff);
5256 elfcpp::Swap
<32, big_endian
>::writeval(pov
+ 8, plt_insn2
);
5258 // Set the entry in the GOT.
5259 elfcpp::Swap
<32, big_endian
>::writeval(got_pov
, plt_address
);
5262 gold_assert(static_cast<section_size_type
>(pov
- oview
) == oview_size
);
5263 gold_assert(static_cast<section_size_type
>(got_pov
- got_view
) == got_size
);
5265 of
->write_output_view(offset
, oview_size
, oview
);
5266 of
->write_output_view(got_file_offset
, got_size
, got_view
);
5269 // Create a PLT entry for a global symbol.
5271 template<bool big_endian
>
5273 Target_arm
<big_endian
>::make_plt_entry(Symbol_table
* symtab
, Layout
* layout
,
5276 if (gsym
->has_plt_offset())
5279 if (this->plt_
== NULL
)
5281 // Create the GOT sections first.
5282 this->got_section(symtab
, layout
);
5284 this->plt_
= new Output_data_plt_arm
<big_endian
>(layout
, this->got_plt_
);
5285 layout
->add_output_section_data(".plt", elfcpp::SHT_PROGBITS
,
5287 | elfcpp::SHF_EXECINSTR
),
5288 this->plt_
, false, false, false, false);
5290 this->plt_
->add_entry(gsym
);
5293 // Report an unsupported relocation against a local symbol.
5295 template<bool big_endian
>
5297 Target_arm
<big_endian
>::Scan::unsupported_reloc_local(
5298 Sized_relobj
<32, big_endian
>* object
,
5299 unsigned int r_type
)
5301 gold_error(_("%s: unsupported reloc %u against local symbol"),
5302 object
->name().c_str(), r_type
);
5305 // We are about to emit a dynamic relocation of type R_TYPE. If the
5306 // dynamic linker does not support it, issue an error. The GNU linker
5307 // only issues a non-PIC error for an allocated read-only section.
5308 // Here we know the section is allocated, but we don't know that it is
5309 // read-only. But we check for all the relocation types which the
5310 // glibc dynamic linker supports, so it seems appropriate to issue an
5311 // error even if the section is not read-only.
5313 template<bool big_endian
>
5315 Target_arm
<big_endian
>::Scan::check_non_pic(Relobj
* object
,
5316 unsigned int r_type
)
5320 // These are the relocation types supported by glibc for ARM.
5321 case elfcpp::R_ARM_RELATIVE
:
5322 case elfcpp::R_ARM_COPY
:
5323 case elfcpp::R_ARM_GLOB_DAT
:
5324 case elfcpp::R_ARM_JUMP_SLOT
:
5325 case elfcpp::R_ARM_ABS32
:
5326 case elfcpp::R_ARM_ABS32_NOI
:
5327 case elfcpp::R_ARM_PC24
:
5328 // FIXME: The following 3 types are not supported by Android's dynamic
5330 case elfcpp::R_ARM_TLS_DTPMOD32
:
5331 case elfcpp::R_ARM_TLS_DTPOFF32
:
5332 case elfcpp::R_ARM_TLS_TPOFF32
:
5336 // This prevents us from issuing more than one error per reloc
5337 // section. But we can still wind up issuing more than one
5338 // error per object file.
5339 if (this->issued_non_pic_error_
)
5341 object
->error(_("requires unsupported dynamic reloc; "
5342 "recompile with -fPIC"));
5343 this->issued_non_pic_error_
= true;
5346 case elfcpp::R_ARM_NONE
:
5351 // Scan a relocation for a local symbol.
5352 // FIXME: This only handles a subset of relocation types used by Android
5353 // on ARM v5te devices.
5355 template<bool big_endian
>
5357 Target_arm
<big_endian
>::Scan::local(Symbol_table
* symtab
,
5360 Sized_relobj
<32, big_endian
>* object
,
5361 unsigned int data_shndx
,
5362 Output_section
* output_section
,
5363 const elfcpp::Rel
<32, big_endian
>& reloc
,
5364 unsigned int r_type
,
5365 const elfcpp::Sym
<32, big_endian
>&)
5367 r_type
= get_real_reloc_type(r_type
);
5370 case elfcpp::R_ARM_NONE
:
5373 case elfcpp::R_ARM_ABS32
:
5374 case elfcpp::R_ARM_ABS32_NOI
:
5375 // If building a shared library (or a position-independent
5376 // executable), we need to create a dynamic relocation for
5377 // this location. The relocation applied at link time will
5378 // apply the link-time value, so we flag the location with
5379 // an R_ARM_RELATIVE relocation so the dynamic loader can
5380 // relocate it easily.
5381 if (parameters
->options().output_is_position_independent())
5383 Reloc_section
* rel_dyn
= target
->rel_dyn_section(layout
);
5384 unsigned int r_sym
= elfcpp::elf_r_sym
<32>(reloc
.get_r_info());
5385 // If we are to add more other reloc types than R_ARM_ABS32,
5386 // we need to add check_non_pic(object, r_type) here.
5387 rel_dyn
->add_local_relative(object
, r_sym
, elfcpp::R_ARM_RELATIVE
,
5388 output_section
, data_shndx
,
5389 reloc
.get_r_offset());
5393 case elfcpp::R_ARM_REL32
:
5394 case elfcpp::R_ARM_THM_CALL
:
5395 case elfcpp::R_ARM_CALL
:
5396 case elfcpp::R_ARM_PREL31
:
5397 case elfcpp::R_ARM_JUMP24
:
5398 case elfcpp::R_ARM_THM_JUMP24
:
5399 case elfcpp::R_ARM_THM_JUMP19
:
5400 case elfcpp::R_ARM_PLT32
:
5401 case elfcpp::R_ARM_THM_ABS5
:
5402 case elfcpp::R_ARM_ABS8
:
5403 case elfcpp::R_ARM_ABS12
:
5404 case elfcpp::R_ARM_ABS16
:
5405 case elfcpp::R_ARM_BASE_ABS
:
5406 case elfcpp::R_ARM_MOVW_ABS_NC
:
5407 case elfcpp::R_ARM_MOVT_ABS
:
5408 case elfcpp::R_ARM_THM_MOVW_ABS_NC
:
5409 case elfcpp::R_ARM_THM_MOVT_ABS
:
5410 case elfcpp::R_ARM_MOVW_PREL_NC
:
5411 case elfcpp::R_ARM_MOVT_PREL
:
5412 case elfcpp::R_ARM_THM_MOVW_PREL_NC
:
5413 case elfcpp::R_ARM_THM_MOVT_PREL
:
5414 case elfcpp::R_ARM_THM_JUMP6
:
5415 case elfcpp::R_ARM_THM_JUMP8
:
5416 case elfcpp::R_ARM_THM_JUMP11
:
5417 case elfcpp::R_ARM_V4BX
:
5420 case elfcpp::R_ARM_GOTOFF32
:
5421 // We need a GOT section:
5422 target
->got_section(symtab
, layout
);
5425 case elfcpp::R_ARM_BASE_PREL
:
5426 // FIXME: What about this?
5429 case elfcpp::R_ARM_GOT_BREL
:
5430 case elfcpp::R_ARM_GOT_PREL
:
5432 // The symbol requires a GOT entry.
5433 Output_data_got
<32, big_endian
>* got
=
5434 target
->got_section(symtab
, layout
);
5435 unsigned int r_sym
= elfcpp::elf_r_sym
<32>(reloc
.get_r_info());
5436 if (got
->add_local(object
, r_sym
, GOT_TYPE_STANDARD
))
5438 // If we are generating a shared object, we need to add a
5439 // dynamic RELATIVE relocation for this symbol's GOT entry.
5440 if (parameters
->options().output_is_position_independent())
5442 Reloc_section
* rel_dyn
= target
->rel_dyn_section(layout
);
5443 unsigned int r_sym
= elfcpp::elf_r_sym
<32>(reloc
.get_r_info());
5444 rel_dyn
->add_local_relative(
5445 object
, r_sym
, elfcpp::R_ARM_RELATIVE
, got
,
5446 object
->local_got_offset(r_sym
, GOT_TYPE_STANDARD
));
5452 case elfcpp::R_ARM_TARGET1
:
5453 // This should have been mapped to another type already.
5455 case elfcpp::R_ARM_COPY
:
5456 case elfcpp::R_ARM_GLOB_DAT
:
5457 case elfcpp::R_ARM_JUMP_SLOT
:
5458 case elfcpp::R_ARM_RELATIVE
:
5459 // These are relocations which should only be seen by the
5460 // dynamic linker, and should never be seen here.
5461 gold_error(_("%s: unexpected reloc %u in object file"),
5462 object
->name().c_str(), r_type
);
5466 unsupported_reloc_local(object
, r_type
);
5471 // Report an unsupported relocation against a global symbol.
5473 template<bool big_endian
>
5475 Target_arm
<big_endian
>::Scan::unsupported_reloc_global(
5476 Sized_relobj
<32, big_endian
>* object
,
5477 unsigned int r_type
,
5480 gold_error(_("%s: unsupported reloc %u against global symbol %s"),
5481 object
->name().c_str(), r_type
, gsym
->demangled_name().c_str());
5484 // Scan a relocation for a global symbol.
5485 // FIXME: This only handles a subset of relocation types used by Android
5486 // on ARM v5te devices.
5488 template<bool big_endian
>
5490 Target_arm
<big_endian
>::Scan::global(Symbol_table
* symtab
,
5493 Sized_relobj
<32, big_endian
>* object
,
5494 unsigned int data_shndx
,
5495 Output_section
* output_section
,
5496 const elfcpp::Rel
<32, big_endian
>& reloc
,
5497 unsigned int r_type
,
5500 r_type
= get_real_reloc_type(r_type
);
5503 case elfcpp::R_ARM_NONE
:
5506 case elfcpp::R_ARM_ABS32
:
5507 case elfcpp::R_ARM_ABS32_NOI
:
5509 // Make a dynamic relocation if necessary.
5510 if (gsym
->needs_dynamic_reloc(Symbol::ABSOLUTE_REF
))
5512 if (target
->may_need_copy_reloc(gsym
))
5514 target
->copy_reloc(symtab
, layout
, object
,
5515 data_shndx
, output_section
, gsym
, reloc
);
5517 else if (gsym
->can_use_relative_reloc(false))
5519 // If we are to add more other reloc types than R_ARM_ABS32,
5520 // we need to add check_non_pic(object, r_type) here.
5521 Reloc_section
* rel_dyn
= target
->rel_dyn_section(layout
);
5522 rel_dyn
->add_global_relative(gsym
, elfcpp::R_ARM_RELATIVE
,
5523 output_section
, object
,
5524 data_shndx
, reloc
.get_r_offset());
5528 // If we are to add more other reloc types than R_ARM_ABS32,
5529 // we need to add check_non_pic(object, r_type) here.
5530 Reloc_section
* rel_dyn
= target
->rel_dyn_section(layout
);
5531 rel_dyn
->add_global(gsym
, r_type
, output_section
, object
,
5532 data_shndx
, reloc
.get_r_offset());
5538 case elfcpp::R_ARM_MOVW_ABS_NC
:
5539 case elfcpp::R_ARM_MOVT_ABS
:
5540 case elfcpp::R_ARM_THM_MOVW_ABS_NC
:
5541 case elfcpp::R_ARM_THM_MOVT_ABS
:
5542 case elfcpp::R_ARM_MOVW_PREL_NC
:
5543 case elfcpp::R_ARM_MOVT_PREL
:
5544 case elfcpp::R_ARM_THM_MOVW_PREL_NC
:
5545 case elfcpp::R_ARM_THM_MOVT_PREL
:
5546 case elfcpp::R_ARM_THM_JUMP6
:
5547 case elfcpp::R_ARM_THM_JUMP8
:
5548 case elfcpp::R_ARM_THM_JUMP11
:
5549 case elfcpp::R_ARM_V4BX
:
5552 case elfcpp::R_ARM_THM_ABS5
:
5553 case elfcpp::R_ARM_ABS8
:
5554 case elfcpp::R_ARM_ABS12
:
5555 case elfcpp::R_ARM_ABS16
:
5556 case elfcpp::R_ARM_BASE_ABS
:
5558 // No dynamic relocs of this kinds.
5559 // Report the error in case of PIC.
5560 int flags
= Symbol::NON_PIC_REF
;
5561 if (gsym
->type() == elfcpp::STT_FUNC
5562 || gsym
->type() == elfcpp::STT_ARM_TFUNC
)
5563 flags
|= Symbol::FUNCTION_CALL
;
5564 if (gsym
->needs_dynamic_reloc(flags
))
5565 check_non_pic(object
, r_type
);
5569 case elfcpp::R_ARM_REL32
:
5570 case elfcpp::R_ARM_PREL31
:
5572 // Make a dynamic relocation if necessary.
5573 int flags
= Symbol::NON_PIC_REF
;
5574 if (gsym
->needs_dynamic_reloc(flags
))
5576 if (target
->may_need_copy_reloc(gsym
))
5578 target
->copy_reloc(symtab
, layout
, object
,
5579 data_shndx
, output_section
, gsym
, reloc
);
5583 check_non_pic(object
, r_type
);
5584 Reloc_section
* rel_dyn
= target
->rel_dyn_section(layout
);
5585 rel_dyn
->add_global(gsym
, r_type
, output_section
, object
,
5586 data_shndx
, reloc
.get_r_offset());
5592 case elfcpp::R_ARM_JUMP24
:
5593 case elfcpp::R_ARM_THM_JUMP24
:
5594 case elfcpp::R_ARM_THM_JUMP19
:
5595 case elfcpp::R_ARM_CALL
:
5596 case elfcpp::R_ARM_THM_CALL
:
5598 if (Target_arm
<big_endian
>::Scan::symbol_needs_plt_entry(gsym
))
5599 target
->make_plt_entry(symtab
, layout
, gsym
);
5602 // Check to see if this is a function that would need a PLT
5603 // but does not get one because the function symbol is untyped.
5604 // This happens in assembly code missing a proper .type directive.
5605 if ((!gsym
->is_undefined() || parameters
->options().shared())
5606 && !parameters
->doing_static_link()
5607 && gsym
->type() == elfcpp::STT_NOTYPE
5608 && (gsym
->is_from_dynobj()
5609 || gsym
->is_undefined()
5610 || gsym
->is_preemptible()))
5611 gold_error(_("%s is not a function."),
5612 gsym
->demangled_name().c_str());
5616 case elfcpp::R_ARM_PLT32
:
5617 // If the symbol is fully resolved, this is just a relative
5618 // local reloc. Otherwise we need a PLT entry.
5619 if (gsym
->final_value_is_known())
5621 // If building a shared library, we can also skip the PLT entry
5622 // if the symbol is defined in the output file and is protected
5624 if (gsym
->is_defined()
5625 && !gsym
->is_from_dynobj()
5626 && !gsym
->is_preemptible())
5628 target
->make_plt_entry(symtab
, layout
, gsym
);
5631 case elfcpp::R_ARM_GOTOFF32
:
5632 // We need a GOT section.
5633 target
->got_section(symtab
, layout
);
5636 case elfcpp::R_ARM_BASE_PREL
:
5637 // FIXME: What about this?
5640 case elfcpp::R_ARM_GOT_BREL
:
5641 case elfcpp::R_ARM_GOT_PREL
:
5643 // The symbol requires a GOT entry.
5644 Output_data_got
<32, big_endian
>* got
=
5645 target
->got_section(symtab
, layout
);
5646 if (gsym
->final_value_is_known())
5647 got
->add_global(gsym
, GOT_TYPE_STANDARD
);
5650 // If this symbol is not fully resolved, we need to add a
5651 // GOT entry with a dynamic relocation.
5652 Reloc_section
* rel_dyn
= target
->rel_dyn_section(layout
);
5653 if (gsym
->is_from_dynobj()
5654 || gsym
->is_undefined()
5655 || gsym
->is_preemptible())
5656 got
->add_global_with_rel(gsym
, GOT_TYPE_STANDARD
,
5657 rel_dyn
, elfcpp::R_ARM_GLOB_DAT
);
5660 if (got
->add_global(gsym
, GOT_TYPE_STANDARD
))
5661 rel_dyn
->add_global_relative(
5662 gsym
, elfcpp::R_ARM_RELATIVE
, got
,
5663 gsym
->got_offset(GOT_TYPE_STANDARD
));
5669 case elfcpp::R_ARM_TARGET1
:
5670 // This should have been mapped to another type already.
5672 case elfcpp::R_ARM_COPY
:
5673 case elfcpp::R_ARM_GLOB_DAT
:
5674 case elfcpp::R_ARM_JUMP_SLOT
:
5675 case elfcpp::R_ARM_RELATIVE
:
5676 // These are relocations which should only be seen by the
5677 // dynamic linker, and should never be seen here.
5678 gold_error(_("%s: unexpected reloc %u in object file"),
5679 object
->name().c_str(), r_type
);
5683 unsupported_reloc_global(object
, r_type
, gsym
);
5688 // Process relocations for gc.
5690 template<bool big_endian
>
5692 Target_arm
<big_endian
>::gc_process_relocs(Symbol_table
* symtab
,
5694 Sized_relobj
<32, big_endian
>* object
,
5695 unsigned int data_shndx
,
5697 const unsigned char* prelocs
,
5699 Output_section
* output_section
,
5700 bool needs_special_offset_handling
,
5701 size_t local_symbol_count
,
5702 const unsigned char* plocal_symbols
)
5704 typedef Target_arm
<big_endian
> Arm
;
5705 typedef typename Target_arm
<big_endian
>::Scan Scan
;
5707 gold::gc_process_relocs
<32, big_endian
, Arm
, elfcpp::SHT_REL
, Scan
>(
5716 needs_special_offset_handling
,
5721 // Scan relocations for a section.
5723 template<bool big_endian
>
5725 Target_arm
<big_endian
>::scan_relocs(Symbol_table
* symtab
,
5727 Sized_relobj
<32, big_endian
>* object
,
5728 unsigned int data_shndx
,
5729 unsigned int sh_type
,
5730 const unsigned char* prelocs
,
5732 Output_section
* output_section
,
5733 bool needs_special_offset_handling
,
5734 size_t local_symbol_count
,
5735 const unsigned char* plocal_symbols
)
5737 typedef typename Target_arm
<big_endian
>::Scan Scan
;
5738 if (sh_type
== elfcpp::SHT_RELA
)
5740 gold_error(_("%s: unsupported RELA reloc section"),
5741 object
->name().c_str());
5745 gold::scan_relocs
<32, big_endian
, Target_arm
, elfcpp::SHT_REL
, Scan
>(
5754 needs_special_offset_handling
,
5759 // Finalize the sections.
5761 template<bool big_endian
>
5763 Target_arm
<big_endian
>::do_finalize_sections(
5765 const Input_objects
* input_objects
,
5766 Symbol_table
* symtab
)
5768 // Merge processor-specific flags.
5769 for (Input_objects::Relobj_iterator p
= input_objects
->relobj_begin();
5770 p
!= input_objects
->relobj_end();
5773 Arm_relobj
<big_endian
>* arm_relobj
=
5774 Arm_relobj
<big_endian
>::as_arm_relobj(*p
);
5775 this->merge_processor_specific_flags(
5777 arm_relobj
->processor_specific_flags());
5778 this->merge_object_attributes(arm_relobj
->name().c_str(),
5779 arm_relobj
->attributes_section_data());
5783 for (Input_objects::Dynobj_iterator p
= input_objects
->dynobj_begin();
5784 p
!= input_objects
->dynobj_end();
5787 Arm_dynobj
<big_endian
>* arm_dynobj
=
5788 Arm_dynobj
<big_endian
>::as_arm_dynobj(*p
);
5789 this->merge_processor_specific_flags(
5791 arm_dynobj
->processor_specific_flags());
5792 this->merge_object_attributes(arm_dynobj
->name().c_str(),
5793 arm_dynobj
->attributes_section_data());
5797 const Object_attribute
* cpu_arch_attr
=
5798 this->get_aeabi_object_attribute(elfcpp::Tag_CPU_arch
);
5799 if (cpu_arch_attr
->int_value() > elfcpp::TAG_CPU_ARCH_V4
)
5800 this->set_may_use_blx(true);
5802 // Check if we need to use Cortex-A8 workaround.
5803 if (parameters
->options().user_set_fix_cortex_a8())
5804 this->fix_cortex_a8_
= parameters
->options().fix_cortex_a8();
5807 // If neither --fix-cortex-a8 nor --no-fix-cortex-a8 is used, turn on
5808 // Cortex-A8 erratum workaround for ARMv7-A or ARMv7 with unknown
5810 const Object_attribute
* cpu_arch_profile_attr
=
5811 this->get_aeabi_object_attribute(elfcpp::Tag_CPU_arch_profile
);
5812 this->fix_cortex_a8_
=
5813 (cpu_arch_attr
->int_value() == elfcpp::TAG_CPU_ARCH_V7
5814 && (cpu_arch_profile_attr
->int_value() == 'A'
5815 || cpu_arch_profile_attr
->int_value() == 0));
5818 // Check if we can use V4BX interworking.
5819 // The V4BX interworking stub contains BX instruction,
5820 // which is not specified for some profiles.
5821 if (this->fix_v4bx() == 2 && !this->may_use_blx())
5822 gold_error(_("unable to provide V4BX reloc interworking fix up; "
5823 "the target profile does not support BX instruction"));
5825 // Fill in some more dynamic tags.
5826 const Reloc_section
* rel_plt
= (this->plt_
== NULL
5828 : this->plt_
->rel_plt());
5829 layout
->add_target_dynamic_tags(true, this->got_plt_
, rel_plt
,
5830 this->rel_dyn_
, true);
5832 // Emit any relocs we saved in an attempt to avoid generating COPY
5834 if (this->copy_relocs_
.any_saved_relocs())
5835 this->copy_relocs_
.emit(this->rel_dyn_section(layout
));
5837 // Handle the .ARM.exidx section.
5838 Output_section
* exidx_section
= layout
->find_output_section(".ARM.exidx");
5839 if (exidx_section
!= NULL
5840 && exidx_section
->type() == elfcpp::SHT_ARM_EXIDX
5841 && !parameters
->options().relocatable())
5843 // Create __exidx_start and __exdix_end symbols.
5844 symtab
->define_in_output_data("__exidx_start", NULL
,
5845 Symbol_table::PREDEFINED
,
5846 exidx_section
, 0, 0, elfcpp::STT_OBJECT
,
5847 elfcpp::STB_GLOBAL
, elfcpp::STV_HIDDEN
, 0,
5849 symtab
->define_in_output_data("__exidx_end", NULL
,
5850 Symbol_table::PREDEFINED
,
5851 exidx_section
, 0, 0, elfcpp::STT_OBJECT
,
5852 elfcpp::STB_GLOBAL
, elfcpp::STV_HIDDEN
, 0,
5855 // For the ARM target, we need to add a PT_ARM_EXIDX segment for
5856 // the .ARM.exidx section.
5857 if (!layout
->script_options()->saw_phdrs_clause())
5859 gold_assert(layout
->find_output_segment(elfcpp::PT_ARM_EXIDX
, 0, 0)
5861 Output_segment
* exidx_segment
=
5862 layout
->make_output_segment(elfcpp::PT_ARM_EXIDX
, elfcpp::PF_R
);
5863 exidx_segment
->add_output_section(exidx_section
, elfcpp::PF_R
,
5868 // Create an .ARM.attributes section if there is not one already.
5869 Output_attributes_section_data
* attributes_section
=
5870 new Output_attributes_section_data(*this->attributes_section_data_
);
5871 layout
->add_output_section_data(".ARM.attributes",
5872 elfcpp::SHT_ARM_ATTRIBUTES
, 0,
5873 attributes_section
, false, false, false,
5877 // Return whether a direct absolute static relocation needs to be applied.
5878 // In cases where Scan::local() or Scan::global() has created
5879 // a dynamic relocation other than R_ARM_RELATIVE, the addend
5880 // of the relocation is carried in the data, and we must not
5881 // apply the static relocation.
5883 template<bool big_endian
>
5885 Target_arm
<big_endian
>::Relocate::should_apply_static_reloc(
5886 const Sized_symbol
<32>* gsym
,
5889 Output_section
* output_section
)
5891 // If the output section is not allocated, then we didn't call
5892 // scan_relocs, we didn't create a dynamic reloc, and we must apply
5894 if ((output_section
->flags() & elfcpp::SHF_ALLOC
) == 0)
5897 // For local symbols, we will have created a non-RELATIVE dynamic
5898 // relocation only if (a) the output is position independent,
5899 // (b) the relocation is absolute (not pc- or segment-relative), and
5900 // (c) the relocation is not 32 bits wide.
5902 return !(parameters
->options().output_is_position_independent()
5903 && (ref_flags
& Symbol::ABSOLUTE_REF
)
5906 // For global symbols, we use the same helper routines used in the
5907 // scan pass. If we did not create a dynamic relocation, or if we
5908 // created a RELATIVE dynamic relocation, we should apply the static
5910 bool has_dyn
= gsym
->needs_dynamic_reloc(ref_flags
);
5911 bool is_rel
= (ref_flags
& Symbol::ABSOLUTE_REF
)
5912 && gsym
->can_use_relative_reloc(ref_flags
5913 & Symbol::FUNCTION_CALL
);
5914 return !has_dyn
|| is_rel
;
5917 // Perform a relocation.
5919 template<bool big_endian
>
5921 Target_arm
<big_endian
>::Relocate::relocate(
5922 const Relocate_info
<32, big_endian
>* relinfo
,
5924 Output_section
*output_section
,
5926 const elfcpp::Rel
<32, big_endian
>& rel
,
5927 unsigned int r_type
,
5928 const Sized_symbol
<32>* gsym
,
5929 const Symbol_value
<32>* psymval
,
5930 unsigned char* view
,
5931 Arm_address address
,
5932 section_size_type
/* view_size */ )
5934 typedef Arm_relocate_functions
<big_endian
> Arm_relocate_functions
;
5936 r_type
= get_real_reloc_type(r_type
);
5938 const Arm_relobj
<big_endian
>* object
=
5939 Arm_relobj
<big_endian
>::as_arm_relobj(relinfo
->object
);
5941 // If the final branch target of a relocation is THUMB instruction, this
5942 // is 1. Otherwise it is 0.
5943 Arm_address thumb_bit
= 0;
5944 Symbol_value
<32> symval
;
5945 bool is_weakly_undefined_without_plt
= false;
5946 if (relnum
!= Target_arm
<big_endian
>::fake_relnum_for_stubs
)
5950 // This is a global symbol. Determine if we use PLT and if the
5951 // final target is THUMB.
5952 if (gsym
->use_plt_offset(reloc_is_non_pic(r_type
)))
5954 // This uses a PLT, change the symbol value.
5955 symval
.set_output_value(target
->plt_section()->address()
5956 + gsym
->plt_offset());
5959 else if (gsym
->is_weak_undefined())
5961 // This is a weakly undefined symbol and we do not use PLT
5962 // for this relocation. A branch targeting this symbol will
5963 // be converted into an NOP.
5964 is_weakly_undefined_without_plt
= true;
5968 // Set thumb bit if symbol:
5969 // -Has type STT_ARM_TFUNC or
5970 // -Has type STT_FUNC, is defined and with LSB in value set.
5972 (((gsym
->type() == elfcpp::STT_ARM_TFUNC
)
5973 || (gsym
->type() == elfcpp::STT_FUNC
5974 && !gsym
->is_undefined()
5975 && ((psymval
->value(object
, 0) & 1) != 0)))
5982 // This is a local symbol. Determine if the final target is THUMB.
5983 // We saved this information when all the local symbols were read.
5984 elfcpp::Elf_types
<32>::Elf_WXword r_info
= rel
.get_r_info();
5985 unsigned int r_sym
= elfcpp::elf_r_sym
<32>(r_info
);
5986 thumb_bit
= object
->local_symbol_is_thumb_function(r_sym
) ? 1 : 0;
5991 // This is a fake relocation synthesized for a stub. It does not have
5992 // a real symbol. We just look at the LSB of the symbol value to
5993 // determine if the target is THUMB or not.
5994 thumb_bit
= ((psymval
->value(object
, 0) & 1) != 0);
5997 // Strip LSB if this points to a THUMB target.
5999 && Target_arm
<big_endian
>::reloc_uses_thumb_bit(r_type
)
6000 && ((psymval
->value(object
, 0) & 1) != 0))
6002 Arm_address stripped_value
=
6003 psymval
->value(object
, 0) & ~static_cast<Arm_address
>(1);
6004 symval
.set_output_value(stripped_value
);
6008 // Get the GOT offset if needed.
6009 // The GOT pointer points to the end of the GOT section.
6010 // We need to subtract the size of the GOT section to get
6011 // the actual offset to use in the relocation.
6012 bool have_got_offset
= false;
6013 unsigned int got_offset
= 0;
6016 case elfcpp::R_ARM_GOT_BREL
:
6017 case elfcpp::R_ARM_GOT_PREL
:
6020 gold_assert(gsym
->has_got_offset(GOT_TYPE_STANDARD
));
6021 got_offset
= (gsym
->got_offset(GOT_TYPE_STANDARD
)
6022 - target
->got_size());
6026 unsigned int r_sym
= elfcpp::elf_r_sym
<32>(rel
.get_r_info());
6027 gold_assert(object
->local_has_got_offset(r_sym
, GOT_TYPE_STANDARD
));
6028 got_offset
= (object
->local_got_offset(r_sym
, GOT_TYPE_STANDARD
)
6029 - target
->got_size());
6031 have_got_offset
= true;
6038 // To look up relocation stubs, we need to pass the symbol table index of
6040 unsigned int r_sym
= elfcpp::elf_r_sym
<32>(rel
.get_r_info());
6042 typename
Arm_relocate_functions::Status reloc_status
=
6043 Arm_relocate_functions::STATUS_OKAY
;
6046 case elfcpp::R_ARM_NONE
:
6049 case elfcpp::R_ARM_ABS8
:
6050 if (should_apply_static_reloc(gsym
, Symbol::ABSOLUTE_REF
, false,
6052 reloc_status
= Arm_relocate_functions::abs8(view
, object
, psymval
);
6055 case elfcpp::R_ARM_ABS12
:
6056 if (should_apply_static_reloc(gsym
, Symbol::ABSOLUTE_REF
, false,
6058 reloc_status
= Arm_relocate_functions::abs12(view
, object
, psymval
);
6061 case elfcpp::R_ARM_ABS16
:
6062 if (should_apply_static_reloc(gsym
, Symbol::ABSOLUTE_REF
, false,
6064 reloc_status
= Arm_relocate_functions::abs16(view
, object
, psymval
);
6067 case elfcpp::R_ARM_ABS32
:
6068 if (should_apply_static_reloc(gsym
, Symbol::ABSOLUTE_REF
, true,
6070 reloc_status
= Arm_relocate_functions::abs32(view
, object
, psymval
,
6074 case elfcpp::R_ARM_ABS32_NOI
:
6075 if (should_apply_static_reloc(gsym
, Symbol::ABSOLUTE_REF
, true,
6077 // No thumb bit for this relocation: (S + A)
6078 reloc_status
= Arm_relocate_functions::abs32(view
, object
, psymval
,
6082 case elfcpp::R_ARM_MOVW_ABS_NC
:
6083 if (should_apply_static_reloc(gsym
, Symbol::ABSOLUTE_REF
, true,
6085 reloc_status
= Arm_relocate_functions::movw_abs_nc(view
, object
,
6089 gold_error(_("relocation R_ARM_MOVW_ABS_NC cannot be used when making"
6090 "a shared object; recompile with -fPIC"));
6093 case elfcpp::R_ARM_MOVT_ABS
:
6094 if (should_apply_static_reloc(gsym
, Symbol::ABSOLUTE_REF
, true,
6096 reloc_status
= Arm_relocate_functions::movt_abs(view
, object
, psymval
);
6098 gold_error(_("relocation R_ARM_MOVT_ABS cannot be used when making"
6099 "a shared object; recompile with -fPIC"));
6102 case elfcpp::R_ARM_THM_MOVW_ABS_NC
:
6103 if (should_apply_static_reloc(gsym
, Symbol::ABSOLUTE_REF
, true,
6105 reloc_status
= Arm_relocate_functions::thm_movw_abs_nc(view
, object
,
6109 gold_error(_("relocation R_ARM_THM_MOVW_ABS_NC cannot be used when"
6110 "making a shared object; recompile with -fPIC"));
6113 case elfcpp::R_ARM_THM_MOVT_ABS
:
6114 if (should_apply_static_reloc(gsym
, Symbol::ABSOLUTE_REF
, true,
6116 reloc_status
= Arm_relocate_functions::thm_movt_abs(view
, object
,
6119 gold_error(_("relocation R_ARM_THM_MOVT_ABS cannot be used when"
6120 "making a shared object; recompile with -fPIC"));
6123 case elfcpp::R_ARM_MOVW_PREL_NC
:
6124 reloc_status
= Arm_relocate_functions::movw_prel_nc(view
, object
,
6129 case elfcpp::R_ARM_MOVT_PREL
:
6130 reloc_status
= Arm_relocate_functions::movt_prel(view
, object
,
6134 case elfcpp::R_ARM_THM_MOVW_PREL_NC
:
6135 reloc_status
= Arm_relocate_functions::thm_movw_prel_nc(view
, object
,
6140 case elfcpp::R_ARM_THM_MOVT_PREL
:
6141 reloc_status
= Arm_relocate_functions::thm_movt_prel(view
, object
,
6145 case elfcpp::R_ARM_REL32
:
6146 reloc_status
= Arm_relocate_functions::rel32(view
, object
, psymval
,
6147 address
, thumb_bit
);
6150 case elfcpp::R_ARM_THM_ABS5
:
6151 if (should_apply_static_reloc(gsym
, Symbol::ABSOLUTE_REF
, false,
6153 reloc_status
= Arm_relocate_functions::thm_abs5(view
, object
, psymval
);
6156 case elfcpp::R_ARM_THM_CALL
:
6158 Arm_relocate_functions::thm_call(relinfo
, view
, gsym
, object
, r_sym
,
6159 psymval
, address
, thumb_bit
,
6160 is_weakly_undefined_without_plt
);
6163 case elfcpp::R_ARM_XPC25
:
6165 Arm_relocate_functions::xpc25(relinfo
, view
, gsym
, object
, r_sym
,
6166 psymval
, address
, thumb_bit
,
6167 is_weakly_undefined_without_plt
);
6170 case elfcpp::R_ARM_THM_XPC22
:
6172 Arm_relocate_functions::thm_xpc22(relinfo
, view
, gsym
, object
, r_sym
,
6173 psymval
, address
, thumb_bit
,
6174 is_weakly_undefined_without_plt
);
6177 case elfcpp::R_ARM_GOTOFF32
:
6179 Arm_address got_origin
;
6180 got_origin
= target
->got_plt_section()->address();
6181 reloc_status
= Arm_relocate_functions::rel32(view
, object
, psymval
,
6182 got_origin
, thumb_bit
);
6186 case elfcpp::R_ARM_BASE_PREL
:
6189 // Get the addressing origin of the output segment defining the
6190 // symbol gsym (AAELF 4.6.1.2 Relocation types)
6191 gold_assert(gsym
!= NULL
);
6192 if (gsym
->source() == Symbol::IN_OUTPUT_SEGMENT
)
6193 origin
= gsym
->output_segment()->vaddr();
6194 else if (gsym
->source () == Symbol::IN_OUTPUT_DATA
)
6195 origin
= gsym
->output_data()->address();
6198 gold_error_at_location(relinfo
, relnum
, rel
.get_r_offset(),
6199 _("cannot find origin of R_ARM_BASE_PREL"));
6202 reloc_status
= Arm_relocate_functions::base_prel(view
, origin
, address
);
6206 case elfcpp::R_ARM_BASE_ABS
:
6208 if (!should_apply_static_reloc(gsym
, Symbol::ABSOLUTE_REF
, true,
6213 // Get the addressing origin of the output segment defining
6214 // the symbol gsym (AAELF 4.6.1.2 Relocation types).
6216 // R_ARM_BASE_ABS with the NULL symbol will give the
6217 // absolute address of the GOT origin (GOT_ORG) (see ARM IHI
6218 // 0044C (AAELF): 4.6.1.8 Proxy generating relocations).
6219 origin
= target
->got_plt_section()->address();
6220 else if (gsym
->source() == Symbol::IN_OUTPUT_SEGMENT
)
6221 origin
= gsym
->output_segment()->vaddr();
6222 else if (gsym
->source () == Symbol::IN_OUTPUT_DATA
)
6223 origin
= gsym
->output_data()->address();
6226 gold_error_at_location(relinfo
, relnum
, rel
.get_r_offset(),
6227 _("cannot find origin of R_ARM_BASE_ABS"));
6231 reloc_status
= Arm_relocate_functions::base_abs(view
, origin
);
6235 case elfcpp::R_ARM_GOT_BREL
:
6236 gold_assert(have_got_offset
);
6237 reloc_status
= Arm_relocate_functions::got_brel(view
, got_offset
);
6240 case elfcpp::R_ARM_GOT_PREL
:
6241 gold_assert(have_got_offset
);
6242 // Get the address origin for GOT PLT, which is allocated right
6243 // after the GOT section, to calculate an absolute address of
6244 // the symbol GOT entry (got_origin + got_offset).
6245 Arm_address got_origin
;
6246 got_origin
= target
->got_plt_section()->address();
6247 reloc_status
= Arm_relocate_functions::got_prel(view
,
6248 got_origin
+ got_offset
,
6252 case elfcpp::R_ARM_PLT32
:
6253 gold_assert(gsym
== NULL
6254 || gsym
->has_plt_offset()
6255 || gsym
->final_value_is_known()
6256 || (gsym
->is_defined()
6257 && !gsym
->is_from_dynobj()
6258 && !gsym
->is_preemptible()));
6260 Arm_relocate_functions::plt32(relinfo
, view
, gsym
, object
, r_sym
,
6261 psymval
, address
, thumb_bit
,
6262 is_weakly_undefined_without_plt
);
6265 case elfcpp::R_ARM_CALL
:
6267 Arm_relocate_functions::call(relinfo
, view
, gsym
, object
, r_sym
,
6268 psymval
, address
, thumb_bit
,
6269 is_weakly_undefined_without_plt
);
6272 case elfcpp::R_ARM_JUMP24
:
6274 Arm_relocate_functions::jump24(relinfo
, view
, gsym
, object
, r_sym
,
6275 psymval
, address
, thumb_bit
,
6276 is_weakly_undefined_without_plt
);
6279 case elfcpp::R_ARM_THM_JUMP24
:
6281 Arm_relocate_functions::thm_jump24(relinfo
, view
, gsym
, object
, r_sym
,
6282 psymval
, address
, thumb_bit
,
6283 is_weakly_undefined_without_plt
);
6286 case elfcpp::R_ARM_THM_JUMP19
:
6288 Arm_relocate_functions::thm_jump19(view
, object
, psymval
, address
,
6292 case elfcpp::R_ARM_THM_JUMP6
:
6294 Arm_relocate_functions::thm_jump6(view
, object
, psymval
, address
);
6297 case elfcpp::R_ARM_THM_JUMP8
:
6299 Arm_relocate_functions::thm_jump8(view
, object
, psymval
, address
);
6302 case elfcpp::R_ARM_THM_JUMP11
:
6304 Arm_relocate_functions::thm_jump11(view
, object
, psymval
, address
);
6307 case elfcpp::R_ARM_PREL31
:
6308 reloc_status
= Arm_relocate_functions::prel31(view
, object
, psymval
,
6309 address
, thumb_bit
);
6312 case elfcpp::R_ARM_V4BX
:
6313 if (target
->fix_v4bx() > 0)
6315 Arm_relocate_functions::v4bx(relinfo
, view
, object
, address
,
6316 (target
->fix_v4bx() == 2));
6319 case elfcpp::R_ARM_TARGET1
:
6320 // This should have been mapped to another type already.
6322 case elfcpp::R_ARM_COPY
:
6323 case elfcpp::R_ARM_GLOB_DAT
:
6324 case elfcpp::R_ARM_JUMP_SLOT
:
6325 case elfcpp::R_ARM_RELATIVE
:
6326 // These are relocations which should only be seen by the
6327 // dynamic linker, and should never be seen here.
6328 gold_error_at_location(relinfo
, relnum
, rel
.get_r_offset(),
6329 _("unexpected reloc %u in object file"),
6334 gold_error_at_location(relinfo
, relnum
, rel
.get_r_offset(),
6335 _("unsupported reloc %u"),
6340 // Report any errors.
6341 switch (reloc_status
)
6343 case Arm_relocate_functions::STATUS_OKAY
:
6345 case Arm_relocate_functions::STATUS_OVERFLOW
:
6346 gold_error_at_location(relinfo
, relnum
, rel
.get_r_offset(),
6347 _("relocation overflow in relocation %u"),
6350 case Arm_relocate_functions::STATUS_BAD_RELOC
:
6351 gold_error_at_location(
6355 _("unexpected opcode while processing relocation %u"),
6365 // Relocate section data.
6367 template<bool big_endian
>
6369 Target_arm
<big_endian
>::relocate_section(
6370 const Relocate_info
<32, big_endian
>* relinfo
,
6371 unsigned int sh_type
,
6372 const unsigned char* prelocs
,
6374 Output_section
* output_section
,
6375 bool needs_special_offset_handling
,
6376 unsigned char* view
,
6377 Arm_address address
,
6378 section_size_type view_size
,
6379 const Reloc_symbol_changes
* reloc_symbol_changes
)
6381 typedef typename Target_arm
<big_endian
>::Relocate Arm_relocate
;
6382 gold_assert(sh_type
== elfcpp::SHT_REL
);
6384 Arm_input_section
<big_endian
>* arm_input_section
=
6385 this->find_arm_input_section(relinfo
->object
, relinfo
->data_shndx
);
6387 // This is an ARM input section and the view covers the whole output
6389 if (arm_input_section
!= NULL
)
6391 gold_assert(needs_special_offset_handling
);
6392 Arm_address section_address
= arm_input_section
->address();
6393 section_size_type section_size
= arm_input_section
->data_size();
6395 gold_assert((arm_input_section
->address() >= address
)
6396 && ((arm_input_section
->address()
6397 + arm_input_section
->data_size())
6398 <= (address
+ view_size
)));
6400 off_t offset
= section_address
- address
;
6403 view_size
= section_size
;
6406 gold::relocate_section
<32, big_endian
, Target_arm
, elfcpp::SHT_REL
,
6413 needs_special_offset_handling
,
6417 reloc_symbol_changes
);
6420 // Return the size of a relocation while scanning during a relocatable
6423 template<bool big_endian
>
6425 Target_arm
<big_endian
>::Relocatable_size_for_reloc::get_size_for_reloc(
6426 unsigned int r_type
,
6429 r_type
= get_real_reloc_type(r_type
);
6432 case elfcpp::R_ARM_NONE
:
6435 case elfcpp::R_ARM_ABS8
:
6438 case elfcpp::R_ARM_ABS16
:
6439 case elfcpp::R_ARM_THM_ABS5
:
6440 case elfcpp::R_ARM_THM_JUMP6
:
6441 case elfcpp::R_ARM_THM_JUMP8
:
6442 case elfcpp::R_ARM_THM_JUMP11
:
6445 case elfcpp::R_ARM_ABS32
:
6446 case elfcpp::R_ARM_ABS32_NOI
:
6447 case elfcpp::R_ARM_ABS12
:
6448 case elfcpp::R_ARM_BASE_ABS
:
6449 case elfcpp::R_ARM_REL32
:
6450 case elfcpp::R_ARM_THM_CALL
:
6451 case elfcpp::R_ARM_GOTOFF32
:
6452 case elfcpp::R_ARM_BASE_PREL
:
6453 case elfcpp::R_ARM_GOT_BREL
:
6454 case elfcpp::R_ARM_GOT_PREL
:
6455 case elfcpp::R_ARM_PLT32
:
6456 case elfcpp::R_ARM_CALL
:
6457 case elfcpp::R_ARM_JUMP24
:
6458 case elfcpp::R_ARM_PREL31
:
6459 case elfcpp::R_ARM_MOVW_ABS_NC
:
6460 case elfcpp::R_ARM_MOVT_ABS
:
6461 case elfcpp::R_ARM_THM_MOVW_ABS_NC
:
6462 case elfcpp::R_ARM_THM_MOVT_ABS
:
6463 case elfcpp::R_ARM_MOVW_PREL_NC
:
6464 case elfcpp::R_ARM_MOVT_PREL
:
6465 case elfcpp::R_ARM_THM_MOVW_PREL_NC
:
6466 case elfcpp::R_ARM_THM_MOVT_PREL
:
6467 case elfcpp::R_ARM_V4BX
:
6470 case elfcpp::R_ARM_TARGET1
:
6471 // This should have been mapped to another type already.
6473 case elfcpp::R_ARM_COPY
:
6474 case elfcpp::R_ARM_GLOB_DAT
:
6475 case elfcpp::R_ARM_JUMP_SLOT
:
6476 case elfcpp::R_ARM_RELATIVE
:
6477 // These are relocations which should only be seen by the
6478 // dynamic linker, and should never be seen here.
6479 gold_error(_("%s: unexpected reloc %u in object file"),
6480 object
->name().c_str(), r_type
);
6484 object
->error(_("unsupported reloc %u in object file"), r_type
);
6489 // Scan the relocs during a relocatable link.
6491 template<bool big_endian
>
6493 Target_arm
<big_endian
>::scan_relocatable_relocs(
6494 Symbol_table
* symtab
,
6496 Sized_relobj
<32, big_endian
>* object
,
6497 unsigned int data_shndx
,
6498 unsigned int sh_type
,
6499 const unsigned char* prelocs
,
6501 Output_section
* output_section
,
6502 bool needs_special_offset_handling
,
6503 size_t local_symbol_count
,
6504 const unsigned char* plocal_symbols
,
6505 Relocatable_relocs
* rr
)
6507 gold_assert(sh_type
== elfcpp::SHT_REL
);
6509 typedef gold::Default_scan_relocatable_relocs
<elfcpp::SHT_REL
,
6510 Relocatable_size_for_reloc
> Scan_relocatable_relocs
;
6512 gold::scan_relocatable_relocs
<32, big_endian
, elfcpp::SHT_REL
,
6513 Scan_relocatable_relocs
>(
6521 needs_special_offset_handling
,
6527 // Relocate a section during a relocatable link.
6529 template<bool big_endian
>
6531 Target_arm
<big_endian
>::relocate_for_relocatable(
6532 const Relocate_info
<32, big_endian
>* relinfo
,
6533 unsigned int sh_type
,
6534 const unsigned char* prelocs
,
6536 Output_section
* output_section
,
6537 off_t offset_in_output_section
,
6538 const Relocatable_relocs
* rr
,
6539 unsigned char* view
,
6540 Arm_address view_address
,
6541 section_size_type view_size
,
6542 unsigned char* reloc_view
,
6543 section_size_type reloc_view_size
)
6545 gold_assert(sh_type
== elfcpp::SHT_REL
);
6547 gold::relocate_for_relocatable
<32, big_endian
, elfcpp::SHT_REL
>(
6552 offset_in_output_section
,
6561 // Return the value to use for a dynamic symbol which requires special
6562 // treatment. This is how we support equality comparisons of function
6563 // pointers across shared library boundaries, as described in the
6564 // processor specific ABI supplement.
6566 template<bool big_endian
>
6568 Target_arm
<big_endian
>::do_dynsym_value(const Symbol
* gsym
) const
6570 gold_assert(gsym
->is_from_dynobj() && gsym
->has_plt_offset());
6571 return this->plt_section()->address() + gsym
->plt_offset();
6574 // Map platform-specific relocs to real relocs
6576 template<bool big_endian
>
6578 Target_arm
<big_endian
>::get_real_reloc_type (unsigned int r_type
)
6582 case elfcpp::R_ARM_TARGET1
:
6583 // This is either R_ARM_ABS32 or R_ARM_REL32;
6584 return elfcpp::R_ARM_ABS32
;
6586 case elfcpp::R_ARM_TARGET2
:
6587 // This can be any reloc type but ususally is R_ARM_GOT_PREL
6588 return elfcpp::R_ARM_GOT_PREL
;
6595 // Whether if two EABI versions V1 and V2 are compatible.
6597 template<bool big_endian
>
6599 Target_arm
<big_endian
>::are_eabi_versions_compatible(
6600 elfcpp::Elf_Word v1
,
6601 elfcpp::Elf_Word v2
)
6603 // v4 and v5 are the same spec before and after it was released,
6604 // so allow mixing them.
6605 if ((v1
== elfcpp::EF_ARM_EABI_VER4
&& v2
== elfcpp::EF_ARM_EABI_VER5
)
6606 || (v1
== elfcpp::EF_ARM_EABI_VER5
&& v2
== elfcpp::EF_ARM_EABI_VER4
))
6612 // Combine FLAGS from an input object called NAME and the processor-specific
6613 // flags in the ELF header of the output. Much of this is adapted from the
6614 // processor-specific flags merging code in elf32_arm_merge_private_bfd_data
6615 // in bfd/elf32-arm.c.
6617 template<bool big_endian
>
6619 Target_arm
<big_endian
>::merge_processor_specific_flags(
6620 const std::string
& name
,
6621 elfcpp::Elf_Word flags
)
6623 if (this->are_processor_specific_flags_set())
6625 elfcpp::Elf_Word out_flags
= this->processor_specific_flags();
6627 // Nothing to merge if flags equal to those in output.
6628 if (flags
== out_flags
)
6631 // Complain about various flag mismatches.
6632 elfcpp::Elf_Word version1
= elfcpp::arm_eabi_version(flags
);
6633 elfcpp::Elf_Word version2
= elfcpp::arm_eabi_version(out_flags
);
6634 if (!this->are_eabi_versions_compatible(version1
, version2
))
6635 gold_error(_("Source object %s has EABI version %d but output has "
6636 "EABI version %d."),
6638 (flags
& elfcpp::EF_ARM_EABIMASK
) >> 24,
6639 (out_flags
& elfcpp::EF_ARM_EABIMASK
) >> 24);
6643 // If the input is the default architecture and had the default
6644 // flags then do not bother setting the flags for the output
6645 // architecture, instead allow future merges to do this. If no
6646 // future merges ever set these flags then they will retain their
6647 // uninitialised values, which surprise surprise, correspond
6648 // to the default values.
6652 // This is the first time, just copy the flags.
6653 // We only copy the EABI version for now.
6654 this->set_processor_specific_flags(flags
& elfcpp::EF_ARM_EABIMASK
);
6658 // Adjust ELF file header.
6659 template<bool big_endian
>
6661 Target_arm
<big_endian
>::do_adjust_elf_header(
6662 unsigned char* view
,
6665 gold_assert(len
== elfcpp::Elf_sizes
<32>::ehdr_size
);
6667 elfcpp::Ehdr
<32, big_endian
> ehdr(view
);
6668 unsigned char e_ident
[elfcpp::EI_NIDENT
];
6669 memcpy(e_ident
, ehdr
.get_e_ident(), elfcpp::EI_NIDENT
);
6671 if (elfcpp::arm_eabi_version(this->processor_specific_flags())
6672 == elfcpp::EF_ARM_EABI_UNKNOWN
)
6673 e_ident
[elfcpp::EI_OSABI
] = elfcpp::ELFOSABI_ARM
;
6675 e_ident
[elfcpp::EI_OSABI
] = 0;
6676 e_ident
[elfcpp::EI_ABIVERSION
] = 0;
6678 // FIXME: Do EF_ARM_BE8 adjustment.
6680 elfcpp::Ehdr_write
<32, big_endian
> oehdr(view
);
6681 oehdr
.put_e_ident(e_ident
);
6684 // do_make_elf_object to override the same function in the base class.
6685 // We need to use a target-specific sub-class of Sized_relobj<32, big_endian>
6686 // to store ARM specific information. Hence we need to have our own
6687 // ELF object creation.
6689 template<bool big_endian
>
6691 Target_arm
<big_endian
>::do_make_elf_object(
6692 const std::string
& name
,
6693 Input_file
* input_file
,
6694 off_t offset
, const elfcpp::Ehdr
<32, big_endian
>& ehdr
)
6696 int et
= ehdr
.get_e_type();
6697 if (et
== elfcpp::ET_REL
)
6699 Arm_relobj
<big_endian
>* obj
=
6700 new Arm_relobj
<big_endian
>(name
, input_file
, offset
, ehdr
);
6704 else if (et
== elfcpp::ET_DYN
)
6706 Sized_dynobj
<32, big_endian
>* obj
=
6707 new Arm_dynobj
<big_endian
>(name
, input_file
, offset
, ehdr
);
6713 gold_error(_("%s: unsupported ELF file type %d"),
6719 // Read the architecture from the Tag_also_compatible_with attribute, if any.
6720 // Returns -1 if no architecture could be read.
6721 // This is adapted from get_secondary_compatible_arch() in bfd/elf32-arm.c.
6723 template<bool big_endian
>
6725 Target_arm
<big_endian
>::get_secondary_compatible_arch(
6726 const Attributes_section_data
* pasd
)
6728 const Object_attribute
*known_attributes
=
6729 pasd
->known_attributes(Object_attribute::OBJ_ATTR_PROC
);
6731 // Note: the tag and its argument below are uleb128 values, though
6732 // currently-defined values fit in one byte for each.
6733 const std::string
& sv
=
6734 known_attributes
[elfcpp::Tag_also_compatible_with
].string_value();
6736 && sv
.data()[0] == elfcpp::Tag_CPU_arch
6737 && (sv
.data()[1] & 128) != 128)
6738 return sv
.data()[1];
6740 // This tag is "safely ignorable", so don't complain if it looks funny.
6744 // Set, or unset, the architecture of the Tag_also_compatible_with attribute.
6745 // The tag is removed if ARCH is -1.
6746 // This is adapted from set_secondary_compatible_arch() in bfd/elf32-arm.c.
6748 template<bool big_endian
>
6750 Target_arm
<big_endian
>::set_secondary_compatible_arch(
6751 Attributes_section_data
* pasd
,
6754 Object_attribute
*known_attributes
=
6755 pasd
->known_attributes(Object_attribute::OBJ_ATTR_PROC
);
6759 known_attributes
[elfcpp::Tag_also_compatible_with
].set_string_value("");
6763 // Note: the tag and its argument below are uleb128 values, though
6764 // currently-defined values fit in one byte for each.
6766 sv
[0] = elfcpp::Tag_CPU_arch
;
6767 gold_assert(arch
!= 0);
6771 known_attributes
[elfcpp::Tag_also_compatible_with
].set_string_value(sv
);
6774 // Combine two values for Tag_CPU_arch, taking secondary compatibility tags
6776 // This is adapted from tag_cpu_arch_combine() in bfd/elf32-arm.c.
6778 template<bool big_endian
>
6780 Target_arm
<big_endian
>::tag_cpu_arch_combine(
6783 int* secondary_compat_out
,
6785 int secondary_compat
)
6787 #define T(X) elfcpp::TAG_CPU_ARCH_##X
6788 static const int v6t2
[] =
6800 static const int v6k
[] =
6813 static const int v7
[] =
6827 static const int v6_m
[] =
6842 static const int v6s_m
[] =
6858 static const int v7e_m
[] =
6875 static const int v4t_plus_v6_m
[] =
6891 T(V4T_PLUS_V6_M
) // V4T plus V6_M.
6893 static const int *comb
[] =
6901 // Pseudo-architecture.
6905 // Check we've not got a higher architecture than we know about.
6907 if (oldtag
>= elfcpp::MAX_TAG_CPU_ARCH
|| newtag
>= elfcpp::MAX_TAG_CPU_ARCH
)
6909 gold_error(_("%s: unknown CPU architecture"), name
);
6913 // Override old tag if we have a Tag_also_compatible_with on the output.
6915 if ((oldtag
== T(V6_M
) && *secondary_compat_out
== T(V4T
))
6916 || (oldtag
== T(V4T
) && *secondary_compat_out
== T(V6_M
)))
6917 oldtag
= T(V4T_PLUS_V6_M
);
6919 // And override the new tag if we have a Tag_also_compatible_with on the
6922 if ((newtag
== T(V6_M
) && secondary_compat
== T(V4T
))
6923 || (newtag
== T(V4T
) && secondary_compat
== T(V6_M
)))
6924 newtag
= T(V4T_PLUS_V6_M
);
6926 // Architectures before V6KZ add features monotonically.
6927 int tagh
= std::max(oldtag
, newtag
);
6928 if (tagh
<= elfcpp::TAG_CPU_ARCH_V6KZ
)
6931 int tagl
= std::min(oldtag
, newtag
);
6932 int result
= comb
[tagh
- T(V6T2
)][tagl
];
6934 // Use Tag_CPU_arch == V4T and Tag_also_compatible_with (Tag_CPU_arch V6_M)
6935 // as the canonical version.
6936 if (result
== T(V4T_PLUS_V6_M
))
6939 *secondary_compat_out
= T(V6_M
);
6942 *secondary_compat_out
= -1;
6946 gold_error(_("%s: conflicting CPU architectures %d/%d"),
6947 name
, oldtag
, newtag
);
6955 // Helper to print AEABI enum tag value.
6957 template<bool big_endian
>
6959 Target_arm
<big_endian
>::aeabi_enum_name(unsigned int value
)
6961 static const char *aeabi_enum_names
[] =
6962 { "", "variable-size", "32-bit", "" };
6963 const size_t aeabi_enum_names_size
=
6964 sizeof(aeabi_enum_names
) / sizeof(aeabi_enum_names
[0]);
6966 if (value
< aeabi_enum_names_size
)
6967 return std::string(aeabi_enum_names
[value
]);
6971 sprintf(buffer
, "<unknown value %u>", value
);
6972 return std::string(buffer
);
6976 // Return the string value to store in TAG_CPU_name.
6978 template<bool big_endian
>
6980 Target_arm
<big_endian
>::tag_cpu_name_value(unsigned int value
)
6982 static const char *name_table
[] = {
6983 // These aren't real CPU names, but we can't guess
6984 // that from the architecture version alone.
7000 const size_t name_table_size
= sizeof(name_table
) / sizeof(name_table
[0]);
7002 if (value
< name_table_size
)
7003 return std::string(name_table
[value
]);
7007 sprintf(buffer
, "<unknown CPU value %u>", value
);
7008 return std::string(buffer
);
7012 // Merge object attributes from input file called NAME with those of the
7013 // output. The input object attributes are in the object pointed by PASD.
7015 template<bool big_endian
>
7017 Target_arm
<big_endian
>::merge_object_attributes(
7019 const Attributes_section_data
* pasd
)
7021 // Return if there is no attributes section data.
7025 // If output has no object attributes, just copy.
7026 if (this->attributes_section_data_
== NULL
)
7028 this->attributes_section_data_
= new Attributes_section_data(*pasd
);
7032 const int vendor
= Object_attribute::OBJ_ATTR_PROC
;
7033 const Object_attribute
* in_attr
= pasd
->known_attributes(vendor
);
7034 Object_attribute
* out_attr
=
7035 this->attributes_section_data_
->known_attributes(vendor
);
7037 // This needs to happen before Tag_ABI_FP_number_model is merged. */
7038 if (in_attr
[elfcpp::Tag_ABI_VFP_args
].int_value()
7039 != out_attr
[elfcpp::Tag_ABI_VFP_args
].int_value())
7041 // Ignore mismatches if the object doesn't use floating point. */
7042 if (out_attr
[elfcpp::Tag_ABI_FP_number_model
].int_value() == 0)
7043 out_attr
[elfcpp::Tag_ABI_VFP_args
].set_int_value(
7044 in_attr
[elfcpp::Tag_ABI_VFP_args
].int_value());
7045 else if (in_attr
[elfcpp::Tag_ABI_FP_number_model
].int_value() != 0)
7046 gold_error(_("%s uses VFP register arguments, output does not"),
7050 for (int i
= 4; i
< Vendor_object_attributes::NUM_KNOWN_ATTRIBUTES
; ++i
)
7052 // Merge this attribute with existing attributes.
7055 case elfcpp::Tag_CPU_raw_name
:
7056 case elfcpp::Tag_CPU_name
:
7057 // These are merged after Tag_CPU_arch.
7060 case elfcpp::Tag_ABI_optimization_goals
:
7061 case elfcpp::Tag_ABI_FP_optimization_goals
:
7062 // Use the first value seen.
7065 case elfcpp::Tag_CPU_arch
:
7067 unsigned int saved_out_attr
= out_attr
->int_value();
7068 // Merge Tag_CPU_arch and Tag_also_compatible_with.
7069 int secondary_compat
=
7070 this->get_secondary_compatible_arch(pasd
);
7071 int secondary_compat_out
=
7072 this->get_secondary_compatible_arch(
7073 this->attributes_section_data_
);
7074 out_attr
[i
].set_int_value(
7075 tag_cpu_arch_combine(name
, out_attr
[i
].int_value(),
7076 &secondary_compat_out
,
7077 in_attr
[i
].int_value(),
7079 this->set_secondary_compatible_arch(this->attributes_section_data_
,
7080 secondary_compat_out
);
7082 // Merge Tag_CPU_name and Tag_CPU_raw_name.
7083 if (out_attr
[i
].int_value() == saved_out_attr
)
7084 ; // Leave the names alone.
7085 else if (out_attr
[i
].int_value() == in_attr
[i
].int_value())
7087 // The output architecture has been changed to match the
7088 // input architecture. Use the input names.
7089 out_attr
[elfcpp::Tag_CPU_name
].set_string_value(
7090 in_attr
[elfcpp::Tag_CPU_name
].string_value());
7091 out_attr
[elfcpp::Tag_CPU_raw_name
].set_string_value(
7092 in_attr
[elfcpp::Tag_CPU_raw_name
].string_value());
7096 out_attr
[elfcpp::Tag_CPU_name
].set_string_value("");
7097 out_attr
[elfcpp::Tag_CPU_raw_name
].set_string_value("");
7100 // If we still don't have a value for Tag_CPU_name,
7101 // make one up now. Tag_CPU_raw_name remains blank.
7102 if (out_attr
[elfcpp::Tag_CPU_name
].string_value() == "")
7104 const std::string cpu_name
=
7105 this->tag_cpu_name_value(out_attr
[i
].int_value());
7106 // FIXME: If we see an unknown CPU, this will be set
7107 // to "<unknown CPU n>", where n is the attribute value.
7108 // This is different from BFD, which leaves the name alone.
7109 out_attr
[elfcpp::Tag_CPU_name
].set_string_value(cpu_name
);
7114 case elfcpp::Tag_ARM_ISA_use
:
7115 case elfcpp::Tag_THUMB_ISA_use
:
7116 case elfcpp::Tag_WMMX_arch
:
7117 case elfcpp::Tag_Advanced_SIMD_arch
:
7118 // ??? Do Advanced_SIMD (NEON) and WMMX conflict?
7119 case elfcpp::Tag_ABI_FP_rounding
:
7120 case elfcpp::Tag_ABI_FP_exceptions
:
7121 case elfcpp::Tag_ABI_FP_user_exceptions
:
7122 case elfcpp::Tag_ABI_FP_number_model
:
7123 case elfcpp::Tag_VFP_HP_extension
:
7124 case elfcpp::Tag_CPU_unaligned_access
:
7125 case elfcpp::Tag_T2EE_use
:
7126 case elfcpp::Tag_Virtualization_use
:
7127 case elfcpp::Tag_MPextension_use
:
7128 // Use the largest value specified.
7129 if (in_attr
[i
].int_value() > out_attr
[i
].int_value())
7130 out_attr
[i
].set_int_value(in_attr
[i
].int_value());
7133 case elfcpp::Tag_ABI_align8_preserved
:
7134 case elfcpp::Tag_ABI_PCS_RO_data
:
7135 // Use the smallest value specified.
7136 if (in_attr
[i
].int_value() < out_attr
[i
].int_value())
7137 out_attr
[i
].set_int_value(in_attr
[i
].int_value());
7140 case elfcpp::Tag_ABI_align8_needed
:
7141 if ((in_attr
[i
].int_value() > 0 || out_attr
[i
].int_value() > 0)
7142 && (in_attr
[elfcpp::Tag_ABI_align8_preserved
].int_value() == 0
7143 || (out_attr
[elfcpp::Tag_ABI_align8_preserved
].int_value()
7146 // This error message should be enabled once all non-conformant
7147 // binaries in the toolchain have had the attributes set
7149 // gold_error(_("output 8-byte data alignment conflicts with %s"),
7153 case elfcpp::Tag_ABI_FP_denormal
:
7154 case elfcpp::Tag_ABI_PCS_GOT_use
:
7156 // These tags have 0 = don't care, 1 = strong requirement,
7157 // 2 = weak requirement.
7158 static const int order_021
[3] = {0, 2, 1};
7160 // Use the "greatest" from the sequence 0, 2, 1, or the largest
7161 // value if greater than 2 (for future-proofing).
7162 if ((in_attr
[i
].int_value() > 2
7163 && in_attr
[i
].int_value() > out_attr
[i
].int_value())
7164 || (in_attr
[i
].int_value() <= 2
7165 && out_attr
[i
].int_value() <= 2
7166 && (order_021
[in_attr
[i
].int_value()]
7167 > order_021
[out_attr
[i
].int_value()])))
7168 out_attr
[i
].set_int_value(in_attr
[i
].int_value());
7172 case elfcpp::Tag_CPU_arch_profile
:
7173 if (out_attr
[i
].int_value() != in_attr
[i
].int_value())
7175 // 0 will merge with anything.
7176 // 'A' and 'S' merge to 'A'.
7177 // 'R' and 'S' merge to 'R'.
7178 // 'M' and 'A|R|S' is an error.
7179 if (out_attr
[i
].int_value() == 0
7180 || (out_attr
[i
].int_value() == 'S'
7181 && (in_attr
[i
].int_value() == 'A'
7182 || in_attr
[i
].int_value() == 'R')))
7183 out_attr
[i
].set_int_value(in_attr
[i
].int_value());
7184 else if (in_attr
[i
].int_value() == 0
7185 || (in_attr
[i
].int_value() == 'S'
7186 && (out_attr
[i
].int_value() == 'A'
7187 || out_attr
[i
].int_value() == 'R')))
7192 (_("conflicting architecture profiles %c/%c"),
7193 in_attr
[i
].int_value() ? in_attr
[i
].int_value() : '0',
7194 out_attr
[i
].int_value() ? out_attr
[i
].int_value() : '0');
7198 case elfcpp::Tag_VFP_arch
:
7215 // Values greater than 6 aren't defined, so just pick the
7217 if (in_attr
[i
].int_value() > 6
7218 && in_attr
[i
].int_value() > out_attr
[i
].int_value())
7220 *out_attr
= *in_attr
;
7223 // The output uses the superset of input features
7224 // (ISA version) and registers.
7225 int ver
= std::max(vfp_versions
[in_attr
[i
].int_value()].ver
,
7226 vfp_versions
[out_attr
[i
].int_value()].ver
);
7227 int regs
= std::max(vfp_versions
[in_attr
[i
].int_value()].regs
,
7228 vfp_versions
[out_attr
[i
].int_value()].regs
);
7229 // This assumes all possible supersets are also a valid
7232 for (newval
= 6; newval
> 0; newval
--)
7234 if (regs
== vfp_versions
[newval
].regs
7235 && ver
== vfp_versions
[newval
].ver
)
7238 out_attr
[i
].set_int_value(newval
);
7241 case elfcpp::Tag_PCS_config
:
7242 if (out_attr
[i
].int_value() == 0)
7243 out_attr
[i
].set_int_value(in_attr
[i
].int_value());
7244 else if (in_attr
[i
].int_value() != 0 && out_attr
[i
].int_value() != 0)
7246 // It's sometimes ok to mix different configs, so this is only
7248 gold_warning(_("%s: conflicting platform configuration"), name
);
7251 case elfcpp::Tag_ABI_PCS_R9_use
:
7252 if (in_attr
[i
].int_value() != out_attr
[i
].int_value()
7253 && out_attr
[i
].int_value() != elfcpp::AEABI_R9_unused
7254 && in_attr
[i
].int_value() != elfcpp::AEABI_R9_unused
)
7256 gold_error(_("%s: conflicting use of R9"), name
);
7258 if (out_attr
[i
].int_value() == elfcpp::AEABI_R9_unused
)
7259 out_attr
[i
].set_int_value(in_attr
[i
].int_value());
7261 case elfcpp::Tag_ABI_PCS_RW_data
:
7262 if (in_attr
[i
].int_value() == elfcpp::AEABI_PCS_RW_data_SBrel
7263 && (in_attr
[elfcpp::Tag_ABI_PCS_R9_use
].int_value()
7264 != elfcpp::AEABI_R9_SB
)
7265 && (out_attr
[elfcpp::Tag_ABI_PCS_R9_use
].int_value()
7266 != elfcpp::AEABI_R9_unused
))
7268 gold_error(_("%s: SB relative addressing conflicts with use "
7272 // Use the smallest value specified.
7273 if (in_attr
[i
].int_value() < out_attr
[i
].int_value())
7274 out_attr
[i
].set_int_value(in_attr
[i
].int_value());
7276 case elfcpp::Tag_ABI_PCS_wchar_t
:
7277 // FIXME: Make it possible to turn off this warning.
7278 if (out_attr
[i
].int_value()
7279 && in_attr
[i
].int_value()
7280 && out_attr
[i
].int_value() != in_attr
[i
].int_value())
7282 gold_warning(_("%s uses %u-byte wchar_t yet the output is to "
7283 "use %u-byte wchar_t; use of wchar_t values "
7284 "across objects may fail"),
7285 name
, in_attr
[i
].int_value(),
7286 out_attr
[i
].int_value());
7288 else if (in_attr
[i
].int_value() && !out_attr
[i
].int_value())
7289 out_attr
[i
].set_int_value(in_attr
[i
].int_value());
7291 case elfcpp::Tag_ABI_enum_size
:
7292 if (in_attr
[i
].int_value() != elfcpp::AEABI_enum_unused
)
7294 if (out_attr
[i
].int_value() == elfcpp::AEABI_enum_unused
7295 || out_attr
[i
].int_value() == elfcpp::AEABI_enum_forced_wide
)
7297 // The existing object is compatible with anything.
7298 // Use whatever requirements the new object has.
7299 out_attr
[i
].set_int_value(in_attr
[i
].int_value());
7301 // FIXME: Make it possible to turn off this warning.
7302 else if (in_attr
[i
].int_value() != elfcpp::AEABI_enum_forced_wide
7303 && out_attr
[i
].int_value() != in_attr
[i
].int_value())
7305 unsigned int in_value
= in_attr
[i
].int_value();
7306 unsigned int out_value
= out_attr
[i
].int_value();
7307 gold_warning(_("%s uses %s enums yet the output is to use "
7308 "%s enums; use of enum values across objects "
7311 this->aeabi_enum_name(in_value
).c_str(),
7312 this->aeabi_enum_name(out_value
).c_str());
7316 case elfcpp::Tag_ABI_VFP_args
:
7319 case elfcpp::Tag_ABI_WMMX_args
:
7320 if (in_attr
[i
].int_value() != out_attr
[i
].int_value())
7322 gold_error(_("%s uses iWMMXt register arguments, output does "
7327 case Object_attribute::Tag_compatibility
:
7328 // Merged in target-independent code.
7330 case elfcpp::Tag_ABI_HardFP_use
:
7331 // 1 (SP) and 2 (DP) conflict, so combine to 3 (SP & DP).
7332 if ((in_attr
[i
].int_value() == 1 && out_attr
[i
].int_value() == 2)
7333 || (in_attr
[i
].int_value() == 2 && out_attr
[i
].int_value() == 1))
7334 out_attr
[i
].set_int_value(3);
7335 else if (in_attr
[i
].int_value() > out_attr
[i
].int_value())
7336 out_attr
[i
].set_int_value(in_attr
[i
].int_value());
7338 case elfcpp::Tag_ABI_FP_16bit_format
:
7339 if (in_attr
[i
].int_value() != 0 && out_attr
[i
].int_value() != 0)
7341 if (in_attr
[i
].int_value() != out_attr
[i
].int_value())
7342 gold_error(_("fp16 format mismatch between %s and output"),
7345 if (in_attr
[i
].int_value() != 0)
7346 out_attr
[i
].set_int_value(in_attr
[i
].int_value());
7349 case elfcpp::Tag_nodefaults
:
7350 // This tag is set if it exists, but the value is unused (and is
7351 // typically zero). We don't actually need to do anything here -
7352 // the merge happens automatically when the type flags are merged
7355 case elfcpp::Tag_also_compatible_with
:
7356 // Already done in Tag_CPU_arch.
7358 case elfcpp::Tag_conformance
:
7359 // Keep the attribute if it matches. Throw it away otherwise.
7360 // No attribute means no claim to conform.
7361 if (in_attr
[i
].string_value() != out_attr
[i
].string_value())
7362 out_attr
[i
].set_string_value("");
7367 const char* err_object
= NULL
;
7369 // The "known_obj_attributes" table does contain some undefined
7370 // attributes. Ensure that there are unused.
7371 if (out_attr
[i
].int_value() != 0
7372 || out_attr
[i
].string_value() != "")
7373 err_object
= "output";
7374 else if (in_attr
[i
].int_value() != 0
7375 || in_attr
[i
].string_value() != "")
7378 if (err_object
!= NULL
)
7380 // Attribute numbers >=64 (mod 128) can be safely ignored.
7382 gold_error(_("%s: unknown mandatory EABI object attribute "
7386 gold_warning(_("%s: unknown EABI object attribute %d"),
7390 // Only pass on attributes that match in both inputs.
7391 if (!in_attr
[i
].matches(out_attr
[i
]))
7393 out_attr
[i
].set_int_value(0);
7394 out_attr
[i
].set_string_value("");
7399 // If out_attr was copied from in_attr then it won't have a type yet.
7400 if (in_attr
[i
].type() && !out_attr
[i
].type())
7401 out_attr
[i
].set_type(in_attr
[i
].type());
7404 // Merge Tag_compatibility attributes and any common GNU ones.
7405 this->attributes_section_data_
->merge(name
, pasd
);
7407 // Check for any attributes not known on ARM.
7408 typedef Vendor_object_attributes::Other_attributes Other_attributes
;
7409 const Other_attributes
* in_other_attributes
= pasd
->other_attributes(vendor
);
7410 Other_attributes::const_iterator in_iter
= in_other_attributes
->begin();
7411 Other_attributes
* out_other_attributes
=
7412 this->attributes_section_data_
->other_attributes(vendor
);
7413 Other_attributes::iterator out_iter
= out_other_attributes
->begin();
7415 while (in_iter
!= in_other_attributes
->end()
7416 || out_iter
!= out_other_attributes
->end())
7418 const char* err_object
= NULL
;
7421 // The tags for each list are in numerical order.
7422 // If the tags are equal, then merge.
7423 if (out_iter
!= out_other_attributes
->end()
7424 && (in_iter
== in_other_attributes
->end()
7425 || in_iter
->first
> out_iter
->first
))
7427 // This attribute only exists in output. We can't merge, and we
7428 // don't know what the tag means, so delete it.
7429 err_object
= "output";
7430 err_tag
= out_iter
->first
;
7431 int saved_tag
= out_iter
->first
;
7432 delete out_iter
->second
;
7433 out_other_attributes
->erase(out_iter
);
7434 out_iter
= out_other_attributes
->upper_bound(saved_tag
);
7436 else if (in_iter
!= in_other_attributes
->end()
7437 && (out_iter
!= out_other_attributes
->end()
7438 || in_iter
->first
< out_iter
->first
))
7440 // This attribute only exists in input. We can't merge, and we
7441 // don't know what the tag means, so ignore it.
7443 err_tag
= in_iter
->first
;
7446 else // The tags are equal.
7448 // As present, all attributes in the list are unknown, and
7449 // therefore can't be merged meaningfully.
7450 err_object
= "output";
7451 err_tag
= out_iter
->first
;
7453 // Only pass on attributes that match in both inputs.
7454 if (!in_iter
->second
->matches(*(out_iter
->second
)))
7456 // No match. Delete the attribute.
7457 int saved_tag
= out_iter
->first
;
7458 delete out_iter
->second
;
7459 out_other_attributes
->erase(out_iter
);
7460 out_iter
= out_other_attributes
->upper_bound(saved_tag
);
7464 // Matched. Keep the attribute and move to the next.
7472 // Attribute numbers >=64 (mod 128) can be safely ignored. */
7473 if ((err_tag
& 127) < 64)
7475 gold_error(_("%s: unknown mandatory EABI object attribute %d"),
7476 err_object
, err_tag
);
7480 gold_warning(_("%s: unknown EABI object attribute %d"),
7481 err_object
, err_tag
);
7487 // Return whether a relocation type used the LSB to distinguish THUMB
7489 template<bool big_endian
>
7491 Target_arm
<big_endian
>::reloc_uses_thumb_bit(unsigned int r_type
)
7495 case elfcpp::R_ARM_PC24
:
7496 case elfcpp::R_ARM_ABS32
:
7497 case elfcpp::R_ARM_REL32
:
7498 case elfcpp::R_ARM_SBREL32
:
7499 case elfcpp::R_ARM_THM_CALL
:
7500 case elfcpp::R_ARM_GLOB_DAT
:
7501 case elfcpp::R_ARM_JUMP_SLOT
:
7502 case elfcpp::R_ARM_GOTOFF32
:
7503 case elfcpp::R_ARM_PLT32
:
7504 case elfcpp::R_ARM_CALL
:
7505 case elfcpp::R_ARM_JUMP24
:
7506 case elfcpp::R_ARM_THM_JUMP24
:
7507 case elfcpp::R_ARM_SBREL31
:
7508 case elfcpp::R_ARM_PREL31
:
7509 case elfcpp::R_ARM_MOVW_ABS_NC
:
7510 case elfcpp::R_ARM_MOVW_PREL_NC
:
7511 case elfcpp::R_ARM_THM_MOVW_ABS_NC
:
7512 case elfcpp::R_ARM_THM_MOVW_PREL_NC
:
7513 case elfcpp::R_ARM_THM_JUMP19
:
7514 case elfcpp::R_ARM_THM_ALU_PREL_11_0
:
7515 case elfcpp::R_ARM_ALU_PC_G0_NC
:
7516 case elfcpp::R_ARM_ALU_PC_G0
:
7517 case elfcpp::R_ARM_ALU_PC_G1_NC
:
7518 case elfcpp::R_ARM_ALU_PC_G1
:
7519 case elfcpp::R_ARM_ALU_PC_G2
:
7520 case elfcpp::R_ARM_ALU_SB_G0_NC
:
7521 case elfcpp::R_ARM_ALU_SB_G0
:
7522 case elfcpp::R_ARM_ALU_SB_G1_NC
:
7523 case elfcpp::R_ARM_ALU_SB_G1
:
7524 case elfcpp::R_ARM_ALU_SB_G2
:
7525 case elfcpp::R_ARM_MOVW_BREL_NC
:
7526 case elfcpp::R_ARM_MOVW_BREL
:
7527 case elfcpp::R_ARM_THM_MOVW_BREL_NC
:
7528 case elfcpp::R_ARM_THM_MOVW_BREL
:
7535 // Stub-generation methods for Target_arm.
7537 // Make a new Arm_input_section object.
7539 template<bool big_endian
>
7540 Arm_input_section
<big_endian
>*
7541 Target_arm
<big_endian
>::new_arm_input_section(
7545 Section_id
sid(relobj
, shndx
);
7547 Arm_input_section
<big_endian
>* arm_input_section
=
7548 new Arm_input_section
<big_endian
>(relobj
, shndx
);
7549 arm_input_section
->init();
7551 // Register new Arm_input_section in map for look-up.
7552 std::pair
<typename
Arm_input_section_map::iterator
, bool> ins
=
7553 this->arm_input_section_map_
.insert(std::make_pair(sid
, arm_input_section
));
7555 // Make sure that it we have not created another Arm_input_section
7556 // for this input section already.
7557 gold_assert(ins
.second
);
7559 return arm_input_section
;
7562 // Find the Arm_input_section object corresponding to the SHNDX-th input
7563 // section of RELOBJ.
7565 template<bool big_endian
>
7566 Arm_input_section
<big_endian
>*
7567 Target_arm
<big_endian
>::find_arm_input_section(
7569 unsigned int shndx
) const
7571 Section_id
sid(relobj
, shndx
);
7572 typename
Arm_input_section_map::const_iterator p
=
7573 this->arm_input_section_map_
.find(sid
);
7574 return (p
!= this->arm_input_section_map_
.end()) ? p
->second
: NULL
;
7577 // Make a new stub table.
7579 template<bool big_endian
>
7580 Stub_table
<big_endian
>*
7581 Target_arm
<big_endian
>::new_stub_table(Arm_input_section
<big_endian
>* owner
)
7583 Stub_table
<big_endian
>* stub_table
=
7584 new Stub_table
<big_endian
>(owner
);
7585 this->stub_tables_
.push_back(stub_table
);
7587 stub_table
->set_address(owner
->address() + owner
->data_size());
7588 stub_table
->set_file_offset(owner
->offset() + owner
->data_size());
7589 stub_table
->finalize_data_size();
7594 // Scan a relocation for stub generation.
7596 template<bool big_endian
>
7598 Target_arm
<big_endian
>::scan_reloc_for_stub(
7599 const Relocate_info
<32, big_endian
>* relinfo
,
7600 unsigned int r_type
,
7601 const Sized_symbol
<32>* gsym
,
7603 const Symbol_value
<32>* psymval
,
7604 elfcpp::Elf_types
<32>::Elf_Swxword addend
,
7605 Arm_address address
)
7607 typedef typename Target_arm
<big_endian
>::Relocate Relocate
;
7609 const Arm_relobj
<big_endian
>* arm_relobj
=
7610 Arm_relobj
<big_endian
>::as_arm_relobj(relinfo
->object
);
7612 if (r_type
== elfcpp::R_ARM_V4BX
)
7614 const uint32_t reg
= (addend
& 0xf);
7615 if (this->fix_v4bx() == 2 && reg
< 0xf)
7617 // Try looking up an existing stub from a stub table.
7618 Stub_table
<big_endian
>* stub_table
=
7619 arm_relobj
->stub_table(relinfo
->data_shndx
);
7620 gold_assert(stub_table
!= NULL
);
7622 if (stub_table
->find_arm_v4bx_stub(reg
) == NULL
)
7624 // create a new stub and add it to stub table.
7625 Arm_v4bx_stub
* stub
=
7626 this->stub_factory().make_arm_v4bx_stub(reg
);
7627 gold_assert(stub
!= NULL
);
7628 stub_table
->add_arm_v4bx_stub(stub
);
7635 bool target_is_thumb
;
7636 Symbol_value
<32> symval
;
7639 // This is a global symbol. Determine if we use PLT and if the
7640 // final target is THUMB.
7641 if (gsym
->use_plt_offset(Relocate::reloc_is_non_pic(r_type
)))
7643 // This uses a PLT, change the symbol value.
7644 symval
.set_output_value(this->plt_section()->address()
7645 + gsym
->plt_offset());
7647 target_is_thumb
= false;
7649 else if (gsym
->is_undefined())
7650 // There is no need to generate a stub symbol is undefined.
7655 ((gsym
->type() == elfcpp::STT_ARM_TFUNC
)
7656 || (gsym
->type() == elfcpp::STT_FUNC
7657 && !gsym
->is_undefined()
7658 && ((psymval
->value(arm_relobj
, 0) & 1) != 0)));
7663 // This is a local symbol. Determine if the final target is THUMB.
7664 target_is_thumb
= arm_relobj
->local_symbol_is_thumb_function(r_sym
);
7667 // Strip LSB if this points to a THUMB target.
7669 && Target_arm
<big_endian
>::reloc_uses_thumb_bit(r_type
)
7670 && ((psymval
->value(arm_relobj
, 0) & 1) != 0))
7672 Arm_address stripped_value
=
7673 psymval
->value(arm_relobj
, 0) & ~static_cast<Arm_address
>(1);
7674 symval
.set_output_value(stripped_value
);
7678 // Get the symbol value.
7679 Symbol_value
<32>::Value value
= psymval
->value(arm_relobj
, 0);
7681 // Owing to pipelining, the PC relative branches below actually skip
7682 // two instructions when the branch offset is 0.
7683 Arm_address destination
;
7686 case elfcpp::R_ARM_CALL
:
7687 case elfcpp::R_ARM_JUMP24
:
7688 case elfcpp::R_ARM_PLT32
:
7690 destination
= value
+ addend
+ 8;
7692 case elfcpp::R_ARM_THM_CALL
:
7693 case elfcpp::R_ARM_THM_XPC22
:
7694 case elfcpp::R_ARM_THM_JUMP24
:
7695 case elfcpp::R_ARM_THM_JUMP19
:
7697 destination
= value
+ addend
+ 4;
7703 Reloc_stub
* stub
= NULL
;
7704 Stub_type stub_type
=
7705 Reloc_stub::stub_type_for_reloc(r_type
, address
, destination
,
7707 if (stub_type
!= arm_stub_none
)
7709 // Try looking up an existing stub from a stub table.
7710 Stub_table
<big_endian
>* stub_table
=
7711 arm_relobj
->stub_table(relinfo
->data_shndx
);
7712 gold_assert(stub_table
!= NULL
);
7714 // Locate stub by destination.
7715 Reloc_stub::Key
stub_key(stub_type
, gsym
, arm_relobj
, r_sym
, addend
);
7717 // Create a stub if there is not one already
7718 stub
= stub_table
->find_reloc_stub(stub_key
);
7721 // create a new stub and add it to stub table.
7722 stub
= this->stub_factory().make_reloc_stub(stub_type
);
7723 stub_table
->add_reloc_stub(stub
, stub_key
);
7726 // Record the destination address.
7727 stub
->set_destination_address(destination
7728 | (target_is_thumb
? 1 : 0));
7731 // For Cortex-A8, we need to record a relocation at 4K page boundary.
7732 if (this->fix_cortex_a8_
7733 && (r_type
== elfcpp::R_ARM_THM_JUMP24
7734 || r_type
== elfcpp::R_ARM_THM_JUMP19
7735 || r_type
== elfcpp::R_ARM_THM_CALL
7736 || r_type
== elfcpp::R_ARM_THM_XPC22
)
7737 && (address
& 0xfffU
) == 0xffeU
)
7739 // Found a candidate. Note we haven't checked the destination is
7740 // within 4K here: if we do so (and don't create a record) we can't
7741 // tell that a branch should have been relocated when scanning later.
7742 this->cortex_a8_relocs_info_
[address
] =
7743 new Cortex_a8_reloc(stub
, r_type
,
7744 destination
| (target_is_thumb
? 1 : 0));
7748 // This function scans a relocation sections for stub generation.
7749 // The template parameter Relocate must be a class type which provides
7750 // a single function, relocate(), which implements the machine
7751 // specific part of a relocation.
7753 // BIG_ENDIAN is the endianness of the data. SH_TYPE is the section type:
7754 // SHT_REL or SHT_RELA.
7756 // PRELOCS points to the relocation data. RELOC_COUNT is the number
7757 // of relocs. OUTPUT_SECTION is the output section.
7758 // NEEDS_SPECIAL_OFFSET_HANDLING is true if input offsets need to be
7759 // mapped to output offsets.
7761 // VIEW is the section data, VIEW_ADDRESS is its memory address, and
7762 // VIEW_SIZE is the size. These refer to the input section, unless
7763 // NEEDS_SPECIAL_OFFSET_HANDLING is true, in which case they refer to
7764 // the output section.
7766 template<bool big_endian
>
7767 template<int sh_type
>
7769 Target_arm
<big_endian
>::scan_reloc_section_for_stubs(
7770 const Relocate_info
<32, big_endian
>* relinfo
,
7771 const unsigned char* prelocs
,
7773 Output_section
* output_section
,
7774 bool needs_special_offset_handling
,
7775 const unsigned char* view
,
7776 elfcpp::Elf_types
<32>::Elf_Addr view_address
,
7779 typedef typename Reloc_types
<sh_type
, 32, big_endian
>::Reloc Reltype
;
7780 const int reloc_size
=
7781 Reloc_types
<sh_type
, 32, big_endian
>::reloc_size
;
7783 Arm_relobj
<big_endian
>* arm_object
=
7784 Arm_relobj
<big_endian
>::as_arm_relobj(relinfo
->object
);
7785 unsigned int local_count
= arm_object
->local_symbol_count();
7787 Comdat_behavior comdat_behavior
= CB_UNDETERMINED
;
7789 for (size_t i
= 0; i
< reloc_count
; ++i
, prelocs
+= reloc_size
)
7791 Reltype
reloc(prelocs
);
7793 typename
elfcpp::Elf_types
<32>::Elf_WXword r_info
= reloc
.get_r_info();
7794 unsigned int r_sym
= elfcpp::elf_r_sym
<32>(r_info
);
7795 unsigned int r_type
= elfcpp::elf_r_type
<32>(r_info
);
7797 r_type
= this->get_real_reloc_type(r_type
);
7799 // Only a few relocation types need stubs.
7800 if ((r_type
!= elfcpp::R_ARM_CALL
)
7801 && (r_type
!= elfcpp::R_ARM_JUMP24
)
7802 && (r_type
!= elfcpp::R_ARM_PLT32
)
7803 && (r_type
!= elfcpp::R_ARM_THM_CALL
)
7804 && (r_type
!= elfcpp::R_ARM_THM_XPC22
)
7805 && (r_type
!= elfcpp::R_ARM_THM_JUMP24
)
7806 && (r_type
!= elfcpp::R_ARM_THM_JUMP19
)
7807 && (r_type
!= elfcpp::R_ARM_V4BX
))
7810 section_offset_type offset
=
7811 convert_to_section_size_type(reloc
.get_r_offset());
7813 if (needs_special_offset_handling
)
7815 offset
= output_section
->output_offset(relinfo
->object
,
7816 relinfo
->data_shndx
,
7822 if (r_type
== elfcpp::R_ARM_V4BX
)
7824 // Get the BX instruction.
7825 typedef typename
elfcpp::Swap
<32, big_endian
>::Valtype Valtype
;
7826 const Valtype
* wv
= reinterpret_cast<const Valtype
*>(view
+ offset
);
7827 elfcpp::Elf_types
<32>::Elf_Swxword insn
=
7828 elfcpp::Swap
<32, big_endian
>::readval(wv
);
7829 this->scan_reloc_for_stub(relinfo
, r_type
, NULL
, 0, NULL
,
7835 Stub_addend_reader
<sh_type
, big_endian
> stub_addend_reader
;
7836 elfcpp::Elf_types
<32>::Elf_Swxword addend
=
7837 stub_addend_reader(r_type
, view
+ offset
, reloc
);
7839 const Sized_symbol
<32>* sym
;
7841 Symbol_value
<32> symval
;
7842 const Symbol_value
<32> *psymval
;
7843 if (r_sym
< local_count
)
7846 psymval
= arm_object
->local_symbol(r_sym
);
7848 // If the local symbol belongs to a section we are discarding,
7849 // and that section is a debug section, try to find the
7850 // corresponding kept section and map this symbol to its
7851 // counterpart in the kept section. The symbol must not
7852 // correspond to a section we are folding.
7854 unsigned int shndx
= psymval
->input_shndx(&is_ordinary
);
7856 && shndx
!= elfcpp::SHN_UNDEF
7857 && !arm_object
->is_section_included(shndx
)
7858 && !(relinfo
->symtab
->is_section_folded(arm_object
, shndx
)))
7860 if (comdat_behavior
== CB_UNDETERMINED
)
7863 arm_object
->section_name(relinfo
->data_shndx
);
7864 comdat_behavior
= get_comdat_behavior(name
.c_str());
7866 if (comdat_behavior
== CB_PRETEND
)
7869 typename
elfcpp::Elf_types
<32>::Elf_Addr value
=
7870 arm_object
->map_to_kept_section(shndx
, &found
);
7872 symval
.set_output_value(value
+ psymval
->input_value());
7874 symval
.set_output_value(0);
7878 symval
.set_output_value(0);
7880 symval
.set_no_output_symtab_entry();
7886 const Symbol
* gsym
= arm_object
->global_symbol(r_sym
);
7887 gold_assert(gsym
!= NULL
);
7888 if (gsym
->is_forwarder())
7889 gsym
= relinfo
->symtab
->resolve_forwards(gsym
);
7891 sym
= static_cast<const Sized_symbol
<32>*>(gsym
);
7892 if (sym
->has_symtab_index())
7893 symval
.set_output_symtab_index(sym
->symtab_index());
7895 symval
.set_no_output_symtab_entry();
7897 // We need to compute the would-be final value of this global
7899 const Symbol_table
* symtab
= relinfo
->symtab
;
7900 const Sized_symbol
<32>* sized_symbol
=
7901 symtab
->get_sized_symbol
<32>(gsym
);
7902 Symbol_table::Compute_final_value_status status
;
7904 symtab
->compute_final_value
<32>(sized_symbol
, &status
);
7906 // Skip this if the symbol has not output section.
7907 if (status
== Symbol_table::CFVS_NO_OUTPUT_SECTION
)
7910 symval
.set_output_value(value
);
7914 // If symbol is a section symbol, we don't know the actual type of
7915 // destination. Give up.
7916 if (psymval
->is_section_symbol())
7919 this->scan_reloc_for_stub(relinfo
, r_type
, sym
, r_sym
, psymval
,
7920 addend
, view_address
+ offset
);
7924 // Scan an input section for stub generation.
7926 template<bool big_endian
>
7928 Target_arm
<big_endian
>::scan_section_for_stubs(
7929 const Relocate_info
<32, big_endian
>* relinfo
,
7930 unsigned int sh_type
,
7931 const unsigned char* prelocs
,
7933 Output_section
* output_section
,
7934 bool needs_special_offset_handling
,
7935 const unsigned char* view
,
7936 Arm_address view_address
,
7937 section_size_type view_size
)
7939 if (sh_type
== elfcpp::SHT_REL
)
7940 this->scan_reloc_section_for_stubs
<elfcpp::SHT_REL
>(
7945 needs_special_offset_handling
,
7949 else if (sh_type
== elfcpp::SHT_RELA
)
7950 // We do not support RELA type relocations yet. This is provided for
7952 this->scan_reloc_section_for_stubs
<elfcpp::SHT_RELA
>(
7957 needs_special_offset_handling
,
7965 // Group input sections for stub generation.
7967 // We goup input sections in an output sections so that the total size,
7968 // including any padding space due to alignment is smaller than GROUP_SIZE
7969 // unless the only input section in group is bigger than GROUP_SIZE already.
7970 // Then an ARM stub table is created to follow the last input section
7971 // in group. For each group an ARM stub table is created an is placed
7972 // after the last group. If STUB_ALWATS_AFTER_BRANCH is false, we further
7973 // extend the group after the stub table.
7975 template<bool big_endian
>
7977 Target_arm
<big_endian
>::group_sections(
7979 section_size_type group_size
,
7980 bool stubs_always_after_branch
)
7982 // Group input sections and insert stub table
7983 Layout::Section_list section_list
;
7984 layout
->get_allocated_sections(§ion_list
);
7985 for (Layout::Section_list::const_iterator p
= section_list
.begin();
7986 p
!= section_list
.end();
7989 Arm_output_section
<big_endian
>* output_section
=
7990 Arm_output_section
<big_endian
>::as_arm_output_section(*p
);
7991 output_section
->group_sections(group_size
, stubs_always_after_branch
,
7996 // Relaxation hook. This is where we do stub generation.
7998 template<bool big_endian
>
8000 Target_arm
<big_endian
>::do_relax(
8002 const Input_objects
* input_objects
,
8003 Symbol_table
* symtab
,
8006 // No need to generate stubs if this is a relocatable link.
8007 gold_assert(!parameters
->options().relocatable());
8009 // If this is the first pass, we need to group input sections into
8013 // Determine the stub group size. The group size is the absolute
8014 // value of the parameter --stub-group-size. If --stub-group-size
8015 // is passed a negative value, we restict stubs to be always after
8016 // the stubbed branches.
8017 int32_t stub_group_size_param
=
8018 parameters
->options().stub_group_size();
8019 bool stubs_always_after_branch
= stub_group_size_param
< 0;
8020 section_size_type stub_group_size
= abs(stub_group_size_param
);
8022 // The Cortex-A8 erratum fix depends on stubs not being in the same 4K
8023 // page as the first half of a 32-bit branch straddling two 4K pages.
8024 // This is a crude way of enforcing that.
8025 if (this->fix_cortex_a8_
)
8026 stubs_always_after_branch
= true;
8028 if (stub_group_size
== 1)
8031 // Thumb branch range is +-4MB has to be used as the default
8032 // maximum size (a given section can contain both ARM and Thumb
8033 // code, so the worst case has to be taken into account).
8035 // This value is 24K less than that, which allows for 2025
8036 // 12-byte stubs. If we exceed that, then we will fail to link.
8037 // The user will have to relink with an explicit group size
8039 stub_group_size
= 4170000;
8042 group_sections(layout
, stub_group_size
, stubs_always_after_branch
);
8045 // The Cortex-A8 stubs are sensitive to layout of code sections. At the
8046 // beginning of each relaxation pass, just blow away all the stubs.
8047 // Alternatively, we could selectively remove only the stubs and reloc
8048 // information for code sections that have moved since the last pass.
8049 // That would require more book-keeping.
8050 typedef typename
Stub_table_list::iterator Stub_table_iterator
;
8051 if (this->fix_cortex_a8_
)
8053 // Clear all Cortex-A8 reloc information.
8054 for (typename
Cortex_a8_relocs_info::const_iterator p
=
8055 this->cortex_a8_relocs_info_
.begin();
8056 p
!= this->cortex_a8_relocs_info_
.end();
8059 this->cortex_a8_relocs_info_
.clear();
8061 // Remove all Cortex-A8 stubs.
8062 for (Stub_table_iterator sp
= this->stub_tables_
.begin();
8063 sp
!= this->stub_tables_
.end();
8065 (*sp
)->remove_all_cortex_a8_stubs();
8068 // Scan relocs for relocation stubs
8069 for (Input_objects::Relobj_iterator op
= input_objects
->relobj_begin();
8070 op
!= input_objects
->relobj_end();
8073 Arm_relobj
<big_endian
>* arm_relobj
=
8074 Arm_relobj
<big_endian
>::as_arm_relobj(*op
);
8075 arm_relobj
->scan_sections_for_stubs(this, symtab
, layout
);
8078 // Check all stub tables to see if any of them have their data sizes
8079 // or addresses alignments changed. These are the only things that
8081 bool any_stub_table_changed
= false;
8082 for (Stub_table_iterator sp
= this->stub_tables_
.begin();
8083 (sp
!= this->stub_tables_
.end()) && !any_stub_table_changed
;
8086 if ((*sp
)->update_data_size_and_addralign())
8087 any_stub_table_changed
= true;
8090 // Finalize the stubs in the last relaxation pass.
8091 if (!any_stub_table_changed
)
8092 for (Stub_table_iterator sp
= this->stub_tables_
.begin();
8093 (sp
!= this->stub_tables_
.end()) && !any_stub_table_changed
;
8095 (*sp
)->finalize_stubs();
8097 return any_stub_table_changed
;
8102 template<bool big_endian
>
8104 Target_arm
<big_endian
>::relocate_stub(
8106 const Relocate_info
<32, big_endian
>* relinfo
,
8107 Output_section
* output_section
,
8108 unsigned char* view
,
8109 Arm_address address
,
8110 section_size_type view_size
)
8113 const Stub_template
* stub_template
= stub
->stub_template();
8114 for (size_t i
= 0; i
< stub_template
->reloc_count(); i
++)
8116 size_t reloc_insn_index
= stub_template
->reloc_insn_index(i
);
8117 const Insn_template
* insn
= &stub_template
->insns()[reloc_insn_index
];
8119 unsigned int r_type
= insn
->r_type();
8120 section_size_type reloc_offset
= stub_template
->reloc_offset(i
);
8121 section_size_type reloc_size
= insn
->size();
8122 gold_assert(reloc_offset
+ reloc_size
<= view_size
);
8124 // This is the address of the stub destination.
8125 Arm_address target
= stub
->reloc_target(i
) + insn
->reloc_addend();
8126 Symbol_value
<32> symval
;
8127 symval
.set_output_value(target
);
8129 // Synthesize a fake reloc just in case. We don't have a symbol so
8131 unsigned char reloc_buffer
[elfcpp::Elf_sizes
<32>::rel_size
];
8132 memset(reloc_buffer
, 0, sizeof(reloc_buffer
));
8133 elfcpp::Rel_write
<32, big_endian
> reloc_write(reloc_buffer
);
8134 reloc_write
.put_r_offset(reloc_offset
);
8135 reloc_write
.put_r_info(elfcpp::elf_r_info
<32>(0, r_type
));
8136 elfcpp::Rel
<32, big_endian
> rel(reloc_buffer
);
8138 relocate
.relocate(relinfo
, this, output_section
,
8139 this->fake_relnum_for_stubs
, rel
, r_type
,
8140 NULL
, &symval
, view
+ reloc_offset
,
8141 address
+ reloc_offset
, reloc_size
);
8145 // Determine whether an object attribute tag takes an integer, a
8148 template<bool big_endian
>
8150 Target_arm
<big_endian
>::do_attribute_arg_type(int tag
) const
8152 if (tag
== Object_attribute::Tag_compatibility
)
8153 return (Object_attribute::ATTR_TYPE_FLAG_INT_VAL
8154 | Object_attribute::ATTR_TYPE_FLAG_STR_VAL
);
8155 else if (tag
== elfcpp::Tag_nodefaults
)
8156 return (Object_attribute::ATTR_TYPE_FLAG_INT_VAL
8157 | Object_attribute::ATTR_TYPE_FLAG_NO_DEFAULT
);
8158 else if (tag
== elfcpp::Tag_CPU_raw_name
|| tag
== elfcpp::Tag_CPU_name
)
8159 return Object_attribute::ATTR_TYPE_FLAG_STR_VAL
;
8161 return Object_attribute::ATTR_TYPE_FLAG_INT_VAL
;
8163 return ((tag
& 1) != 0
8164 ? Object_attribute::ATTR_TYPE_FLAG_STR_VAL
8165 : Object_attribute::ATTR_TYPE_FLAG_INT_VAL
);
8168 // Reorder attributes.
8170 // The ABI defines that Tag_conformance should be emitted first, and that
8171 // Tag_nodefaults should be second (if either is defined). This sets those
8172 // two positions, and bumps up the position of all the remaining tags to
8175 template<bool big_endian
>
8177 Target_arm
<big_endian
>::do_attributes_order(int num
) const
8179 // Reorder the known object attributes in output. We want to move
8180 // Tag_conformance to position 4 and Tag_conformance to position 5
8181 // and shift eveything between 4 .. Tag_conformance - 1 to make room.
8183 return elfcpp::Tag_conformance
;
8185 return elfcpp::Tag_nodefaults
;
8186 if ((num
- 2) < elfcpp::Tag_nodefaults
)
8188 if ((num
- 1) < elfcpp::Tag_conformance
)
8193 // Scan a span of THUMB code for Cortex-A8 erratum.
8195 template<bool big_endian
>
8197 Target_arm
<big_endian
>::scan_span_for_cortex_a8_erratum(
8198 Arm_relobj
<big_endian
>* arm_relobj
,
8200 section_size_type span_start
,
8201 section_size_type span_end
,
8202 const unsigned char* view
,
8203 Arm_address address
)
8205 // Scan for 32-bit Thumb-2 branches which span two 4K regions, where:
8207 // The opcode is BLX.W, BL.W, B.W, Bcc.W
8208 // The branch target is in the same 4KB region as the
8209 // first half of the branch.
8210 // The instruction before the branch is a 32-bit
8211 // length non-branch instruction.
8212 section_size_type i
= span_start
;
8213 bool last_was_32bit
= false;
8214 bool last_was_branch
= false;
8215 while (i
< span_end
)
8217 typedef typename
elfcpp::Swap
<16, big_endian
>::Valtype Valtype
;
8218 const Valtype
* wv
= reinterpret_cast<const Valtype
*>(view
+ i
);
8219 uint32_t insn
= elfcpp::Swap
<16, big_endian
>::readval(wv
);
8220 bool is_blx
= false, is_b
= false;
8221 bool is_bl
= false, is_bcc
= false;
8223 bool insn_32bit
= (insn
& 0xe000) == 0xe000 && (insn
& 0x1800) != 0x0000;
8226 // Load the rest of the insn (in manual-friendly order).
8227 insn
= (insn
<< 16) | elfcpp::Swap
<16, big_endian
>::readval(wv
+ 1);
8229 // Encoding T4: B<c>.W.
8230 is_b
= (insn
& 0xf800d000U
) == 0xf0009000U
;
8231 // Encoding T1: BL<c>.W.
8232 is_bl
= (insn
& 0xf800d000U
) == 0xf000d000U
;
8233 // Encoding T2: BLX<c>.W.
8234 is_blx
= (insn
& 0xf800d000U
) == 0xf000c000U
;
8235 // Encoding T3: B<c>.W (not permitted in IT block).
8236 is_bcc
= ((insn
& 0xf800d000U
) == 0xf0008000U
8237 && (insn
& 0x07f00000U
) != 0x03800000U
);
8240 bool is_32bit_branch
= is_b
|| is_bl
|| is_blx
|| is_bcc
;
8242 // If this instruction is a 32-bit THUMB branch that crosses a 4K
8243 // page boundary and it follows 32-bit non-branch instruction,
8244 // we need to work around.
8246 && ((address
+ i
) & 0xfffU
) == 0xffeU
8248 && !last_was_branch
)
8250 // Check to see if there is a relocation stub for this branch.
8251 bool force_target_arm
= false;
8252 bool force_target_thumb
= false;
8253 const Cortex_a8_reloc
* cortex_a8_reloc
= NULL
;
8254 Cortex_a8_relocs_info::const_iterator p
=
8255 this->cortex_a8_relocs_info_
.find(address
+ i
);
8257 if (p
!= this->cortex_a8_relocs_info_
.end())
8259 cortex_a8_reloc
= p
->second
;
8260 bool target_is_thumb
= (cortex_a8_reloc
->destination() & 1) != 0;
8262 if (cortex_a8_reloc
->r_type() == elfcpp::R_ARM_THM_CALL
8263 && !target_is_thumb
)
8264 force_target_arm
= true;
8265 else if (cortex_a8_reloc
->r_type() == elfcpp::R_ARM_THM_CALL
8267 force_target_thumb
= true;
8271 Stub_type stub_type
= arm_stub_none
;
8273 // Check if we have an offending branch instruction.
8274 uint16_t upper_insn
= (insn
>> 16) & 0xffffU
;
8275 uint16_t lower_insn
= insn
& 0xffffU
;
8276 typedef struct Arm_relocate_functions
<big_endian
> RelocFuncs
;
8278 if (cortex_a8_reloc
!= NULL
8279 && cortex_a8_reloc
->reloc_stub() != NULL
)
8280 // We've already made a stub for this instruction, e.g.
8281 // it's a long branch or a Thumb->ARM stub. Assume that
8282 // stub will suffice to work around the A8 erratum (see
8283 // setting of always_after_branch above).
8287 offset
= RelocFuncs::thumb32_cond_branch_offset(upper_insn
,
8289 stub_type
= arm_stub_a8_veneer_b_cond
;
8291 else if (is_b
|| is_bl
|| is_blx
)
8293 offset
= RelocFuncs::thumb32_branch_offset(upper_insn
,
8299 ? arm_stub_a8_veneer_blx
8301 ? arm_stub_a8_veneer_bl
8302 : arm_stub_a8_veneer_b
));
8305 if (stub_type
!= arm_stub_none
)
8307 Arm_address pc_for_insn
= address
+ i
+ 4;
8309 // The original instruction is a BL, but the target is
8310 // an ARM instruction. If we were not making a stub,
8311 // the BL would have been converted to a BLX. Use the
8312 // BLX stub instead in that case.
8313 if (this->may_use_blx() && force_target_arm
8314 && stub_type
== arm_stub_a8_veneer_bl
)
8316 stub_type
= arm_stub_a8_veneer_blx
;
8320 // Conversely, if the original instruction was
8321 // BLX but the target is Thumb mode, use the BL stub.
8322 else if (force_target_thumb
8323 && stub_type
== arm_stub_a8_veneer_blx
)
8325 stub_type
= arm_stub_a8_veneer_bl
;
8333 // If we found a relocation, use the proper destination,
8334 // not the offset in the (unrelocated) instruction.
8335 // Note this is always done if we switched the stub type above.
8336 if (cortex_a8_reloc
!= NULL
)
8337 offset
= (off_t
) (cortex_a8_reloc
->destination() - pc_for_insn
);
8339 Arm_address target
= (pc_for_insn
+ offset
) | (is_blx
? 0 : 1);
8341 // Add a new stub if destination address in in the same page.
8342 if (((address
+ i
) & ~0xfffU
) == (target
& ~0xfffU
))
8344 Cortex_a8_stub
* stub
=
8345 this->stub_factory_
.make_cortex_a8_stub(stub_type
,
8349 Stub_table
<big_endian
>* stub_table
=
8350 arm_relobj
->stub_table(shndx
);
8351 gold_assert(stub_table
!= NULL
);
8352 stub_table
->add_cortex_a8_stub(address
+ i
, stub
);
8357 i
+= insn_32bit
? 4 : 2;
8358 last_was_32bit
= insn_32bit
;
8359 last_was_branch
= is_32bit_branch
;
8363 // Apply the Cortex-A8 workaround.
8365 template<bool big_endian
>
8367 Target_arm
<big_endian
>::apply_cortex_a8_workaround(
8368 const Cortex_a8_stub
* stub
,
8369 Arm_address stub_address
,
8370 unsigned char* insn_view
,
8371 Arm_address insn_address
)
8373 typedef typename
elfcpp::Swap
<16, big_endian
>::Valtype Valtype
;
8374 Valtype
* wv
= reinterpret_cast<Valtype
*>(insn_view
);
8375 Valtype upper_insn
= elfcpp::Swap
<16, big_endian
>::readval(wv
);
8376 Valtype lower_insn
= elfcpp::Swap
<16, big_endian
>::readval(wv
+ 1);
8377 off_t branch_offset
= stub_address
- (insn_address
+ 4);
8379 typedef struct Arm_relocate_functions
<big_endian
> RelocFuncs
;
8380 switch (stub
->stub_template()->type())
8382 case arm_stub_a8_veneer_b_cond
:
8383 gold_assert(!utils::has_overflow
<21>(branch_offset
));
8384 upper_insn
= RelocFuncs::thumb32_cond_branch_upper(upper_insn
,
8386 lower_insn
= RelocFuncs::thumb32_cond_branch_lower(lower_insn
,
8390 case arm_stub_a8_veneer_b
:
8391 case arm_stub_a8_veneer_bl
:
8392 case arm_stub_a8_veneer_blx
:
8393 if ((lower_insn
& 0x5000U
) == 0x4000U
)
8394 // For a BLX instruction, make sure that the relocation is
8395 // rounded up to a word boundary. This follows the semantics of
8396 // the instruction which specifies that bit 1 of the target
8397 // address will come from bit 1 of the base address.
8398 branch_offset
= (branch_offset
+ 2) & ~3;
8400 // Put BRANCH_OFFSET back into the insn.
8401 gold_assert(!utils::has_overflow
<25>(branch_offset
));
8402 upper_insn
= RelocFuncs::thumb32_branch_upper(upper_insn
, branch_offset
);
8403 lower_insn
= RelocFuncs::thumb32_branch_lower(lower_insn
, branch_offset
);
8410 // Put the relocated value back in the object file:
8411 elfcpp::Swap
<16, big_endian
>::writeval(wv
, upper_insn
);
8412 elfcpp::Swap
<16, big_endian
>::writeval(wv
+ 1, lower_insn
);
8415 template<bool big_endian
>
8416 class Target_selector_arm
: public Target_selector
8419 Target_selector_arm()
8420 : Target_selector(elfcpp::EM_ARM
, 32, big_endian
,
8421 (big_endian
? "elf32-bigarm" : "elf32-littlearm"))
8425 do_instantiate_target()
8426 { return new Target_arm
<big_endian
>(); }
8429 Target_selector_arm
<false> target_selector_arm
;
8430 Target_selector_arm
<true> target_selector_armbe
;
8432 } // End anonymous namespace.