Fix internal error when applying TLSDESC relocations with no TLS segment.
[binutils-gdb.git] / gold / aarch64.cc
1 // aarch64.cc -- aarch64 target support for gold.
2
3 // Copyright (C) 2014-2016 Free Software Foundation, Inc.
4 // Written by Jing Yu <jingyu@google.com> and Han Shen <shenhan@google.com>.
5
6 // This file is part of gold.
7
8 // This program is free software; you can redistribute it and/or modify
9 // it under the terms of the GNU General Public License as published by
10 // the Free Software Foundation; either version 3 of the License, or
11 // (at your option) any later version.
12
13 // This program is distributed in the hope that it will be useful,
14 // but WITHOUT ANY WARRANTY; without even the implied warranty of
15 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 // GNU General Public License for more details.
17
18 // You should have received a copy of the GNU General Public License
19 // along with this program; if not, write to the Free Software
20 // Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
21 // MA 02110-1301, USA.
22
23 #include "gold.h"
24
25 #include <cstring>
26 #include <map>
27 #include <set>
28
29 #include "elfcpp.h"
30 #include "dwarf.h"
31 #include "parameters.h"
32 #include "reloc.h"
33 #include "aarch64.h"
34 #include "object.h"
35 #include "symtab.h"
36 #include "layout.h"
37 #include "output.h"
38 #include "copy-relocs.h"
39 #include "target.h"
40 #include "target-reloc.h"
41 #include "target-select.h"
42 #include "tls.h"
43 #include "freebsd.h"
44 #include "nacl.h"
45 #include "gc.h"
46 #include "icf.h"
47 #include "aarch64-reloc-property.h"
48
49 // The first three .got.plt entries are reserved.
50 const int32_t AARCH64_GOTPLT_RESERVE_COUNT = 3;
51
52
53 namespace
54 {
55
56 using namespace gold;
57
58 template<int size, bool big_endian>
59 class Output_data_plt_aarch64;
60
61 template<int size, bool big_endian>
62 class Output_data_plt_aarch64_standard;
63
64 template<int size, bool big_endian>
65 class Target_aarch64;
66
67 template<int size, bool big_endian>
68 class AArch64_relocate_functions;
69
70 // Utility class dealing with insns. This is ported from macros in
71 // bfd/elfnn-aarch64.c, but wrapped inside a class as static members. This
72 // class is used in erratum sequence scanning.
73
74 template<bool big_endian>
75 class AArch64_insn_utilities
76 {
77 public:
78 typedef typename elfcpp::Swap<32, big_endian>::Valtype Insntype;
79
80 static const int BYTES_PER_INSN;
81
82 // Zero register encoding - 31.
83 static const unsigned int AARCH64_ZR;
84
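// Extract the single bit of INSN at bit position POS.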
85 static unsigned int
86 aarch64_bit(Insntype insn, int pos)
87 { return ((1 << pos) & insn) >> pos; }
88
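// Extract the L-bit wide field of INSN starting at bit position POS.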
89 static unsigned int
90 aarch64_bits(Insntype insn, int pos, int l)
91 { return (insn >> pos) & ((1 << l) - 1); }
92
93 // Get the encoding field "op31" of 3-source data processing insns. "op31" is
94 // the name defined in armv8 insn manual C3.5.9.
95 static unsigned int
96 aarch64_op31(Insntype insn)
97 { return aarch64_bits(insn, 21, 3); }
98
99 // Get the encoding field "ra" of 3-source data processing insns. "ra" is the
100 // third source register. See armv8 insn manual C3.5.9.
101 static unsigned int
102 aarch64_ra(Insntype insn)
103 { return aarch64_bits(insn, 10, 5); }
104
105 static bool
106 is_adr(const Insntype insn)
107 { return (insn & 0x9F000000) == 0x10000000; }
108
109 static bool
110 is_adrp(const Insntype insn)
111 { return (insn & 0x9F000000) == 0x90000000; }
112
113 static unsigned int
114 aarch64_rm(const Insntype insn)
115 { return aarch64_bits(insn, 16, 5); }
116
117 static unsigned int
118 aarch64_rn(const Insntype insn)
119 { return aarch64_bits(insn, 5, 5); }
120
121 static unsigned int
122 aarch64_rd(const Insntype insn)
123 { return aarch64_bits(insn, 0, 5); }
124
125 static unsigned int
126 aarch64_rt(const Insntype insn)
127 { return aarch64_bits(insn, 0, 5); }
128
129 static unsigned int
130 aarch64_rt2(const Insntype insn)
131 { return aarch64_bits(insn, 10, 5); }
132
133 // Encode imm21 into adr. Signed imm21 is in the range of [-1M, 1M).
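// (In the ADR/ADRP encoding, immlo occupies bits [30:29] and immhi bits [23:5].)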
134 static Insntype
135 aarch64_adr_encode_imm(Insntype adr, int imm21)
136 {
137 gold_assert(is_adr(adr));
138 gold_assert(-(1 << 20) <= imm21 && imm21 < (1 << 20));
139 const int mask19 = (1 << 19) - 1;
140 const int mask2 = 3;
141 adr &= ~((mask19 << 5) | (mask2 << 29));
142 adr |= ((imm21 & mask2) << 29) | (((imm21 >> 2) & mask19) << 5);
143 return adr;
144 }
145
146 // Retrieve the 33-bit signed imm value encoded in an adrp insn. This value
147 // is the 21-bit signed imm encoded in the insn, multiplied by 4k (the page
148 // size) and sign-extended to 64 bits, giving [-4G, 4G) with the low 12 bits zero.
149 static int64_t
150 aarch64_adrp_decode_imm(const Insntype adrp)
151 {
152 const int mask19 = (1 << 19) - 1;
153 const int mask2 = 3;
154 gold_assert(is_adrp(adrp));
155 // 21-bit imm encoded in adrp.
156 uint64_t imm = ((adrp >> 29) & mask2) | (((adrp >> 5) & mask19) << 2);
157 // Retrieve msb of 21-bit-signed imm for sign extension.
158 uint64_t msbt = (imm >> 20) & 1;
159 // Real value is imm multiplied by 4k. Value now has 33-bit information.
160 int64_t value = imm << 12;
161 // Sign extend to 64-bit by repeating msbt 31 (64-33) times and merge it
162 // with value.
163 return ((((uint64_t)(1) << 32) - msbt) << 33) | value;
164 }
165
166 static bool
167 aarch64_b(const Insntype insn)
168 { return (insn & 0xFC000000) == 0x14000000; }
169
170 static bool
171 aarch64_bl(const Insntype insn)
172 { return (insn & 0xFC000000) == 0x94000000; }
173
174 static bool
175 aarch64_blr(const Insntype insn)
176 { return (insn & 0xFFFFFC1F) == 0xD63F0000; }
177
178 static bool
179 aarch64_br(const Insntype insn)
180 { return (insn & 0xFFFFFC1F) == 0xD61F0000; }
181
182 // All ld/st ops. See C4-182 of the ARM ARM. The encoding space for
183 // LD_PCREL, LDST_RO, LDST_UI and LDST_UIMM cover prefetch ops.
184 static bool
185 aarch64_ld(Insntype insn) { return aarch64_bit(insn, 22) == 1; }
186
187 static bool
188 aarch64_ldst(Insntype insn)
189 { return (insn & 0x0a000000) == 0x08000000; }
190
191 static bool
192 aarch64_ldst_ex(Insntype insn)
193 { return (insn & 0x3f000000) == 0x08000000; }
194
195 static bool
196 aarch64_ldst_pcrel(Insntype insn)
197 { return (insn & 0x3b000000) == 0x18000000; }
198
199 static bool
200 aarch64_ldst_nap(Insntype insn)
201 { return (insn & 0x3b800000) == 0x28000000; }
202
203 static bool
204 aarch64_ldstp_pi(Insntype insn)
205 { return (insn & 0x3b800000) == 0x28800000; }
206
207 static bool
208 aarch64_ldstp_o(Insntype insn)
209 { return (insn & 0x3b800000) == 0x29000000; }
210
211 static bool
212 aarch64_ldstp_pre(Insntype insn)
213 { return (insn & 0x3b800000) == 0x29800000; }
214
215 static bool
216 aarch64_ldst_ui(Insntype insn)
217 { return (insn & 0x3b200c00) == 0x38000000; }
218
219 static bool
220 aarch64_ldst_piimm(Insntype insn)
221 { return (insn & 0x3b200c00) == 0x38000400; }
222
223 static bool
224 aarch64_ldst_u(Insntype insn)
225 { return (insn & 0x3b200c00) == 0x38000800; }
226
227 static bool
228 aarch64_ldst_preimm(Insntype insn)
229 { return (insn & 0x3b200c00) == 0x38000c00; }
230
231 static bool
232 aarch64_ldst_ro(Insntype insn)
233 { return (insn & 0x3b200c00) == 0x38200800; }
234
235 static bool
236 aarch64_ldst_uimm(Insntype insn)
237 { return (insn & 0x3b000000) == 0x39000000; }
238
239 static bool
240 aarch64_ldst_simd_m(Insntype insn)
241 { return (insn & 0xbfbf0000) == 0x0c000000; }
242
243 static bool
244 aarch64_ldst_simd_m_pi(Insntype insn)
245 { return (insn & 0xbfa00000) == 0x0c800000; }
246
247 static bool
248 aarch64_ldst_simd_s(Insntype insn)
249 { return (insn & 0xbf9f0000) == 0x0d000000; }
250
251 static bool
252 aarch64_ldst_simd_s_pi(Insntype insn)
253 { return (insn & 0xbf800000) == 0x0d800000; }
254
255 // Classify an INSN if it is indeed a load/store. Return true if INSN is a
256 // LD/ST instruction otherwise return false. For scalar LD/ST instructions
257 // PAIR is FALSE, RT is returned and RT2 is set equal to RT. For LD/ST pair
258 // instructions PAIR is TRUE, RT and RT2 are returned.
259 static bool
260 aarch64_mem_op_p(Insntype insn, unsigned int *rt, unsigned int *rt2,
261 bool *pair, bool *load)
262 {
263 uint32_t opcode;
264 unsigned int r;
265 uint32_t opc = 0;
266 uint32_t v = 0;
267 uint32_t opc_v = 0;
268
269 /* Bail out quickly if INSN doesn't fall into the load-store
270 encoding space. */
271 if (!aarch64_ldst (insn))
272 return false;
273
274 *pair = false;
275 *load = false;
276 if (aarch64_ldst_ex (insn))
277 {
278 *rt = aarch64_rt (insn);
279 *rt2 = *rt;
280 if (aarch64_bit (insn, 21) == 1)
281 {
282 *pair = true;
283 *rt2 = aarch64_rt2 (insn);
284 }
285 *load = aarch64_ld (insn);
286 return true;
287 }
288 else if (aarch64_ldst_nap (insn)
289 || aarch64_ldstp_pi (insn)
290 || aarch64_ldstp_o (insn)
291 || aarch64_ldstp_pre (insn))
292 {
293 *pair = true;
294 *rt = aarch64_rt (insn);
295 *rt2 = aarch64_rt2 (insn);
296 *load = aarch64_ld (insn);
297 return true;
298 }
299 else if (aarch64_ldst_pcrel (insn)
300 || aarch64_ldst_ui (insn)
301 || aarch64_ldst_piimm (insn)
302 || aarch64_ldst_u (insn)
303 || aarch64_ldst_preimm (insn)
304 || aarch64_ldst_ro (insn)
305 || aarch64_ldst_uimm (insn))
306 {
307 *rt = aarch64_rt (insn);
308 *rt2 = *rt;
309 if (aarch64_ldst_pcrel (insn))
310 *load = true;
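// Distinguish loads from stores: combine opc (bits 23:22) with the SIMD/FP
// register bit V (bit 26); the opc_v values tested below are the load forms.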
311 opc = aarch64_bits (insn, 22, 2);
312 v = aarch64_bit (insn, 26);
313 opc_v = opc | (v << 2);
314 *load = (opc_v == 1 || opc_v == 2 || opc_v == 3
315 || opc_v == 5 || opc_v == 7);
316 return true;
317 }
318 else if (aarch64_ldst_simd_m (insn)
319 || aarch64_ldst_simd_m_pi (insn))
320 {
321 *rt = aarch64_rt (insn);
322 *load = aarch64_bit (insn, 22);
323 opcode = (insn >> 12) & 0xf;
324 switch (opcode)
325 {
326 case 0:
327 case 2:
328 *rt2 = *rt + 3;
329 break;
330
331 case 4:
332 case 6:
333 *rt2 = *rt + 2;
334 break;
335
336 case 7:
337 *rt2 = *rt;
338 break;
339
340 case 8:
341 case 10:
342 *rt2 = *rt + 1;
343 break;
344
345 default:
346 return false;
347 }
348 return true;
349 }
350 else if (aarch64_ldst_simd_s (insn)
351 || aarch64_ldst_simd_s_pi (insn))
352 {
353 *rt = aarch64_rt (insn);
354 r = (insn >> 21) & 1;
355 *load = aarch64_bit (insn, 22);
356 opcode = (insn >> 13) & 0x7;
357 switch (opcode)
358 {
359 case 0:
360 case 2:
361 case 4:
362 *rt2 = *rt + r;
363 break;
364
365 case 1:
366 case 3:
367 case 5:
368 *rt2 = *rt + (r == 0 ? 2 : 3);
369 break;
370
371 case 6:
372 *rt2 = *rt + r;
373 break;
374
375 case 7:
376 *rt2 = *rt + (r == 0 ? 2 : 3);
377 break;
378
379 default:
380 return false;
381 }
382 return true;
383 }
384 return false;
385 } // End of "aarch64_mem_op_p".
386
387 // Return true if INSN is mac insn.
388 static bool
389 aarch64_mac(Insntype insn)
390 { return (insn & 0xff000000) == 0x9b000000; }
391
392 // Return true if INSN is multiply-accumulate.
393 // (This is similar to the implementation in elfnn-aarch64.c.)
394 static bool
395 aarch64_mlxl(Insntype insn)
396 {
397 uint32_t op31 = aarch64_op31(insn);
398 if (aarch64_mac(insn)
399 && (op31 == 0 || op31 == 1 || op31 == 5)
400 /* Exclude MUL instructions which are encoded as a multiple-accumulate
401 with RA = XZR. */
402 && aarch64_ra(insn) != AARCH64_ZR)
403 {
404 return true;
405 }
406 return false;
407 }
408 }; // End of "AArch64_insn_utilities".
409
410
411 // Insn length in bytes.
412
413 template<bool big_endian>
414 const int AArch64_insn_utilities<big_endian>::BYTES_PER_INSN = 4;
415
416
417 // Zero register encoding - 31.
418
419 template<bool big_endian>
420 const unsigned int AArch64_insn_utilities<big_endian>::AARCH64_ZR = 0x1f;
421
422
423 // Output_data_got_aarch64 class.
424
425 template<int size, bool big_endian>
426 class Output_data_got_aarch64 : public Output_data_got<size, big_endian>
427 {
428 public:
429 typedef typename elfcpp::Elf_types<size>::Elf_Addr Valtype;
430 Output_data_got_aarch64(Symbol_table* symtab, Layout* layout)
431 : Output_data_got<size, big_endian>(),
432 symbol_table_(symtab), layout_(layout)
433 { }
434
435 // Add a static reloc for the GOT entry at OFFSET. GSYM is a global
436 // symbol and R_TYPE is the code of a dynamic relocation that needs to be
437 // applied in a static link.
438 void
439 add_static_reloc(unsigned int got_offset, unsigned int r_type, Symbol* gsym)
440 { this->static_relocs_.push_back(Static_reloc(got_offset, r_type, gsym)); }
441
442
443 // Add a static reloc for the GOT entry at OFFSET. RELOBJ is an object
444 // defining a local symbol with INDEX. R_TYPE is the code of a dynamic
445 // relocation that needs to be applied in a static link.
446 void
447 add_static_reloc(unsigned int got_offset, unsigned int r_type,
448 Sized_relobj_file<size, big_endian>* relobj,
449 unsigned int index)
450 {
451 this->static_relocs_.push_back(Static_reloc(got_offset, r_type, relobj,
452 index));
453 }
454
455
456 protected:
457 // Write out the GOT table.
458 void
459 do_write(Output_file* of) {
460 // The first entry in the GOT is the address of the .dynamic section.
461 gold_assert(this->data_size() >= size / 8);
462 Output_section* dynamic = this->layout_->dynamic_section();
463 Valtype dynamic_addr = dynamic == NULL ? 0 : dynamic->address();
464 this->replace_constant(0, dynamic_addr);
465 Output_data_got<size, big_endian>::do_write(of);
466
467 // Handling static relocs
468 if (this->static_relocs_.empty())
469 return;
470
471 typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
472
473 gold_assert(parameters->doing_static_link());
474 const off_t offset = this->offset();
475 const section_size_type oview_size =
476 convert_to_section_size_type(this->data_size());
477 unsigned char* const oview = of->get_output_view(offset, oview_size);
478
479 Output_segment* tls_segment = this->layout_->tls_segment();
480 gold_assert(tls_segment != NULL);
481
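// AArch64 uses TLS variant I: the thread pointer points at the TCB and the
// TLS block starts at TP plus the TCB size rounded up to the TLS segment's
// alignment, so TPREL values below are biased by that aligned TCB size.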
482 AArch64_address aligned_tcb_address =
483 align_address(Target_aarch64<size, big_endian>::TCB_SIZE,
484 tls_segment->maximum_alignment());
485
486 for (size_t i = 0; i < this->static_relocs_.size(); ++i)
487 {
488 Static_reloc& reloc(this->static_relocs_[i]);
489 AArch64_address value;
490
491 if (!reloc.symbol_is_global())
492 {
493 Sized_relobj_file<size, big_endian>* object = reloc.relobj();
494 const Symbol_value<size>* psymval =
495 reloc.relobj()->local_symbol(reloc.index());
496
497 // We are doing static linking. Issue an error and skip this
498 // relocation if the symbol is undefined or in a discarded section.
499 bool is_ordinary;
500 unsigned int shndx = psymval->input_shndx(&is_ordinary);
501 if ((shndx == elfcpp::SHN_UNDEF)
502 || (is_ordinary
503 && shndx != elfcpp::SHN_UNDEF
504 && !object->is_section_included(shndx)
505 && !this->symbol_table_->is_section_folded(object, shndx)))
506 {
507 gold_error(_("undefined or discarded local symbol %u from "
508 "object %s in GOT"),
509 reloc.index(), reloc.relobj()->name().c_str());
510 continue;
511 }
512 value = psymval->value(object, 0);
513 }
514 else
515 {
516 const Symbol* gsym = reloc.symbol();
517 gold_assert(gsym != NULL);
518 if (gsym->is_forwarder())
519 gsym = this->symbol_table_->resolve_forwards(gsym);
520
521 // We are doing static linking. Issue an error and skip this
522 // relocation if the symbol is undefined or in a discarded section
523 // unless it is a weakly_undefined symbol.
524 if ((gsym->is_defined_in_discarded_section()
525 || gsym->is_undefined())
526 && !gsym->is_weak_undefined())
527 {
528 gold_error(_("undefined or discarded symbol %s in GOT"),
529 gsym->name());
530 continue;
531 }
532
533 if (!gsym->is_weak_undefined())
534 {
535 const Sized_symbol<size>* sym =
536 static_cast<const Sized_symbol<size>*>(gsym);
537 value = sym->value();
538 }
539 else
540 value = 0;
541 }
542
543 unsigned got_offset = reloc.got_offset();
544 gold_assert(got_offset < oview_size);
545
546 typedef typename elfcpp::Swap<size, big_endian>::Valtype Valtype;
547 Valtype* wv = reinterpret_cast<Valtype*>(oview + got_offset);
548 Valtype x;
549 switch (reloc.r_type())
550 {
551 case elfcpp::R_AARCH64_TLS_DTPREL64:
552 x = value;
553 break;
554 case elfcpp::R_AARCH64_TLS_TPREL64:
555 x = value + aligned_tcb_address;
556 break;
557 default:
558 gold_unreachable();
559 }
560 elfcpp::Swap<size, big_endian>::writeval(wv, x);
561 }
562
563 of->write_output_view(offset, oview_size, oview);
564 }
565
566 private:
567 // Symbol table of the output object.
568 Symbol_table* symbol_table_;
569 // A pointer to the Layout class, so that we can find the .dynamic
570 // section when we write out the GOT section.
571 Layout* layout_;
572
573 // This class represents dynamic relocations that need to be applied by
574 // gold because we are using TLS relocations in a static link.
575 class Static_reloc
576 {
577 public:
578 Static_reloc(unsigned int got_offset, unsigned int r_type, Symbol* gsym)
579 : got_offset_(got_offset), r_type_(r_type), symbol_is_global_(true)
580 { this->u_.global.symbol = gsym; }
581
582 Static_reloc(unsigned int got_offset, unsigned int r_type,
583 Sized_relobj_file<size, big_endian>* relobj, unsigned int index)
584 : got_offset_(got_offset), r_type_(r_type), symbol_is_global_(false)
585 {
586 this->u_.local.relobj = relobj;
587 this->u_.local.index = index;
588 }
589
590 // Return the GOT offset.
591 unsigned int
592 got_offset() const
593 { return this->got_offset_; }
594
595 // Relocation type.
596 unsigned int
597 r_type() const
598 { return this->r_type_; }
599
600 // Whether the symbol is global or not.
601 bool
602 symbol_is_global() const
603 { return this->symbol_is_global_; }
604
605 // For a relocation against a global symbol, the global symbol.
606 Symbol*
607 symbol() const
608 {
609 gold_assert(this->symbol_is_global_);
610 return this->u_.global.symbol;
611 }
612
613 // For a relocation against a local symbol, the defining object.
614 Sized_relobj_file<size, big_endian>*
615 relobj() const
616 {
617 gold_assert(!this->symbol_is_global_);
618 return this->u_.local.relobj;
619 }
620
621 // For a relocation against a local symbol, the local symbol index.
622 unsigned int
623 index() const
624 {
625 gold_assert(!this->symbol_is_global_);
626 return this->u_.local.index;
627 }
628
629 private:
630 // GOT offset of the entry to which this relocation is applied.
631 unsigned int got_offset_;
632 // Type of relocation.
633 unsigned int r_type_;
634 // Whether this relocation is against a global symbol.
635 bool symbol_is_global_;
636 // A global or local symbol.
637 union
638 {
639 struct
640 {
641 // For a global symbol, the symbol itself.
642 Symbol* symbol;
643 } global;
644 struct
645 {
646 // For a local symbol, the object defining the symbol.
647 Sized_relobj_file<size, big_endian>* relobj;
648 // For a local symbol, the symbol index.
649 unsigned int index;
650 } local;
651 } u_;
652 }; // End of inner class Static_reloc
653
654 std::vector<Static_reloc> static_relocs_;
655 }; // End of Output_data_got_aarch64
656
657
658 template<int size, bool big_endian>
659 class AArch64_input_section;
660
661
662 template<int size, bool big_endian>
663 class AArch64_output_section;
664
665
666 template<int size, bool big_endian>
667 class AArch64_relobj;
668
669
670 // Stub type enum constants.
671
672 enum
673 {
674 ST_NONE = 0,
675
676 // Using adrp/add pair, 4 insns (including alignment) without mem access,
677 // the fastest stub. This has a limited jump distance, which is tested by
678 // aarch64_valid_for_adrp_p.
679 ST_ADRP_BRANCH = 1,
680
681 // Using ldr-absolute-address/br-register, 4 insns with 1 mem access,
682 // unlimited in jump distance.
683 ST_LONG_BRANCH_ABS = 2,
684
685 // Using ldr/calculate-pcrel/jump, 8 insns (including alignment) with 1
686 // mem access, slowest one. Only used in position independent executables.
687 ST_LONG_BRANCH_PCREL = 3,
688
689 // Stub for erratum 843419 handling.
690 ST_E_843419 = 4,
691
692 // Stub for erratum 835769 handling.
693 ST_E_835769 = 5,
694
695 // Number of total stub types.
696 ST_NUMBER = 6
697 };
698
699
700 // Struct that wraps insns for a particular stub. All stub templates are
701 // created/initialized as constants by Stub_template_repertoire.
702
703 template<bool big_endian>
704 struct Stub_template
705 {
706 const typename AArch64_insn_utilities<big_endian>::Insntype* insns;
707 const int insn_num;
708 };
709
710
711 // Simple singleton class that creates/initializes/stores all types of stub
712 // templates.
713
714 template<bool big_endian>
715 class Stub_template_repertoire
716 {
717 public:
718 typedef typename AArch64_insn_utilities<big_endian>::Insntype Insntype;
719
720 // Single static method to get stub template for a given stub type.
721 static const Stub_template<big_endian>*
722 get_stub_template(int type)
723 {
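// The function-local static is constructed once, on first use, and shared
// by all callers.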
724 static Stub_template_repertoire<big_endian> singleton;
725 return singleton.stub_templates_[type];
726 }
727
728 private:
729 // Constructor - creates/initializes all stub templates.
730 Stub_template_repertoire();
731 ~Stub_template_repertoire()
732 { }
733
734 // Disallowing copy ctor and copy assignment operator.
735 Stub_template_repertoire(Stub_template_repertoire&);
736 Stub_template_repertoire& operator=(Stub_template_repertoire&);
737
738 // Data that stores all insn templates.
739 const Stub_template<big_endian>* stub_templates_[ST_NUMBER];
740 }; // End of "class Stub_template_repertoire".
741
742
743 // Constructor - creates/initializes all stub templates.
744
745 template<bool big_endian>
746 Stub_template_repertoire<big_endian>::Stub_template_repertoire()
747 {
748 // Insn array definitions.
749 const static Insntype ST_NONE_INSNS[] = {};
750
751 const static Insntype ST_ADRP_BRANCH_INSNS[] =
752 {
753 0x90000010, /* adrp ip0, X */
754 /* ADR_PREL_PG_HI21(X) */
755 0x91000210, /* add ip0, ip0, :lo12:X */
756 /* ADD_ABS_LO12_NC(X) */
757 0xd61f0200, /* br ip0 */
758 0x00000000, /* alignment padding */
759 };
760
761 const static Insntype ST_LONG_BRANCH_ABS_INSNS[] =
762 {
763 0x58000050, /* ldr ip0, 0x8 */
764 0xd61f0200, /* br ip0 */
765 0x00000000, /* address field */
766 0x00000000, /* address field */
767 };
768
769 const static Insntype ST_LONG_BRANCH_PCREL_INSNS[] =
770 {
771 0x58000090, /* ldr ip0, 0x10 */
772 0x10000011, /* adr ip1, #0 */
773 0x8b110210, /* add ip0, ip0, ip1 */
774 0xd61f0200, /* br ip0 */
775 0x00000000, /* address field */
776 0x00000000, /* address field */
777 0x00000000, /* alignment padding */
778 0x00000000, /* alignment padding */
779 };
780
781 const static Insntype ST_E_843419_INSNS[] =
782 {
783 0x00000000, /* Placeholder for erratum insn. */
784 0x14000000, /* b <label> */
785 };
786
787 // ST_E_835769 has the same stub template as ST_E_843419.
788 const static Insntype* ST_E_835769_INSNS = ST_E_843419_INSNS;
789
790 #define install_insn_template(T) \
791 const static Stub_template<big_endian> template_##T = { \
792 T##_INSNS, sizeof(T##_INSNS) / sizeof(T##_INSNS[0]) }; \
793 this->stub_templates_[T] = &template_##T
794
795 install_insn_template(ST_NONE);
796 install_insn_template(ST_ADRP_BRANCH);
797 install_insn_template(ST_LONG_BRANCH_ABS);
798 install_insn_template(ST_LONG_BRANCH_PCREL);
799 install_insn_template(ST_E_843419);
800 install_insn_template(ST_E_835769);
801
802 #undef install_insn_template
803 }
804
805
806 // Base class for stubs.
807
808 template<int size, bool big_endian>
809 class Stub_base
810 {
811 public:
812 typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
813 typedef typename AArch64_insn_utilities<big_endian>::Insntype Insntype;
814
815 static const AArch64_address invalid_address =
816 static_cast<AArch64_address>(-1);
817
818 static const section_offset_type invalid_offset =
819 static_cast<section_offset_type>(-1);
820
821 Stub_base(int type)
822 : destination_address_(invalid_address),
823 offset_(invalid_offset),
824 type_(type)
825 {}
826
827 ~Stub_base()
828 {}
829
830 // Get stub type.
831 int
832 type() const
833 { return this->type_; }
834
835 // Get stub template that provides stub insn information.
836 const Stub_template<big_endian>*
837 stub_template() const
838 {
839 return Stub_template_repertoire<big_endian>::
840 get_stub_template(this->type());
841 }
842
843 // Get destination address.
844 AArch64_address
845 destination_address() const
846 {
847 gold_assert(this->destination_address_ != this->invalid_address);
848 return this->destination_address_;
849 }
850
851 // Set destination address.
852 void
853 set_destination_address(AArch64_address address)
854 {
855 gold_assert(address != this->invalid_address);
856 this->destination_address_ = address;
857 }
858
859 // Reset the destination address.
860 void
861 reset_destination_address()
862 { this->destination_address_ = this->invalid_address; }
863
864 // Get offset of code stub. For Reloc_stub, it is the offset from the
865 // beginning of its containing stub table; for Erratum_stub, it is the offset
866 // from the end of reloc_stubs.
867 section_offset_type
868 offset() const
869 {
870 gold_assert(this->offset_ != this->invalid_offset);
871 return this->offset_;
872 }
873
874 // Set stub offset.
875 void
876 set_offset(section_offset_type offset)
877 { this->offset_ = offset; }
878
879 // Return the stub insn.
880 const Insntype*
881 insns() const
882 { return this->stub_template()->insns; }
883
884 // Return num of stub insns.
885 unsigned int
886 insn_num() const
887 { return this->stub_template()->insn_num; }
888
889 // Get size of the stub.
890 int
891 stub_size() const
892 {
893 return this->insn_num() *
894 AArch64_insn_utilities<big_endian>::BYTES_PER_INSN;
895 }
896
897 // Write stub to output file.
898 void
899 write(unsigned char* view, section_size_type view_size)
900 { this->do_write(view, view_size); }
901
902 protected:
903 // Abstract method to be implemented by sub-classes.
904 virtual void
905 do_write(unsigned char*, section_size_type) = 0;
906
907 private:
908 // The last insn of a stub is a jump to destination insn. This field records
909 // the destination address.
910 AArch64_address destination_address_;
911 // The stub offset. Note this has different interpretations between a
912 // Reloc_stub and an Erratum_stub. For Reloc_stub this is the offset from the
913 // beginning of the containing stub_table, whereas for Erratum_stub, this is
914 // the offset from the end of reloc_stubs.
915 section_offset_type offset_;
916 // Stub type.
917 const int type_;
918 }; // End of "Stub_base".
919
920
921 // Erratum stub class. An erratum stub differs from a reloc stub in that for
922 // each erratum occurrence, we generate an erratum stub. We never share erratum
923 // stubs, whereas for reloc stubs, different branch insns share a single reloc
924 // stub as long as the branch targets are the same. (More to the point, reloc
925 // stubs can be shared because they're used to reach a specific target, whereas
926 // erratum stubs branch back to the original control flow.)
927
928 template<int size, bool big_endian>
929 class Erratum_stub : public Stub_base<size, big_endian>
930 {
931 public:
932 typedef AArch64_relobj<size, big_endian> The_aarch64_relobj;
933 typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
934 typedef AArch64_insn_utilities<big_endian> Insn_utilities;
935 typedef typename AArch64_insn_utilities<big_endian>::Insntype Insntype;
936
937 static const int STUB_ADDR_ALIGN;
938
939 static const Insntype invalid_insn = static_cast<Insntype>(-1);
940
941 Erratum_stub(The_aarch64_relobj* relobj, int type,
942 unsigned shndx, unsigned int sh_offset)
943 : Stub_base<size, big_endian>(type), relobj_(relobj),
944 shndx_(shndx), sh_offset_(sh_offset),
945 erratum_insn_(invalid_insn),
946 erratum_address_(this->invalid_address)
947 {}
948
949 ~Erratum_stub() {}
950
951 // Return the object that contains the erratum.
952 The_aarch64_relobj*
953 relobj()
954 { return this->relobj_; }
955
956 // Get section index of the erratum.
957 unsigned int
958 shndx() const
959 { return this->shndx_; }
960
961 // Get section offset of the erratum.
962 unsigned int
963 sh_offset() const
964 { return this->sh_offset_; }
965
966 // Get the erratum insn. This is the insn located at erratum_insn_address.
967 Insntype
968 erratum_insn() const
969 {
970 gold_assert(this->erratum_insn_ != this->invalid_insn);
971 return this->erratum_insn_;
972 }
973
974 // Set the insn that the erratum happens to.
975 void
976 set_erratum_insn(Insntype insn)
977 { this->erratum_insn_ = insn; }
978
979 // For 843419, the erratum insn is "ld/st Xt, [Xn, #uimm]", which may be a
980 // relocation spot. In that case the erratum_insn_ recorded at scanning phase
981 // is no longer the one we want to write out to the stub, so update
982 // erratum_insn_ with the relocated version. Also note that in this case Xn
983 // must not be "PC", so it is safe to move the erratum insn from its original
984 // place to the stub. For 835769, the erratum insn is a multiply-accumulate
985 // insn, which cannot be a relocation spot (an assertion enforces that).
986 void
987 update_erratum_insn(Insntype insn)
988 {
989 gold_assert(this->erratum_insn_ != this->invalid_insn);
990 switch (this->type())
991 {
992 case ST_E_843419:
993 gold_assert(Insn_utilities::aarch64_ldst_uimm(insn));
994 gold_assert(Insn_utilities::aarch64_ldst_uimm(this->erratum_insn()));
995 gold_assert(Insn_utilities::aarch64_rd(insn) ==
996 Insn_utilities::aarch64_rd(this->erratum_insn()));
997 gold_assert(Insn_utilities::aarch64_rn(insn) ==
998 Insn_utilities::aarch64_rn(this->erratum_insn()));
999 // Update plain ld/st insn with relocated insn.
1000 this->erratum_insn_ = insn;
1001 break;
1002 case ST_E_835769:
1003 gold_assert(insn == this->erratum_insn());
1004 break;
1005 default:
1006 gold_unreachable();
1007 }
1008 }
1009
1010
1011 // Return the address where the erratum fix must be applied.
1012 AArch64_address
1013 erratum_address() const
1014 {
1015 gold_assert(this->erratum_address_ != this->invalid_address);
1016 return this->erratum_address_;
1017 }
1018
1019 // Set the address where the erratum fix must be applied.
1020 void
1021 set_erratum_address(AArch64_address addr)
1022 { this->erratum_address_ = addr; }
1023
1024 // Comparator used to group Erratum_stubs in a set by (obj, shndx,
1025 // sh_offset). We do not include 'type' in the calculation, because there is
1026 // at most one stub type at (obj, shndx, sh_offset).
1027 bool
1028 operator<(const Erratum_stub<size, big_endian>& k) const
1029 {
1030 if (this == &k)
1031 return false;
1032 // We group stubs by relobj.
1033 if (this->relobj_ != k.relobj_)
1034 return this->relobj_ < k.relobj_;
1035 // Then by section index.
1036 if (this->shndx_ != k.shndx_)
1037 return this->shndx_ < k.shndx_;
1038 // Lastly by section offset.
1039 return this->sh_offset_ < k.sh_offset_;
1040 }
1041
1042 protected:
1043 virtual void
1044 do_write(unsigned char*, section_size_type);
1045
1046 private:
1047 // The object that needs to be fixed.
1048 The_aarch64_relobj* relobj_;
1049 // The shndx in the object that needs to be fixed.
1050 const unsigned int shndx_;
1051 // The section offset in the object that needs to be fixed.
1052 const unsigned int sh_offset_;
1053 // The insn to be fixed.
1054 Insntype erratum_insn_;
1055 // The address of the above insn.
1056 AArch64_address erratum_address_;
1057 }; // End of "Erratum_stub".
1058
1059
1060 // Erratum subclass to wrap additional info needed by 843419. In fixing this
1061 // erratum, we may choose to replace 'adrp' with 'adr', in which case we need
1062 // adrp's code position (two or three insns before the erratum insn itself).
1063
1064 template<int size, bool big_endian>
1065 class E843419_stub : public Erratum_stub<size, big_endian>
1066 {
1067 public:
1068 typedef typename AArch64_insn_utilities<big_endian>::Insntype Insntype;
1069
1070 E843419_stub(AArch64_relobj<size, big_endian>* relobj,
1071 unsigned int shndx, unsigned int sh_offset,
1072 unsigned int adrp_sh_offset)
1073 : Erratum_stub<size, big_endian>(relobj, ST_E_843419, shndx, sh_offset),
1074 adrp_sh_offset_(adrp_sh_offset)
1075 {}
1076
1077 unsigned int
1078 adrp_sh_offset() const
1079 { return this->adrp_sh_offset_; }
1080
1081 private:
1082 // Section offset of "adrp". (We do not need an "adrp_shndx_" field, because
1083 // we can obtain it from its parent.)
1084 const unsigned int adrp_sh_offset_;
1085 };
1086
1087
1088 template<int size, bool big_endian>
1089 const int Erratum_stub<size, big_endian>::STUB_ADDR_ALIGN = 4;
1090
1091 // Comparator used in set definition.
1092 template<int size, bool big_endian>
1093 struct Erratum_stub_less
1094 {
1095 bool
1096 operator()(const Erratum_stub<size, big_endian>* s1,
1097 const Erratum_stub<size, big_endian>* s2) const
1098 { return *s1 < *s2; }
1099 };
1100
1101 // Erratum_stub implementation for writing stub to output file.
1102
1103 template<int size, bool big_endian>
1104 void
1105 Erratum_stub<size, big_endian>::do_write(unsigned char* view, section_size_type)
1106 {
1107 typedef typename elfcpp::Swap<32, big_endian>::Valtype Insntype;
1108 const Insntype* insns = this->insns();
1109 uint32_t num_insns = this->insn_num();
1110 Insntype* ip = reinterpret_cast<Insntype*>(view);
1111 // For current implemented erratum 843419 and 835769, the first insn in the
1112 // stub is always a copy of the problematic insn (in 843419, the mem access
1113 // insn, in 835769, the mac insn), followed by a jump-back.
1114 elfcpp::Swap<32, big_endian>::writeval(ip, this->erratum_insn());
1115 for (uint32_t i = 1; i < num_insns; ++i)
1116 elfcpp::Swap<32, big_endian>::writeval(ip + i, insns[i]);
1117 }
1118
1119
1120 // Reloc stub class.
1121
1122 template<int size, bool big_endian>
1123 class Reloc_stub : public Stub_base<size, big_endian>
1124 {
1125 public:
1126 typedef Reloc_stub<size, big_endian> This;
1127 typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
1128
1129 // Branch range. This is used to calculate the section group size, as well as
1130 // determine whether a stub is needed.
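// B and BL encode a signed 26-bit word (4-byte-scaled) offset, giving a
// reach of roughly +/-128MB.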
1131 static const int MAX_BRANCH_OFFSET = ((1 << 25) - 1) << 2;
1132 static const int MIN_BRANCH_OFFSET = -((1 << 25) << 2);
1133
1134 // Constant used to determine if an offset fits in the adrp instruction
1135 // encoding.
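// The adrp immediate is a signed 21-bit page (4KB-scaled) offset, i.e. a
// reach of roughly +/-4GB.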
1136 static const int MAX_ADRP_IMM = (1 << 20) - 1;
1137 static const int MIN_ADRP_IMM = -(1 << 20);
1138
1139 static const int BYTES_PER_INSN = 4;
1140 static const int STUB_ADDR_ALIGN;
1141
1142 // Determine whether the offset fits in the jump/branch instruction.
1143 static bool
1144 aarch64_valid_branch_offset_p(int64_t offset)
1145 { return offset >= MIN_BRANCH_OFFSET && offset <= MAX_BRANCH_OFFSET; }
1146
1147 // Determine whether the offset fits in the adrp immediate field.
1148 static bool
1149 aarch64_valid_for_adrp_p(AArch64_address location, AArch64_address dest)
1150 {
1151 typedef AArch64_relocate_functions<size, big_endian> Reloc;
1152 int64_t adrp_imm = (Reloc::Page(dest) - Reloc::Page(location)) >> 12;
1153 return adrp_imm >= MIN_ADRP_IMM && adrp_imm <= MAX_ADRP_IMM;
1154 }
1155
1156 // Determine the stub type for a certain relocation, or ST_NONE if no stub is
1157 // needed.
1158 static int
1159 stub_type_for_reloc(unsigned int r_type, AArch64_address address,
1160 AArch64_address target);
1161
1162 Reloc_stub(int type)
1163 : Stub_base<size, big_endian>(type)
1164 { }
1165
1166 ~Reloc_stub()
1167 { }
1168
1169 // The key class used to index the stub instance in the stub table's stub map.
1170 class Key
1171 {
1172 public:
1173 Key(int type, const Symbol* symbol, const Relobj* relobj,
1174 unsigned int r_sym, int32_t addend)
1175 : type_(type), addend_(addend)
1176 {
1177 if (symbol != NULL)
1178 {
1179 this->r_sym_ = Reloc_stub::invalid_index;
1180 this->u_.symbol = symbol;
1181 }
1182 else
1183 {
1184 gold_assert(relobj != NULL && r_sym != invalid_index);
1185 this->r_sym_ = r_sym;
1186 this->u_.relobj = relobj;
1187 }
1188 }
1189
1190 ~Key()
1191 { }
1192
1193 // Return stub type.
1194 int
1195 type() const
1196 { return this->type_; }
1197
1198 // Return the local symbol index or invalid_index.
1199 unsigned int
1200 r_sym() const
1201 { return this->r_sym_; }
1202
1203 // Return the symbol if there is one.
1204 const Symbol*
1205 symbol() const
1206 { return this->r_sym_ == invalid_index ? this->u_.symbol : NULL; }
1207
1208 // Return the relobj if there is one.
1209 const Relobj*
1210 relobj() const
1211 { return this->r_sym_ != invalid_index ? this->u_.relobj : NULL; }
1212
1213 // Whether this equals to another key k.
1214 bool
1215 eq(const Key& k) const
1216 {
1217 return ((this->type_ == k.type_)
1218 && (this->r_sym_ == k.r_sym_)
1219 && ((this->r_sym_ != Reloc_stub::invalid_index)
1220 ? (this->u_.relobj == k.u_.relobj)
1221 : (this->u_.symbol == k.u_.symbol))
1222 && (this->addend_ == k.addend_));
1223 }
1224
1225 // Return a hash value.
1226 size_t
1227 hash_value() const
1228 {
1229 size_t name_hash_value = gold::string_hash<char>(
1230 (this->r_sym_ != Reloc_stub::invalid_index)
1231 ? this->u_.relobj->name().c_str()
1232 : this->u_.symbol->name());
1233 // We only have 4 stub types.
1234 size_t stub_type_hash_value = 0x03 & this->type_;
1235 return (name_hash_value
1236 ^ stub_type_hash_value
1237 ^ ((this->r_sym_ & 0x3fff) << 2)
1238 ^ ((this->addend_ & 0xffff) << 16));
1239 }
1240
1241 // Functors for STL associative containers.
1242 struct hash
1243 {
1244 size_t
1245 operator()(const Key& k) const
1246 { return k.hash_value(); }
1247 };
1248
1249 struct equal_to
1250 {
1251 bool
1252 operator()(const Key& k1, const Key& k2) const
1253 { return k1.eq(k2); }
1254 };
1255
1256 private:
1257 // Stub type.
1258 const int type_;
1259 // If this is a local symbol, this is the index in the defining object.
1260 // Otherwise, it is invalid_index for a global symbol.
1261 unsigned int r_sym_;
1262 // If r_sym_ is an invalid index, this points to a global symbol.
1263 // Otherwise, it points to a relobj. We use the unsized and target-
1264 // independent Symbol and Relobj classes instead of Sized_symbol<size> and
1265 // AArch64_relobj, in order to avoid making the stub class a template
1266 // as most of the stub machinery is endianness-neutral. However, it
1267 // may require a bit of casting done by users of this class.
1268 union
1269 {
1270 const Symbol* symbol;
1271 const Relobj* relobj;
1272 } u_;
1273 // Addend associated with a reloc.
1274 int32_t addend_;
1275 }; // End of inner class Reloc_stub::Key
1276
1277 protected:
1278 // This may be overridden in the child class.
1279 virtual void
1280 do_write(unsigned char*, section_size_type);
1281
1282 private:
1283 static const unsigned int invalid_index = static_cast<unsigned int>(-1);
1284 }; // End of Reloc_stub
1285
1286 template<int size, bool big_endian>
1287 const int Reloc_stub<size, big_endian>::STUB_ADDR_ALIGN = 4;
1288
1289 // Write data to output file.
1290
1291 template<int size, bool big_endian>
1292 void
1293 Reloc_stub<size, big_endian>::
1294 do_write(unsigned char* view, section_size_type)
1295 {
1296 typedef typename elfcpp::Swap<32, big_endian>::Valtype Insntype;
1297 const uint32_t* insns = this->insns();
1298 uint32_t num_insns = this->insn_num();
1299 Insntype* ip = reinterpret_cast<Insntype*>(view);
1300 for (uint32_t i = 0; i < num_insns; ++i)
1301 elfcpp::Swap<32, big_endian>::writeval(ip + i, insns[i]);
1302 }
1303
1304
1305 // Determine the stub type for a certain relocation, or ST_NONE if no stub is
1306 // needed.
1307
1308 template<int size, bool big_endian>
1309 inline int
1310 Reloc_stub<size, big_endian>::stub_type_for_reloc(
1311 unsigned int r_type, AArch64_address location, AArch64_address dest)
1312 {
1313 int64_t branch_offset = 0;
1314 switch(r_type)
1315 {
1316 case elfcpp::R_AARCH64_CALL26:
1317 case elfcpp::R_AARCH64_JUMP26:
1318 branch_offset = dest - location;
1319 break;
1320 default:
1321 gold_unreachable();
1322 }
1323
1324 if (aarch64_valid_branch_offset_p(branch_offset))
1325 return ST_NONE;
1326
1327 if (aarch64_valid_for_adrp_p(location, dest))
1328 return ST_ADRP_BRANCH;
1329
1330 if (parameters->options().output_is_position_independent()
1331 && parameters->options().output_is_executable())
1332 return ST_LONG_BRANCH_PCREL;
1333
1334 return ST_LONG_BRANCH_ABS;
1335 }
1336
1337 // A class to hold stubs for the AArch64 target.
1338
1339 template<int size, bool big_endian>
1340 class Stub_table : public Output_data
1341 {
1342 public:
1343 typedef Target_aarch64<size, big_endian> The_target_aarch64;
1344 typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
1345 typedef AArch64_relobj<size, big_endian> The_aarch64_relobj;
1346 typedef AArch64_input_section<size, big_endian> The_aarch64_input_section;
1347 typedef Reloc_stub<size, big_endian> The_reloc_stub;
1348 typedef typename The_reloc_stub::Key The_reloc_stub_key;
1349 typedef Erratum_stub<size, big_endian> The_erratum_stub;
1350 typedef Erratum_stub_less<size, big_endian> The_erratum_stub_less;
1351 typedef typename The_reloc_stub_key::hash The_reloc_stub_key_hash;
1352 typedef typename The_reloc_stub_key::equal_to The_reloc_stub_key_equal_to;
1353 typedef Stub_table<size, big_endian> The_stub_table;
1354 typedef Unordered_map<The_reloc_stub_key, The_reloc_stub*,
1355 The_reloc_stub_key_hash, The_reloc_stub_key_equal_to>
1356 Reloc_stub_map;
1357 typedef typename Reloc_stub_map::const_iterator Reloc_stub_map_const_iter;
1358 typedef Relocate_info<size, big_endian> The_relocate_info;
1359
1360 typedef std::set<The_erratum_stub*, The_erratum_stub_less> Erratum_stub_set;
1361 typedef typename Erratum_stub_set::iterator Erratum_stub_set_iter;
1362
1363 Stub_table(The_aarch64_input_section* owner)
1364 : Output_data(), owner_(owner), reloc_stubs_size_(0),
1365 erratum_stubs_size_(0), prev_data_size_(0)
1366 { }
1367
1368 ~Stub_table()
1369 { }
1370
1371 The_aarch64_input_section*
1372 owner() const
1373 { return owner_; }
1374
1375 // Whether this stub table is empty.
1376 bool
1377 empty() const
1378 { return reloc_stubs_.empty() && erratum_stubs_.empty(); }
1379
1380 // Return the current data size.
1381 off_t
1382 current_data_size() const
1383 { return this->current_data_size_for_child(); }
1384
1385 // Add a STUB using KEY. The caller is responsible for avoiding addition
1386 // if a STUB with the same key has already been added.
1387 void
1388 add_reloc_stub(The_reloc_stub* stub, const The_reloc_stub_key& key);
1389
1390 // Add an erratum stub into the erratum stub set. The set is ordered by
1391 // (relobj, shndx, sh_offset).
1392 void
1393 add_erratum_stub(The_erratum_stub* stub);
1394
1395 // Find the erratum stub, if any, for a given (obj, shndx, sh_offset).
1396 The_erratum_stub*
1397 find_erratum_stub(The_aarch64_relobj* a64relobj,
1398 unsigned int shndx, unsigned int sh_offset);
1399
1400 // Find all the errata for a given input section. The return value is a pair
1401 // of iterators [begin, end).
1402 std::pair<Erratum_stub_set_iter, Erratum_stub_set_iter>
1403 find_erratum_stubs_for_input_section(The_aarch64_relobj* a64relobj,
1404 unsigned int shndx);
1405
1406 // Compute the erratum stub address.
1407 AArch64_address
1408 erratum_stub_address(The_erratum_stub* stub) const
1409 {
1410 AArch64_address r = align_address(this->address() + this->reloc_stubs_size_,
1411 The_erratum_stub::STUB_ADDR_ALIGN);
1412 r += stub->offset();
1413 return r;
1414 }
1415
1416 // Finalize stubs. No-op here, just for completeness.
1417 void
1418 finalize_stubs()
1419 { }
1420
1421 // Look up a relocation stub using KEY. Return NULL if there is none.
1422 The_reloc_stub*
1423 find_reloc_stub(The_reloc_stub_key& key)
1424 {
1425 Reloc_stub_map_const_iter p = this->reloc_stubs_.find(key);
1426 return (p != this->reloc_stubs_.end()) ? p->second : NULL;
1427 }
1428
1429 // Relocate stubs in this stub table.
1430 void
1431 relocate_stubs(const The_relocate_info*,
1432 The_target_aarch64*,
1433 Output_section*,
1434 unsigned char*,
1435 AArch64_address,
1436 section_size_type);
1437
1438 // Update data size at the end of a relaxation pass. Return true if data size
1439 // is different from that of the previous relaxation pass.
1440 bool
1441 update_data_size_changed_p()
1442 {
1443 // No addralign changed here.
1444 off_t s = align_address(this->reloc_stubs_size_,
1445 The_erratum_stub::STUB_ADDR_ALIGN)
1446 + this->erratum_stubs_size_;
1447 bool changed = (s != this->prev_data_size_);
1448 this->prev_data_size_ = s;
1449 return changed;
1450 }
1451
1452 protected:
1453 // Write out section contents.
1454 void
1455 do_write(Output_file*);
1456
1457 // Return the required alignment.
1458 uint64_t
1459 do_addralign() const
1460 {
1461 return std::max(The_reloc_stub::STUB_ADDR_ALIGN,
1462 The_erratum_stub::STUB_ADDR_ALIGN);
1463 }
1464
1465 // Reset address and file offset.
1466 void
1467 do_reset_address_and_file_offset()
1468 { this->set_current_data_size_for_child(this->prev_data_size_); }
1469
1470 // Set final data size.
1471 void
1472 set_final_data_size()
1473 { this->set_data_size(this->current_data_size()); }
1474
1475 private:
1476 // Relocate one stub.
1477 void
1478 relocate_stub(The_reloc_stub*,
1479 const The_relocate_info*,
1480 The_target_aarch64*,
1481 Output_section*,
1482 unsigned char*,
1483 AArch64_address,
1484 section_size_type);
1485
1486 private:
1487 // Owner of this stub table.
1488 The_aarch64_input_section* owner_;
1489 // The relocation stubs.
1490 Reloc_stub_map reloc_stubs_;
1491 // The erratum stubs.
1492 Erratum_stub_set erratum_stubs_;
1493 // Size of reloc stubs.
1494 off_t reloc_stubs_size_;
1495 // Size of erratum stubs.
1496 off_t erratum_stubs_size_;
1497 // Data size of this stub table in the previous relaxation pass.
1498 off_t prev_data_size_;
1499 }; // End of Stub_table
1500
1501
1502 // Add an erratum stub into the erratum stub set. The set is ordered by
1503 // (relobj, shndx, sh_offset).
1504
1505 template<int size, bool big_endian>
1506 void
1507 Stub_table<size, big_endian>::add_erratum_stub(The_erratum_stub* stub)
1508 {
1509 std::pair<Erratum_stub_set_iter, bool> ret =
1510 this->erratum_stubs_.insert(stub);
1511 gold_assert(ret.second);
1512 this->erratum_stubs_size_ = align_address(
1513 this->erratum_stubs_size_, The_erratum_stub::STUB_ADDR_ALIGN);
1514 stub->set_offset(this->erratum_stubs_size_);
1515 this->erratum_stubs_size_ += stub->stub_size();
1516 }
1517
1518
1519 // Find the erratum stub, if any, for a given (obj, shndx, sh_offset).
1520
1521 template<int size, bool big_endian>
1522 Erratum_stub<size, big_endian>*
1523 Stub_table<size, big_endian>::find_erratum_stub(
1524 The_aarch64_relobj* a64relobj, unsigned int shndx, unsigned int sh_offset)
1525 {
1526 // A dummy object used as key to search in the set.
1527 The_erratum_stub key(a64relobj, ST_NONE,
1528 shndx, sh_offset);
1529 Erratum_stub_set_iter i = this->erratum_stubs_.find(&key);
1530 if (i != this->erratum_stubs_.end())
1531 {
1532 The_erratum_stub* stub(*i);
1533 gold_assert(stub->erratum_insn() != 0);
1534 return stub;
1535 }
1536 return NULL;
1537 }
1538
1539
1540 // Find all the errata for a given input section. The return value is a pair of
1541 // iterators [begin, end).
1542
1543 template<int size, bool big_endian>
1544 std::pair<typename Stub_table<size, big_endian>::Erratum_stub_set_iter,
1545 typename Stub_table<size, big_endian>::Erratum_stub_set_iter>
1546 Stub_table<size, big_endian>::find_erratum_stubs_for_input_section(
1547 The_aarch64_relobj* a64relobj, unsigned int shndx)
1548 {
1549 typedef std::pair<Erratum_stub_set_iter, Erratum_stub_set_iter> Result_pair;
1550 Erratum_stub_set_iter start, end;
1551 The_erratum_stub low_key(a64relobj, ST_NONE, shndx, 0);
1552 start = this->erratum_stubs_.lower_bound(&low_key);
1553 if (start == this->erratum_stubs_.end())
1554 return Result_pair(this->erratum_stubs_.end(),
1555 this->erratum_stubs_.end());
1556 end = start;
1557 while (end != this->erratum_stubs_.end() &&
1558 (*end)->relobj() == a64relobj && (*end)->shndx() == shndx)
1559 ++end;
1560 return Result_pair(start, end);
1561 }
1562
1563
1564 // Add a STUB using KEY. The caller is responsible for avoiding addition
1565 // if a STUB with the same key has already been added.
1566
1567 template<int size, bool big_endian>
1568 void
1569 Stub_table<size, big_endian>::add_reloc_stub(
1570 The_reloc_stub* stub, const The_reloc_stub_key& key)
1571 {
1572 gold_assert(stub->type() == key.type());
1573 this->reloc_stubs_[key] = stub;
1574
1575 // Assign stub offset early. We can do this because we never remove
1576 // reloc stubs and they are in the beginning of the stub table.
1577 this->reloc_stubs_size_ = align_address(this->reloc_stubs_size_,
1578 The_reloc_stub::STUB_ADDR_ALIGN);
1579 stub->set_offset(this->reloc_stubs_size_);
1580 this->reloc_stubs_size_ += stub->stub_size();
1581 }
1582
1583
1584 // Relocate all stubs in this stub table.
1585
1586 template<int size, bool big_endian>
1587 void
1588 Stub_table<size, big_endian>::
1589 relocate_stubs(const The_relocate_info* relinfo,
1590 The_target_aarch64* target_aarch64,
1591 Output_section* output_section,
1592 unsigned char* view,
1593 AArch64_address address,
1594 section_size_type view_size)
1595 {
1596 // "view_size" is the total size of the stub_table.
1597 gold_assert(address == this->address() &&
1598 view_size == static_cast<section_size_type>(this->data_size()));
1599 for(Reloc_stub_map_const_iter p = this->reloc_stubs_.begin();
1600 p != this->reloc_stubs_.end(); ++p)
1601 relocate_stub(p->second, relinfo, target_aarch64, output_section,
1602 view, address, view_size);
1603
1604 // Just for convenience.
1605 const int BPI = AArch64_insn_utilities<big_endian>::BYTES_PER_INSN;
1606
1607 // Now 'relocate' erratum stubs.
1608 for(Erratum_stub_set_iter i = this->erratum_stubs_.begin();
1609 i != this->erratum_stubs_.end(); ++i)
1610 {
1611 AArch64_address stub_address = this->erratum_stub_address(*i);
1612 // The address of "b" in the stub that is to be "relocated".
1613 AArch64_address stub_b_insn_address;
1614 // Branch offset that is to be filled in "b" insn.
1615 int b_offset = 0;
1616 switch ((*i)->type())
1617 {
1618 case ST_E_843419:
1619 case ST_E_835769:
1620 // The 1st insn of the erratum could be a relocation spot,
1621 // in which case we need to fix it with
1622 // "(*i)->erratum_insn()".
1623 elfcpp::Swap<32, big_endian>::writeval(
1624 view + (stub_address - this->address()),
1625 (*i)->erratum_insn());
1626 // For the erratum, the 2nd insn is a b-insn to be patched
1627 // (relocated).
1628 stub_b_insn_address = stub_address + 1 * BPI;
1629 b_offset = (*i)->destination_address() - stub_b_insn_address;
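// Only the low 28 bits of the byte offset (a signed imm26 scaled by 4) are
// encoded in the B instruction.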
1630 AArch64_relocate_functions<size, big_endian>::construct_b(
1631 view + (stub_b_insn_address - this->address()),
1632 ((unsigned int)(b_offset)) & 0xfffffff);
1633 break;
1634 default:
1635 gold_unreachable();
1636 break;
1637 }
1638 }
1639 }
1640
1641
1642 // Relocate one stub. This is a helper for Stub_table::relocate_stubs().
1643
1644 template<int size, bool big_endian>
1645 void
1646 Stub_table<size, big_endian>::
1647 relocate_stub(The_reloc_stub* stub,
1648 const The_relocate_info* relinfo,
1649 The_target_aarch64* target_aarch64,
1650 Output_section* output_section,
1651 unsigned char* view,
1652 AArch64_address address,
1653 section_size_type view_size)
1654 {
1655 // "offset" is the offset from the beginning of the stub_table.
1656 section_size_type offset = stub->offset();
1657 section_size_type stub_size = stub->stub_size();
1658 // "view_size" is the total size of the stub_table.
1659 gold_assert(offset + stub_size <= view_size);
1660
1661 target_aarch64->relocate_stub(stub, relinfo, output_section,
1662 view + offset, address + offset, view_size);
1663 }
1664
1665
1666 // Write out the stubs to file.
1667
1668 template<int size, bool big_endian>
1669 void
1670 Stub_table<size, big_endian>::do_write(Output_file* of)
1671 {
1672 off_t offset = this->offset();
1673 const section_size_type oview_size =
1674 convert_to_section_size_type(this->data_size());
1675 unsigned char* const oview = of->get_output_view(offset, oview_size);
1676
1677 // Write relocation stubs.
1678 for (typename Reloc_stub_map::const_iterator p = this->reloc_stubs_.begin();
1679 p != this->reloc_stubs_.end(); ++p)
1680 {
1681 The_reloc_stub* stub = p->second;
1682 AArch64_address address = this->address() + stub->offset();
1683 gold_assert(address ==
1684 align_address(address, The_reloc_stub::STUB_ADDR_ALIGN));
1685 stub->write(oview + stub->offset(), stub->stub_size());
1686 }
1687
1688 // Write erratum stubs.
1689 unsigned int erratum_stub_start_offset =
1690 align_address(this->reloc_stubs_size_, The_erratum_stub::STUB_ADDR_ALIGN);
1691 for (typename Erratum_stub_set::iterator p = this->erratum_stubs_.begin();
1692 p != this->erratum_stubs_.end(); ++p)
1693 {
1694 The_erratum_stub* stub(*p);
1695 stub->write(oview + erratum_stub_start_offset + stub->offset(),
1696 stub->stub_size());
1697 }
1698
1699 of->write_output_view(this->offset(), oview_size, oview);
1700 }
1701
1702
1703 // AArch64_relobj class.
1704
1705 template<int size, bool big_endian>
1706 class AArch64_relobj : public Sized_relobj_file<size, big_endian>
1707 {
1708 public:
1709 typedef AArch64_relobj<size, big_endian> This;
1710 typedef Target_aarch64<size, big_endian> The_target_aarch64;
1711 typedef AArch64_input_section<size, big_endian> The_aarch64_input_section;
1712 typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
1713 typedef Stub_table<size, big_endian> The_stub_table;
1714 typedef Erratum_stub<size, big_endian> The_erratum_stub;
1715 typedef typename The_stub_table::Erratum_stub_set_iter Erratum_stub_set_iter;
1716 typedef std::vector<The_stub_table*> Stub_table_list;
1717 static const AArch64_address invalid_address =
1718 static_cast<AArch64_address>(-1);
1719
1720 AArch64_relobj(const std::string& name, Input_file* input_file, off_t offset,
1721 const typename elfcpp::Ehdr<size, big_endian>& ehdr)
1722 : Sized_relobj_file<size, big_endian>(name, input_file, offset, ehdr),
1723 stub_tables_()
1724 { }
1725
1726 ~AArch64_relobj()
1727 { }
1728
1729 // Return the stub table of the SHNDX-th section if there is one.
1730 The_stub_table*
1731 stub_table(unsigned int shndx) const
1732 {
1733 gold_assert(shndx < this->stub_tables_.size());
1734 return this->stub_tables_[shndx];
1735 }
1736
1737 // Set STUB_TABLE to be the stub_table of the SHNDX-th section.
1738 void
1739 set_stub_table(unsigned int shndx, The_stub_table* stub_table)
1740 {
1741 gold_assert(shndx < this->stub_tables_.size());
1742 this->stub_tables_[shndx] = stub_table;
1743 }
1744
1745 // Entry point for errata scanning.
1746 void
1747 scan_errata(unsigned int shndx,
1748 const elfcpp::Shdr<size, big_endian>&,
1749 Output_section*, const Symbol_table*,
1750 The_target_aarch64*);
1751
1752 // Scan all relocation sections for stub generation.
1753 void
1754 scan_sections_for_stubs(The_target_aarch64*, const Symbol_table*,
1755 const Layout*);
1756
1757 // Whether a section is a scannable text section.
1758 bool
1759 text_section_is_scannable(const elfcpp::Shdr<size, big_endian>&, unsigned int,
1760 const Output_section*, const Symbol_table*);
1761
1762 // Convert regular input section with index SHNDX to a relaxed section.
1763 void
1764 convert_input_section_to_relaxed_section(unsigned /* shndx */)
1765 {
1766 // The stubs have relocations and we need to process them after writing
1767 // out the stubs. So relocation now must follow section write.
1768 this->set_relocs_must_follow_section_writes();
1769 }
1770
1771 // Structure for mapping symbol position.
1772 struct Mapping_symbol_position
1773 {
1774 Mapping_symbol_position(unsigned int shndx, AArch64_address offset):
1775 shndx_(shndx), offset_(offset)
1776 {}
1777
1778 // "<" comparator used in ordered_map container.
1779 bool
1780 operator<(const Mapping_symbol_position& p) const
1781 {
1782 return (this->shndx_ < p.shndx_
1783 || (this->shndx_ == p.shndx_ && this->offset_ < p.offset_));
1784 }
1785
1786 // Section index.
1787 unsigned int shndx_;
1788
1789 // Section offset.
1790 AArch64_address offset_;
1791 };
1792
1793 typedef std::map<Mapping_symbol_position, char> Mapping_symbol_info;
1794
1795 protected:
1796 // Post constructor setup.
1797 void
1798 do_setup()
1799 {
1800 // Call parent's setup method.
1801 Sized_relobj_file<size, big_endian>::do_setup();
1802
1803 // Initialize look-up tables.
1804 this->stub_tables_.resize(this->shnum());
1805 }
1806
1807 virtual void
1808 do_relocate_sections(
1809 const Symbol_table* symtab, const Layout* layout,
1810 const unsigned char* pshdrs, Output_file* of,
1811 typename Sized_relobj_file<size, big_endian>::Views* pviews);
1812
1813 // Count local symbols and (optionally) record mapping info.
1814 virtual void
1815 do_count_local_symbols(Stringpool_template<char>*,
1816 Stringpool_template<char>*);
1817
1818 private:
1819 // Fix all errata in the object.
1820 void
1821 fix_errata(typename Sized_relobj_file<size, big_endian>::Views* pviews);
1822
1823 // Try to fix erratum 843419 in an optimized way. Return true if patch is
1824 // applied.
1825 bool
1826 try_fix_erratum_843419_optimized(
1827 The_erratum_stub*,
1828 typename Sized_relobj_file<size, big_endian>::View_size&);
1829
1830 // Whether a section needs to be scanned for relocation stubs.
1831 bool
1832 section_needs_reloc_stub_scanning(const elfcpp::Shdr<size, big_endian>&,
1833 const Relobj::Output_sections&,
1834 const Symbol_table*, const unsigned char*);
1835
1836 // List of stub tables.
1837 Stub_table_list stub_tables_;
1838
1839 // Mapping symbol information sorted by (section index, section_offset).
1840 Mapping_symbol_info mapping_symbol_info_;
1841 }; // End of AArch64_relobj
1842
1843
1844 // Override to record mapping symbol information.
1845 template<int size, bool big_endian>
1846 void
1847 AArch64_relobj<size, big_endian>::do_count_local_symbols(
1848 Stringpool_template<char>* pool, Stringpool_template<char>* dynpool)
1849 {
1850 Sized_relobj_file<size, big_endian>::do_count_local_symbols(pool, dynpool);
1851
1852 // Only erratum-fixing work needs mapping symbols, so skip this
1853 // time-consuming processing if we are not fixing errata.
1854 if (!parameters->options().fix_cortex_a53_843419()
1855 && !parameters->options().fix_cortex_a53_835769())
1856 return;
1857
1858 const unsigned int loccount = this->local_symbol_count();
1859 if (loccount == 0)
1860 return;
1861
1862 // Read the symbol table section header.
1863 const unsigned int symtab_shndx = this->symtab_shndx();
1864 elfcpp::Shdr<size, big_endian>
1865 symtabshdr(this, this->elf_file()->section_header(symtab_shndx));
1866 gold_assert(symtabshdr.get_sh_type() == elfcpp::SHT_SYMTAB);
1867
1868 // Read the local symbols.
1869 const int sym_size = elfcpp::Elf_sizes<size>::sym_size;
1870 gold_assert(loccount == symtabshdr.get_sh_info());
1871 off_t locsize = loccount * sym_size;
1872 const unsigned char* psyms = this->get_view(symtabshdr.get_sh_offset(),
1873 locsize, true, true);
1874
1875 // For mapping symbol processing, we need to read the symbol names.
1876 unsigned int strtab_shndx = this->adjust_shndx(symtabshdr.get_sh_link());
1877 if (strtab_shndx >= this->shnum())
1878 {
1879 this->error(_("invalid symbol table name index: %u"), strtab_shndx);
1880 return;
1881 }
1882
1883 elfcpp::Shdr<size, big_endian>
1884 strtabshdr(this, this->elf_file()->section_header(strtab_shndx));
1885 if (strtabshdr.get_sh_type() != elfcpp::SHT_STRTAB)
1886 {
1887 this->error(_("symbol table name section has wrong type: %u"),
1888 static_cast<unsigned int>(strtabshdr.get_sh_type()));
1889 return;
1890 }
1891
1892 const char* pnames =
1893 reinterpret_cast<const char*>(this->get_view(strtabshdr.get_sh_offset(),
1894 strtabshdr.get_sh_size(),
1895 false, false));
1896
1897 // Skip the first dummy symbol.
1898 psyms += sym_size;
1899 typename Sized_relobj_file<size, big_endian>::Local_values*
1900 plocal_values = this->local_values();
1901 for (unsigned int i = 1; i < loccount; ++i, psyms += sym_size)
1902 {
1903 elfcpp::Sym<size, big_endian> sym(psyms);
1904 Symbol_value<size>& lv((*plocal_values)[i]);
1905 AArch64_address input_value = lv.input_value();
1906
1907 // Check to see if this is a mapping symbol. AArch64 mapping symbols are
1908 // defined in "ELF for the ARM 64-bit Architecture", Table 4-4, Mapping
1909 // symbols.
1910 // A mapping symbol can take one of the following 4 forms:
1911 // a) $x
1912 // b) $x.<any...>
1913 // c) $d
1914 // d) $d.<any...>
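// For example, a symbol named "$x" or "$x.foo" marks the start of a span
// of A64 code, while "$d" or "$d.bar" marks the start of a span of data.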
1915 const char* sym_name = pnames + sym.get_st_name();
1916 if (sym_name[0] == '$' && (sym_name[1] == 'x' || sym_name[1] == 'd')
1917 && (sym_name[2] == '\0' || sym_name[2] == '.'))
1918 {
1919 bool is_ordinary;
1920 unsigned int input_shndx =
1921 this->adjust_sym_shndx(i, sym.get_st_shndx(), &is_ordinary);
1922 gold_assert(is_ordinary);
1923
1924 Mapping_symbol_position msp(input_shndx, input_value);
1925 // Insert the mapping symbol info into the map, whose ordering is
1926 // defined by (shndx, offset_within_section).
1927 this->mapping_symbol_info_[msp] = sym_name[1];
1928 }
1929 }
1930 }
1931
1932
1933 // Fix all errata in the object.
1934
1935 template<int size, bool big_endian>
1936 void
1937 AArch64_relobj<size, big_endian>::fix_errata(
1938 typename Sized_relobj_file<size, big_endian>::Views* pviews)
1939 {
1940 typedef typename elfcpp::Swap<32,big_endian>::Valtype Insntype;
1941 unsigned int shnum = this->shnum();
1942 for (unsigned int i = 1; i < shnum; ++i)
1943 {
1944 The_stub_table* stub_table = this->stub_table(i);
1945 if (!stub_table)
1946 continue;
1947 std::pair<Erratum_stub_set_iter, Erratum_stub_set_iter>
1948 ipair(stub_table->find_erratum_stubs_for_input_section(this, i));
1949 Erratum_stub_set_iter p = ipair.first, end = ipair.second;
1950 while (p != end)
1951 {
1952 The_erratum_stub* stub = *p;
1953 typename Sized_relobj_file<size, big_endian>::View_size&
1954 pview((*pviews)[i]);
1955
1956 // Double-check the data before fixing.
1957 gold_assert(pview.address + stub->sh_offset()
1958 == stub->erratum_address());
1959
1960 // Update previously recorded erratum insn with relocated
1961 // version.
1962 Insntype* ip =
1963 reinterpret_cast<Insntype*>(pview.view + stub->sh_offset());
1964 Insntype insn_to_fix = ip[0];
1965 stub->update_erratum_insn(insn_to_fix);
1966
1967 // First check whether the erratum is 843419 and whether it can be
1968 // fixed without using a branch-to-stub.
1969 if (!try_fix_erratum_843419_optimized(stub, pview))
1970 {
1971 // Replace the erratum insn with a branch-to-stub.
1972 AArch64_address stub_address =
1973 stub_table->erratum_stub_address(stub);
1974 unsigned int b_offset = stub_address - stub->erratum_address();
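// An unconditional B instruction reaches +/-128 MiB, i.e. a 28-bit
// signed byte offset, so the low 28 bits of B_OFFSET are sufficient
// to encode the branch to the stub.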
1975 AArch64_relocate_functions<size, big_endian>::construct_b(
1976 pview.view + stub->sh_offset(), b_offset & 0xfffffff);
1977 }
1978 ++p;
1979 }
1980 }
1981 }
1982
1983
1984 // This is an optimization for erratum 843419. The erratum sequence must begin
1985 // with 'adrp'; when the final value computed by the adrp also fits in an adr,
1986 // we can simply replace the 'adrp' with an 'adr', saving 2 jumps per occurrence.
1987 // (Note, however, that in this case we do not delete the erratum stub (it is
1988 // too late to do so); it is merely generated without ever being called.)
1989
1990 template<int size, bool big_endian>
1991 bool
1992 AArch64_relobj<size, big_endian>::try_fix_erratum_843419_optimized(
1993 The_erratum_stub* stub,
1994 typename Sized_relobj_file<size, big_endian>::View_size& pview)
1995 {
1996 if (stub->type() != ST_E_843419)
1997 return false;
1998
1999 typedef AArch64_insn_utilities<big_endian> Insn_utilities;
2000 typedef typename elfcpp::Swap<32,big_endian>::Valtype Insntype;
2001 E843419_stub<size, big_endian>* e843419_stub =
2002 reinterpret_cast<E843419_stub<size, big_endian>*>(stub);
2003 AArch64_address pc = pview.address + e843419_stub->adrp_sh_offset();
2004 Insntype* adrp_view = reinterpret_cast<Insntype*>(
2005 pview.view + e843419_stub->adrp_sh_offset());
2006 Insntype adrp_insn = adrp_view[0];
2007 gold_assert(Insn_utilities::is_adrp(adrp_insn));
2008 // Get adrp 33-bit signed imm value.
2009 int64_t adrp_imm = Insn_utilities::
2010 aarch64_adrp_decode_imm(adrp_insn);
2011 // adrp - final value transferred to target register is calculated as:
2012 // PC[11:0] = Zeros(12)
2013 // adrp_dest_value = PC + adrp_imm;
2014 int64_t adrp_dest_value = (pc & ~((1 << 12) - 1)) + adrp_imm;
2015 // adr - final value transferred to target register is calculated as:
2016 // PC + adr_imm
2017 // So we have:
2018 // PC + adr_imm = adrp_dest_value
2019 // ==>
2020 // adr_imm = adrp_dest_value - PC
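// A hypothetical example: with pc = 0x41f123 and adrp_imm = 0x1000,
// adrp_dest_value is 0x420000 and adr_imm is 0xedd, well within the
// +/-2^20 adr range, so the rewrite below applies.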
2021 int64_t adr_imm = adrp_dest_value - pc;
2022 // Check if imm fits in adr (21-bit signed).
2023 if (-(1 << 20) <= adr_imm && adr_imm < (1 << 20))
2024 {
2025 // Convert 'adrp' into 'adr'.
2026 Insntype adr_insn = adrp_insn & ((1u << 31) - 1);
2027 adr_insn = Insn_utilities::
2028 aarch64_adr_encode_imm(adr_insn, adr_imm);
2029 elfcpp::Swap<32, big_endian>::writeval(adrp_view, adr_insn);
2030 return true;
2031 }
2032 return false;
2033 }
2034
2035
2036 // Relocate sections.
2037
2038 template<int size, bool big_endian>
2039 void
2040 AArch64_relobj<size, big_endian>::do_relocate_sections(
2041 const Symbol_table* symtab, const Layout* layout,
2042 const unsigned char* pshdrs, Output_file* of,
2043 typename Sized_relobj_file<size, big_endian>::Views* pviews)
2044 {
2045 // Call parent to relocate sections.
2046 Sized_relobj_file<size, big_endian>::do_relocate_sections(symtab, layout,
2047 pshdrs, of, pviews);
2048
2049 // We do not generate stubs if doing a relocatable link.
2050 if (parameters->options().relocatable())
2051 return;
2052
2053 if (parameters->options().fix_cortex_a53_843419()
2054 || parameters->options().fix_cortex_a53_835769())
2055 this->fix_errata(pviews);
2056
2057 Relocate_info<size, big_endian> relinfo;
2058 relinfo.symtab = symtab;
2059 relinfo.layout = layout;
2060 relinfo.object = this;
2061
2062 // Relocate stub tables.
2063 unsigned int shnum = this->shnum();
2064 The_target_aarch64* target = The_target_aarch64::current_target();
2065
2066 for (unsigned int i = 1; i < shnum; ++i)
2067 {
2068 The_aarch64_input_section* aarch64_input_section =
2069 target->find_aarch64_input_section(this, i);
2070 if (aarch64_input_section != NULL
2071 && aarch64_input_section->is_stub_table_owner()
2072 && !aarch64_input_section->stub_table()->empty())
2073 {
2074 Output_section* os = this->output_section(i);
2075 gold_assert(os != NULL);
2076
2077 relinfo.reloc_shndx = elfcpp::SHN_UNDEF;
2078 relinfo.reloc_shdr = NULL;
2079 relinfo.data_shndx = i;
2080 relinfo.data_shdr = pshdrs + i * elfcpp::Elf_sizes<size>::shdr_size;
2081
2082 typename Sized_relobj_file<size, big_endian>::View_size&
2083 view_struct = (*pviews)[i];
2084 gold_assert(view_struct.view != NULL);
2085
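	  // The stub table is laid out immediately after the original contents
	  // of its owning relaxed input section, so its data begins at
	  // (stub table address - section view address) within the view.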
2086 The_stub_table* stub_table = aarch64_input_section->stub_table();
2087 off_t offset = stub_table->address() - view_struct.address;
2088 unsigned char* view = view_struct.view + offset;
2089 AArch64_address address = stub_table->address();
2090 section_size_type view_size = stub_table->data_size();
2091 stub_table->relocate_stubs(&relinfo, target, os, view, address,
2092 view_size);
2093 }
2094 }
2095 }
2096
2097
2098 // Determine if an input section is scannable for stub processing. SHDR is
2099 // the header of the section and SHNDX is the section index. OS is the output
2100 // section for the input section and SYMTAB is the global symbol table used to
2101 // look up ICF information.
2102
2103 template<int size, bool big_endian>
2104 bool
2105 AArch64_relobj<size, big_endian>::text_section_is_scannable(
2106 const elfcpp::Shdr<size, big_endian>& text_shdr,
2107 unsigned int text_shndx,
2108 const Output_section* os,
2109 const Symbol_table* symtab)
2110 {
2111 // Skip any empty sections, unallocated sections or sections whose
2112 // type are not SHT_PROGBITS.
2113 if (text_shdr.get_sh_size() == 0
2114 || (text_shdr.get_sh_flags() & elfcpp::SHF_ALLOC) == 0
2115 || text_shdr.get_sh_type() != elfcpp::SHT_PROGBITS)
2116 return false;
2117
2118 // Skip any discarded or ICF'ed sections.
2119 if (os == NULL || symtab->is_section_folded(this, text_shndx))
2120 return false;
2121
2122 // Skip exception frame.
2123 if (strcmp(os->name(), ".eh_frame") == 0)
2124 return false;
2125
2126 gold_assert(!this->is_output_section_offset_invalid(text_shndx) ||
2127 os->find_relaxed_input_section(this, text_shndx) != NULL);
2128
2129 return true;
2130 }
2131
2132
2133 // Determine if we want to scan the SHNDX-th section for relocation stubs.
2134 // This is a helper for AArch64_relobj::scan_sections_for_stubs().
2135
2136 template<int size, bool big_endian>
2137 bool
2138 AArch64_relobj<size, big_endian>::section_needs_reloc_stub_scanning(
2139 const elfcpp::Shdr<size, big_endian>& shdr,
2140 const Relobj::Output_sections& out_sections,
2141 const Symbol_table* symtab,
2142 const unsigned char* pshdrs)
2143 {
2144 unsigned int sh_type = shdr.get_sh_type();
2145 if (sh_type != elfcpp::SHT_RELA)
2146 return false;
2147
2148 // Ignore empty section.
2149 off_t sh_size = shdr.get_sh_size();
2150 if (sh_size == 0)
2151 return false;
2152
2153 // Ignore reloc section with unexpected symbol table. The
2154 // error will be reported in the final link.
2155 if (this->adjust_shndx(shdr.get_sh_link()) != this->symtab_shndx())
2156 return false;
2157
2158 gold_assert(sh_type == elfcpp::SHT_RELA);
2159 unsigned int reloc_size = elfcpp::Elf_sizes<size>::rela_size;
2160
2161 // Ignore reloc section with unexpected entsize or uneven size.
2162 // The error will be reported in the final link.
2163 if (reloc_size != shdr.get_sh_entsize() || sh_size % reloc_size != 0)
2164 return false;
2165
2166 // Ignore reloc section with bad info. This error will be
2167 // reported in the final link.
2168 unsigned int text_shndx = this->adjust_shndx(shdr.get_sh_info());
2169 if (text_shndx >= this->shnum())
2170 return false;
2171
2172 const unsigned int shdr_size = elfcpp::Elf_sizes<size>::shdr_size;
2173 const elfcpp::Shdr<size, big_endian> text_shdr(pshdrs +
2174 text_shndx * shdr_size);
2175 return this->text_section_is_scannable(text_shdr, text_shndx,
2176 out_sections[text_shndx], symtab);
2177 }
2178
2179
2180 // Scan section SHNDX for errata 843419 and 835769.
2181
2182 template<int size, bool big_endian>
2183 void
2184 AArch64_relobj<size, big_endian>::scan_errata(
2185 unsigned int shndx, const elfcpp::Shdr<size, big_endian>& shdr,
2186 Output_section* os, const Symbol_table* symtab,
2187 The_target_aarch64* target)
2188 {
2189 if (shdr.get_sh_size() == 0
2190 || (shdr.get_sh_flags() &
2191 (elfcpp::SHF_ALLOC | elfcpp::SHF_EXECINSTR)) == 0
2192 || shdr.get_sh_type() != elfcpp::SHT_PROGBITS)
2193 return;
2194
2195 if (!os || symtab->is_section_folded(this, shndx)) return;
2196
2197 AArch64_address output_offset = this->get_output_section_offset(shndx);
2198 AArch64_address output_address;
2199 if (output_offset != invalid_address)
2200 output_address = os->address() + output_offset;
2201 else
2202 {
2203 const Output_relaxed_input_section* poris =
2204 os->find_relaxed_input_section(this, shndx);
2205 if (!poris) return;
2206 output_address = poris->address();
2207 }
2208
2209 section_size_type input_view_size = 0;
2210 const unsigned char* input_view =
2211 this->section_contents(shndx, &input_view_size, false);
2212
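  // Walk the mapping symbols recorded for this section. Each '$x' symbol
  // opens an instruction span that runs to the next mapping symbol in the
  // same section (or to the end of the section), and only those spans are
  // scanned for the errata.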
2213 Mapping_symbol_position section_start(shndx, 0);
2214 // Find the first mapping symbol record within section shndx.
2215 typename Mapping_symbol_info::const_iterator p =
2216 this->mapping_symbol_info_.lower_bound(section_start);
2217 while (p != this->mapping_symbol_info_.end() &&
2218 p->first.shndx_ == shndx)
2219 {
2220 typename Mapping_symbol_info::const_iterator prev = p;
2221 ++p;
2222 if (prev->second == 'x')
2223 {
2224 section_size_type span_start =
2225 convert_to_section_size_type(prev->first.offset_);
2226 section_size_type span_end;
2227 if (p != this->mapping_symbol_info_.end()
2228 && p->first.shndx_ == shndx)
2229 span_end = convert_to_section_size_type(p->first.offset_);
2230 else
2231 span_end = convert_to_section_size_type(shdr.get_sh_size());
2232
2233 // Here we do not share the scanning code between the two errata. For
2234 // 843419, only the last few insns of each page are examined, which is
2235 // fast, whereas for 835769 every insn pair needs to be checked.
2236
2237 if (parameters->options().fix_cortex_a53_843419())
2238 target->scan_erratum_843419_span(
2239 this, shndx, span_start, span_end,
2240 const_cast<unsigned char*>(input_view), output_address);
2241
2242 if (parameters->options().fix_cortex_a53_835769())
2243 target->scan_erratum_835769_span(
2244 this, shndx, span_start, span_end,
2245 const_cast<unsigned char*>(input_view), output_address);
2246 }
2247 }
2248 }
2249
2250
2251 // Scan relocations for stub generation.
2252
2253 template<int size, bool big_endian>
2254 void
2255 AArch64_relobj<size, big_endian>::scan_sections_for_stubs(
2256 The_target_aarch64* target,
2257 const Symbol_table* symtab,
2258 const Layout* layout)
2259 {
2260 unsigned int shnum = this->shnum();
2261 const unsigned int shdr_size = elfcpp::Elf_sizes<size>::shdr_size;
2262
2263 // Read the section headers.
2264 const unsigned char* pshdrs = this->get_view(this->elf_file()->shoff(),
2265 shnum * shdr_size,
2266 true, true);
2267
2268 // To speed up processing, we set up hash tables for fast lookup of
2269 // input offsets to output addresses.
2270 this->initialize_input_to_output_maps();
2271
2272 const Relobj::Output_sections& out_sections(this->output_sections());
2273
2274 Relocate_info<size, big_endian> relinfo;
2275 relinfo.symtab = symtab;
2276 relinfo.layout = layout;
2277 relinfo.object = this;
2278
2279 // Do the relocation stub scanning.
2280 const unsigned char* p = pshdrs + shdr_size;
2281 for (unsigned int i = 1; i < shnum; ++i, p += shdr_size)
2282 {
2283 const elfcpp::Shdr<size, big_endian> shdr(p);
2284 if (parameters->options().fix_cortex_a53_843419()
2285 || parameters->options().fix_cortex_a53_835769())
2286 scan_errata(i, shdr, out_sections[i], symtab, target);
2287 if (this->section_needs_reloc_stub_scanning(shdr, out_sections, symtab,
2288 pshdrs))
2289 {
2290 unsigned int index = this->adjust_shndx(shdr.get_sh_info());
2291 AArch64_address output_offset =
2292 this->get_output_section_offset(index);
2293 AArch64_address output_address;
2294 if (output_offset != invalid_address)
2295 {
2296 output_address = out_sections[index]->address() + output_offset;
2297 }
2298 else
2299 {
2300 // Currently this only happens for a relaxed section.
2301 const Output_relaxed_input_section* poris =
2302 out_sections[index]->find_relaxed_input_section(this, index);
2303 gold_assert(poris != NULL);
2304 output_address = poris->address();
2305 }
2306
2307 // Get the relocations.
2308 const unsigned char* prelocs = this->get_view(shdr.get_sh_offset(),
2309 shdr.get_sh_size(),
2310 true, false);
2311
2312 // Get the section contents.
2313 section_size_type input_view_size = 0;
2314 const unsigned char* input_view =
2315 this->section_contents(index, &input_view_size, false);
2316
2317 relinfo.reloc_shndx = i;
2318 relinfo.data_shndx = index;
2319 unsigned int sh_type = shdr.get_sh_type();
2320 unsigned int reloc_size;
2321 gold_assert (sh_type == elfcpp::SHT_RELA);
2322 reloc_size = elfcpp::Elf_sizes<size>::rela_size;
2323
2324 Output_section* os = out_sections[index];
2325 target->scan_section_for_stubs(&relinfo, sh_type, prelocs,
2326 shdr.get_sh_size() / reloc_size,
2327 os,
2328 output_offset == invalid_address,
2329 input_view, output_address,
2330 input_view_size);
2331 }
2332 }
2333 }
2334
2335
2336 // A class to wrap an ordinary input section containing executable code.
2337
2338 template<int size, bool big_endian>
2339 class AArch64_input_section : public Output_relaxed_input_section
2340 {
2341 public:
2342 typedef Stub_table<size, big_endian> The_stub_table;
2343
2344 AArch64_input_section(Relobj* relobj, unsigned int shndx)
2345 : Output_relaxed_input_section(relobj, shndx, 1),
2346 stub_table_(NULL),
2347 original_contents_(NULL), original_size_(0),
2348 original_addralign_(1)
2349 { }
2350
2351 ~AArch64_input_section()
2352 { delete[] this->original_contents_; }
2353
2354 // Initialize.
2355 void
2356 init();
2357
2358 // Set the stub_table.
2359 void
2360 set_stub_table(The_stub_table* st)
2361 { this->stub_table_ = st; }
2362
2363 // Whether this is a stub table owner.
2364 bool
2365 is_stub_table_owner() const
2366 { return this->stub_table_ != NULL && this->stub_table_->owner() == this; }
2367
2368 // Return the original size of the section.
2369 uint32_t
2370 original_size() const
2371 { return this->original_size_; }
2372
2373 // Return the stub table.
2374 The_stub_table*
2375 stub_table()
2376 { return stub_table_; }
2377
2378 protected:
2379 // Write out this input section.
2380 void
2381 do_write(Output_file*);
2382
2383 // Return required alignment of this.
2384 uint64_t
2385 do_addralign() const
2386 {
2387 if (this->is_stub_table_owner())
2388 return std::max(this->stub_table_->addralign(),
2389 static_cast<uint64_t>(this->original_addralign_));
2390 else
2391 return this->original_addralign_;
2392 }
2393
2394 // Finalize data size.
2395 void
2396 set_final_data_size();
2397
2398 // Reset address and file offset.
2399 void
2400 do_reset_address_and_file_offset();
2401
2402 // Output offset.
2403 bool
2404 do_output_offset(const Relobj* object, unsigned int shndx,
2405 section_offset_type offset,
2406 section_offset_type* poutput) const
2407 {
2408 if ((object == this->relobj())
2409 && (shndx == this->shndx())
2410 && (offset >= 0)
2411 && (offset <=
2412 convert_types<section_offset_type, uint32_t>(this->original_size_)))
2413 {
2414 *poutput = offset;
2415 return true;
2416 }
2417 else
2418 return false;
2419 }
2420
2421 private:
2422 // Copying is not allowed.
2423 AArch64_input_section(const AArch64_input_section&);
2424 AArch64_input_section& operator=(const AArch64_input_section&);
2425
2426 // The relocation stubs.
2427 The_stub_table* stub_table_;
2428 // Original section contents. We have to make a copy here since the file
2429 // containing the original section may not be locked when we need to access
2430 // the contents.
2431 unsigned char* original_contents_;
2432 // Section size of the original input section.
2433 uint32_t original_size_;
2434 // Address alignment of the original input section.
2435 uint32_t original_addralign_;
2436 }; // End of AArch64_input_section
2437
2438
2439 // Finalize data size.
2440
2441 template<int size, bool big_endian>
2442 void
2443 AArch64_input_section<size, big_endian>::set_final_data_size()
2444 {
2445 off_t off = convert_types<off_t, uint64_t>(this->original_size_);
2446
2447 if (this->is_stub_table_owner())
2448 {
2449 this->stub_table_->finalize_data_size();
2450 off = align_address(off, this->stub_table_->addralign());
2451 off += this->stub_table_->data_size();
2452 }
2453 this->set_data_size(off);
2454 }
2455
2456
2457 // Reset address and file offset.
2458
2459 template<int size, bool big_endian>
2460 void
2461 AArch64_input_section<size, big_endian>::do_reset_address_and_file_offset()
2462 {
2463 // Size of the original input section contents.
2464 off_t off = convert_types<off_t, uint64_t>(this->original_size_);
2465
2466 // If this is a stub table owner, account for the stub table size.
2467 if (this->is_stub_table_owner())
2468 {
2469 The_stub_table* stub_table = this->stub_table_;
2470
2471 // Reset the stub table's address and file offset. The
2472 // child's current data size will be updated after that.
2473 stub_table_->reset_address_and_file_offset();
2474 off = align_address(off, stub_table_->addralign());
2475 off += stub_table->current_data_size();
2476 }
2477
2478 this->set_current_data_size(off);
2479 }
2480
2481
2482 // Initialize an AArch64_input_section.
2483
2484 template<int size, bool big_endian>
2485 void
2486 AArch64_input_section<size, big_endian>::init()
2487 {
2488 Relobj* relobj = this->relobj();
2489 unsigned int shndx = this->shndx();
2490
2491 // We have to cache original size, alignment and contents to avoid locking
2492 // the original file.
2493 this->original_addralign_ =
2494 convert_types<uint32_t, uint64_t>(relobj->section_addralign(shndx));
2495
2496 // This is not efficient but we expect only a small number of relaxed
2497 // input sections for stubs.
2498 section_size_type section_size;
2499 const unsigned char* section_contents =
2500 relobj->section_contents(shndx, &section_size, false);
2501 this->original_size_ =
2502 convert_types<uint32_t, uint64_t>(relobj->section_size(shndx));
2503
2504 gold_assert(this->original_contents_ == NULL);
2505 this->original_contents_ = new unsigned char[section_size];
2506 memcpy(this->original_contents_, section_contents, section_size);
2507
2508 // We want to make this look like the original input section after
2509 // output sections are finalized.
2510 Output_section* os = relobj->output_section(shndx);
2511 off_t offset = relobj->output_section_offset(shndx);
2512 gold_assert(os != NULL && !relobj->is_output_section_offset_invalid(shndx));
2513 this->set_address(os->address() + offset);
2514 this->set_file_offset(os->offset() + offset);
2515 this->set_current_data_size(this->original_size_);
2516 this->finalize_data_size();
2517 }
2518
2519
2520 // Write data to output file.
2521
2522 template<int size, bool big_endian>
2523 void
2524 AArch64_input_section<size, big_endian>::do_write(Output_file* of)
2525 {
2526 // We have to write out the original section content.
2527 gold_assert(this->original_contents_ != NULL);
2528 of->write(this->offset(), this->original_contents_,
2529 this->original_size_);
2530
2531 // If this owns a stub table and it is not empty, write it.
2532 if (this->is_stub_table_owner() && !this->stub_table_->empty())
2533 this->stub_table_->write(of);
2534 }
2535
2536
2537 // AArch64 output section class. This is defined mainly to add a number of
2538 // stub generation methods.
2539
2540 template<int size, bool big_endian>
2541 class AArch64_output_section : public Output_section
2542 {
2543 public:
2544 typedef Target_aarch64<size, big_endian> The_target_aarch64;
2545 typedef AArch64_relobj<size, big_endian> The_aarch64_relobj;
2546 typedef Stub_table<size, big_endian> The_stub_table;
2547 typedef AArch64_input_section<size, big_endian> The_aarch64_input_section;
2548
2549 public:
2550 AArch64_output_section(const char* name, elfcpp::Elf_Word type,
2551 elfcpp::Elf_Xword flags)
2552 : Output_section(name, type, flags)
2553 { }
2554
2555 ~AArch64_output_section() {}
2556
2557 // Group input sections for stub generation.
2558 void
2559 group_sections(section_size_type, bool, Target_aarch64<size, big_endian>*,
2560 const Task*);
2561
2562 private:
2563 typedef Output_section::Input_section Input_section;
2564 typedef Output_section::Input_section_list Input_section_list;
2565
2566 // Create a stub group.
2567 void
2568 create_stub_group(Input_section_list::const_iterator,
2569 Input_section_list::const_iterator,
2570 Input_section_list::const_iterator,
2571 The_target_aarch64*,
2572 std::vector<Output_relaxed_input_section*>&,
2573 const Task*);
2574 }; // End of AArch64_output_section
2575
2576
2577 // Create a stub group for input sections from FIRST to LAST. OWNER points to
2578 // the input section that will be the owner of the stub table.
2579
2580 template<int size, bool big_endian> void
2581 AArch64_output_section<size, big_endian>::create_stub_group(
2582 Input_section_list::const_iterator first,
2583 Input_section_list::const_iterator last,
2584 Input_section_list::const_iterator owner,
2585 The_target_aarch64* target,
2586 std::vector<Output_relaxed_input_section*>& new_relaxed_sections,
2587 const Task* task)
2588 {
2589 // Currently we convert ordinary input sections into relaxed sections only
2590 // at this point.
2591 The_aarch64_input_section* input_section;
2592 if (owner->is_relaxed_input_section())
2593 gold_unreachable();
2594 else
2595 {
2596 gold_assert(owner->is_input_section());
2597 // Create a new relaxed input section. We need to lock the original
2598 // file.
2599 Task_lock_obj<Object> tl(task, owner->relobj());
2600 input_section =
2601 target->new_aarch64_input_section(owner->relobj(), owner->shndx());
2602 new_relaxed_sections.push_back(input_section);
2603 }
2604
2605 // Create a stub table.
2606 The_stub_table* stub_table =
2607 target->new_stub_table(input_section);
2608
2609 input_section->set_stub_table(stub_table);
2610
2611 Input_section_list::const_iterator p = first;
2612 // Look for input sections or relaxed input sections in [first ... last].
2613 do
2614 {
2615 if (p->is_input_section() || p->is_relaxed_input_section())
2616 {
2617 // The stub table information for input sections lives
2618 // in their objects.
2619 The_aarch64_relobj* aarch64_relobj =
2620 static_cast<The_aarch64_relobj*>(p->relobj());
2621 aarch64_relobj->set_stub_table(p->shndx(), stub_table);
2622 }
2623 }
2624 while (p++ != last);
2625 }
2626
2627
2628 // Group input sections for stub generation. GROUP_SIZE is roughly the limit
2629 // of a stub group's size. We grow a stub group by adding input sections until
2630 // the size is just below GROUP_SIZE. The last input section will be converted
2631 // into a stub table owner. If STUBS_ALWAYS_AFTER_BRANCH is false, we also add
2632 // input sections after the stub table, effectively doubling the group size.
2633 //
2634 // This is similar to the group_sections() function in elf32-arm.c but is
2635 // implemented differently.
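// The grouping below is implemented as a small state machine (NO_GROUP ->
// FINDING_STUB_SECTION -> HAS_STUB_SECTION); see the State enum inside
// group_sections().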
2636
2637 template<int size, bool big_endian>
2638 void AArch64_output_section<size, big_endian>::group_sections(
2639 section_size_type group_size,
2640 bool stubs_always_after_branch,
2641 Target_aarch64<size, big_endian>* target,
2642 const Task* task)
2643 {
2644 typedef enum
2645 {
2646 NO_GROUP,
2647 FINDING_STUB_SECTION,
2648 HAS_STUB_SECTION
2649 } State;
2650
2651 std::vector<Output_relaxed_input_section*> new_relaxed_sections;
2652
2653 State state = NO_GROUP;
2654 section_size_type off = 0;
2655 section_size_type group_begin_offset = 0;
2656 section_size_type group_end_offset = 0;
2657 section_size_type stub_table_end_offset = 0;
2658 Input_section_list::const_iterator group_begin =
2659 this->input_sections().end();
2660 Input_section_list::const_iterator stub_table =
2661 this->input_sections().end();
2662 Input_section_list::const_iterator group_end = this->input_sections().end();
2663 for (Input_section_list::const_iterator p = this->input_sections().begin();
2664 p != this->input_sections().end();
2665 ++p)
2666 {
2667 section_size_type section_begin_offset =
2668 align_address(off, p->addralign());
2669 section_size_type section_end_offset =
2670 section_begin_offset + p->data_size();
2671
2672 // Check to see if we should group the previously seen sections.
2673 switch (state)
2674 {
2675 case NO_GROUP:
2676 break;
2677
2678 case FINDING_STUB_SECTION:
2679 // Adding this section makes the group larger than GROUP_SIZE.
2680 if (section_end_offset - group_begin_offset >= group_size)
2681 {
2682 if (stubs_always_after_branch)
2683 {
2684 gold_assert(group_end != this->input_sections().end());
2685 this->create_stub_group(group_begin, group_end, group_end,
2686 target, new_relaxed_sections,
2687 task);
2688 state = NO_GROUP;
2689 }
2690 else
2691 {
2692 // Input sections up to stub_group_size bytes after the stub
2693 // table can be handled by it too.
2694 state = HAS_STUB_SECTION;
2695 stub_table = group_end;
2696 stub_table_end_offset = group_end_offset;
2697 }
2698 }
2699 break;
2700
2701 case HAS_STUB_SECTION:
2702 // Adding this section makes the post stub-section group larger
2703 // than GROUP_SIZE.
2704 gold_unreachable();
2705 // NOT SUPPORTED YET. For completeness only.
2706 if (section_end_offset - stub_table_end_offset >= group_size)
2707 {
2708 gold_assert(group_end != this->input_sections().end());
2709 this->create_stub_group(group_begin, group_end, stub_table,
2710 target, new_relaxed_sections, task);
2711 state = NO_GROUP;
2712 }
2713 break;
2714
2715 default:
2716 gold_unreachable();
2717 }
2718
2719 // If we see an input section and currently there is no group, start
2720 // a new one. Skip any empty sections. We look at the data size
2721 // instead of calling p->relobj()->section_size() to avoid locking.
2722 if ((p->is_input_section() || p->is_relaxed_input_section())
2723 && (p->data_size() != 0))
2724 {
2725 if (state == NO_GROUP)
2726 {
2727 state = FINDING_STUB_SECTION;
2728 group_begin = p;
2729 group_begin_offset = section_begin_offset;
2730 }
2731
2732 // Keep track of the last input section seen.
2733 group_end = p;
2734 group_end_offset = section_end_offset;
2735 }
2736
2737 off = section_end_offset;
2738 }
2739
2740 // Create a stub group for any ungrouped sections.
2741 if (state == FINDING_STUB_SECTION || state == HAS_STUB_SECTION)
2742 {
2743 gold_assert(group_end != this->input_sections().end());
2744 this->create_stub_group(group_begin, group_end,
2745 (state == FINDING_STUB_SECTION
2746 ? group_end
2747 : stub_table),
2748 target, new_relaxed_sections, task);
2749 }
2750
2751 if (!new_relaxed_sections.empty())
2752 this->convert_input_sections_to_relaxed_sections(new_relaxed_sections);
2753
2754 // Update the section offsets
2755 for (size_t i = 0; i < new_relaxed_sections.size(); ++i)
2756 {
2757 The_aarch64_relobj* relobj = static_cast<The_aarch64_relobj*>(
2758 new_relaxed_sections[i]->relobj());
2759 unsigned int shndx = new_relaxed_sections[i]->shndx();
2760 // Tell AArch64_relobj that this input section is converted.
2761 relobj->convert_input_section_to_relaxed_section(shndx);
2762 }
2763 } // End of AArch64_output_section::group_sections
2764
2765
2766 AArch64_reloc_property_table* aarch64_reloc_property_table = NULL;
2767
2768
2769 // The aarch64 target class.
2770 // See the ABI at
2771 // http://infocenter.arm.com/help/topic/com.arm.doc.ihi0056b/IHI0056B_aaelf64.pdf
2772 template<int size, bool big_endian>
2773 class Target_aarch64 : public Sized_target<size, big_endian>
2774 {
2775 public:
2776 typedef Target_aarch64<size, big_endian> This;
2777 typedef Output_data_reloc<elfcpp::SHT_RELA, true, size, big_endian>
2778 Reloc_section;
2779 typedef Relocate_info<size, big_endian> The_relocate_info;
2780 typedef typename elfcpp::Elf_types<size>::Elf_Addr Address;
2781 typedef AArch64_relobj<size, big_endian> The_aarch64_relobj;
2782 typedef Reloc_stub<size, big_endian> The_reloc_stub;
2783 typedef Erratum_stub<size, big_endian> The_erratum_stub;
2784 typedef typename Reloc_stub<size, big_endian>::Key The_reloc_stub_key;
2785 typedef Stub_table<size, big_endian> The_stub_table;
2786 typedef std::vector<The_stub_table*> Stub_table_list;
2787 typedef typename Stub_table_list::iterator Stub_table_iterator;
2788 typedef AArch64_input_section<size, big_endian> The_aarch64_input_section;
2789 typedef AArch64_output_section<size, big_endian> The_aarch64_output_section;
2790 typedef Unordered_map<Section_id,
2791 AArch64_input_section<size, big_endian>*,
2792 Section_id_hash> AArch64_input_section_map;
2793 typedef AArch64_insn_utilities<big_endian> Insn_utilities;
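  // The thread control block that precedes the TLS data is two pointer-sized
  // words under the AArch64 TLS layout, hence size / 8 * 2 bytes.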
2794 const static int TCB_SIZE = size / 8 * 2;
2795
2796 Target_aarch64(const Target::Target_info* info = &aarch64_info)
2797 : Sized_target<size, big_endian>(info),
2798 got_(NULL), plt_(NULL), got_plt_(NULL), got_irelative_(NULL),
2799 got_tlsdesc_(NULL), global_offset_table_(NULL), rela_dyn_(NULL),
2800 rela_irelative_(NULL), copy_relocs_(elfcpp::R_AARCH64_COPY),
2801 got_mod_index_offset_(-1U),
2802 tlsdesc_reloc_info_(), tls_base_symbol_defined_(false),
2803 stub_tables_(), stub_group_size_(0), aarch64_input_section_map_()
2804 { }
2805
2806 // Scan the relocations to determine unreferenced sections for
2807 // garbage collection.
2808 void
2809 gc_process_relocs(Symbol_table* symtab,
2810 Layout* layout,
2811 Sized_relobj_file<size, big_endian>* object,
2812 unsigned int data_shndx,
2813 unsigned int sh_type,
2814 const unsigned char* prelocs,
2815 size_t reloc_count,
2816 Output_section* output_section,
2817 bool needs_special_offset_handling,
2818 size_t local_symbol_count,
2819 const unsigned char* plocal_symbols);
2820
2821 // Scan the relocations to look for symbol adjustments.
2822 void
2823 scan_relocs(Symbol_table* symtab,
2824 Layout* layout,
2825 Sized_relobj_file<size, big_endian>* object,
2826 unsigned int data_shndx,
2827 unsigned int sh_type,
2828 const unsigned char* prelocs,
2829 size_t reloc_count,
2830 Output_section* output_section,
2831 bool needs_special_offset_handling,
2832 size_t local_symbol_count,
2833 const unsigned char* plocal_symbols);
2834
2835 // Finalize the sections.
2836 void
2837 do_finalize_sections(Layout*, const Input_objects*, Symbol_table*);
2838
2839 // Return the value to use for a dynamic symbol which requires special
2840 // treatment.
2841 uint64_t
2842 do_dynsym_value(const Symbol*) const;
2843
2844 // Relocate a section.
2845 void
2846 relocate_section(const Relocate_info<size, big_endian>*,
2847 unsigned int sh_type,
2848 const unsigned char* prelocs,
2849 size_t reloc_count,
2850 Output_section* output_section,
2851 bool needs_special_offset_handling,
2852 unsigned char* view,
2853 typename elfcpp::Elf_types<size>::Elf_Addr view_address,
2854 section_size_type view_size,
2855 const Reloc_symbol_changes*);
2856
2857 // Scan the relocs during a relocatable link.
2858 void
2859 scan_relocatable_relocs(Symbol_table* symtab,
2860 Layout* layout,
2861 Sized_relobj_file<size, big_endian>* object,
2862 unsigned int data_shndx,
2863 unsigned int sh_type,
2864 const unsigned char* prelocs,
2865 size_t reloc_count,
2866 Output_section* output_section,
2867 bool needs_special_offset_handling,
2868 size_t local_symbol_count,
2869 const unsigned char* plocal_symbols,
2870 Relocatable_relocs*);
2871
2872 // Scan the relocs for --emit-relocs.
2873 void
2874 emit_relocs_scan(Symbol_table* symtab,
2875 Layout* layout,
2876 Sized_relobj_file<size, big_endian>* object,
2877 unsigned int data_shndx,
2878 unsigned int sh_type,
2879 const unsigned char* prelocs,
2880 size_t reloc_count,
2881 Output_section* output_section,
2882 bool needs_special_offset_handling,
2883 size_t local_symbol_count,
2884 const unsigned char* plocal_syms,
2885 Relocatable_relocs* rr);
2886
2887 // Relocate a section during a relocatable link.
2888 void
2889 relocate_relocs(
2890 const Relocate_info<size, big_endian>*,
2891 unsigned int sh_type,
2892 const unsigned char* prelocs,
2893 size_t reloc_count,
2894 Output_section* output_section,
2895 typename elfcpp::Elf_types<size>::Elf_Off offset_in_output_section,
2896 unsigned char* view,
2897 typename elfcpp::Elf_types<size>::Elf_Addr view_address,
2898 section_size_type view_size,
2899 unsigned char* reloc_view,
2900 section_size_type reloc_view_size);
2901
2902 // Return the symbol index to use for a target specific relocation.
2903 // The only target specific relocation is R_AARCH64_TLSDESC for a
2904 // local symbol, which is an absolute reloc.
2905 unsigned int
2906 do_reloc_symbol_index(void*, unsigned int r_type) const
2907 {
2908 gold_assert(r_type == elfcpp::R_AARCH64_TLSDESC);
2909 return 0;
2910 }
2911
2912 // Return the addend to use for a target specific relocation.
2913 uint64_t
2914 do_reloc_addend(void* arg, unsigned int r_type, uint64_t addend) const;
2915
2916 // Return the PLT address to use for a global or local symbol.
2917 uint64_t
2918 do_plt_address_for_global(const Symbol* gsym) const
2919 { return this->plt_section()->address_for_global(gsym); }
2920
2921 uint64_t
2922 do_plt_address_for_local(const Relobj* relobj, unsigned int symndx) const
2923 { return this->plt_section()->address_for_local(relobj, symndx); }
2924
2925 // This function should be defined in targets that can use relocation
2926 // types to determine whether a function's pointer is taken (implemented
2927 // in local_reloc_may_be_function_pointer and
2928 // global_reloc_may_be_function_pointer). ICF uses this in safe mode to
2929 // only fold those functions whose pointer is definitely not taken.
2930 bool
2931 do_can_check_for_function_pointers() const
2932 { return true; }
2933
2934 // Return the number of entries in the PLT.
2935 unsigned int
2936 plt_entry_count() const;
2937
2938 // Return the offset of the first non-reserved PLT entry.
2939 unsigned int
2940 first_plt_entry_offset() const;
2941
2942 // Return the size of each PLT entry.
2943 unsigned int
2944 plt_entry_size() const;
2945
2946 // Create a stub table.
2947 The_stub_table*
2948 new_stub_table(The_aarch64_input_section*);
2949
2950 // Create an aarch64 input section.
2951 The_aarch64_input_section*
2952 new_aarch64_input_section(Relobj*, unsigned int);
2953
2954 // Find an aarch64 input section instance for a given OBJ and SHNDX.
2955 The_aarch64_input_section*
2956 find_aarch64_input_section(Relobj*, unsigned int) const;
2957
2958 // Return the thread control block size.
2959 unsigned int
2960 tcb_size() const { return This::TCB_SIZE; }
2961
2962 // Scan a section for stub generation.
2963 void
2964 scan_section_for_stubs(const Relocate_info<size, big_endian>*, unsigned int,
2965 const unsigned char*, size_t, Output_section*,
2966 bool, const unsigned char*,
2967 Address,
2968 section_size_type);
2969
2970 // Scan a relocation section for stubs.
2971 template<int sh_type>
2972 void
2973 scan_reloc_section_for_stubs(
2974 const The_relocate_info* relinfo,
2975 const unsigned char* prelocs,
2976 size_t reloc_count,
2977 Output_section* output_section,
2978 bool needs_special_offset_handling,
2979 const unsigned char* view,
2980 Address view_address,
2981 section_size_type);
2982
2983 // Relocate a single stub.
2984 void
2985 relocate_stub(The_reloc_stub*, const Relocate_info<size, big_endian>*,
2986 Output_section*, unsigned char*, Address,
2987 section_size_type);
2988
2989 // Get the default AArch64 target.
2990 static This*
2991 current_target()
2992 {
2993 gold_assert(parameters->target().machine_code() == elfcpp::EM_AARCH64
2994 && parameters->target().get_size() == size
2995 && parameters->target().is_big_endian() == big_endian);
2996 return static_cast<This*>(parameters->sized_target<size, big_endian>());
2997 }
2998
2999
3000 // Scan erratum 843419 for a part of a section.
3001 void
3002 scan_erratum_843419_span(
3003 AArch64_relobj<size, big_endian>*,
3004 unsigned int,
3005 const section_size_type,
3006 const section_size_type,
3007 unsigned char*,
3008 Address);
3009
3010 // Scan erratum 835769 for a part of a section.
3011 void
3012 scan_erratum_835769_span(
3013 AArch64_relobj<size, big_endian>*,
3014 unsigned int,
3015 const section_size_type,
3016 const section_size_type,
3017 unsigned char*,
3018 Address);
3019
3020 protected:
3021 void
3022 do_select_as_default_target()
3023 {
3024 gold_assert(aarch64_reloc_property_table == NULL);
3025 aarch64_reloc_property_table = new AArch64_reloc_property_table();
3026 }
3027
3028 // Add a new reloc argument, returning the index in the vector.
3029 size_t
3030 add_tlsdesc_info(Sized_relobj_file<size, big_endian>* object,
3031 unsigned int r_sym)
3032 {
3033 this->tlsdesc_reloc_info_.push_back(Tlsdesc_info(object, r_sym));
3034 return this->tlsdesc_reloc_info_.size() - 1;
3035 }
3036
3037 virtual Output_data_plt_aarch64<size, big_endian>*
3038 do_make_data_plt(Layout* layout,
3039 Output_data_got_aarch64<size, big_endian>* got,
3040 Output_data_space* got_plt,
3041 Output_data_space* got_irelative)
3042 {
3043 return new Output_data_plt_aarch64_standard<size, big_endian>(
3044 layout, got, got_plt, got_irelative);
3045 }
3046
3047
3048 // Override do_make_elf_object from the base class.
3049 Object*
3050 do_make_elf_object(const std::string&, Input_file*, off_t,
3051 const elfcpp::Ehdr<size, big_endian>&);
3052
3053 Output_data_plt_aarch64<size, big_endian>*
3054 make_data_plt(Layout* layout,
3055 Output_data_got_aarch64<size, big_endian>* got,
3056 Output_data_space* got_plt,
3057 Output_data_space* got_irelative)
3058 {
3059 return this->do_make_data_plt(layout, got, got_plt, got_irelative);
3060 }
3061
3062 // We only need to generate stubs, and hence perform relaxation, if we are
3063 // not doing a relocatable link.
3064 virtual bool
3065 do_may_relax() const
3066 { return !parameters->options().relocatable(); }
3067
3068 // Relaxation hook. This is where we do stub generation.
3069 virtual bool
3070 do_relax(int, const Input_objects*, Symbol_table*, Layout*, const Task*);
3071
3072 void
3073 group_sections(Layout* layout,
3074 section_size_type group_size,
3075 bool stubs_always_after_branch,
3076 const Task* task);
3077
3078 void
3079 scan_reloc_for_stub(const The_relocate_info*, unsigned int,
3080 const Sized_symbol<size>*, unsigned int,
3081 const Symbol_value<size>*,
3082 typename elfcpp::Elf_types<size>::Elf_Swxword,
3083 Address Elf_Addr);
3084
3085 // Make an output section.
3086 Output_section*
3087 do_make_output_section(const char* name, elfcpp::Elf_Word type,
3088 elfcpp::Elf_Xword flags)
3089 { return new The_aarch64_output_section(name, type, flags); }
3090
3091 private:
3092 // The class which scans relocations.
3093 class Scan
3094 {
3095 public:
3096 Scan()
3097 : issued_non_pic_error_(false)
3098 { }
3099
3100 inline void
3101 local(Symbol_table* symtab, Layout* layout, Target_aarch64* target,
3102 Sized_relobj_file<size, big_endian>* object,
3103 unsigned int data_shndx,
3104 Output_section* output_section,
3105 const elfcpp::Rela<size, big_endian>& reloc, unsigned int r_type,
3106 const elfcpp::Sym<size, big_endian>& lsym,
3107 bool is_discarded);
3108
3109 inline void
3110 global(Symbol_table* symtab, Layout* layout, Target_aarch64* target,
3111 Sized_relobj_file<size, big_endian>* object,
3112 unsigned int data_shndx,
3113 Output_section* output_section,
3114 const elfcpp::Rela<size, big_endian>& reloc, unsigned int r_type,
3115 Symbol* gsym);
3116
3117 inline bool
3118 local_reloc_may_be_function_pointer(Symbol_table* , Layout* ,
3119 Target_aarch64<size, big_endian>* ,
3120 Sized_relobj_file<size, big_endian>* ,
3121 unsigned int ,
3122 Output_section* ,
3123 const elfcpp::Rela<size, big_endian>& ,
3124 unsigned int r_type,
3125 const elfcpp::Sym<size, big_endian>&);
3126
3127 inline bool
3128 global_reloc_may_be_function_pointer(Symbol_table* , Layout* ,
3129 Target_aarch64<size, big_endian>* ,
3130 Sized_relobj_file<size, big_endian>* ,
3131 unsigned int ,
3132 Output_section* ,
3133 const elfcpp::Rela<size, big_endian>& ,
3134 unsigned int r_type,
3135 Symbol* gsym);
3136
3137 private:
3138 static void
3139 unsupported_reloc_local(Sized_relobj_file<size, big_endian>*,
3140 unsigned int r_type);
3141
3142 static void
3143 unsupported_reloc_global(Sized_relobj_file<size, big_endian>*,
3144 unsigned int r_type, Symbol*);
3145
3146 inline bool
3147 possible_function_pointer_reloc(unsigned int r_type);
3148
3149 void
3150 check_non_pic(Relobj*, unsigned int r_type);
3151
3152 bool
3153 reloc_needs_plt_for_ifunc(Sized_relobj_file<size, big_endian>*,
3154 unsigned int r_type);
3155
3156 // Whether we have issued an error about a non-PIC compilation.
3157 bool issued_non_pic_error_;
3158 };
3159
3160 // The class which implements relocation.
3161 class Relocate
3162 {
3163 public:
3164 Relocate()
3165 : skip_call_tls_get_addr_(false)
3166 { }
3167
3168 ~Relocate()
3169 { }
3170
3171 // Do a relocation. Return false if the caller should not issue
3172 // any warnings about this relocation.
3173 inline bool
3174 relocate(const Relocate_info<size, big_endian>*, unsigned int,
3175 Target_aarch64*, Output_section*, size_t, const unsigned char*,
3176 const Sized_symbol<size>*, const Symbol_value<size>*,
3177 unsigned char*, typename elfcpp::Elf_types<size>::Elf_Addr,
3178 section_size_type);
3179
3180 private:
3181 inline typename AArch64_relocate_functions<size, big_endian>::Status
3182 relocate_tls(const Relocate_info<size, big_endian>*,
3183 Target_aarch64<size, big_endian>*,
3184 size_t,
3185 const elfcpp::Rela<size, big_endian>&,
3186 unsigned int r_type, const Sized_symbol<size>*,
3187 const Symbol_value<size>*,
3188 unsigned char*,
3189 typename elfcpp::Elf_types<size>::Elf_Addr);
3190
3191 inline typename AArch64_relocate_functions<size, big_endian>::Status
3192 tls_gd_to_le(
3193 const Relocate_info<size, big_endian>*,
3194 Target_aarch64<size, big_endian>*,
3195 const elfcpp::Rela<size, big_endian>&,
3196 unsigned int,
3197 unsigned char*,
3198 const Symbol_value<size>*);
3199
3200 inline typename AArch64_relocate_functions<size, big_endian>::Status
3201 tls_ld_to_le(
3202 const Relocate_info<size, big_endian>*,
3203 Target_aarch64<size, big_endian>*,
3204 const elfcpp::Rela<size, big_endian>&,
3205 unsigned int,
3206 unsigned char*,
3207 const Symbol_value<size>*);
3208
3209 inline typename AArch64_relocate_functions<size, big_endian>::Status
3210 tls_ie_to_le(
3211 const Relocate_info<size, big_endian>*,
3212 Target_aarch64<size, big_endian>*,
3213 const elfcpp::Rela<size, big_endian>&,
3214 unsigned int,
3215 unsigned char*,
3216 const Symbol_value<size>*);
3217
3218 inline typename AArch64_relocate_functions<size, big_endian>::Status
3219 tls_desc_gd_to_le(
3220 const Relocate_info<size, big_endian>*,
3221 Target_aarch64<size, big_endian>*,
3222 const elfcpp::Rela<size, big_endian>&,
3223 unsigned int,
3224 unsigned char*,
3225 const Symbol_value<size>*);
3226
3227 inline typename AArch64_relocate_functions<size, big_endian>::Status
3228 tls_desc_gd_to_ie(
3229 const Relocate_info<size, big_endian>*,
3230 Target_aarch64<size, big_endian>*,
3231 const elfcpp::Rela<size, big_endian>&,
3232 unsigned int,
3233 unsigned char*,
3234 const Symbol_value<size>*,
3235 typename elfcpp::Elf_types<size>::Elf_Addr,
3236 typename elfcpp::Elf_types<size>::Elf_Addr);
3237
3238 bool skip_call_tls_get_addr_;
3239
3240 }; // End of class Relocate
3241
3242 // Adjust TLS relocation type based on the options and whether this
3243 // is a local symbol.
3244 static tls::Tls_optimization
3245 optimize_tls_reloc(bool is_final, int r_type);
3246
3247 // Get the GOT section, creating it if necessary.
3248 Output_data_got_aarch64<size, big_endian>*
3249 got_section(Symbol_table*, Layout*);
3250
3251 // Get the GOT PLT section.
3252 Output_data_space*
3253 got_plt_section() const
3254 {
3255 gold_assert(this->got_plt_ != NULL);
3256 return this->got_plt_;
3257 }
3258
3259 // Get the GOT section for TLSDESC entries.
3260 Output_data_got<size, big_endian>*
3261 got_tlsdesc_section() const
3262 {
3263 gold_assert(this->got_tlsdesc_ != NULL);
3264 return this->got_tlsdesc_;
3265 }
3266
3267 // Create the PLT section.
3268 void
3269 make_plt_section(Symbol_table* symtab, Layout* layout);
3270
3271 // Create a PLT entry for a global symbol.
3272 void
3273 make_plt_entry(Symbol_table*, Layout*, Symbol*);
3274
3275 // Create a PLT entry for a local STT_GNU_IFUNC symbol.
3276 void
3277 make_local_ifunc_plt_entry(Symbol_table*, Layout*,
3278 Sized_relobj_file<size, big_endian>* relobj,
3279 unsigned int local_sym_index);
3280
3281 // Define the _TLS_MODULE_BASE_ symbol in the TLS segment.
3282 void
3283 define_tls_base_symbol(Symbol_table*, Layout*);
3284
3285 // Create the reserved PLT and GOT entries for the TLS descriptor resolver.
3286 void
3287 reserve_tlsdesc_entries(Symbol_table* symtab, Layout* layout);
3288
3289 // Create a GOT entry for the TLS module index.
3290 unsigned int
3291 got_mod_index_entry(Symbol_table* symtab, Layout* layout,
3292 Sized_relobj_file<size, big_endian>* object);
3293
3294 // Get the PLT section.
3295 Output_data_plt_aarch64<size, big_endian>*
3296 plt_section() const
3297 {
3298 gold_assert(this->plt_ != NULL);
3299 return this->plt_;
3300 }
3301
3302 // Helper method to create erratum stubs for ST_E_843419 and ST_E_835769. For
3303 // ST_E_843419, we need an additional field for the adrp offset.
3304 void create_erratum_stub(
3305 AArch64_relobj<size, big_endian>* relobj,
3306 unsigned int shndx,
3307 section_size_type erratum_insn_offset,
3308 Address erratum_address,
3309 typename Insn_utilities::Insntype erratum_insn,
3310 int erratum_type,
3311 unsigned int e843419_adrp_offset = 0);
3312
3313 // Return whether this is a 3-insn erratum sequence.
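  // (Roughly: an adrp in one of the last two instruction slots of a 4KiB
  // page, followed by a load or store, and then another load or store that
  // uses the adrp destination register as its base; the scanning code has
  // the exact conditions.)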
3314 bool is_erratum_843419_sequence(
3315 typename elfcpp::Swap<32,big_endian>::Valtype insn1,
3316 typename elfcpp::Swap<32,big_endian>::Valtype insn2,
3317 typename elfcpp::Swap<32,big_endian>::Valtype insn3);
3318
3319 // Return whether this is an 835769 sequence.
3320 // (Similarly implemented as in elfnn-aarch64.c.)
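  // (Roughly: a 64-bit multiply-accumulate that directly follows a load or
  // store.)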
3321 bool is_erratum_835769_sequence(
3322 typename elfcpp::Swap<32,big_endian>::Valtype,
3323 typename elfcpp::Swap<32,big_endian>::Valtype);
3324
3325 // Get the dynamic reloc section, creating it if necessary.
3326 Reloc_section*
3327 rela_dyn_section(Layout*);
3328
3329 // Get the section to use for TLSDESC relocations.
3330 Reloc_section*
3331 rela_tlsdesc_section(Layout*) const;
3332
3333 // Get the section to use for IRELATIVE relocations.
3334 Reloc_section*
3335 rela_irelative_section(Layout*);
3336
3337 // Add a potential copy relocation.
3338 void
3339 copy_reloc(Symbol_table* symtab, Layout* layout,
3340 Sized_relobj_file<size, big_endian>* object,
3341 unsigned int shndx, Output_section* output_section,
3342 Symbol* sym, const elfcpp::Rela<size, big_endian>& reloc)
3343 {
3344 unsigned int r_type = elfcpp::elf_r_type<size>(reloc.get_r_info());
3345 this->copy_relocs_.copy_reloc(symtab, layout,
3346 symtab->get_sized_symbol<size>(sym),
3347 object, shndx, output_section,
3348 r_type, reloc.get_r_offset(),
3349 reloc.get_r_addend(),
3350 this->rela_dyn_section(layout));
3351 }
3352
3353 // Information about this specific target which we pass to the
3354 // general Target structure.
3355 static const Target::Target_info aarch64_info;
3356
3357 // The types of GOT entries needed for this platform.
3358 // These values are exposed to the ABI in an incremental link.
3359 // Do not renumber existing values without changing the version
3360 // number of the .gnu_incremental_inputs section.
3361 enum Got_type
3362 {
3363 GOT_TYPE_STANDARD = 0, // GOT entry for a regular symbol
3364 GOT_TYPE_TLS_OFFSET = 1, // GOT entry for TLS offset
3365 GOT_TYPE_TLS_PAIR = 2, // GOT entry for TLS module/offset pair
3366 GOT_TYPE_TLS_DESC = 3 // GOT entry for TLS_DESC pair
3367 };
3368
3369 // This type is used as the argument to the target specific
3370 // relocation routines. The only target specific reloc is
3371 // R_AARCH64_TLSDESC against a local symbol.
3372 struct Tlsdesc_info
3373 {
3374 Tlsdesc_info(Sized_relobj_file<size, big_endian>* a_object,
3375 unsigned int a_r_sym)
3376 : object(a_object), r_sym(a_r_sym)
3377 { }
3378
3379 // The object in which the local symbol is defined.
3380 Sized_relobj_file<size, big_endian>* object;
3381 // The local symbol index in the object.
3382 unsigned int r_sym;
3383 };
3384
3385 // The GOT section.
3386 Output_data_got_aarch64<size, big_endian>* got_;
3387 // The PLT section.
3388 Output_data_plt_aarch64<size, big_endian>* plt_;
3389 // The GOT PLT section.
3390 Output_data_space* got_plt_;
3391 // The GOT section for IRELATIVE relocations.
3392 Output_data_space* got_irelative_;
3393 // The GOT section for TLSDESC relocations.
3394 Output_data_got<size, big_endian>* got_tlsdesc_;
3395 // The _GLOBAL_OFFSET_TABLE_ symbol.
3396 Symbol* global_offset_table_;
3397 // The dynamic reloc section.
3398 Reloc_section* rela_dyn_;
3399 // The section to use for IRELATIVE relocs.
3400 Reloc_section* rela_irelative_;
3401 // Relocs saved to avoid a COPY reloc.
3402 Copy_relocs<elfcpp::SHT_RELA, size, big_endian> copy_relocs_;
3403 // Offset of the GOT entry for the TLS module index.
3404 unsigned int got_mod_index_offset_;
3405 // We handle R_AARCH64_TLSDESC against a local symbol as a target
3406 // specific relocation. Here we store the object and local symbol
3407 // index for the relocation.
3408 std::vector<Tlsdesc_info> tlsdesc_reloc_info_;
3409 // True if the _TLS_MODULE_BASE_ symbol has been defined.
3410 bool tls_base_symbol_defined_;
3411 // List of stub tables.
3412 Stub_table_list stub_tables_;
3413 // Actual stub group size.
3414 section_size_type stub_group_size_;
3415 AArch64_input_section_map aarch64_input_section_map_;
3416 }; // End of Target_aarch64
3417
3418
3419 template<>
3420 const Target::Target_info Target_aarch64<64, false>::aarch64_info =
3421 {
3422 64, // size
3423 false, // is_big_endian
3424 elfcpp::EM_AARCH64, // machine_code
3425 false, // has_make_symbol
3426 false, // has_resolve
3427 false, // has_code_fill
3428 true, // is_default_stack_executable
3429 true, // can_icf_inline_merge_sections
3430 '\0', // wrap_char
3431 "/lib/ld.so.1", // program interpreter
3432 0x400000, // default_text_segment_address
3433 0x10000, // abi_pagesize (overridable by -z max-page-size)
3434 0x1000, // common_pagesize (overridable by -z common-page-size)
3435 false, // isolate_execinstr
3436 0, // rosegment_gap
3437 elfcpp::SHN_UNDEF, // small_common_shndx
3438 elfcpp::SHN_UNDEF, // large_common_shndx
3439 0, // small_common_section_flags
3440 0, // large_common_section_flags
3441 NULL, // attributes_section
3442 NULL, // attributes_vendor
3443 "_start", // entry_symbol_name
3444 32, // hash_entry_size
3445 };
3446
3447 template<>
3448 const Target::Target_info Target_aarch64<32, false>::aarch64_info =
3449 {
3450 32, // size
3451 false, // is_big_endian
3452 elfcpp::EM_AARCH64, // machine_code
3453 false, // has_make_symbol
3454 false, // has_resolve
3455 false, // has_code_fill
3456 true, // is_default_stack_executable
3457 false, // can_icf_inline_merge_sections
3458 '\0', // wrap_char
3459 "/lib/ld.so.1", // program interpreter
3460 0x400000, // default_text_segment_address
3461 0x10000, // abi_pagesize (overridable by -z max-page-size)
3462 0x1000, // common_pagesize (overridable by -z common-page-size)
3463 false, // isolate_execinstr
3464 0, // rosegment_gap
3465 elfcpp::SHN_UNDEF, // small_common_shndx
3466 elfcpp::SHN_UNDEF, // large_common_shndx
3467 0, // small_common_section_flags
3468 0, // large_common_section_flags
3469 NULL, // attributes_section
3470 NULL, // attributes_vendor
3471 "_start", // entry_symbol_name
3472 32, // hash_entry_size
3473 };
3474
3475 template<>
3476 const Target::Target_info Target_aarch64<64, true>::aarch64_info =
3477 {
3478 64, // size
3479 true, // is_big_endian
3480 elfcpp::EM_AARCH64, // machine_code
3481 false, // has_make_symbol
3482 false, // has_resolve
3483 false, // has_code_fill
3484 true, // is_default_stack_executable
3485 true, // can_icf_inline_merge_sections
3486 '\0', // wrap_char
3487 "/lib/ld.so.1", // program interpreter
3488 0x400000, // default_text_segment_address
3489 0x10000, // abi_pagesize (overridable by -z max-page-size)
3490 0x1000, // common_pagesize (overridable by -z common-page-size)
3491 false, // isolate_execinstr
3492 0, // rosegment_gap
3493 elfcpp::SHN_UNDEF, // small_common_shndx
3494 elfcpp::SHN_UNDEF, // large_common_shndx
3495 0, // small_common_section_flags
3496 0, // large_common_section_flags
3497 NULL, // attributes_section
3498 NULL, // attributes_vendor
3499 "_start", // entry_symbol_name
3500 32, // hash_entry_size
3501 };
3502
3503 template<>
3504 const Target::Target_info Target_aarch64<32, true>::aarch64_info =
3505 {
3506 32, // size
3507 true, // is_big_endian
3508 elfcpp::EM_AARCH64, // machine_code
3509 false, // has_make_symbol
3510 false, // has_resolve
3511 false, // has_code_fill
3512 true, // is_default_stack_executable
3513 false, // can_icf_inline_merge_sections
3514 '\0', // wrap_char
3515 "/lib/ld.so.1", // program interpreter
3516 0x400000, // default_text_segment_address
3517 0x10000, // abi_pagesize (overridable by -z max-page-size)
3518 0x1000, // common_pagesize (overridable by -z common-page-size)
3519 false, // isolate_execinstr
3520 0, // rosegment_gap
3521 elfcpp::SHN_UNDEF, // small_common_shndx
3522 elfcpp::SHN_UNDEF, // large_common_shndx
3523 0, // small_common_section_flags
3524 0, // large_common_section_flags
3525 NULL, // attributes_section
3526 NULL, // attributes_vendor
3527 "_start", // entry_symbol_name
3528 32, // hash_entry_size
3529 };
3530
3531 // Get the GOT section, creating it if necessary.
3532
3533 template<int size, bool big_endian>
3534 Output_data_got_aarch64<size, big_endian>*
3535 Target_aarch64<size, big_endian>::got_section(Symbol_table* symtab,
3536 Layout* layout)
3537 {
3538 if (this->got_ == NULL)
3539 {
3540 gold_assert(symtab != NULL && layout != NULL);
3541
3542 // When using -z now, we can treat .got.plt as a relro section.
3543 // Without -z now, it is modified after program startup by lazy
3544 // PLT relocations.
3545 bool is_got_plt_relro = parameters->options().now();
3546 Output_section_order got_order = (is_got_plt_relro
3547 ? ORDER_RELRO
3548 : ORDER_RELRO_LAST);
3549 Output_section_order got_plt_order = (is_got_plt_relro
3550 ? ORDER_RELRO
3551 : ORDER_NON_RELRO_FIRST);
3552
3553 // Layout of .got and .got.plt sections.
3554 // .got[0] &_DYNAMIC <-_GLOBAL_OFFSET_TABLE_
3555 // ...
3556 // .gotplt[0] reserved for ld.so (&linkmap) <--DT_PLTGOT
3557 // .gotplt[1] reserved for ld.so (resolver)
3558 // .gotplt[2] reserved
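      // Illustrative arithmetic only (not part of the layout code, values
      // assume size == 64): the three reserved .got.plt slots below occupy
      //   AARCH64_GOTPLT_RESERVE_COUNT * (size / 8)  // == 3 * 8 == 24 bytes
      // so the first jump-slot GOT entry starts at .got.plt offset 24,
      // matching the reservation made with set_current_data_size below.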
3559
3560 // Generate .got section.
3561 this->got_ = new Output_data_got_aarch64<size, big_endian>(symtab,
3562 layout);
3563 layout->add_output_section_data(".got", elfcpp::SHT_PROGBITS,
3564 (elfcpp::SHF_ALLOC | elfcpp::SHF_WRITE),
3565 this->got_, got_order, true);
3566 // The first word of GOT is reserved for the address of .dynamic.
3567 // We put 0 here now. The value will be replaced later in
3568 // Output_data_got_aarch64::do_write.
3569 this->got_->add_constant(0);
3570
3571 // Define _GLOBAL_OFFSET_TABLE_ at the start of the .got section.
3572 // The _GLOBAL_OFFSET_TABLE_ value points to the start of .got,
3573 // even if there is a separate .got.plt section.
3574 this->global_offset_table_ =
3575 symtab->define_in_output_data("_GLOBAL_OFFSET_TABLE_", NULL,
3576 Symbol_table::PREDEFINED,
3577 this->got_,
3578 0, 0, elfcpp::STT_OBJECT,
3579 elfcpp::STB_LOCAL,
3580 elfcpp::STV_HIDDEN, 0,
3581 false, false);
3582
3583 // Generate .got.plt section.
3584 this->got_plt_ = new Output_data_space(size / 8, "** GOT PLT");
3585 layout->add_output_section_data(".got.plt", elfcpp::SHT_PROGBITS,
3586 (elfcpp::SHF_ALLOC
3587 | elfcpp::SHF_WRITE),
3588 this->got_plt_, got_plt_order,
3589 is_got_plt_relro);
3590
3591 // The first three entries are reserved.
3592 this->got_plt_->set_current_data_size(
3593 AARCH64_GOTPLT_RESERVE_COUNT * (size / 8));
3594
3595 // If there are any IRELATIVE relocations, they get GOT entries
3596 // in .got.plt after the jump slot entries.
3597 this->got_irelative_ = new Output_data_space(size / 8,
3598 "** GOT IRELATIVE PLT");
3599 layout->add_output_section_data(".got.plt", elfcpp::SHT_PROGBITS,
3600 (elfcpp::SHF_ALLOC
3601 | elfcpp::SHF_WRITE),
3602 this->got_irelative_,
3603 got_plt_order,
3604 is_got_plt_relro);
3605
3606 // If there are any TLSDESC relocations, they get GOT entries in
3607 // .got.plt after the jump slot and IRELATIVE entries.
3608 this->got_tlsdesc_ = new Output_data_got<size, big_endian>();
3609 layout->add_output_section_data(".got.plt", elfcpp::SHT_PROGBITS,
3610 (elfcpp::SHF_ALLOC
3611 | elfcpp::SHF_WRITE),
3612 this->got_tlsdesc_,
3613 got_plt_order,
3614 is_got_plt_relro);
3615
3616 if (!is_got_plt_relro)
3617 {
3618 // Those bytes can go into the relro segment.
3619 layout->increase_relro(
3620 AARCH64_GOTPLT_RESERVE_COUNT * (size / 8));
3621 }
3622
3623 }
3624 return this->got_;
3625 }
3626
3627 // Get the dynamic reloc section, creating it if necessary.
3628
3629 template<int size, bool big_endian>
3630 typename Target_aarch64<size, big_endian>::Reloc_section*
3631 Target_aarch64<size, big_endian>::rela_dyn_section(Layout* layout)
3632 {
3633 if (this->rela_dyn_ == NULL)
3634 {
3635 gold_assert(layout != NULL);
3636 this->rela_dyn_ = new Reloc_section(parameters->options().combreloc());
3637 layout->add_output_section_data(".rela.dyn", elfcpp::SHT_RELA,
3638 elfcpp::SHF_ALLOC, this->rela_dyn_,
3639 ORDER_DYNAMIC_RELOCS, false);
3640 }
3641 return this->rela_dyn_;
3642 }
3643
3644 // Get the section to use for IRELATIVE relocs, creating it if
3645 // necessary. These go in .rela.dyn, but only after all other dynamic
3646 // relocations. They need to follow the other dynamic relocations so
3647 // that they can refer to global variables initialized by those
3648 // relocs.
3649
3650 template<int size, bool big_endian>
3651 typename Target_aarch64<size, big_endian>::Reloc_section*
3652 Target_aarch64<size, big_endian>::rela_irelative_section(Layout* layout)
3653 {
3654 if (this->rela_irelative_ == NULL)
3655 {
3656 // Make sure we have already created the dynamic reloc section.
3657 this->rela_dyn_section(layout);
3658 this->rela_irelative_ = new Reloc_section(false);
3659 layout->add_output_section_data(".rela.dyn", elfcpp::SHT_RELA,
3660 elfcpp::SHF_ALLOC, this->rela_irelative_,
3661 ORDER_DYNAMIC_RELOCS, false);
3662 gold_assert(this->rela_dyn_->output_section()
3663 == this->rela_irelative_->output_section());
3664 }
3665 return this->rela_irelative_;
3666 }
3667
3668
3669 // do_make_elf_object to override the same function in the base class. We need
3670 // to use a target-specific sub-class of Sized_relobj_file<size, big_endian> to
3671 // store backend specific information. Hence we need to have our own ELF object
3672 // creation.
3673
3674 template<int size, bool big_endian>
3675 Object*
3676 Target_aarch64<size, big_endian>::do_make_elf_object(
3677 const std::string& name,
3678 Input_file* input_file,
3679 off_t offset, const elfcpp::Ehdr<size, big_endian>& ehdr)
3680 {
3681 int et = ehdr.get_e_type();
3682 // ET_EXEC files are valid input for --just-symbols/-R,
3683 // and we treat them as relocatable objects.
3684 if (et == elfcpp::ET_EXEC && input_file->just_symbols())
3685 return Sized_target<size, big_endian>::do_make_elf_object(
3686 name, input_file, offset, ehdr);
3687 else if (et == elfcpp::ET_REL)
3688 {
3689 AArch64_relobj<size, big_endian>* obj =
3690 new AArch64_relobj<size, big_endian>(name, input_file, offset, ehdr);
3691 obj->setup();
3692 return obj;
3693 }
3694 else if (et == elfcpp::ET_DYN)
3695 {
3696 // Keep base implementation.
3697 Sized_dynobj<size, big_endian>* obj =
3698 new Sized_dynobj<size, big_endian>(name, input_file, offset, ehdr);
3699 obj->setup();
3700 return obj;
3701 }
3702 else
3703 {
3704 gold_error(_("%s: unsupported ELF file type %d"),
3705 name.c_str(), et);
3706 return NULL;
3707 }
3708 }
3709
3710
3711 // Scan a relocation for stub generation.
3712
3713 template<int size, bool big_endian>
3714 void
3715 Target_aarch64<size, big_endian>::scan_reloc_for_stub(
3716 const Relocate_info<size, big_endian>* relinfo,
3717 unsigned int r_type,
3718 const Sized_symbol<size>* gsym,
3719 unsigned int r_sym,
3720 const Symbol_value<size>* psymval,
3721 typename elfcpp::Elf_types<size>::Elf_Swxword addend,
3722 Address address)
3723 {
3724 const AArch64_relobj<size, big_endian>* aarch64_relobj =
3725 static_cast<AArch64_relobj<size, big_endian>*>(relinfo->object);
3726
3727 Symbol_value<size> symval;
3728 if (gsym != NULL)
3729 {
3730 const AArch64_reloc_property* arp = aarch64_reloc_property_table->
3731 get_reloc_property(r_type);
3732 if (gsym->use_plt_offset(arp->reference_flags()))
3733 {
3734 // This uses a PLT, change the symbol value.
3735 symval.set_output_value(this->plt_section()->address()
3736 + gsym->plt_offset());
3737 psymval = &symval;
3738 }
3739 else if (gsym->is_undefined())
3740 // There is no need to generate a stub if the symbol is undefined.
3741 return;
3742 }
3743
3744 // Get the symbol value.
3745 typename Symbol_value<size>::Value value = psymval->value(aarch64_relobj, 0);
3746
3747 // Owing to pipelining, the PC relative branches below actually skip
3748 // two instructions when the branch offset is 0.
3749 Address destination = static_cast<Address>(-1);
3750 switch (r_type)
3751 {
3752 case elfcpp::R_AARCH64_CALL26:
3753 case elfcpp::R_AARCH64_JUMP26:
3754 destination = value + addend;
3755 break;
3756 default:
3757 gold_unreachable();
3758 }
3759
3760 int stub_type = The_reloc_stub::
3761 stub_type_for_reloc(r_type, address, destination);
3762 if (stub_type == ST_NONE)
3763 return;
3764
3765 The_stub_table* stub_table = aarch64_relobj->stub_table(relinfo->data_shndx);
3766 gold_assert(stub_table != NULL);
3767
3768 The_reloc_stub_key key(stub_type, gsym, aarch64_relobj, r_sym, addend);
3769 The_reloc_stub* stub = stub_table->find_reloc_stub(key);
3770 if (stub == NULL)
3771 {
3772 stub = new The_reloc_stub(stub_type);
3773 stub_table->add_reloc_stub(stub, key);
3774 }
3775 stub->set_destination_address(destination);
3776 } // End of Target_aarch64::scan_reloc_for_stub
3777
3778
3779 // This function scans a relocation section for stub generation.
3780 // Only branch relocations (R_AARCH64_CALL26 and R_AARCH64_JUMP26) are
3781 // considered here; any such branch whose destination may be out of
3782 // range has a stub created for it via scan_reloc_for_stub.
3783
3784 // BIG_ENDIAN is the endianness of the data. SH_TYPE is the section type:
3785 // SHT_REL or SHT_RELA.
3786
3787 // PRELOCS points to the relocation data. RELOC_COUNT is the number
3788 // of relocs. OUTPUT_SECTION is the output section.
3789 // NEEDS_SPECIAL_OFFSET_HANDLING is true if input offsets need to be
3790 // mapped to output offsets.
3791
3792 // VIEW is the section data, VIEW_ADDRESS is its memory address, and
3793 // VIEW_SIZE is the size. These refer to the input section, unless
3794 // NEEDS_SPECIAL_OFFSET_HANDLING is true, in which case they refer to
3795 // the output section.
3796
3797 template<int size, bool big_endian>
3798 template<int sh_type>
3799 void inline
3800 Target_aarch64<size, big_endian>::scan_reloc_section_for_stubs(
3801 const Relocate_info<size, big_endian>* relinfo,
3802 const unsigned char* prelocs,
3803 size_t reloc_count,
3804 Output_section* /*output_section*/,
3805 bool /*needs_special_offset_handling*/,
3806 const unsigned char* /*view*/,
3807 Address view_address,
3808 section_size_type)
3809 {
3810 typedef typename Reloc_types<sh_type,size,big_endian>::Reloc Reltype;
3811
3812 const int reloc_size =
3813 Reloc_types<sh_type,size,big_endian>::reloc_size;
3814 AArch64_relobj<size, big_endian>* object =
3815 static_cast<AArch64_relobj<size, big_endian>*>(relinfo->object);
3816 unsigned int local_count = object->local_symbol_count();
3817
3818 gold::Default_comdat_behavior default_comdat_behavior;
3819 Comdat_behavior comdat_behavior = CB_UNDETERMINED;
3820
3821 for (size_t i = 0; i < reloc_count; ++i, prelocs += reloc_size)
3822 {
3823 Reltype reloc(prelocs);
3824 typename elfcpp::Elf_types<size>::Elf_WXword r_info = reloc.get_r_info();
3825 unsigned int r_sym = elfcpp::elf_r_sym<size>(r_info);
3826 unsigned int r_type = elfcpp::elf_r_type<size>(r_info);
3827 if (r_type != elfcpp::R_AARCH64_CALL26
3828 && r_type != elfcpp::R_AARCH64_JUMP26)
3829 continue;
3830
3831 section_offset_type offset =
3832 convert_to_section_size_type(reloc.get_r_offset());
3833
3834 // Get the addend.
3835 typename elfcpp::Elf_types<size>::Elf_Swxword addend =
3836 reloc.get_r_addend();
3837
3838 const Sized_symbol<size>* sym;
3839 Symbol_value<size> symval;
3840 const Symbol_value<size> *psymval;
3841 bool is_defined_in_discarded_section;
3842 unsigned int shndx;
3843 if (r_sym < local_count)
3844 {
3845 sym = NULL;
3846 psymval = object->local_symbol(r_sym);
3847
3848 // If the local symbol belongs to a section we are discarding,
3849 // and that section is a debug section, try to find the
3850 // corresponding kept section and map this symbol to its
3851 // counterpart in the kept section. The symbol must not
3852 // correspond to a section we are folding.
3853 bool is_ordinary;
3854 shndx = psymval->input_shndx(&is_ordinary);
3855 is_defined_in_discarded_section =
3856 (is_ordinary
3857 && shndx != elfcpp::SHN_UNDEF
3858 && !object->is_section_included(shndx)
3859 && !relinfo->symtab->is_section_folded(object, shndx));
3860
3861 // We need to compute the would-be final value of this local
3862 // symbol.
3863 if (!is_defined_in_discarded_section)
3864 {
3865 typedef Sized_relobj_file<size, big_endian> ObjType;
3866 typename ObjType::Compute_final_local_value_status status =
3867 object->compute_final_local_value(r_sym, psymval, &symval,
3868 relinfo->symtab);
3869 if (status == ObjType::CFLV_OK)
3870 {
3871 // Currently we cannot handle a branch to a target in
3872 // a merged section. If this is the case, issue an error
3873 // and also free the merge symbol value.
3874 if (!symval.has_output_value())
3875 {
3876 const std::string& section_name =
3877 object->section_name(shndx);
3878 object->error(_("cannot handle branch to local %u "
3879 "in a merged section %s"),
3880 r_sym, section_name.c_str());
3881 }
3882 psymval = &symval;
3883 }
3884 else
3885 {
3886 // We cannot determine the final value.
3887 continue;
3888 }
3889 }
3890 }
3891 else
3892 {
3893 const Symbol* gsym;
3894 gsym = object->global_symbol(r_sym);
3895 gold_assert(gsym != NULL);
3896 if (gsym->is_forwarder())
3897 gsym = relinfo->symtab->resolve_forwards(gsym);
3898
3899 sym = static_cast<const Sized_symbol<size>*>(gsym);
3900 if (sym->has_symtab_index() && sym->symtab_index() != -1U)
3901 symval.set_output_symtab_index(sym->symtab_index());
3902 else
3903 symval.set_no_output_symtab_entry();
3904
3905 // We need to compute the would-be final value of this global
3906 // symbol.
3907 const Symbol_table* symtab = relinfo->symtab;
3908 const Sized_symbol<size>* sized_symbol =
3909 symtab->get_sized_symbol<size>(gsym);
3910 Symbol_table::Compute_final_value_status status;
3911 typename elfcpp::Elf_types<size>::Elf_Addr value =
3912 symtab->compute_final_value<size>(sized_symbol, &status);
3913
3914 // Skip this if the symbol has no output section.
3915 if (status == Symbol_table::CFVS_NO_OUTPUT_SECTION)
3916 continue;
3917 symval.set_output_value(value);
3918
3919 if (gsym->type() == elfcpp::STT_TLS)
3920 symval.set_is_tls_symbol();
3921 else if (gsym->type() == elfcpp::STT_GNU_IFUNC)
3922 symval.set_is_ifunc_symbol();
3923 psymval = &symval;
3924
3925 is_defined_in_discarded_section =
3926 (gsym->is_defined_in_discarded_section()
3927 && gsym->is_undefined());
3928 shndx = 0;
3929 }
3930
3931 Symbol_value<size> symval2;
3932 if (is_defined_in_discarded_section)
3933 {
3934 if (comdat_behavior == CB_UNDETERMINED)
3935 {
3936 std::string name = object->section_name(relinfo->data_shndx);
3937 comdat_behavior = default_comdat_behavior.get(name.c_str());
3938 }
3939 if (comdat_behavior == CB_PRETEND)
3940 {
3941 bool found;
3942 typename elfcpp::Elf_types<size>::Elf_Addr value =
3943 object->map_to_kept_section(shndx, &found);
3944 if (found)
3945 symval2.set_output_value(value + psymval->input_value());
3946 else
3947 symval2.set_output_value(0);
3948 }
3949 else
3950 {
3951 if (comdat_behavior == CB_WARNING)
3952 gold_warning_at_location(relinfo, i, offset,
3953 _("relocation refers to discarded "
3954 "section"));
3955 symval2.set_output_value(0);
3956 }
3957 symval2.set_no_output_symtab_entry();
3958 psymval = &symval2;
3959 }
3960
3961 // If the symbol is a section symbol, we don't know the actual type of
3962 // the destination. Give up.
3963 if (psymval->is_section_symbol())
3964 continue;
3965
3966 this->scan_reloc_for_stub(relinfo, r_type, sym, r_sym, psymval,
3967 addend, view_address + offset);
3968 } // End of iterating relocs in a section
3969 } // End of Target_aarch64::scan_reloc_section_for_stubs
3970
3971
3972 // Scan an input section for stub generation.
3973
3974 template<int size, bool big_endian>
3975 void
3976 Target_aarch64<size, big_endian>::scan_section_for_stubs(
3977 const Relocate_info<size, big_endian>* relinfo,
3978 unsigned int sh_type,
3979 const unsigned char* prelocs,
3980 size_t reloc_count,
3981 Output_section* output_section,
3982 bool needs_special_offset_handling,
3983 const unsigned char* view,
3984 Address view_address,
3985 section_size_type view_size)
3986 {
3987 gold_assert(sh_type == elfcpp::SHT_RELA);
3988 this->scan_reloc_section_for_stubs<elfcpp::SHT_RELA>(
3989 relinfo,
3990 prelocs,
3991 reloc_count,
3992 output_section,
3993 needs_special_offset_handling,
3994 view,
3995 view_address,
3996 view_size);
3997 }
3998
3999
4000 // Relocate a single stub.
4001
4002 template<int size, bool big_endian>
4003 void Target_aarch64<size, big_endian>::
4004 relocate_stub(The_reloc_stub* stub,
4005 const The_relocate_info*,
4006 Output_section*,
4007 unsigned char* view,
4008 Address address,
4009 section_size_type)
4010 {
4011 typedef AArch64_relocate_functions<size, big_endian> The_reloc_functions;
4012 typedef typename The_reloc_functions::Status The_reloc_functions_status;
4013 typedef typename elfcpp::Swap<32,big_endian>::Valtype Insntype;
4014
4015 Insntype* ip = reinterpret_cast<Insntype*>(view);
4016 int insn_number = stub->insn_num();
4017 const uint32_t* insns = stub->insns();
4018 // Check that the insns really are the stub's insns.
4019 for (int i = 0; i < insn_number; ++i)
4020 {
4021 Insntype insn = elfcpp::Swap<32,big_endian>::readval(ip + i);
4022 gold_assert(((uint32_t)insn == insns[i]));
4023 }
4024
4025 Address dest = stub->destination_address();
4026
4027 switch(stub->type())
4028 {
4029 case ST_ADRP_BRANCH:
4030 {
4031 // 1st reloc is ADR_PREL_PG_HI21
4032 The_reloc_functions_status status =
4033 The_reloc_functions::adrp(view, dest, address);
4034 // An error should never arise in the above step; if one does,
4035 // check 'aarch64_valid_for_adrp_p'.
4036 gold_assert(status == The_reloc_functions::STATUS_OKAY);
4037
4038 // 2nd reloc is ADD_ABS_LO12_NC
4039 const AArch64_reloc_property* arp =
4040 aarch64_reloc_property_table->get_reloc_property(
4041 elfcpp::R_AARCH64_ADD_ABS_LO12_NC);
4042 gold_assert(arp != NULL);
4043 status = The_reloc_functions::template
4044 rela_general<32>(view + 4, dest, 0, arp);
4045 // An error should never arise here, since this is an "_NC" (no overflow check) relocation.
4046 gold_assert(status == The_reloc_functions::STATUS_OKAY);
4047 }
4048 break;
4049
4050 case ST_LONG_BRANCH_ABS:
4051 // 1st reloc is R_AARCH64_PREL64, at offset 8
4052 elfcpp::Swap<64,big_endian>::writeval(view + 8, dest);
4053 break;
4054
4055 case ST_LONG_BRANCH_PCREL:
4056 {
4057 // "PC" calculation is the 2nd insn in the stub.
4058 uint64_t offset = dest - (address + 4);
4059 // The 64-bit offset is stored in instruction slots 4 and 5 (byte offset 16).
4060 elfcpp::Swap<64,big_endian>::writeval(view + 16, offset);
4061 }
4062 break;
4063
4064 default:
4065 gold_unreachable();
4066 }
4067 }
4068
4069
4070 // A class to handle the PLT data.
4071 // This is an abstract base class that handles most of the linker details
4072 // but does not know the actual contents of PLT entries. The derived
4073 // classes below fill in those details.
4074
4075 template<int size, bool big_endian>
4076 class Output_data_plt_aarch64 : public Output_section_data
4077 {
4078 public:
4079 typedef Output_data_reloc<elfcpp::SHT_RELA, true, size, big_endian>
4080 Reloc_section;
4081 typedef typename elfcpp::Elf_types<size>::Elf_Addr Address;
4082
4083 Output_data_plt_aarch64(Layout* layout,
4084 uint64_t addralign,
4085 Output_data_got_aarch64<size, big_endian>* got,
4086 Output_data_space* got_plt,
4087 Output_data_space* got_irelative)
4088 : Output_section_data(addralign), tlsdesc_rel_(NULL), irelative_rel_(NULL),
4089 got_(got), got_plt_(got_plt), got_irelative_(got_irelative),
4090 count_(0), irelative_count_(0), tlsdesc_got_offset_(-1U)
4091 { this->init(layout); }
4092
4093 // Initialize the PLT section.
4094 void
4095 init(Layout* layout);
4096
4097 // Add an entry to the PLT.
4098 void
4099 add_entry(Symbol_table*, Layout*, Symbol* gsym);
4100
4101 // Add an entry to the PLT for a local STT_GNU_IFUNC symbol.
4102 unsigned int
4103 add_local_ifunc_entry(Symbol_table* symtab, Layout*,
4104 Sized_relobj_file<size, big_endian>* relobj,
4105 unsigned int local_sym_index);
4106
4107 // Add the relocation for a PLT entry.
4108 void
4109 add_relocation(Symbol_table*, Layout*, Symbol* gsym,
4110 unsigned int got_offset);
4111
4112 // Add the reserved TLSDESC_PLT entry to the PLT.
4113 void
4114 reserve_tlsdesc_entry(unsigned int got_offset)
4115 { this->tlsdesc_got_offset_ = got_offset; }
4116
4117 // Return true if a TLSDESC_PLT entry has been reserved.
4118 bool
4119 has_tlsdesc_entry() const
4120 { return this->tlsdesc_got_offset_ != -1U; }
4121
4122 // Return the GOT offset for the reserved TLSDESC_PLT entry.
4123 unsigned int
4124 get_tlsdesc_got_offset() const
4125 { return this->tlsdesc_got_offset_; }
4126
4127 // Return the PLT offset of the reserved TLSDESC_PLT entry.
4128 unsigned int
4129 get_tlsdesc_plt_offset() const
4130 {
4131 return (this->first_plt_entry_offset() +
4132 (this->count_ + this->irelative_count_)
4133 * this->get_plt_entry_size());
4134 }
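  // Illustrative only, assuming the sizes used by
  // Output_data_plt_aarch64_standard below (first entry 32 bytes, regular
  // entries 16 bytes): with count_ == 3 and irelative_count_ == 1 the
  // reserved TLSDESC entry lands at
  //   32 + (3 + 1) * 16 == 96
  // bytes from the start of the PLT.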
4135
4136 // Return the .rela.plt section data.
4137 Reloc_section*
4138 rela_plt()
4139 { return this->rel_; }
4140
4141 // Return where the TLSDESC relocations should go.
4142 Reloc_section*
4143 rela_tlsdesc(Layout*);
4144
4145 // Return where the IRELATIVE relocations should go in the PLT
4146 // relocations.
4147 Reloc_section*
4148 rela_irelative(Symbol_table*, Layout*);
4149
4150 // Return whether we created a section for IRELATIVE relocations.
4151 bool
4152 has_irelative_section() const
4153 { return this->irelative_rel_ != NULL; }
4154
4155 // Return the number of PLT entries.
4156 unsigned int
4157 entry_count() const
4158 { return this->count_ + this->irelative_count_; }
4159
4160 // Return the offset of the first non-reserved PLT entry.
4161 unsigned int
4162 first_plt_entry_offset() const
4163 { return this->do_first_plt_entry_offset(); }
4164
4165 // Return the size of a PLT entry.
4166 unsigned int
4167 get_plt_entry_size() const
4168 { return this->do_get_plt_entry_size(); }
4169
4170 // Return the size of the reserved TLSDESC PLT entry.
4171 unsigned int
4172 get_plt_tlsdesc_entry_size() const
4173 { return this->do_get_plt_tlsdesc_entry_size(); }
4174
4175 // Return the PLT address to use for a global symbol.
4176 uint64_t
4177 address_for_global(const Symbol*);
4178
4179 // Return the PLT address to use for a local symbol.
4180 uint64_t
4181 address_for_local(const Relobj*, unsigned int symndx);
4182
4183 protected:
4184 // Fill in the first PLT entry.
4185 void
4186 fill_first_plt_entry(unsigned char* pov,
4187 Address got_address,
4188 Address plt_address)
4189 { this->do_fill_first_plt_entry(pov, got_address, plt_address); }
4190
4191 // Fill in a normal PLT entry.
4192 void
4193 fill_plt_entry(unsigned char* pov,
4194 Address got_address,
4195 Address plt_address,
4196 unsigned int got_offset,
4197 unsigned int plt_offset)
4198 {
4199 this->do_fill_plt_entry(pov, got_address, plt_address,
4200 got_offset, plt_offset);
4201 }
4202
4203 // Fill in the reserved TLSDESC PLT entry.
4204 void
4205 fill_tlsdesc_entry(unsigned char* pov,
4206 Address gotplt_address,
4207 Address plt_address,
4208 Address got_base,
4209 unsigned int tlsdesc_got_offset,
4210 unsigned int plt_offset)
4211 {
4212 this->do_fill_tlsdesc_entry(pov, gotplt_address, plt_address, got_base,
4213 tlsdesc_got_offset, plt_offset);
4214 }
4215
4216 virtual unsigned int
4217 do_first_plt_entry_offset() const = 0;
4218
4219 virtual unsigned int
4220 do_get_plt_entry_size() const = 0;
4221
4222 virtual unsigned int
4223 do_get_plt_tlsdesc_entry_size() const = 0;
4224
4225 virtual void
4226 do_fill_first_plt_entry(unsigned char* pov,
4227 Address got_addr,
4228 Address plt_addr) = 0;
4229
4230 virtual void
4231 do_fill_plt_entry(unsigned char* pov,
4232 Address got_address,
4233 Address plt_address,
4234 unsigned int got_offset,
4235 unsigned int plt_offset) = 0;
4236
4237 virtual void
4238 do_fill_tlsdesc_entry(unsigned char* pov,
4239 Address gotplt_address,
4240 Address plt_address,
4241 Address got_base,
4242 unsigned int tlsdesc_got_offset,
4243 unsigned int plt_offset) = 0;
4244
4245 void
4246 do_adjust_output_section(Output_section* os);
4247
4248 // Write to a map file.
4249 void
4250 do_print_to_mapfile(Mapfile* mapfile) const
4251 { mapfile->print_output_data(this, _("** PLT")); }
4252
4253 private:
4254 // Set the final size.
4255 void
4256 set_final_data_size();
4257
4258 // Write out the PLT data.
4259 void
4260 do_write(Output_file*);
4261
4262 // The reloc section.
4263 Reloc_section* rel_;
4264
4265 // The TLSDESC relocs, if necessary. These must follow the regular
4266 // PLT relocs.
4267 Reloc_section* tlsdesc_rel_;
4268
4269 // The IRELATIVE relocs, if necessary. These must follow the
4270 // regular PLT relocations.
4271 Reloc_section* irelative_rel_;
4272
4273 // The .got section.
4274 Output_data_got_aarch64<size, big_endian>* got_;
4275
4276 // The .got.plt section.
4277 Output_data_space* got_plt_;
4278
4279 // The part of the .got.plt section used for IRELATIVE relocs.
4280 Output_data_space* got_irelative_;
4281
4282 // The number of PLT entries.
4283 unsigned int count_;
4284
4285 // Number of PLT entries with R_AARCH64_IRELATIVE relocs. These
4286 // follow the regular PLT entries.
4287 unsigned int irelative_count_;
4288
4289 // GOT offset of the reserved TLSDESC_GOT entry for the lazy trampoline.
4290 // Communicated to the loader via DT_TLSDESC_GOT. The magic value -1
4291 // indicates an offset is not allocated.
4292 unsigned int tlsdesc_got_offset_;
4293 };
4294
4295 // Initialize the PLT section.
4296
4297 template<int size, bool big_endian>
4298 void
4299 Output_data_plt_aarch64<size, big_endian>::init(Layout* layout)
4300 {
4301 this->rel_ = new Reloc_section(false);
4302 layout->add_output_section_data(".rela.plt", elfcpp::SHT_RELA,
4303 elfcpp::SHF_ALLOC, this->rel_,
4304 ORDER_DYNAMIC_PLT_RELOCS, false);
4305 }
4306
4307 template<int size, bool big_endian>
4308 void
4309 Output_data_plt_aarch64<size, big_endian>::do_adjust_output_section(
4310 Output_section* os)
4311 {
4312 os->set_entsize(this->get_plt_entry_size());
4313 }
4314
4315 // Add an entry to the PLT.
4316
4317 template<int size, bool big_endian>
4318 void
4319 Output_data_plt_aarch64<size, big_endian>::add_entry(Symbol_table* symtab,
4320 Layout* layout, Symbol* gsym)
4321 {
4322 gold_assert(!gsym->has_plt_offset());
4323
4324 unsigned int* pcount;
4325 unsigned int plt_reserved;
4326 Output_section_data_build* got;
4327
4328 if (gsym->type() == elfcpp::STT_GNU_IFUNC
4329 && gsym->can_use_relative_reloc(false))
4330 {
4331 pcount = &this->irelative_count_;
4332 plt_reserved = 0;
4333 got = this->got_irelative_;
4334 }
4335 else
4336 {
4337 pcount = &this->count_;
4338 plt_reserved = this->first_plt_entry_offset();
4339 got = this->got_plt_;
4340 }
4341
4342 gsym->set_plt_offset((*pcount) * this->get_plt_entry_size()
4343 + plt_reserved);
4344
4345 ++*pcount;
4346
4347 section_offset_type got_offset = got->current_data_size();
4348
4349 // Every PLT entry needs a GOT entry which points back to the PLT
4350 // entry (this will be changed by the dynamic linker, normally
4351 // lazily when the function is called).
4352 got->set_current_data_size(got_offset + size / 8);
4353
4354 // Every PLT entry needs a reloc.
4355 this->add_relocation(symtab, layout, gsym, got_offset);
4356
4357 // Note that we don't need to save the symbol. The contents of the
4358 // PLT are independent of which symbols are used. The symbols only
4359 // appear in the relocations.
4360 }
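// A hedged worked example, assuming the standard 64-bit layout defined
// below (first PLT entry 32 bytes, regular entries 16 bytes, three reserved
// .got.plt slots): the first non-IFUNC symbol added here gets
//   plt_offset == 32 + 0 * 16 == 32 and got_offset == 24,
// and the next one plt_offset == 48 and got_offset == 32.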
4361
4362 // Add an entry to the PLT for a local STT_GNU_IFUNC symbol. Return
4363 // the PLT offset.
4364
4365 template<int size, bool big_endian>
4366 unsigned int
4367 Output_data_plt_aarch64<size, big_endian>::add_local_ifunc_entry(
4368 Symbol_table* symtab,
4369 Layout* layout,
4370 Sized_relobj_file<size, big_endian>* relobj,
4371 unsigned int local_sym_index)
4372 {
4373 unsigned int plt_offset = this->irelative_count_ * this->get_plt_entry_size();
4374 ++this->irelative_count_;
4375
4376 section_offset_type got_offset = this->got_irelative_->current_data_size();
4377
4378 // Every PLT entry needs a GOT entry which points back to the PLT
4379 // entry.
4380 this->got_irelative_->set_current_data_size(got_offset + size / 8);
4381
4382 // Every PLT entry needs a reloc.
4383 Reloc_section* rela = this->rela_irelative(symtab, layout);
4384 rela->add_symbolless_local_addend(relobj, local_sym_index,
4385 elfcpp::R_AARCH64_IRELATIVE,
4386 this->got_irelative_, got_offset, 0);
4387
4388 return plt_offset;
4389 }
4390
4391 // Add the relocation for a PLT entry.
4392
4393 template<int size, bool big_endian>
4394 void
4395 Output_data_plt_aarch64<size, big_endian>::add_relocation(
4396 Symbol_table* symtab, Layout* layout, Symbol* gsym, unsigned int got_offset)
4397 {
4398 if (gsym->type() == elfcpp::STT_GNU_IFUNC
4399 && gsym->can_use_relative_reloc(false))
4400 {
4401 Reloc_section* rela = this->rela_irelative(symtab, layout);
4402 rela->add_symbolless_global_addend(gsym, elfcpp::R_AARCH64_IRELATIVE,
4403 this->got_irelative_, got_offset, 0);
4404 }
4405 else
4406 {
4407 gsym->set_needs_dynsym_entry();
4408 this->rel_->add_global(gsym, elfcpp::R_AARCH64_JUMP_SLOT, this->got_plt_,
4409 got_offset, 0);
4410 }
4411 }
4412
4413 // Return where the TLSDESC relocations should go, creating it if
4414 // necessary. These follow the JUMP_SLOT relocations.
4415
4416 template<int size, bool big_endian>
4417 typename Output_data_plt_aarch64<size, big_endian>::Reloc_section*
4418 Output_data_plt_aarch64<size, big_endian>::rela_tlsdesc(Layout* layout)
4419 {
4420 if (this->tlsdesc_rel_ == NULL)
4421 {
4422 this->tlsdesc_rel_ = new Reloc_section(false);
4423 layout->add_output_section_data(".rela.plt", elfcpp::SHT_RELA,
4424 elfcpp::SHF_ALLOC, this->tlsdesc_rel_,
4425 ORDER_DYNAMIC_PLT_RELOCS, false);
4426 gold_assert(this->tlsdesc_rel_->output_section()
4427 == this->rel_->output_section());
4428 }
4429 return this->tlsdesc_rel_;
4430 }
4431
4432 // Return where the IRELATIVE relocations should go in the PLT. These
4433 // follow the JUMP_SLOT and the TLSDESC relocations.
4434
4435 template<int size, bool big_endian>
4436 typename Output_data_plt_aarch64<size, big_endian>::Reloc_section*
4437 Output_data_plt_aarch64<size, big_endian>::rela_irelative(Symbol_table* symtab,
4438 Layout* layout)
4439 {
4440 if (this->irelative_rel_ == NULL)
4441 {
4442 // Make sure we have a place for the TLSDESC relocations, in
4443 // case we see any later on.
4444 this->rela_tlsdesc(layout);
4445 this->irelative_rel_ = new Reloc_section(false);
4446 layout->add_output_section_data(".rela.plt", elfcpp::SHT_RELA,
4447 elfcpp::SHF_ALLOC, this->irelative_rel_,
4448 ORDER_DYNAMIC_PLT_RELOCS, false);
4449 gold_assert(this->irelative_rel_->output_section()
4450 == this->rel_->output_section());
4451
4452 if (parameters->doing_static_link())
4453 {
4454 // A statically linked executable will only have a .rela.plt
4455 // section to hold R_AARCH64_IRELATIVE relocs for
4456 // STT_GNU_IFUNC symbols. The library will use these
4457 // symbols to locate the IRELATIVE relocs at program startup
4458 // time.
4459 symtab->define_in_output_data("__rela_iplt_start", NULL,
4460 Symbol_table::PREDEFINED,
4461 this->irelative_rel_, 0, 0,
4462 elfcpp::STT_NOTYPE, elfcpp::STB_GLOBAL,
4463 elfcpp::STV_HIDDEN, 0, false, true);
4464 symtab->define_in_output_data("__rela_iplt_end", NULL,
4465 Symbol_table::PREDEFINED,
4466 this->irelative_rel_, 0, 0,
4467 elfcpp::STT_NOTYPE, elfcpp::STB_GLOBAL,
4468 elfcpp::STV_HIDDEN, 0, true, true);
4469 }
4470 }
4471 return this->irelative_rel_;
4472 }
4473
4474 // Return the PLT address to use for a global symbol.
4475
4476 template<int size, bool big_endian>
4477 uint64_t
4478 Output_data_plt_aarch64<size, big_endian>::address_for_global(
4479 const Symbol* gsym)
4480 {
4481 uint64_t offset = 0;
4482 if (gsym->type() == elfcpp::STT_GNU_IFUNC
4483 && gsym->can_use_relative_reloc(false))
4484 offset = (this->first_plt_entry_offset() +
4485 this->count_ * this->get_plt_entry_size());
4486 return this->address() + offset + gsym->plt_offset();
4487 }
4488
4489 // Return the PLT address to use for a local symbol. These are always
4490 // IRELATIVE relocs.
4491
4492 template<int size, bool big_endian>
4493 uint64_t
4494 Output_data_plt_aarch64<size, big_endian>::address_for_local(
4495 const Relobj* object,
4496 unsigned int r_sym)
4497 {
4498 return (this->address()
4499 + this->first_plt_entry_offset()
4500 + this->count_ * this->get_plt_entry_size()
4501 + object->local_plt_offset(r_sym));
4502 }
4503
4504 // Set the final size.
4505
4506 template<int size, bool big_endian>
4507 void
4508 Output_data_plt_aarch64<size, big_endian>::set_final_data_size()
4509 {
4510 unsigned int count = this->count_ + this->irelative_count_;
4511 unsigned int extra_size = 0;
4512 if (this->has_tlsdesc_entry())
4513 extra_size += this->get_plt_tlsdesc_entry_size();
4514 this->set_data_size(this->first_plt_entry_offset()
4515 + count * this->get_plt_entry_size()
4516 + extra_size);
4517 }
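// A worked example (illustrative values only), again assuming the standard
// sizes below: 4 regular entries, 1 IRELATIVE entry and a reserved TLSDESC
// entry give a data size of
//   32 + (4 + 1) * 16 + 32 == 144 bytes.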
4518
4519 template<int size, bool big_endian>
4520 class Output_data_plt_aarch64_standard :
4521 public Output_data_plt_aarch64<size, big_endian>
4522 {
4523 public:
4524 typedef typename elfcpp::Elf_types<size>::Elf_Addr Address;
4525 Output_data_plt_aarch64_standard(
4526 Layout* layout,
4527 Output_data_got_aarch64<size, big_endian>* got,
4528 Output_data_space* got_plt,
4529 Output_data_space* got_irelative)
4530 : Output_data_plt_aarch64<size, big_endian>(layout,
4531 size == 32 ? 4 : 8,
4532 got, got_plt,
4533 got_irelative)
4534 { }
4535
4536 protected:
4537 // Return the offset of the first non-reserved PLT entry.
4538 virtual unsigned int
4539 do_first_plt_entry_offset() const
4540 { return this->first_plt_entry_size; }
4541
4542 // Return the size of a PLT entry
4543 virtual unsigned int
4544 do_get_plt_entry_size() const
4545 { return this->plt_entry_size; }
4546
4547 // Return the size of a tlsdesc entry
4548 virtual unsigned int
4549 do_get_plt_tlsdesc_entry_size() const
4550 { return this->plt_tlsdesc_entry_size; }
4551
4552 virtual void
4553 do_fill_first_plt_entry(unsigned char* pov,
4554 Address got_address,
4555 Address plt_address);
4556
4557 virtual void
4558 do_fill_plt_entry(unsigned char* pov,
4559 Address got_address,
4560 Address plt_address,
4561 unsigned int got_offset,
4562 unsigned int plt_offset);
4563
4564 virtual void
4565 do_fill_tlsdesc_entry(unsigned char* pov,
4566 Address gotplt_address,
4567 Address plt_address,
4568 Address got_base,
4569 unsigned int tlsdesc_got_offset,
4570 unsigned int plt_offset);
4571
4572 private:
4573 // The size of the first PLT entry, in bytes.
4574 static const int first_plt_entry_size = 32;
4575 // The size of a regular PLT entry, in bytes.
4576 static const int plt_entry_size = 16;
4577 // The size of the reserved PLT TLSDESC entry, in bytes.
4578 static const int plt_tlsdesc_entry_size = 32;
4579 // Template for the first PLT entry.
4580 static const uint32_t first_plt_entry[first_plt_entry_size / 4];
4581 // Template for subsequent PLT entries.
4582 static const uint32_t plt_entry[plt_entry_size / 4];
4583 // The reserved TLSDESC entry in the PLT for an executable.
4584 static const uint32_t tlsdesc_plt_entry[plt_tlsdesc_entry_size / 4];
4585 };
4586
4587 // The first entry in the PLT for an executable.
4588
4589 template<>
4590 const uint32_t
4591 Output_data_plt_aarch64_standard<32, false>::
4592 first_plt_entry[first_plt_entry_size / 4] =
4593 {
4594 0xa9bf7bf0, /* stp x16, x30, [sp, #-16]! */
4595 0x90000010, /* adrp x16, PLT_GOT+0x8 */
4596 0xb9400A11, /* ldr w17, [x16, #PLT_GOT+0x8] */
4597 0x11002210, /* add w16, w16,#PLT_GOT+0x8 */
4598 0xd61f0220, /* br x17 */
4599 0xd503201f, /* nop */
4600 0xd503201f, /* nop */
4601 0xd503201f, /* nop */
4602 };
4603
4604
4605 template<>
4606 const uint32_t
4607 Output_data_plt_aarch64_standard<32, true>::
4608 first_plt_entry[first_plt_entry_size / 4] =
4609 {
4610 0xa9bf7bf0, /* stp x16, x30, [sp, #-16]! */
4611 0x90000010, /* adrp x16, PLT_GOT+0x8 */
4612 0xb9400A11, /* ldr w17, [x16, #PLT_GOT+0x8] */
4613 0x11002210, /* add w16, w16,#PLT_GOT+0x8 */
4614 0xd61f0220, /* br x17 */
4615 0xd503201f, /* nop */
4616 0xd503201f, /* nop */
4617 0xd503201f, /* nop */
4618 };
4619
4620
4621 template<>
4622 const uint32_t
4623 Output_data_plt_aarch64_standard<64, false>::
4624 first_plt_entry[first_plt_entry_size / 4] =
4625 {
4626 0xa9bf7bf0, /* stp x16, x30, [sp, #-16]! */
4627 0x90000010, /* adrp x16, PLT_GOT+16 */
4628 0xf9400A11, /* ldr x17, [x16, #PLT_GOT+0x10] */
4629 0x91004210, /* add x16, x16,#PLT_GOT+0x10 */
4630 0xd61f0220, /* br x17 */
4631 0xd503201f, /* nop */
4632 0xd503201f, /* nop */
4633 0xd503201f, /* nop */
4634 };
4635
4636
4637 template<>
4638 const uint32_t
4639 Output_data_plt_aarch64_standard<64, true>::
4640 first_plt_entry[first_plt_entry_size / 4] =
4641 {
4642 0xa9bf7bf0, /* stp x16, x30, [sp, #-16]! */
4643 0x90000010, /* adrp x16, PLT_GOT+16 */
4644 0xf9400A11, /* ldr x17, [x16, #PLT_GOT+0x10] */
4645 0x91004210, /* add x16, x16,#PLT_GOT+0x10 */
4646 0xd61f0220, /* br x17 */
4647 0xd503201f, /* nop */
4648 0xd503201f, /* nop */
4649 0xd503201f, /* nop */
4650 };
4651
4652
4653 template<>
4654 const uint32_t
4655 Output_data_plt_aarch64_standard<32, false>::
4656 plt_entry[plt_entry_size / 4] =
4657 {
4658 0x90000010, /* adrp x16, PLTGOT + n * 4 */
4659 0xb9400211, /* ldr w17, [w16, PLTGOT + n * 4] */
4660 0x11000210, /* add w16, w16, :lo12:PLTGOT + n * 4 */
4661 0xd61f0220, /* br x17. */
4662 };
4663
4664
4665 template<>
4666 const uint32_t
4667 Output_data_plt_aarch64_standard<32, true>::
4668 plt_entry[plt_entry_size / 4] =
4669 {
4670 0x90000010, /* adrp x16, PLTGOT + n * 4 */
4671 0xb9400211, /* ldr w17, [w16, PLTGOT + n * 4] */
4672 0x11000210, /* add w16, w16, :lo12:PLTGOT + n * 4 */
4673 0xd61f0220, /* br x17. */
4674 };
4675
4676
4677 template<>
4678 const uint32_t
4679 Output_data_plt_aarch64_standard<64, false>::
4680 plt_entry[plt_entry_size / 4] =
4681 {
4682 0x90000010, /* adrp x16, PLTGOT + n * 8 */
4683 0xf9400211, /* ldr x17, [x16, PLTGOT + n * 8] */
4684 0x91000210, /* add x16, x16, :lo12:PLTGOT + n * 8 */
4685 0xd61f0220, /* br x17. */
4686 };
4687
4688
4689 template<>
4690 const uint32_t
4691 Output_data_plt_aarch64_standard<64, true>::
4692 plt_entry[plt_entry_size / 4] =
4693 {
4694 0x90000010, /* adrp x16, PLTGOT + n * 8 */
4695 0xf9400211, /* ldr x17, [x16, PLTGOT + n * 8] */
4696 0x91000210, /* add x16, x16, :lo12:PLTGOT + n * 8 */
4697 0xd61f0220, /* br x17. */
4698 };
4699
4700
4701 template<int size, bool big_endian>
4702 void
4703 Output_data_plt_aarch64_standard<size, big_endian>::do_fill_first_plt_entry(
4704 unsigned char* pov,
4705 Address got_address,
4706 Address plt_address)
4707 {
4708 // PLT0 of the small PLT looks like this in ELF64 -
4709 // stp x16, x30, [sp, #-16]! Save the reloc and lr on stack.
4710 // adrp x16, PLT_GOT + 16 Get the page base of the GOTPLT
4711 // ldr x17, [x16, #:lo12:PLT_GOT+16] Load the address of the
4712 // symbol resolver
4713 // add x16, x16, #:lo12:PLT_GOT+16 Load the lo12 bits of the
4714 // GOTPLT entry for this.
4715 // br x17
4716 // PLT0 will be slightly different in ELF32 due to different got entry
4717 // size.
4718 memcpy(pov, this->first_plt_entry, this->first_plt_entry_size);
4719 Address gotplt_2nd_ent = got_address + (size / 8) * 2;
4720
4721 // Fill in the top 21 bits for this: ADRP x16, PLT_GOT + 8 * 2.
4722 // ADRP: ((PG(S+A) - PG(P)) >> 12) & 0x1fffff.
4723 // FIXME: This only works for 64bit
4724 AArch64_relocate_functions<size, big_endian>::adrp(pov + 4,
4725 gotplt_2nd_ent, plt_address + 4);
4726
4727 // Fill in R_AARCH64_LDST8_LO12
4728 elfcpp::Swap<32, big_endian>::writeval(
4729 pov + 8,
4730 ((this->first_plt_entry[2] & 0xffc003ff)
4731 | ((gotplt_2nd_ent & 0xff8) << 7)));
4732
4733 // Fill in R_AARCH64_ADD_ABS_LO12
4734 elfcpp::Swap<32, big_endian>::writeval(
4735 pov + 12,
4736 ((this->first_plt_entry[3] & 0xffc003ff)
4737 | ((gotplt_2nd_ent & 0xfff) << 10)));
4738 }
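// A hedged worked example of the low-12-bit patching above (the address is
// hypothetical): if gotplt_2nd_ent == 0x411018, its low 12 bits are 0x018.
//   LDR: (0x018 & 0xff8) << 7 == 3 << 10, i.e. imm12 == 0x18 / 8 == 3,
//        because the 64-bit load scales its immediate by 8.
//   ADD: (0x018 & 0xfff) << 10 places imm12 == 0x18 unscaled.
// Both immediates occupy bits [21:10] of their instructions.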
4739
4740
4741 // Subsequent entries in the PLT for an executable.
4742 // FIXME: This only works for 64bit
4743
4744 template<int size, bool big_endian>
4745 void
4746 Output_data_plt_aarch64_standard<size, big_endian>::do_fill_plt_entry(
4747 unsigned char* pov,
4748 Address got_address,
4749 Address plt_address,
4750 unsigned int got_offset,
4751 unsigned int plt_offset)
4752 {
4753 memcpy(pov, this->plt_entry, this->plt_entry_size);
4754
4755 Address gotplt_entry_address = got_address + got_offset;
4756 Address plt_entry_address = plt_address + plt_offset;
4757
4758 // Fill in R_AARCH64_PCREL_ADR_HI21
4759 AArch64_relocate_functions<size, big_endian>::adrp(
4760 pov,
4761 gotplt_entry_address,
4762 plt_entry_address);
4763
4764 // Fill in R_AARCH64_LDST64_ABS_LO12
4765 elfcpp::Swap<32, big_endian>::writeval(
4766 pov + 4,
4767 ((this->plt_entry[1] & 0xffc003ff)
4768 | ((gotplt_entry_address & 0xff8) << 7)));
4769
4770 // Fill in R_AARCH64_ADD_ABS_LO12
4771 elfcpp::Swap<32, big_endian>::writeval(
4772 pov + 8,
4773 ((this->plt_entry[2] & 0xffc003ff)
4774 | ((gotplt_entry_address & 0xfff) <<10)));
4775
4776 }
4777
4778
4779 template<>
4780 const uint32_t
4781 Output_data_plt_aarch64_standard<32, false>::
4782 tlsdesc_plt_entry[plt_tlsdesc_entry_size / 4] =
4783 {
4784 0xa9bf0fe2, /* stp x2, x3, [sp, #-16]! */
4785 0x90000002, /* adrp x2, 0 */
4786 0x90000003, /* adrp x3, 0 */
4787 0xb9400042, /* ldr w2, [w2, #0] */
4788 0x11000063, /* add w3, w3, 0 */
4789 0xd61f0040, /* br x2 */
4790 0xd503201f, /* nop */
4791 0xd503201f, /* nop */
4792 };
4793
4794 template<>
4795 const uint32_t
4796 Output_data_plt_aarch64_standard<32, true>::
4797 tlsdesc_plt_entry[plt_tlsdesc_entry_size / 4] =
4798 {
4799 0xa9bf0fe2, /* stp x2, x3, [sp, #-16]! */
4800 0x90000002, /* adrp x2, 0 */
4801 0x90000003, /* adrp x3, 0 */
4802 0xb9400042, /* ldr w2, [w2, #0] */
4803 0x11000063, /* add w3, w3, 0 */
4804 0xd61f0040, /* br x2 */
4805 0xd503201f, /* nop */
4806 0xd503201f, /* nop */
4807 };
4808
4809 template<>
4810 const uint32_t
4811 Output_data_plt_aarch64_standard<64, false>::
4812 tlsdesc_plt_entry[plt_tlsdesc_entry_size / 4] =
4813 {
4814 0xa9bf0fe2, /* stp x2, x3, [sp, #-16]! */
4815 0x90000002, /* adrp x2, 0 */
4816 0x90000003, /* adrp x3, 0 */
4817 0xf9400042, /* ldr x2, [x2, #0] */
4818 0x91000063, /* add x3, x3, 0 */
4819 0xd61f0040, /* br x2 */
4820 0xd503201f, /* nop */
4821 0xd503201f, /* nop */
4822 };
4823
4824 template<>
4825 const uint32_t
4826 Output_data_plt_aarch64_standard<64, true>::
4827 tlsdesc_plt_entry[plt_tlsdesc_entry_size / 4] =
4828 {
4829 0xa9bf0fe2, /* stp x2, x3, [sp, #-16]! */
4830 0x90000002, /* adrp x2, 0 */
4831 0x90000003, /* adrp x3, 0 */
4832 0xf9400042, /* ldr x2, [x2, #0] */
4833 0x91000063, /* add x3, x3, 0 */
4834 0xd61f0040, /* br x2 */
4835 0xd503201f, /* nop */
4836 0xd503201f, /* nop */
4837 };
4838
4839 template<int size, bool big_endian>
4840 void
4841 Output_data_plt_aarch64_standard<size, big_endian>::do_fill_tlsdesc_entry(
4842 unsigned char* pov,
4843 Address gotplt_address,
4844 Address plt_address,
4845 Address got_base,
4846 unsigned int tlsdesc_got_offset,
4847 unsigned int plt_offset)
4848 {
4849 memcpy(pov, tlsdesc_plt_entry, plt_tlsdesc_entry_size);
4850
4851 // move DT_TLSDESC_GOT address into x2
4852 // move .got.plt address into x3
4853 Address tlsdesc_got_entry = got_base + tlsdesc_got_offset;
4854 Address plt_entry_address = plt_address + plt_offset;
4855
4856 // R_AARCH64_ADR_PREL_PG_HI21
4857 AArch64_relocate_functions<size, big_endian>::adrp(
4858 pov + 4,
4859 tlsdesc_got_entry,
4860 plt_entry_address + 4);
4861
4862 // R_AARCH64_ADR_PREL_PG_HI21
4863 AArch64_relocate_functions<size, big_endian>::adrp(
4864 pov + 8,
4865 gotplt_address,
4866 plt_entry_address + 8);
4867
4868 // R_AARCH64_LDST64_ABS_LO12
4869 elfcpp::Swap<32, big_endian>::writeval(
4870 pov + 12,
4871 ((this->tlsdesc_plt_entry[3] & 0xffc003ff)
4872 | ((tlsdesc_got_entry & 0xff8) << 7)));
4873
4874 // R_AARCH64_ADD_ABS_LO12
4875 elfcpp::Swap<32, big_endian>::writeval(
4876 pov + 16,
4877 ((this->tlsdesc_plt_entry[4] & 0xffc003ff)
4878 | ((gotplt_address & 0xfff) << 10)));
4879 }
4880
4881 // Write out the PLT. This uses the hand-coded instructions above,
4882 // and adjusts them as needed.
4883
4884 template<int size, bool big_endian>
4885 void
4886 Output_data_plt_aarch64<size, big_endian>::do_write(Output_file* of)
4887 {
4888 const off_t offset = this->offset();
4889 const section_size_type oview_size =
4890 convert_to_section_size_type(this->data_size());
4891 unsigned char* const oview = of->get_output_view(offset, oview_size);
4892
4893 const off_t got_file_offset = this->got_plt_->offset();
4894 gold_assert(got_file_offset + this->got_plt_->data_size()
4895 == this->got_irelative_->offset());
4896
4897 const section_size_type got_size =
4898 convert_to_section_size_type(this->got_plt_->data_size()
4899 + this->got_irelative_->data_size());
4900 unsigned char* const got_view = of->get_output_view(got_file_offset,
4901 got_size);
4902
4903 unsigned char* pov = oview;
4904
4905 // The base address of the .plt section.
4906 typename elfcpp::Elf_types<size>::Elf_Addr plt_address = this->address();
4907 // The base address of the PLT portion of the .got section.
4908 typename elfcpp::Elf_types<size>::Elf_Addr gotplt_address
4909 = this->got_plt_->address();
4910
4911 this->fill_first_plt_entry(pov, gotplt_address, plt_address);
4912 pov += this->first_plt_entry_offset();
4913
4914 // The first three entries in .got.plt are reserved.
4915 unsigned char* got_pov = got_view;
4916 memset(got_pov, 0, size / 8 * AARCH64_GOTPLT_RESERVE_COUNT);
4917 got_pov += (size / 8) * AARCH64_GOTPLT_RESERVE_COUNT;
4918
4919 unsigned int plt_offset = this->first_plt_entry_offset();
4920 unsigned int got_offset = (size / 8) * AARCH64_GOTPLT_RESERVE_COUNT;
4921 const unsigned int count = this->count_ + this->irelative_count_;
4922 for (unsigned int plt_index = 0;
4923 plt_index < count;
4924 ++plt_index,
4925 pov += this->get_plt_entry_size(),
4926 got_pov += size / 8,
4927 plt_offset += this->get_plt_entry_size(),
4928 got_offset += size / 8)
4929 {
4930 // Set and adjust the PLT entry itself.
4931 this->fill_plt_entry(pov, gotplt_address, plt_address,
4932 got_offset, plt_offset);
4933
4934 // Set the entry in the GOT, which points to plt0.
4935 elfcpp::Swap<size, big_endian>::writeval(got_pov, plt_address);
4936 }
4937
4938 if (this->has_tlsdesc_entry())
4939 {
4940 // Set and adjust the reserved TLSDESC PLT entry.
4941 unsigned int tlsdesc_got_offset = this->get_tlsdesc_got_offset();
4942 // The base address of the .got section.
4943 typename elfcpp::Elf_types<size>::Elf_Addr got_base =
4944 this->got_->address();
4945 this->fill_tlsdesc_entry(pov, gotplt_address, plt_address, got_base,
4946 tlsdesc_got_offset, plt_offset);
4947 pov += this->get_plt_tlsdesc_entry_size();
4948 }
4949
4950 gold_assert(static_cast<section_size_type>(pov - oview) == oview_size);
4951 gold_assert(static_cast<section_size_type>(got_pov - got_view) == got_size);
4952
4953 of->write_output_view(offset, oview_size, oview);
4954 of->write_output_view(got_file_offset, got_size, got_view);
4955 }
4956
4957 // Describes how to update the immediate field of an instruction.
4958 struct AArch64_howto
4959 {
4960 // The immediate field mask.
4961 elfcpp::Elf_Xword dst_mask;
4962
4963 // The bit offset at which to insert the relocation immediate.
4964 int doffset;
4965
4966 // The second part offset, if the immediate field has two parts.
4967 // -1 if the immediate field has only one part.
4968 int doffset2;
4969 };
4970
4971 static const AArch64_howto aarch64_howto[AArch64_reloc_property::INST_NUM] =
4972 {
4973 {0, -1, -1}, // DATA
4974 {0x1fffe0, 5, -1}, // MOVW [20:5]-imm16
4975 {0xffffe0, 5, -1}, // LD [23:5]-imm19
4976 {0x60ffffe0, 29, 5}, // ADR [30:29]-immlo [23:5]-immhi
4977 {0x60ffffe0, 29, 5}, // ADRP [30:29]-immlo [23:5]-immhi
4978 {0x3ffc00, 10, -1}, // ADD [21:10]-imm12
4979 {0x3ffc00, 10, -1}, // LDST [21:10]-imm12
4980 {0x7ffe0, 5, -1}, // TBZNZ [18:5]-imm14
4981 {0xffffe0, 5, -1}, // CONDB [23:5]-imm19
4982 {0x3ffffff, 0, -1}, // B [25:0]-imm26
4983 {0x3ffffff, 0, -1}, // CALL [25:0]-imm26
4984 };
4985
4986 // AArch64 relocate function class
4987
4988 template<int size, bool big_endian>
4989 class AArch64_relocate_functions
4990 {
4991 public:
4992 typedef enum
4993 {
4994 STATUS_OKAY, // No error during relocation.
4995 STATUS_OVERFLOW, // Relocation overflow.
4996 STATUS_BAD_RELOC, // Relocation cannot be applied.
4997 } Status;
4998
4999 typedef AArch64_relocate_functions<size, big_endian> This;
5000 typedef typename elfcpp::Elf_types<size>::Elf_Addr Address;
5001 typedef Relocate_info<size, big_endian> The_relocate_info;
5002 typedef AArch64_relobj<size, big_endian> The_aarch64_relobj;
5003 typedef Reloc_stub<size, big_endian> The_reloc_stub;
5004 typedef Stub_table<size, big_endian> The_stub_table;
5005 typedef elfcpp::Rela<size, big_endian> The_rela;
5006 typedef typename elfcpp::Swap<size, big_endian>::Valtype AArch64_valtype;
5007
5008 // Return the page address of the address.
5009 // Page(address) = address & ~0xFFF
5010
5011 static inline AArch64_valtype
5012 Page(Address address)
5013 {
5014 return (address & (~static_cast<Address>(0xFFF)));
5015 }
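  // A worked example (hypothetical addresses): Page(0x412345) == 0x412000
  // and Page(0x400010) == 0x400000, so an ADRP at P == 0x400010 targeting
  // S + A == 0x412345 encodes the immediate
  //   (Page(S + A) - Page(P)) >> 12 == 0x12000 >> 12 == 0x12.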
5016
5017 private:
5018 // Update instruction (pointed by view) with selected bits (immed).
5019 // val = (val & ~dst_mask) | (immed << doffset)
5020
5021 template<int valsize>
5022 static inline void
5023 update_view(unsigned char* view,
5024 AArch64_valtype immed,
5025 elfcpp::Elf_Xword doffset,
5026 elfcpp::Elf_Xword dst_mask)
5027 {
5028 typedef typename elfcpp::Swap<valsize, big_endian>::Valtype Valtype;
5029 Valtype* wv = reinterpret_cast<Valtype*>(view);
5030 Valtype val = elfcpp::Swap<valsize, big_endian>::readval(wv);
5031
5032 // Clear immediate fields.
5033 val &= ~dst_mask;
5034 elfcpp::Swap<valsize, big_endian>::writeval(wv,
5035 static_cast<Valtype>(val | (immed << doffset)));
5036 }
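  // A worked example (hypothetical values) for an ADD-class relocation,
  // where aarch64_howto gives dst_mask == 0x3ffc00 and doffset == 10:
  //   update_view<32>(view, 0x18, 10, 0x3ffc00)
  // turns 0x91000210 (add x16, x16, #0) into 0x91006210
  // (add x16, x16, #0x18), leaving all non-immediate bits untouched.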
5037
5038 // Update two parts of an instruction (pointed by view) with selected
5039 // bits (immed1 and immed2).
5040 // val = (val & ~dst_mask) | (immed1 << doffset1) | (immed2 << doffset2)
5041
5042 template<int valsize>
5043 static inline void
5044 update_view_two_parts(
5045 unsigned char* view,
5046 AArch64_valtype immed1,
5047 AArch64_valtype immed2,
5048 elfcpp::Elf_Xword doffset1,
5049 elfcpp::Elf_Xword doffset2,
5050 elfcpp::Elf_Xword dst_mask)
5051 {
5052 typedef typename elfcpp::Swap<valsize, big_endian>::Valtype Valtype;
5053 Valtype* wv = reinterpret_cast<Valtype*>(view);
5054 Valtype val = elfcpp::Swap<valsize, big_endian>::readval(wv);
5055 val &= ~dst_mask;
5056 elfcpp::Swap<valsize, big_endian>::writeval(wv,
5057 static_cast<Valtype>(val | (immed1 << doffset1) |
5058 (immed2 << doffset2)));
5059 }
5060
5061 // Update adr or adrp instruction with immed.
5062 // In adr and adrp: [30:29] immlo [23:5] immhi
5063
5064 static inline void
5065 update_adr(unsigned char* view, AArch64_valtype immed)
5066 {
5067 elfcpp::Elf_Xword dst_mask = (0x3 << 29) | (0x7ffff << 5);
5068 This::template update_view_two_parts<32>(
5069 view,
5070 immed & 0x3,
5071 (immed & 0x1ffffc) >> 2,
5072 29,
5073 5,
5074 dst_mask);
5075 }
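  // A worked example (hypothetical value): for immed == 0x12345 the call
  // above splits it into
  //   immlo == immed & 0x3             == 0x1    -> bits [30:29]
  //   immhi == (immed & 0x1ffffc) >> 2 == 0x48d1 -> bits [23:5]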
5076
5077 // Update movz/movn instruction with bits immed.
5078 // Set instruction to movz if is_movz is true, otherwise set instruction
5079 // to movn.
5080
5081 static inline void
5082 update_movnz(unsigned char* view,
5083 AArch64_valtype immed,
5084 bool is_movz)
5085 {
5086 typedef typename elfcpp::Swap<32, big_endian>::Valtype Valtype;
5087 Valtype* wv = reinterpret_cast<Valtype*>(view);
5088 Valtype val = elfcpp::Swap<32, big_endian>::readval(wv);
5089
5090 const elfcpp::Elf_Xword doffset =
5091 aarch64_howto[AArch64_reloc_property::INST_MOVW].doffset;
5092 const elfcpp::Elf_Xword dst_mask =
5093 aarch64_howto[AArch64_reloc_property::INST_MOVW].dst_mask;
5094
5095 // Clear immediate fields and opc code.
5096 val &= ~(dst_mask | (0x3 << 29));
5097
5098 // Set instruction to movz or movn.
5099 // movz: [30:29] is 10 movn: [30:29] is 00
5100 if (is_movz)
5101 val |= (0x2 << 29);
5102
5103 elfcpp::Swap<32, big_endian>::writeval(wv,
5104 static_cast<Valtype>(val | (immed << doffset)));
5105 }
5106
5107 // Update selected bits in text.
5108
5109 template<int valsize>
5110 static inline typename This::Status
5111 reloc_common(unsigned char* view, Address x,
5112 const AArch64_reloc_property* reloc_property)
5113 {
5114 // Select bits from X.
5115 Address immed = reloc_property->select_x_value(x);
5116
5117 // Update view.
5118 const AArch64_reloc_property::Reloc_inst inst =
5119 reloc_property->reloc_inst();
5120 // If this is a data relocation, or the instruction's immediate field
5121 // has two parts, this function must not be used.
5122 gold_assert(aarch64_howto[inst].doffset2 == -1 &&
5123 aarch64_howto[inst].doffset != -1);
5124 This::template update_view<valsize>(view, immed,
5125 aarch64_howto[inst].doffset,
5126 aarch64_howto[inst].dst_mask);
5127
5128 // Check for overflow or misalignment if needed.
5129 return (reloc_property->checkup_x_value(x)
5130 ? This::STATUS_OKAY
5131 : This::STATUS_OVERFLOW);
5132 }
5133
5134 public:
5135
5136 // Construct a B insn. Note that although we group it here with the other
5137 // relocation operations, there is actually no 'relocation' involved.
5138 static inline void
5139 construct_b(unsigned char* view, unsigned int branch_offset)
5140 {
5141 update_view_two_parts<32>(view, 0x05, (branch_offset >> 2),
5142 26, 0, 0xffffffff);
5143 }
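  // A worked example (hypothetical offset): construct_b(view, 0x1000)
  // writes 0x14000400, i.e. "b .+0x1000" -- opcode 0b000101 in bits
  // [31:26] and imm26 == 0x1000 >> 2 == 0x400. The offset must be a
  // multiple of 4 and fit in the branch range of +/-128 MiB.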
5144
5145 // Do a simple rela relocation at unaligned addresses.
5146
5147 template<int valsize>
5148 static inline typename This::Status
5149 rela_ua(unsigned char* view,
5150 const Sized_relobj_file<size, big_endian>* object,
5151 const Symbol_value<size>* psymval,
5152 AArch64_valtype addend,
5153 const AArch64_reloc_property* reloc_property)
5154 {
5155 typedef typename elfcpp::Swap_unaligned<valsize, big_endian>::Valtype
5156 Valtype;
5157 typename elfcpp::Elf_types<size>::Elf_Addr x =
5158 psymval->value(object, addend);
5159 elfcpp::Swap_unaligned<valsize, big_endian>::writeval(view,
5160 static_cast<Valtype>(x));
5161 return (reloc_property->checkup_x_value(x)
5162 ? This::STATUS_OKAY
5163 : This::STATUS_OVERFLOW);
5164 }
5165
5166 // Do a simple pc-relative relocation at unaligned addresses.
5167
5168 template<int valsize>
5169 static inline typename This::Status
5170 pcrela_ua(unsigned char* view,
5171 const Sized_relobj_file<size, big_endian>* object,
5172 const Symbol_value<size>* psymval,
5173 AArch64_valtype addend,
5174 Address address,
5175 const AArch64_reloc_property* reloc_property)
5176 {
5177 typedef typename elfcpp::Swap_unaligned<valsize, big_endian>::Valtype
5178 Valtype;
5179 Address x = psymval->value(object, addend) - address;
5180 elfcpp::Swap_unaligned<valsize, big_endian>::writeval(view,
5181 static_cast<Valtype>(x));
5182 return (reloc_property->checkup_x_value(x)
5183 ? This::STATUS_OKAY
5184 : This::STATUS_OVERFLOW);
5185 }
5186
5187 // Do a simple rela relocation at aligned addresses.
5188
5189 template<int valsize>
5190 static inline typename This::Status
5191 rela(
5192 unsigned char* view,
5193 const Sized_relobj_file<size, big_endian>* object,
5194 const Symbol_value<size>* psymval,
5195 AArch64_valtype addend,
5196 const AArch64_reloc_property* reloc_property)
5197 {
5198 typedef typename elfcpp::Swap<valsize, big_endian>::Valtype Valtype;
5199 Valtype* wv = reinterpret_cast<Valtype*>(view);
5200 Address x = psymval->value(object, addend);
5201 elfcpp::Swap<valsize, big_endian>::writeval(wv, static_cast<Valtype>(x));
5202 return (reloc_property->checkup_x_value(x)
5203 ? This::STATUS_OKAY
5204 : This::STATUS_OVERFLOW);
5205 }
5206
5207 // Do relocate. Update selected bits in text.
5208 // new_val = (val & ~dst_mask) | (immed << doffset)
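// For example (illustrative values): an ADD-immediate insn keeps its imm12
// field in bits [21:10], so doffset is 10 and dst_mask is 0xfff << 10; for
// immed = 0x123 the patched word is (val & ~(0xfff << 10)) | (0x123 << 10).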
5209
5210 template<int valsize>
5211 static inline typename This::Status
5212 rela_general(unsigned char* view,
5213 const Sized_relobj_file<size, big_endian>* object,
5214 const Symbol_value<size>* psymval,
5215 AArch64_valtype addend,
5216 const AArch64_reloc_property* reloc_property)
5217 {
5218 // Calculate relocation.
5219 Address x = psymval->value(object, addend);
5220 return This::template reloc_common<valsize>(view, x, reloc_property);
5221 }
5222
5223 // Do relocate. Update selected bits in text.
5224 // new val = (val & ~dst_mask) | (immed << doffset)
5225
5226 template<int valsize>
5227 static inline typename This::Status
5228 rela_general(
5229 unsigned char* view,
5230 AArch64_valtype s,
5231 AArch64_valtype addend,
5232 const AArch64_reloc_property* reloc_property)
5233 {
5234 // Calculate relocation.
5235 Address x = s + addend;
5236 return This::template reloc_common<valsize>(view, x, reloc_property);
5237 }
5238
5239 // Do address relative relocate. Update selected bits in text.
5240 // new val = (val & ~dst_mask) | (immed << doffset)
5241
5242 template<int valsize>
5243 static inline typename This::Status
5244 pcrela_general(
5245 unsigned char* view,
5246 const Sized_relobj_file<size, big_endian>* object,
5247 const Symbol_value<size>* psymval,
5248 AArch64_valtype addend,
5249 Address address,
5250 const AArch64_reloc_property* reloc_property)
5251 {
5252 // Calculate relocation.
5253 Address x = psymval->value(object, addend) - address;
5254 return This::template reloc_common<valsize>(view, x, reloc_property);
5255 }
5256
5257
5258 // Calculate (S + A) - address, update adr instruction.
5259
5260 static inline typename This::Status
5261 adr(unsigned char* view,
5262 const Sized_relobj_file<size, big_endian>* object,
5263 const Symbol_value<size>* psymval,
5264 Address addend,
5265 Address address,
5266 const AArch64_reloc_property* /* reloc_property */)
5267 {
5268 AArch64_valtype x = psymval->value(object, addend) - address;
5269 // Pick bits [20:0] of X.
5270 AArch64_valtype immed = x & 0x1fffff;
5271 update_adr(view, immed);
5272 // Check -2^20 <= X < 2^20
5273 return (size == 64 && Bits<21>::has_overflow((x))
5274 ? This::STATUS_OVERFLOW
5275 : This::STATUS_OKAY);
5276 }
5277
5278 // Calculate PG(S+A) - PG(address), update adrp instruction.
5279 // R_AARCH64_ADR_PREL_PG_HI21
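// For example, assuming 4KB pages (Page(expr) == expr & ~0xfff): with
// S+A = 0x412345 and address = 0x400010, X = 0x412000 - 0x400000 = 0x12000,
// and immed = (X >> 12) & 0x1fffff = 0x12, the number of pages the adrp
// must advance.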
5280
5281 static inline typename This::Status
5282 adrp(
5283 unsigned char* view,
5284 Address sa,
5285 Address address)
5286 {
5287 AArch64_valtype x = This::Page(sa) - This::Page(address);
5288 // Pick [32:12] of X.
5289 AArch64_valtype immed = (x >> 12) & 0x1fffff;
5290 update_adr(view, immed);
5291 // Check -2^32 <= X < 2^32
5292 return (size == 64 && Bits<33>::has_overflow((x))
5293 ? This::STATUS_OVERFLOW
5294 : This::STATUS_OKAY);
5295 }
5296
5297 // Calculate PG(S+A) - PG(address), update adrp instruction.
5298 // R_AARCH64_ADR_PREL_PG_HI21
5299
5300 static inline typename This::Status
5301 adrp(unsigned char* view,
5302 const Sized_relobj_file<size, big_endian>* object,
5303 const Symbol_value<size>* psymval,
5304 Address addend,
5305 Address address,
5306 const AArch64_reloc_property* reloc_property)
5307 {
5308 Address sa = psymval->value(object, addend);
5309 AArch64_valtype x = This::Page(sa) - This::Page(address);
5310 // Pick [32:12] of X.
5311 AArch64_valtype immed = (x >> 12) & 0x1fffff;
5312 update_adr(view, immed);
5313 return (reloc_property->checkup_x_value(x)
5314 ? This::STATUS_OKAY
5315 : This::STATUS_OVERFLOW);
5316 }
5317
5318 // Update mov[n/z] instruction. Check overflow if needed.
5319 // If X >= 0, set the instruction to movz and its immediate value to the
5320 // selected bits of X.
5321 // If X < 0, set the instruction to movn and its immediate value to the
5322 // selected bits of NOT X.
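// For example (illustrative value): with X = -0x10 for a 16-bit MOVW field,
// X < 0, so the insn becomes movn with immed taken from ~X = 0xf; at run
// time "movn Xd, #0xf" writes ~0xf = -0x10, recovering X.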
5323
5324 static inline typename This::Status
5325 movnz(unsigned char* view,
5326 AArch64_valtype x,
5327 const AArch64_reloc_property* reloc_property)
5328 {
5329 // Select bits from X.
5330 Address immed;
5331 bool is_movz;
5332 typedef typename elfcpp::Elf_types<size>::Elf_Swxword SignedW;
5333 if (static_cast<SignedW>(x) >= 0)
5334 {
5335 immed = reloc_property->select_x_value(x);
5336 is_movz = true;
5337 }
5338 else
5339 {
5340 immed = reloc_property->select_x_value(~x);
5341 is_movz = false;
5342 }
5343
5344 // Update movnz instruction.
5345 update_movnz(view, immed, is_movz);
5346
5347 // Check for overflow or alignment if needed.
5348 return (reloc_property->checkup_x_value(x)
5349 ? This::STATUS_OKAY
5350 : This::STATUS_OVERFLOW);
5351 }
5352
5353 static inline bool
5354 maybe_apply_stub(unsigned int,
5355 const The_relocate_info*,
5356 const The_rela&,
5357 unsigned char*,
5358 Address,
5359 const Sized_symbol<size>*,
5360 const Symbol_value<size>*,
5361 const Sized_relobj_file<size, big_endian>*,
5362 section_size_type);
5363
5364 }; // End of AArch64_relocate_functions
5365
5366
5367 // For a certain relocation type (usually jump/branch), test whether the
5368 // destination is out of range and needs a stub to reach it. If so, re-route
5369 // the destination of the original instruction to the stub. Note that at this
5370 // point the stub has already been generated.
5371
5372 template<int size, bool big_endian>
5373 bool
5374 AArch64_relocate_functions<size, big_endian>::
5375 maybe_apply_stub(unsigned int r_type,
5376 const The_relocate_info* relinfo,
5377 const The_rela& rela,
5378 unsigned char* view,
5379 Address address,
5380 const Sized_symbol<size>* gsym,
5381 const Symbol_value<size>* psymval,
5382 const Sized_relobj_file<size, big_endian>* object,
5383 section_size_type current_group_size)
5384 {
5385 if (parameters->options().relocatable())
5386 return false;
5387
5388 typename elfcpp::Elf_types<size>::Elf_Swxword addend = rela.get_r_addend();
5389 Address branch_target = psymval->value(object, 0) + addend;
5390 int stub_type =
5391 The_reloc_stub::stub_type_for_reloc(r_type, address, branch_target);
5392 if (stub_type == ST_NONE)
5393 return false;
5394
5395 const The_aarch64_relobj* aarch64_relobj =
5396 static_cast<const The_aarch64_relobj*>(object);
5397 The_stub_table* stub_table = aarch64_relobj->stub_table(relinfo->data_shndx);
5398 gold_assert(stub_table != NULL);
5399
5400 unsigned int r_sym = elfcpp::elf_r_sym<size>(rela.get_r_info());
5401 typename The_reloc_stub::Key stub_key(stub_type, gsym, object, r_sym, addend);
5402 The_reloc_stub* stub = stub_table->find_reloc_stub(stub_key);
5403 gold_assert(stub != NULL);
5404
5405 Address new_branch_target = stub_table->address() + stub->offset();
5406 typename elfcpp::Swap<size, big_endian>::Valtype branch_offset =
5407 new_branch_target - address;
5408 const AArch64_reloc_property* arp =
5409 aarch64_reloc_property_table->get_reloc_property(r_type);
5410 gold_assert(arp != NULL);
5411 typename This::Status status = This::template
5412 rela_general<32>(view, branch_offset, 0, arp);
5413 if (status != This::STATUS_OKAY)
5414 gold_error(_("Stub is too far away, try a smaller value "
5415 "for '--stub-group-size'. The current value is 0x%lx."),
5416 static_cast<unsigned long>(current_group_size));
5417 return true;
5418 }
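// For example (illustrative numbers): a BL whose target lies more than 128MB
// away exceeds the +/-2^27 byte reach of its 26-bit branch offset, so
// stub_type_for_reloc returns a stub type, and the BL is retargeted to the
// stub's address computed above.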
5419
5420
5421 // Group input sections for stub generation.
5422 //
5423 // We group input sections in an output section so that the total size,
5424 // including any padding space due to alignment, is smaller than GROUP_SIZE,
5425 // unless the only input section in a group is already bigger than
5426 // GROUP_SIZE. A stub table is then created to follow the last input
5427 // section in each group. If STUBS_ALWAYS_AFTER_BRANCH is false, we
5428 // further extend the group to include input sections placed after the
5429 // stub table.
5430
5431 template<int size, bool big_endian>
5432 void
5433 Target_aarch64<size, big_endian>::group_sections(
5434 Layout* layout,
5435 section_size_type group_size,
5436 bool stubs_always_after_branch,
5437 const Task* task)
5438 {
5439 // Group input sections and insert stub table
5440 Layout::Section_list section_list;
5441 layout->get_executable_sections(&section_list);
5442 for (Layout::Section_list::const_iterator p = section_list.begin();
5443 p != section_list.end();
5444 ++p)
5445 {
5446 AArch64_output_section<size, big_endian>* output_section =
5447 static_cast<AArch64_output_section<size, big_endian>*>(*p);
5448 output_section->group_sections(group_size, stubs_always_after_branch,
5449 this, task);
5450 }
5451 }
5452
5453
5454 // Find the AArch64_input_section object corresponding to the SHNDX-th input
5455 // section of RELOBJ.
5456
5457 template<int size, bool big_endian>
5458 AArch64_input_section<size, big_endian>*
5459 Target_aarch64<size, big_endian>::find_aarch64_input_section(
5460 Relobj* relobj, unsigned int shndx) const
5461 {
5462 Section_id sid(relobj, shndx);
5463 typename AArch64_input_section_map::const_iterator p =
5464 this->aarch64_input_section_map_.find(sid);
5465 return (p != this->aarch64_input_section_map_.end()) ? p->second : NULL;
5466 }
5467
5468
5469 // Make a new AArch64_input_section object.
5470
5471 template<int size, bool big_endian>
5472 AArch64_input_section<size, big_endian>*
5473 Target_aarch64<size, big_endian>::new_aarch64_input_section(
5474 Relobj* relobj, unsigned int shndx)
5475 {
5476 Section_id sid(relobj, shndx);
5477
5478 AArch64_input_section<size, big_endian>* input_section =
5479 new AArch64_input_section<size, big_endian>(relobj, shndx);
5480 input_section->init();
5481
5482 // Register new AArch64_input_section in map for look-up.
5483 std::pair<typename AArch64_input_section_map::iterator,bool> ins =
5484 this->aarch64_input_section_map_.insert(
5485 std::make_pair(sid, input_section));
5486
5487 // Make sure that we have not already created another AArch64_input_section
5488 // for this input section.
5489 gold_assert(ins.second);
5490
5491 return input_section;
5492 }
5493
5494
5495 // Relaxation hook. This is where we do stub generation.
5496
5497 template<int size, bool big_endian>
5498 bool
5499 Target_aarch64<size, big_endian>::do_relax(
5500 int pass,
5501 const Input_objects* input_objects,
5502 Symbol_table* symtab,
5503 Layout* layout,
5504 const Task* task)
5505 {
5506 gold_assert(!parameters->options().relocatable());
5507 if (pass == 1)
5508 {
5509 // We don't handle negative stub_group_size right now.
5510 this->stub_group_size_ = abs(parameters->options().stub_group_size());
5511 if (this->stub_group_size_ == 1)
5512 {
5513 // Leave room for 4096 4-byte stub entries. If we exceed that, then we
5514 // will fail to link. The user will have to relink with an explicit
5515 // group size option.
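// Assuming MAX_BRANCH_OFFSET is the +/-128MB reach of a 26-bit branch, this
// default works out to 0x8000000 - 0x4000 bytes.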
5516 this->stub_group_size_ = The_reloc_stub::MAX_BRANCH_OFFSET -
5517 4096 * 4;
5518 }
5519 group_sections(layout, this->stub_group_size_, true, task);
5520 }
5521 else
5522 {
5523 // If this is not the first pass, addresses and file offsets have
5524 // been reset at this point, set them here.
5525 for (Stub_table_iterator sp = this->stub_tables_.begin();
5526 sp != this->stub_tables_.end(); ++sp)
5527 {
5528 The_stub_table* stt = *sp;
5529 The_aarch64_input_section* owner = stt->owner();
5530 off_t off = align_address(owner->original_size(),
5531 stt->addralign());
5532 stt->set_address_and_file_offset(owner->address() + off,
5533 owner->offset() + off);
5534 }
5535 }
5536
5537 // Scan relocs for relocation stubs
5538 for (Input_objects::Relobj_iterator op = input_objects->relobj_begin();
5539 op != input_objects->relobj_end();
5540 ++op)
5541 {
5542 The_aarch64_relobj* aarch64_relobj =
5543 static_cast<The_aarch64_relobj*>(*op);
5544 // Lock the object so we can read from it. This is only called
5545 // single-threaded from Layout::finalize, so it is OK to lock.
5546 Task_lock_obj<Object> tl(task, aarch64_relobj);
5547 aarch64_relobj->scan_sections_for_stubs(this, symtab, layout);
5548 }
5549
5550 bool any_stub_table_changed = false;
5551 for (Stub_table_iterator siter = this->stub_tables_.begin();
5552 siter != this->stub_tables_.end() && !any_stub_table_changed; ++siter)
5553 {
5554 The_stub_table* stub_table = *siter;
5555 if (stub_table->update_data_size_changed_p())
5556 {
5557 The_aarch64_input_section* owner = stub_table->owner();
5558 uint64_t address = owner->address();
5559 off_t offset = owner->offset();
5560 owner->reset_address_and_file_offset();
5561 owner->set_address_and_file_offset(address, offset);
5562
5563 any_stub_table_changed = true;
5564 }
5565 }
5566
5567 // Continue relaxation only if some stub table changed; otherwise finalize the stubs.
5568 bool continue_relaxation = any_stub_table_changed;
5569 if (!continue_relaxation)
5570 for (Stub_table_iterator sp = this->stub_tables_.begin();
5571 (sp != this->stub_tables_.end());
5572 ++sp)
5573 (*sp)->finalize_stubs();
5574
5575 return continue_relaxation;
5576 }
5577
5578
5579 // Make a new Stub_table.
5580
5581 template<int size, bool big_endian>
5582 Stub_table<size, big_endian>*
5583 Target_aarch64<size, big_endian>::new_stub_table(
5584 AArch64_input_section<size, big_endian>* owner)
5585 {
5586 Stub_table<size, big_endian>* stub_table =
5587 new Stub_table<size, big_endian>(owner);
5588 stub_table->set_address(align_address(
5589 owner->address() + owner->data_size(), 8));
5590 stub_table->set_file_offset(owner->offset() + owner->data_size());
5591 stub_table->finalize_data_size();
5592
5593 this->stub_tables_.push_back(stub_table);
5594
5595 return stub_table;
5596 }
5597
5598
5599 template<int size, bool big_endian>
5600 uint64_t
5601 Target_aarch64<size, big_endian>::do_reloc_addend(
5602 void* arg, unsigned int r_type, uint64_t) const
5603 {
5604 gold_assert(r_type == elfcpp::R_AARCH64_TLSDESC);
5605 uintptr_t intarg = reinterpret_cast<uintptr_t>(arg);
5606 gold_assert(intarg < this->tlsdesc_reloc_info_.size());
5607 const Tlsdesc_info& ti(this->tlsdesc_reloc_info_[intarg]);
5608 const Symbol_value<size>* psymval = ti.object->local_symbol(ti.r_sym);
5609 gold_assert(psymval->is_tls_symbol());
5610 // The value of a TLS symbol is the offset in the TLS segment.
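// This value becomes the addend of the R_AARCH64_TLSDESC dynamic reloc
// emitted for a local symbol (see Scan::local), so the descriptor resolver
// can compute the final offset at run time.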
5611 return psymval->value(ti.object, 0);
5612 }
5613
5614 // Return the number of entries in the PLT.
5615
5616 template<int size, bool big_endian>
5617 unsigned int
5618 Target_aarch64<size, big_endian>::plt_entry_count() const
5619 {
5620 if (this->plt_ == NULL)
5621 return 0;
5622 return this->plt_->entry_count();
5623 }
5624
5625 // Return the offset of the first non-reserved PLT entry.
5626
5627 template<int size, bool big_endian>
5628 unsigned int
5629 Target_aarch64<size, big_endian>::first_plt_entry_offset() const
5630 {
5631 return this->plt_->first_plt_entry_offset();
5632 }
5633
5634 // Return the size of each PLT entry.
5635
5636 template<int size, bool big_endian>
5637 unsigned int
5638 Target_aarch64<size, big_endian>::plt_entry_size() const
5639 {
5640 return this->plt_->get_plt_entry_size();
5641 }
5642
5643 // Define the _TLS_MODULE_BASE_ symbol in the TLS segment.
5644
5645 template<int size, bool big_endian>
5646 void
5647 Target_aarch64<size, big_endian>::define_tls_base_symbol(
5648 Symbol_table* symtab, Layout* layout)
5649 {
5650 if (this->tls_base_symbol_defined_)
5651 return;
5652
5653 Output_segment* tls_segment = layout->tls_segment();
5654 if (tls_segment != NULL)
5655 {
5656 // _TLS_MODULE_BASE_ always points to the beginning of the TLS segment.
5657 symtab->define_in_output_segment("_TLS_MODULE_BASE_", NULL,
5658 Symbol_table::PREDEFINED,
5659 tls_segment, 0, 0,
5660 elfcpp::STT_TLS,
5661 elfcpp::STB_LOCAL,
5662 elfcpp::STV_HIDDEN, 0,
5663 Symbol::SEGMENT_START,
5664 true);
5665 }
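// Note that there may be no TLS segment at all (the object uses TLS
// relocations but defines no TLS data); in that case nothing is defined
// here, and callers must cope with a missing segment.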
5666 this->tls_base_symbol_defined_ = true;
5667 }
5668
5669 // Create the reserved PLT and GOT entries for the TLS descriptor resolver.
5670
5671 template<int size, bool big_endian>
5672 void
5673 Target_aarch64<size, big_endian>::reserve_tlsdesc_entries(
5674 Symbol_table* symtab, Layout* layout)
5675 {
5676 if (this->plt_ == NULL)
5677 this->make_plt_section(symtab, layout);
5678
5679 if (!this->plt_->has_tlsdesc_entry())
5680 {
5681 // Allocate the TLSDESC_GOT entry.
5682 Output_data_got_aarch64<size, big_endian>* got =
5683 this->got_section(symtab, layout);
5684 unsigned int got_offset = got->add_constant(0);
5685
5686 // Allocate the TLSDESC_PLT entry.
5687 this->plt_->reserve_tlsdesc_entry(got_offset);
5688 }
5689 }
5690
5691 // Create a GOT entry for the TLS module index.
5692
5693 template<int size, bool big_endian>
5694 unsigned int
5695 Target_aarch64<size, big_endian>::got_mod_index_entry(
5696 Symbol_table* symtab, Layout* layout,
5697 Sized_relobj_file<size, big_endian>* object)
5698 {
5699 if (this->got_mod_index_offset_ == -1U)
5700 {
5701 gold_assert(symtab != NULL && layout != NULL && object != NULL);
5702 Reloc_section* rela_dyn = this->rela_dyn_section(layout);
5703 Output_data_got_aarch64<size, big_endian>* got =
5704 this->got_section(symtab, layout);
5705 unsigned int got_offset = got->add_constant(0);
5706 rela_dyn->add_local(object, 0, elfcpp::R_AARCH64_TLS_DTPMOD64, got,
5707 got_offset, 0);
5708 got->add_constant(0);
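// The second GOT word of the pair holds the dtv-relative offset; it is left
// as zero here, since Local-Dynamic references add their own DTPREL offsets
// at the point of use.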
5709 this->got_mod_index_offset_ = got_offset;
5710 }
5711 return this->got_mod_index_offset_;
5712 }
5713
5714 // Optimize the TLS relocation type based on what we know about the
5715 // symbol. IS_FINAL is true if the final address of this symbol is
5716 // known at link time.
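// For example, when the output is an executable and the referenced TLS
// symbol is known to resolve locally (IS_FINAL), a General-Dynamic or
// TLS-descriptor sequence can be relaxed to Local-Exec (TLSOPT_TO_LE);
// otherwise it is only relaxed to Initial-Exec (TLSOPT_TO_IE), and for
// shared libraries no relaxation is done at all.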
5717
5718 template<int size, bool big_endian>
5719 tls::Tls_optimization
5720 Target_aarch64<size, big_endian>::optimize_tls_reloc(bool is_final,
5721 int r_type)
5722 {
5723 // If we are generating a shared library, then we can't do anything
5724 // in the linker.
5725 if (parameters->options().shared())
5726 return tls::TLSOPT_NONE;
5727
5728 switch (r_type)
5729 {
5730 case elfcpp::R_AARCH64_TLSGD_ADR_PAGE21:
5731 case elfcpp::R_AARCH64_TLSGD_ADD_LO12_NC:
5732 case elfcpp::R_AARCH64_TLSDESC_LD_PREL19:
5733 case elfcpp::R_AARCH64_TLSDESC_ADR_PREL21:
5734 case elfcpp::R_AARCH64_TLSDESC_ADR_PAGE21:
5735 case elfcpp::R_AARCH64_TLSDESC_LD64_LO12:
5736 case elfcpp::R_AARCH64_TLSDESC_ADD_LO12:
5737 case elfcpp::R_AARCH64_TLSDESC_OFF_G1:
5738 case elfcpp::R_AARCH64_TLSDESC_OFF_G0_NC:
5739 case elfcpp::R_AARCH64_TLSDESC_LDR:
5740 case elfcpp::R_AARCH64_TLSDESC_ADD:
5741 case elfcpp::R_AARCH64_TLSDESC_CALL:
5742 // These are General-Dynamic relocations, which permit fully general TLS
5743 // access. Since we know that we are generating an executable,
5744 // we can convert this to Initial-Exec. If we also know that
5745 // this is a local symbol, we can further switch to Local-Exec.
5746 if (is_final)
5747 return tls::TLSOPT_TO_LE;
5748 return tls::TLSOPT_TO_IE;
5749
5750 case elfcpp::R_AARCH64_TLSLD_ADR_PAGE21:
5751 case elfcpp::R_AARCH64_TLSLD_ADD_LO12_NC:
5752 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G1:
5753 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
5754 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_HI12:
5755 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
5756 // These are Local-Dynamic, which refer to local symbols in the
5757 // dynamic TLS block. Since we know that we are generating an
5758 // executable, we can switch to Local-Exec.
5759 return tls::TLSOPT_TO_LE;
5760
5761 case elfcpp::R_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
5762 case elfcpp::R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
5763 case elfcpp::R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
5764 case elfcpp::R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
5765 case elfcpp::R_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
5766 // These are Initial-Exec relocs which get the thread offset
5767 // from the GOT. If we know that we are linking against the
5768 // local symbol, we can switch to Local-Exec, which links the
5769 // thread offset into the instruction.
5770 if (is_final)
5771 return tls::TLSOPT_TO_LE;
5772 return tls::TLSOPT_NONE;
5773
5774 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G2:
5775 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1:
5776 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
5777 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0:
5778 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
5779 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_HI12:
5780 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12:
5781 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
5782 // When we already have Local-Exec, there is nothing further we
5783 // can do.
5784 return tls::TLSOPT_NONE;
5785
5786 default:
5787 gold_unreachable();
5788 }
5789 }
5790
5791 // Returns true if this relocation type could be that of a function pointer.
5792
5793 template<int size, bool big_endian>
5794 inline bool
5795 Target_aarch64<size, big_endian>::Scan::possible_function_pointer_reloc(
5796 unsigned int r_type)
5797 {
5798 switch (r_type)
5799 {
5800 case elfcpp::R_AARCH64_ADR_PREL_PG_HI21:
5801 case elfcpp::R_AARCH64_ADR_PREL_PG_HI21_NC:
5802 case elfcpp::R_AARCH64_ADD_ABS_LO12_NC:
5803 case elfcpp::R_AARCH64_ADR_GOT_PAGE:
5804 case elfcpp::R_AARCH64_LD64_GOT_LO12_NC:
5805 {
5806 return true;
5807 }
5808 }
5809 return false;
5810 }
5811
5812 // For safe ICF, scan a relocation for a local symbol to check if it
5813 // corresponds to a function pointer being taken. In that case mark
5814 // the function whose pointer was taken as not foldable.
5815
5816 template<int size, bool big_endian>
5817 inline bool
5818 Target_aarch64<size, big_endian>::Scan::local_reloc_may_be_function_pointer(
5819 Symbol_table* ,
5820 Layout* ,
5821 Target_aarch64<size, big_endian>* ,
5822 Sized_relobj_file<size, big_endian>* ,
5823 unsigned int ,
5824 Output_section* ,
5825 const elfcpp::Rela<size, big_endian>& ,
5826 unsigned int r_type,
5827 const elfcpp::Sym<size, big_endian>&)
5828 {
5829 // When building a shared library, do not fold any local symbols.
5830 return (parameters->options().shared()
5831 || possible_function_pointer_reloc(r_type));
5832 }
5833
5834 // For safe ICF, scan a relocation for a global symbol to check if it
5835 // corresponds to a function pointer being taken. In that case mark
5836 // the function whose pointer was taken as not foldable.
5837
5838 template<int size, bool big_endian>
5839 inline bool
5840 Target_aarch64<size, big_endian>::Scan::global_reloc_may_be_function_pointer(
5841 Symbol_table* ,
5842 Layout* ,
5843 Target_aarch64<size, big_endian>* ,
5844 Sized_relobj_file<size, big_endian>* ,
5845 unsigned int ,
5846 Output_section* ,
5847 const elfcpp::Rela<size, big_endian>& ,
5848 unsigned int r_type,
5849 Symbol* gsym)
5850 {
5851 // When building a shared library, do not fold symbols whose visibility
5852 // is hidden, internal or protected.
5853 return ((parameters->options().shared()
5854 && (gsym->visibility() == elfcpp::STV_INTERNAL
5855 || gsym->visibility() == elfcpp::STV_PROTECTED
5856 || gsym->visibility() == elfcpp::STV_HIDDEN))
5857 || possible_function_pointer_reloc(r_type));
5858 }
5859
5860 // Report an unsupported relocation against a local symbol.
5861
5862 template<int size, bool big_endian>
5863 void
5864 Target_aarch64<size, big_endian>::Scan::unsupported_reloc_local(
5865 Sized_relobj_file<size, big_endian>* object,
5866 unsigned int r_type)
5867 {
5868 gold_error(_("%s: unsupported reloc %u against local symbol"),
5869 object->name().c_str(), r_type);
5870 }
5871
5872 // We are about to emit a dynamic relocation of type R_TYPE. If the
5873 // dynamic linker does not support it, issue an error.
5874
5875 template<int size, bool big_endian>
5876 void
5877 Target_aarch64<size, big_endian>::Scan::check_non_pic(Relobj* object,
5878 unsigned int r_type)
5879 {
5880 gold_assert(r_type != elfcpp::R_AARCH64_NONE);
5881
5882 switch (r_type)
5883 {
5884 // These are the relocation types supported by glibc for AARCH64.
5885 case elfcpp::R_AARCH64_NONE:
5886 case elfcpp::R_AARCH64_COPY:
5887 case elfcpp::R_AARCH64_GLOB_DAT:
5888 case elfcpp::R_AARCH64_JUMP_SLOT:
5889 case elfcpp::R_AARCH64_RELATIVE:
5890 case elfcpp::R_AARCH64_TLS_DTPREL64:
5891 case elfcpp::R_AARCH64_TLS_DTPMOD64:
5892 case elfcpp::R_AARCH64_TLS_TPREL64:
5893 case elfcpp::R_AARCH64_TLSDESC:
5894 case elfcpp::R_AARCH64_IRELATIVE:
5895 case elfcpp::R_AARCH64_ABS32:
5896 case elfcpp::R_AARCH64_ABS64:
5897 return;
5898
5899 default:
5900 break;
5901 }
5902
5903 // This prevents us from issuing more than one error per reloc
5904 // section. But we can still wind up issuing more than one
5905 // error per object file.
5906 if (this->issued_non_pic_error_)
5907 return;
5908 gold_assert(parameters->options().output_is_position_independent());
5909 object->error(_("requires unsupported dynamic reloc; "
5910 "recompile with -fPIC"));
5911 this->issued_non_pic_error_ = true;
5912 return;
5913 }
5914
5915 // Return whether we need to make a PLT entry for a relocation of the
5916 // given type against a STT_GNU_IFUNC symbol.
5917
5918 template<int size, bool big_endian>
5919 bool
5920 Target_aarch64<size, big_endian>::Scan::reloc_needs_plt_for_ifunc(
5921 Sized_relobj_file<size, big_endian>* object,
5922 unsigned int r_type)
5923 {
5924 const AArch64_reloc_property* arp =
5925 aarch64_reloc_property_table->get_reloc_property(r_type);
5926 gold_assert(arp != NULL);
5927
5928 int flags = arp->reference_flags();
5929 if (flags & Symbol::TLS_REF)
5930 {
5931 gold_error(_("%s: unsupported TLS reloc %s for IFUNC symbol"),
5932 object->name().c_str(), arp->name().c_str());
5933 return false;
5934 }
5935 return flags != 0;
5936 }
5937
5938 // Scan a relocation for a local symbol.
5939
5940 template<int size, bool big_endian>
5941 inline void
5942 Target_aarch64<size, big_endian>::Scan::local(
5943 Symbol_table* symtab,
5944 Layout* layout,
5945 Target_aarch64<size, big_endian>* target,
5946 Sized_relobj_file<size, big_endian>* object,
5947 unsigned int data_shndx,
5948 Output_section* output_section,
5949 const elfcpp::Rela<size, big_endian>& rela,
5950 unsigned int r_type,
5951 const elfcpp::Sym<size, big_endian>& lsym,
5952 bool is_discarded)
5953 {
5954 if (is_discarded)
5955 return;
5956
5957 typedef Output_data_reloc<elfcpp::SHT_RELA, true, size, big_endian>
5958 Reloc_section;
5959 Output_data_got_aarch64<size, big_endian>* got =
5960 target->got_section(symtab, layout);
5961 unsigned int r_sym = elfcpp::elf_r_sym<size>(rela.get_r_info());
5962
5963 // A local STT_GNU_IFUNC symbol may require a PLT entry.
5964 bool is_ifunc = lsym.get_st_type() == elfcpp::STT_GNU_IFUNC;
5965 if (is_ifunc && this->reloc_needs_plt_for_ifunc(object, r_type))
5966 target->make_local_ifunc_plt_entry(symtab, layout, object, r_sym);
5967
5968 switch (r_type)
5969 {
5970 case elfcpp::R_AARCH64_ABS32:
5971 case elfcpp::R_AARCH64_ABS16:
5972 if (parameters->options().output_is_position_independent())
5973 {
5974 gold_error(_("%s: unsupported reloc %u in pos independent link."),
5975 object->name().c_str(), r_type);
5976 }
5977 break;
5978
5979 case elfcpp::R_AARCH64_ABS64:
5980 // If building a shared library or PIE, we need to mark this as a dynamic
5981 // relocation, so that the dynamic loader can relocate it.
5982 if (parameters->options().output_is_position_independent())
5983 {
5984 Reloc_section* rela_dyn = target->rela_dyn_section(layout);
5985 rela_dyn->add_local_relative(object, r_sym,
5986 elfcpp::R_AARCH64_RELATIVE,
5987 output_section,
5988 data_shndx,
5989 rela.get_r_offset(),
5990 rela.get_r_addend(),
5991 is_ifunc);
5992 }
5993 break;
5994
5995 case elfcpp::R_AARCH64_PREL64:
5996 case elfcpp::R_AARCH64_PREL32:
5997 case elfcpp::R_AARCH64_PREL16:
5998 break;
5999
6000 case elfcpp::R_AARCH64_ADR_GOT_PAGE:
6001 case elfcpp::R_AARCH64_LD64_GOT_LO12_NC:
6002 // This pair of relocations is used to access a specific GOT entry.
6003 {
6004 bool is_new = false;
6005 // This symbol requires a GOT entry.
6006 if (is_ifunc)
6007 is_new = got->add_local_plt(object, r_sym, GOT_TYPE_STANDARD);
6008 else
6009 is_new = got->add_local(object, r_sym, GOT_TYPE_STANDARD);
6010 if (is_new && parameters->options().output_is_position_independent())
6011 target->rela_dyn_section(layout)->
6012 add_local_relative(object,
6013 r_sym,
6014 elfcpp::R_AARCH64_RELATIVE,
6015 got,
6016 object->local_got_offset(r_sym,
6017 GOT_TYPE_STANDARD),
6018 0,
6019 false);
6020 }
6021 break;
6022
6023 case elfcpp::R_AARCH64_LD_PREL_LO19: // 273
6024 case elfcpp::R_AARCH64_ADR_PREL_LO21: // 274
6025 case elfcpp::R_AARCH64_ADR_PREL_PG_HI21: // 275
6026 case elfcpp::R_AARCH64_ADR_PREL_PG_HI21_NC: // 276
6027 case elfcpp::R_AARCH64_ADD_ABS_LO12_NC: // 277
6028 case elfcpp::R_AARCH64_LDST8_ABS_LO12_NC: // 278
6029 case elfcpp::R_AARCH64_LDST16_ABS_LO12_NC: // 284
6030 case elfcpp::R_AARCH64_LDST32_ABS_LO12_NC: // 285
6031 case elfcpp::R_AARCH64_LDST64_ABS_LO12_NC: // 286
6032 case elfcpp::R_AARCH64_LDST128_ABS_LO12_NC: // 299
6033 break;
6034
6035 // Control flow, pc-relative. We don't need to do anything for a relative
6036 // addressing relocation against a local symbol if it does not reference
6037 // the GOT.
6038 case elfcpp::R_AARCH64_TSTBR14:
6039 case elfcpp::R_AARCH64_CONDBR19:
6040 case elfcpp::R_AARCH64_JUMP26:
6041 case elfcpp::R_AARCH64_CALL26:
6042 break;
6043
6044 case elfcpp::R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
6045 case elfcpp::R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
6046 {
6047 tls::Tls_optimization tlsopt = Target_aarch64<size, big_endian>::
6048 optimize_tls_reloc(!parameters->options().shared(), r_type);
6049 if (tlsopt == tls::TLSOPT_TO_LE)
6050 break;
6051
6052 layout->set_has_static_tls();
6053 // Create a GOT entry for the tp-relative offset.
6054 if (!parameters->doing_static_link())
6055 {
6056 got->add_local_with_rel(object, r_sym, GOT_TYPE_TLS_OFFSET,
6057 target->rela_dyn_section(layout),
6058 elfcpp::R_AARCH64_TLS_TPREL64);
6059 }
6060 else if (!object->local_has_got_offset(r_sym,
6061 GOT_TYPE_TLS_OFFSET))
6062 {
6063 got->add_local(object, r_sym, GOT_TYPE_TLS_OFFSET);
6064 unsigned int got_offset =
6065 object->local_got_offset(r_sym, GOT_TYPE_TLS_OFFSET);
6066 const elfcpp::Elf_Xword addend = rela.get_r_addend();
6067 gold_assert(addend == 0);
6068 got->add_static_reloc(got_offset, elfcpp::R_AARCH64_TLS_TPREL64,
6069 object, r_sym);
6070 }
6071 }
6072 break;
6073
6074 case elfcpp::R_AARCH64_TLSGD_ADR_PAGE21:
6075 case elfcpp::R_AARCH64_TLSGD_ADD_LO12_NC:
6076 {
6077 tls::Tls_optimization tlsopt = Target_aarch64<size, big_endian>::
6078 optimize_tls_reloc(!parameters->options().shared(), r_type);
6079 if (tlsopt == tls::TLSOPT_TO_LE)
6080 {
6081 layout->set_has_static_tls();
6082 break;
6083 }
6084 gold_assert(tlsopt == tls::TLSOPT_NONE);
6085
6086 got->add_local_pair_with_rel(object, r_sym, data_shndx,
6087 GOT_TYPE_TLS_PAIR,
6088 target->rela_dyn_section(layout),
6089 elfcpp::R_AARCH64_TLS_DTPMOD64);
6090 }
6091 break;
6092
6093 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G2:
6094 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1:
6095 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
6096 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0:
6097 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
6098 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_HI12:
6099 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12:
6100 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
6101 {
6102 layout->set_has_static_tls();
6103 bool output_is_shared = parameters->options().shared();
6104 if (output_is_shared)
6105 gold_error(_("%s: unsupported TLSLE reloc %u in shared code."),
6106 object->name().c_str(), r_type);
6107 }
6108 break;
6109
6110 case elfcpp::R_AARCH64_TLSLD_ADR_PAGE21:
6111 case elfcpp::R_AARCH64_TLSLD_ADD_LO12_NC:
6112 {
6113 tls::Tls_optimization tlsopt = Target_aarch64<size, big_endian>::
6114 optimize_tls_reloc(!parameters->options().shared(), r_type);
6115 if (tlsopt == tls::TLSOPT_NONE)
6116 {
6117 // Create a GOT entry for the module index.
6118 target->got_mod_index_entry(symtab, layout, object);
6119 }
6120 else if (tlsopt != tls::TLSOPT_TO_LE)
6121 unsupported_reloc_local(object, r_type);
6122 }
6123 break;
6124
6125 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G1:
6126 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
6127 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_HI12:
6128 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
6129 break;
6130
6131 case elfcpp::R_AARCH64_TLSDESC_ADR_PAGE21:
6132 case elfcpp::R_AARCH64_TLSDESC_LD64_LO12:
6133 case elfcpp::R_AARCH64_TLSDESC_ADD_LO12:
6134 {
6135 tls::Tls_optimization tlsopt = Target_aarch64<size, big_endian>::
6136 optimize_tls_reloc(!parameters->options().shared(), r_type);
6137 target->define_tls_base_symbol(symtab, layout);
6138 if (tlsopt == tls::TLSOPT_NONE)
6139 {
6140 // Create reserved PLT and GOT entries for the resolver.
6141 target->reserve_tlsdesc_entries(symtab, layout);
6142
6143 // Generate a double GOT entry with an R_AARCH64_TLSDESC reloc.
6144 // The R_AARCH64_TLSDESC reloc is resolved lazily, so the GOT
6145 // entry needs to be in an area in .got.plt, not .got. Call
6146 // got_section to make sure the section has been created.
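// A TLS descriptor occupies two consecutive GOT words (a resolver function
// pointer and its argument); both are filled in lazily by the dynamic
// linker through the R_AARCH64_TLSDESC relocation.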
6147 target->got_section(symtab, layout);
6148 Output_data_got<size, big_endian>* got =
6149 target->got_tlsdesc_section();
6150 unsigned int r_sym = elfcpp::elf_r_sym<size>(rela.get_r_info());
6151 if (!object->local_has_got_offset(r_sym, GOT_TYPE_TLS_DESC))
6152 {
6153 unsigned int got_offset = got->add_constant(0);
6154 got->add_constant(0);
6155 object->set_local_got_offset(r_sym, GOT_TYPE_TLS_DESC,
6156 got_offset);
6157 Reloc_section* rt = target->rela_tlsdesc_section(layout);
6158 // We store the arguments we need in a vector, and use
6159 // the index into the vector as the parameter to pass
6160 // to the target specific routines.
6161 uintptr_t intarg = target->add_tlsdesc_info(object, r_sym);
6162 void* arg = reinterpret_cast<void*>(intarg);
6163 rt->add_target_specific(elfcpp::R_AARCH64_TLSDESC, arg,
6164 got, got_offset, 0);
6165 }
6166 }
6167 else if (tlsopt != tls::TLSOPT_TO_LE)
6168 unsupported_reloc_local(object, r_type);
6169 }
6170 break;
6171
6172 case elfcpp::R_AARCH64_TLSDESC_CALL:
6173 break;
6174
6175 default:
6176 unsupported_reloc_local(object, r_type);
6177 }
6178 }
6179
6180
6181 // Report an unsupported relocation against a global symbol.
6182
6183 template<int size, bool big_endian>
6184 void
6185 Target_aarch64<size, big_endian>::Scan::unsupported_reloc_global(
6186 Sized_relobj_file<size, big_endian>* object,
6187 unsigned int r_type,
6188 Symbol* gsym)
6189 {
6190 gold_error(_("%s: unsupported reloc %u against global symbol %s"),
6191 object->name().c_str(), r_type, gsym->demangled_name().c_str());
6192 }
6193
6194 template<int size, bool big_endian>
6195 inline void
6196 Target_aarch64<size, big_endian>::Scan::global(
6197 Symbol_table* symtab,
6198 Layout* layout,
6199 Target_aarch64<size, big_endian>* target,
6200 Sized_relobj_file<size, big_endian> * object,
6201 unsigned int data_shndx,
6202 Output_section* output_section,
6203 const elfcpp::Rela<size, big_endian>& rela,
6204 unsigned int r_type,
6205 Symbol* gsym)
6206 {
6207 // A STT_GNU_IFUNC symbol may require a PLT entry.
6208 if (gsym->type() == elfcpp::STT_GNU_IFUNC
6209 && this->reloc_needs_plt_for_ifunc(object, r_type))
6210 target->make_plt_entry(symtab, layout, gsym);
6211
6212 typedef Output_data_reloc<elfcpp::SHT_RELA, true, size, big_endian>
6213 Reloc_section;
6214 const AArch64_reloc_property* arp =
6215 aarch64_reloc_property_table->get_reloc_property(r_type);
6216 gold_assert(arp != NULL);
6217
6218 switch (r_type)
6219 {
6220 case elfcpp::R_AARCH64_ABS16:
6221 case elfcpp::R_AARCH64_ABS32:
6222 case elfcpp::R_AARCH64_ABS64:
6223 {
6224 // Make a PLT entry if necessary.
6225 if (gsym->needs_plt_entry())
6226 {
6227 target->make_plt_entry(symtab, layout, gsym);
6228 // Since this is not a PC-relative relocation, we may be
6229 // taking the address of a function. In that case we need to
6230 // set the entry in the dynamic symbol table to the address of
6231 // the PLT entry.
6232 if (gsym->is_from_dynobj() && !parameters->options().shared())
6233 gsym->set_needs_dynsym_value();
6234 }
6235 // Make a dynamic relocation if necessary.
6236 if (gsym->needs_dynamic_reloc(arp->reference_flags()))
6237 {
6238 if (!parameters->options().output_is_position_independent()
6239 && gsym->may_need_copy_reloc())
6240 {
6241 target->copy_reloc(symtab, layout, object,
6242 data_shndx, output_section, gsym, rela);
6243 }
6244 else if (r_type == elfcpp::R_AARCH64_ABS64
6245 && gsym->type() == elfcpp::STT_GNU_IFUNC
6246 && gsym->can_use_relative_reloc(false)
6247 && !gsym->is_from_dynobj()
6248 && !gsym->is_undefined()
6249 && !gsym->is_preemptible())
6250 {
6251 // Use an IRELATIVE reloc for a locally defined STT_GNU_IFUNC
6252 // symbol. This makes a function address in a PIE executable
6253 // match the address in a shared library that it links against.
6254 Reloc_section* rela_dyn =
6255 target->rela_irelative_section(layout);
6256 unsigned int r_type = elfcpp::R_AARCH64_IRELATIVE;
6257 rela_dyn->add_symbolless_global_addend(gsym, r_type,
6258 output_section, object,
6259 data_shndx,
6260 rela.get_r_offset(),
6261 rela.get_r_addend());
6262 }
6263 else if (r_type == elfcpp::R_AARCH64_ABS64
6264 && gsym->can_use_relative_reloc(false))
6265 {
6266 Reloc_section* rela_dyn = target->rela_dyn_section(layout);
6267 rela_dyn->add_global_relative(gsym,
6268 elfcpp::R_AARCH64_RELATIVE,
6269 output_section,
6270 object,
6271 data_shndx,
6272 rela.get_r_offset(),
6273 rela.get_r_addend(),
6274 false);
6275 }
6276 else
6277 {
6278 check_non_pic(object, r_type);
6279 Output_data_reloc<elfcpp::SHT_RELA, true, size, big_endian>*
6280 rela_dyn = target->rela_dyn_section(layout);
6281 rela_dyn->add_global(
6282 gsym, r_type, output_section, object,
6283 data_shndx, rela.get_r_offset(), rela.get_r_addend());
6284 }
6285 }
6286 }
6287 break;
6288
6289 case elfcpp::R_AARCH64_PREL16:
6290 case elfcpp::R_AARCH64_PREL32:
6291 case elfcpp::R_AARCH64_PREL64:
6292 // This is used to fill the GOT absolute address.
6293 if (gsym->needs_plt_entry())
6294 {
6295 target->make_plt_entry(symtab, layout, gsym);
6296 }
6297 break;
6298
6299 case elfcpp::R_AARCH64_LD_PREL_LO19: // 273
6300 case elfcpp::R_AARCH64_ADR_PREL_LO21: // 274
6301 case elfcpp::R_AARCH64_ADR_PREL_PG_HI21: // 275
6302 case elfcpp::R_AARCH64_ADR_PREL_PG_HI21_NC: // 276
6303 case elfcpp::R_AARCH64_ADD_ABS_LO12_NC: // 277
6304 case elfcpp::R_AARCH64_LDST8_ABS_LO12_NC: // 278
6305 case elfcpp::R_AARCH64_LDST16_ABS_LO12_NC: // 284
6306 case elfcpp::R_AARCH64_LDST32_ABS_LO12_NC: // 285
6307 case elfcpp::R_AARCH64_LDST64_ABS_LO12_NC: // 286
6308 case elfcpp::R_AARCH64_LDST128_ABS_LO12_NC: // 299
6309 {
6310 if (gsym->needs_plt_entry())
6311 target->make_plt_entry(symtab, layout, gsym);
6312 // Make a dynamic relocation if necessary.
6313 if (gsym->needs_dynamic_reloc(arp->reference_flags()))
6314 {
6315 if (parameters->options().output_is_executable()
6316 && gsym->may_need_copy_reloc())
6317 {
6318 target->copy_reloc(symtab, layout, object,
6319 data_shndx, output_section, gsym, rela);
6320 }
6321 }
6322 break;
6323 }
6324
6325 case elfcpp::R_AARCH64_ADR_GOT_PAGE:
6326 case elfcpp::R_AARCH64_LD64_GOT_LO12_NC:
6327 {
6328 // This pair of relocations is used to access a specific GOT entry.
6329 // Note a GOT entry is an *address* of a symbol.
6330 // The symbol requires a GOT entry.
6331 Output_data_got_aarch64<size, big_endian>* got =
6332 target->got_section(symtab, layout);
6333 if (gsym->final_value_is_known())
6334 {
6335 // For a STT_GNU_IFUNC symbol we want the PLT address.
6336 if (gsym->type() == elfcpp::STT_GNU_IFUNC)
6337 got->add_global_plt(gsym, GOT_TYPE_STANDARD);
6338 else
6339 got->add_global(gsym, GOT_TYPE_STANDARD);
6340 }
6341 else
6342 {
6343 // If this symbol is not fully resolved, we need to add a dynamic
6344 // relocation for it.
6345 Reloc_section* rela_dyn = target->rela_dyn_section(layout);
6346
6347 // Use a GLOB_DAT rather than a RELATIVE reloc if:
6348 //
6349 // 1) The symbol may be defined in some other module.
6350 // 2) We are building a shared library and this is a protected
6351 // symbol; using GLOB_DAT means that the dynamic linker can use
6352 // the address of the PLT in the main executable when appropriate
6353 // so that function address comparisons work.
6354 // 3) This is a STT_GNU_IFUNC symbol in position dependent code,
6355 // again so that function address comparisons work.
6356 if (gsym->is_from_dynobj()
6357 || gsym->is_undefined()
6358 || gsym->is_preemptible()
6359 || (gsym->visibility() == elfcpp::STV_PROTECTED
6360 && parameters->options().shared())
6361 || (gsym->type() == elfcpp::STT_GNU_IFUNC
6362 && parameters->options().output_is_position_independent()))
6363 got->add_global_with_rel(gsym, GOT_TYPE_STANDARD,
6364 rela_dyn, elfcpp::R_AARCH64_GLOB_DAT);
6365 else
6366 {
6367 // For a STT_GNU_IFUNC symbol we want to write the PLT
6368 // offset into the GOT, so that function pointer
6369 // comparisons work correctly.
6370 bool is_new;
6371 if (gsym->type() != elfcpp::STT_GNU_IFUNC)
6372 is_new = got->add_global(gsym, GOT_TYPE_STANDARD);
6373 else
6374 {
6375 is_new = got->add_global_plt(gsym, GOT_TYPE_STANDARD);
6376 // Tell the dynamic linker to use the PLT address
6377 // when resolving relocations.
6378 if (gsym->is_from_dynobj()
6379 && !parameters->options().shared())
6380 gsym->set_needs_dynsym_value();
6381 }
6382 if (is_new)
6383 {
6384 rela_dyn->add_global_relative(
6385 gsym, elfcpp::R_AARCH64_RELATIVE,
6386 got,
6387 gsym->got_offset(GOT_TYPE_STANDARD),
6388 0,
6389 false);
6390 }
6391 }
6392 }
6393 break;
6394 }
6395
6396 case elfcpp::R_AARCH64_TSTBR14:
6397 case elfcpp::R_AARCH64_CONDBR19:
6398 case elfcpp::R_AARCH64_JUMP26:
6399 case elfcpp::R_AARCH64_CALL26:
6400 {
6401 if (gsym->final_value_is_known())
6402 break;
6403
6404 if (gsym->is_defined() &&
6405 !gsym->is_from_dynobj() &&
6406 !gsym->is_preemptible())
6407 break;
6408
6409 // Make plt entry for function call.
6410 target->make_plt_entry(symtab, layout, gsym);
6411 break;
6412 }
6413
6414 case elfcpp::R_AARCH64_TLSGD_ADR_PAGE21:
6415 case elfcpp::R_AARCH64_TLSGD_ADD_LO12_NC: // General dynamic
6416 {
6417 tls::Tls_optimization tlsopt = Target_aarch64<size, big_endian>::
6418 optimize_tls_reloc(gsym->final_value_is_known(), r_type);
6419 if (tlsopt == tls::TLSOPT_TO_LE)
6420 {
6421 layout->set_has_static_tls();
6422 break;
6423 }
6424 gold_assert(tlsopt == tls::TLSOPT_NONE);
6425
6426 // General dynamic.
6427 Output_data_got_aarch64<size, big_endian>* got =
6428 target->got_section(symtab, layout);
6429 // Create 2 consecutive entries for module index and offset.
6430 got->add_global_pair_with_rel(gsym, GOT_TYPE_TLS_PAIR,
6431 target->rela_dyn_section(layout),
6432 elfcpp::R_AARCH64_TLS_DTPMOD64,
6433 elfcpp::R_AARCH64_TLS_DTPREL64);
6434 }
6435 break;
6436
6437 case elfcpp::R_AARCH64_TLSLD_ADR_PAGE21:
6438 case elfcpp::R_AARCH64_TLSLD_ADD_LO12_NC: // Local dynamic
6439 {
6440 tls::Tls_optimization tlsopt = Target_aarch64<size, big_endian>::
6441 optimize_tls_reloc(!parameters->options().shared(), r_type);
6442 if (tlsopt == tls::TLSOPT_NONE)
6443 {
6444 // Create a GOT entry for the module index.
6445 target->got_mod_index_entry(symtab, layout, object);
6446 }
6447 else if (tlsopt != tls::TLSOPT_TO_LE)
6448 unsupported_reloc_local(object, r_type);
6449 }
6450 break;
6451
6452 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G1:
6453 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
6454 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_HI12:
6455 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC: // Other local dynamic
6456 break;
6457
6458 case elfcpp::R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
6459 case elfcpp::R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC: // Initial executable
6460 {
6461 tls::Tls_optimization tlsopt = Target_aarch64<size, big_endian>::
6462 optimize_tls_reloc(gsym->final_value_is_known(), r_type);
6463 if (tlsopt == tls::TLSOPT_TO_LE)
6464 break;
6465
6466 layout->set_has_static_tls();
6467 // Create a GOT entry for the tp-relative offset.
6468 Output_data_got_aarch64<size, big_endian>* got
6469 = target->got_section(symtab, layout);
6470 if (!parameters->doing_static_link())
6471 {
6472 got->add_global_with_rel(
6473 gsym, GOT_TYPE_TLS_OFFSET,
6474 target->rela_dyn_section(layout),
6475 elfcpp::R_AARCH64_TLS_TPREL64);
6476 }
6477 if (!gsym->has_got_offset(GOT_TYPE_TLS_OFFSET))
6478 {
6479 got->add_global(gsym, GOT_TYPE_TLS_OFFSET);
6480 unsigned int got_offset =
6481 gsym->got_offset(GOT_TYPE_TLS_OFFSET);
6482 const elfcpp::Elf_Xword addend = rela.get_r_addend();
6483 gold_assert(addend == 0);
6484 got->add_static_reloc(got_offset,
6485 elfcpp::R_AARCH64_TLS_TPREL64, gsym);
6486 }
6487 }
6488 break;
6489
6490 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G2:
6491 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1:
6492 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
6493 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0:
6494 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
6495 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_HI12:
6496 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12:
6497 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12_NC: // Local executable
6498 layout->set_has_static_tls();
6499 if (parameters->options().shared())
6500 gold_error(_("%s: unsupported TLSLE reloc type %u in shared objects."),
6501 object->name().c_str(), r_type);
6502 break;
6503
6504 case elfcpp::R_AARCH64_TLSDESC_ADR_PAGE21:
6505 case elfcpp::R_AARCH64_TLSDESC_LD64_LO12:
6506 case elfcpp::R_AARCH64_TLSDESC_ADD_LO12: // TLS descriptor
6507 {
6508 target->define_tls_base_symbol(symtab, layout);
6509 tls::Tls_optimization tlsopt = Target_aarch64<size, big_endian>::
6510 optimize_tls_reloc(gsym->final_value_is_known(), r_type);
6511 if (tlsopt == tls::TLSOPT_NONE)
6512 {
6513 // Create reserved PLT and GOT entries for the resolver.
6514 target->reserve_tlsdesc_entries(symtab, layout);
6515
6516 // Create a double GOT entry with an R_AARCH64_TLSDESC
6517 // relocation. The R_AARCH64_TLSDESC is resolved lazily, so the GOT
6518 // entry needs to be in an area in .got.plt, not .got. Call
6519 // got_section to make sure the section has been created.
6520 target->got_section(symtab, layout);
6521 Output_data_got<size, big_endian>* got =
6522 target->got_tlsdesc_section();
6523 Reloc_section* rt = target->rela_tlsdesc_section(layout);
6524 got->add_global_pair_with_rel(gsym, GOT_TYPE_TLS_DESC, rt,
6525 elfcpp::R_AARCH64_TLSDESC, 0);
6526 }
6527 else if (tlsopt == tls::TLSOPT_TO_IE)
6528 {
6529 // Create a GOT entry for the tp-relative offset.
6530 Output_data_got<size, big_endian>* got
6531 = target->got_section(symtab, layout);
6532 got->add_global_with_rel(gsym, GOT_TYPE_TLS_OFFSET,
6533 target->rela_dyn_section(layout),
6534 elfcpp::R_AARCH64_TLS_TPREL64);
6535 }
6536 else if (tlsopt != tls::TLSOPT_TO_LE)
6537 unsupported_reloc_global(object, r_type, gsym);
6538 }
6539 break;
6540
6541 case elfcpp::R_AARCH64_TLSDESC_CALL:
6542 break;
6543
6544 default:
6545 gold_error(_("%s: unsupported reloc type in global scan"),
6546 aarch64_reloc_property_table->
6547 reloc_name_in_error_message(r_type).c_str());
6548 }
6549 return;
6550 } // End of Scan::global
6551
6552
6553 // Create the PLT section.
6554 template<int size, bool big_endian>
6555 void
6556 Target_aarch64<size, big_endian>::make_plt_section(
6557 Symbol_table* symtab, Layout* layout)
6558 {
6559 if (this->plt_ == NULL)
6560 {
6561 // Create the GOT section first.
6562 this->got_section(symtab, layout);
6563
6564 this->plt_ = this->make_data_plt(layout, this->got_, this->got_plt_,
6565 this->got_irelative_);
6566
6567 layout->add_output_section_data(".plt", elfcpp::SHT_PROGBITS,
6568 (elfcpp::SHF_ALLOC
6569 | elfcpp::SHF_EXECINSTR),
6570 this->plt_, ORDER_PLT, false);
6571
6572 // Make the sh_info field of .rela.plt point to .plt.
6573 Output_section* rela_plt_os = this->plt_->rela_plt()->output_section();
6574 rela_plt_os->set_info_section(this->plt_->output_section());
6575 }
6576 }
6577
6578 // Return the section for TLSDESC relocations.
6579
6580 template<int size, bool big_endian>
6581 typename Target_aarch64<size, big_endian>::Reloc_section*
6582 Target_aarch64<size, big_endian>::rela_tlsdesc_section(Layout* layout) const
6583 {
6584 return this->plt_section()->rela_tlsdesc(layout);
6585 }
6586
6587 // Create a PLT entry for a global symbol.
6588
6589 template<int size, bool big_endian>
6590 void
6591 Target_aarch64<size, big_endian>::make_plt_entry(
6592 Symbol_table* symtab,
6593 Layout* layout,
6594 Symbol* gsym)
6595 {
6596 if (gsym->has_plt_offset())
6597 return;
6598
6599 if (this->plt_ == NULL)
6600 this->make_plt_section(symtab, layout);
6601
6602 this->plt_->add_entry(symtab, layout, gsym);
6603 }
6604
6605 // Make a PLT entry for a local STT_GNU_IFUNC symbol.
6606
6607 template<int size, bool big_endian>
6608 void
6609 Target_aarch64<size, big_endian>::make_local_ifunc_plt_entry(
6610 Symbol_table* symtab, Layout* layout,
6611 Sized_relobj_file<size, big_endian>* relobj,
6612 unsigned int local_sym_index)
6613 {
6614 if (relobj->local_has_plt_offset(local_sym_index))
6615 return;
6616 if (this->plt_ == NULL)
6617 this->make_plt_section(symtab, layout);
6618 unsigned int plt_offset = this->plt_->add_local_ifunc_entry(symtab, layout,
6619 relobj,
6620 local_sym_index);
6621 relobj->set_local_plt_offset(local_sym_index, plt_offset);
6622 }
6623
6624 template<int size, bool big_endian>
6625 void
6626 Target_aarch64<size, big_endian>::gc_process_relocs(
6627 Symbol_table* symtab,
6628 Layout* layout,
6629 Sized_relobj_file<size, big_endian>* object,
6630 unsigned int data_shndx,
6631 unsigned int sh_type,
6632 const unsigned char* prelocs,
6633 size_t reloc_count,
6634 Output_section* output_section,
6635 bool needs_special_offset_handling,
6636 size_t local_symbol_count,
6637 const unsigned char* plocal_symbols)
6638 {
6639 typedef Target_aarch64<size, big_endian> Aarch64;
6640 typedef gold::Default_classify_reloc<elfcpp::SHT_RELA, size, big_endian>
6641 Classify_reloc;
6642
6643 if (sh_type == elfcpp::SHT_REL)
6644 {
6645 return;
6646 }
6647
6648 gold::gc_process_relocs<size, big_endian, Aarch64, Scan, Classify_reloc>(
6649 symtab,
6650 layout,
6651 this,
6652 object,
6653 data_shndx,
6654 prelocs,
6655 reloc_count,
6656 output_section,
6657 needs_special_offset_handling,
6658 local_symbol_count,
6659 plocal_symbols);
6660 }
6661
6662 // Scan relocations for a section.
6663
6664 template<int size, bool big_endian>
6665 void
6666 Target_aarch64<size, big_endian>::scan_relocs(
6667 Symbol_table* symtab,
6668 Layout* layout,
6669 Sized_relobj_file<size, big_endian>* object,
6670 unsigned int data_shndx,
6671 unsigned int sh_type,
6672 const unsigned char* prelocs,
6673 size_t reloc_count,
6674 Output_section* output_section,
6675 bool needs_special_offset_handling,
6676 size_t local_symbol_count,
6677 const unsigned char* plocal_symbols)
6678 {
6679 typedef Target_aarch64<size, big_endian> Aarch64;
6680 typedef gold::Default_classify_reloc<elfcpp::SHT_RELA, size, big_endian>
6681 Classify_reloc;
6682
6683 if (sh_type == elfcpp::SHT_REL)
6684 {
6685 gold_error(_("%s: unsupported REL reloc section"),
6686 object->name().c_str());
6687 return;
6688 }
6689
6690 gold::scan_relocs<size, big_endian, Aarch64, Scan, Classify_reloc>(
6691 symtab,
6692 layout,
6693 this,
6694 object,
6695 data_shndx,
6696 prelocs,
6697 reloc_count,
6698 output_section,
6699 needs_special_offset_handling,
6700 local_symbol_count,
6701 plocal_symbols);
6702 }
6703
6704 // Return the value to use for a dynamic symbol which requires special
6705 // treatment. This is how we support equality comparisons of function
6706 // pointers across shared library boundaries, as described in the
6707 // processor specific ABI supplement.
6708
6709 template<int size, bool big_endian>
6710 uint64_t
6711 Target_aarch64<size, big_endian>::do_dynsym_value(const Symbol* gsym) const
6712 {
6713 gold_assert(gsym->is_from_dynobj() && gsym->has_plt_offset());
6714 return this->plt_address_for_global(gsym);
6715 }
6716
6717
6718 // Finalize the sections.
6719
6720 template<int size, bool big_endian>
6721 void
6722 Target_aarch64<size, big_endian>::do_finalize_sections(
6723 Layout* layout,
6724 const Input_objects*,
6725 Symbol_table* symtab)
6726 {
6727 const Reloc_section* rel_plt = (this->plt_ == NULL
6728 ? NULL
6729 : this->plt_->rela_plt());
6730 layout->add_target_dynamic_tags(false, this->got_plt_, rel_plt,
6731 this->rela_dyn_, true, false);
6732
6733 // Emit any relocs we saved in an attempt to avoid generating COPY
6734 // relocs.
6735 if (this->copy_relocs_.any_saved_relocs())
6736 this->copy_relocs_.emit(this->rela_dyn_section(layout));
6737
6738 // Fill in some more dynamic tags.
6739 Output_data_dynamic* const odyn = layout->dynamic_data();
6740 if (odyn != NULL)
6741 {
6742 if (this->plt_ != NULL
6743 && this->plt_->output_section() != NULL
6744 && this->plt_ ->has_tlsdesc_entry())
6745 {
6746 unsigned int plt_offset = this->plt_->get_tlsdesc_plt_offset();
6747 unsigned int got_offset = this->plt_->get_tlsdesc_got_offset();
6748 this->got_->finalize_data_size();
6749 odyn->add_section_plus_offset(elfcpp::DT_TLSDESC_PLT,
6750 this->plt_, plt_offset);
6751 odyn->add_section_plus_offset(elfcpp::DT_TLSDESC_GOT,
6752 this->got_, got_offset);
6753 }
6754 }
6755
6756 // Set the size of the _GLOBAL_OFFSET_TABLE_ symbol to the size of
6757 // the .got.plt section.
6758 Symbol* sym = this->global_offset_table_;
6759 if (sym != NULL)
6760 {
6761 uint64_t data_size = this->got_plt_->current_data_size();
6762 symtab->get_sized_symbol<size>(sym)->set_symsize(data_size);
6763
6764 // If the .got section is more than 0x8000 bytes, we add
6765 // 0x8000 to the value of _GLOBAL_OFFSET_TABLE_, so that 16
6766 // bit relocations have a greater chance of working.
6767 if (data_size >= 0x8000)
6768 symtab->get_sized_symbol<size>(sym)->set_value(
6769 symtab->get_sized_symbol<size>(sym)->value() + 0x8000);
6770 }
6771
6772 if (parameters->doing_static_link()
6773 && (this->plt_ == NULL || !this->plt_->has_irelative_section()))
6774 {
6775 // If linking statically, make sure that the __rela_iplt symbols
6776 // were defined if necessary, even if we didn't create a PLT.
6777 static const Define_symbol_in_segment syms[] =
6778 {
6779 {
6780 "__rela_iplt_start", // name
6781 elfcpp::PT_LOAD, // segment_type
6782 elfcpp::PF_W, // segment_flags_set
6783 elfcpp::PF(0), // segment_flags_clear
6784 0, // value
6785 0, // size
6786 elfcpp::STT_NOTYPE, // type
6787 elfcpp::STB_GLOBAL, // binding
6788 elfcpp::STV_HIDDEN, // visibility
6789 0, // nonvis
6790 Symbol::SEGMENT_START, // offset_from_base
6791 true // only_if_ref
6792 },
6793 {
6794 "__rela_iplt_end", // name
6795 elfcpp::PT_LOAD, // segment_type
6796 elfcpp::PF_W, // segment_flags_set
6797 elfcpp::PF(0), // segment_flags_clear
6798 0, // value
6799 0, // size
6800 elfcpp::STT_NOTYPE, // type
6801 elfcpp::STB_GLOBAL, // binding
6802 elfcpp::STV_HIDDEN, // visibility
6803 0, // nonvis
6804 Symbol::SEGMENT_START, // offset_from_base
6805 true // only_if_ref
6806 }
6807 };
6808
6809 symtab->define_symbols(layout, 2, syms,
6810 layout->script_options()->saw_sections_clause());
6811 }
6812
6813 return;
6814 }
6815
6816 // Perform a relocation.
6817
6818 template<int size, bool big_endian>
6819 inline bool
6820 Target_aarch64<size, big_endian>::Relocate::relocate(
6821 const Relocate_info<size, big_endian>* relinfo,
6822 unsigned int,
6823 Target_aarch64<size, big_endian>* target,
6824 Output_section* ,
6825 size_t relnum,
6826 const unsigned char* preloc,
6827 const Sized_symbol<size>* gsym,
6828 const Symbol_value<size>* psymval,
6829 unsigned char* view,
6830 typename elfcpp::Elf_types<size>::Elf_Addr address,
6831 section_size_type /* view_size */)
6832 {
6833 if (view == NULL)
6834 return true;
6835
6836 typedef AArch64_relocate_functions<size, big_endian> Reloc;
6837
6838 const elfcpp::Rela<size, big_endian> rela(preloc);
6839 unsigned int r_type = elfcpp::elf_r_type<size>(rela.get_r_info());
6840 const AArch64_reloc_property* reloc_property =
6841 aarch64_reloc_property_table->get_reloc_property(r_type);
6842
6843 if (reloc_property == NULL)
6844 {
6845 std::string reloc_name =
6846 aarch64_reloc_property_table->reloc_name_in_error_message(r_type);
6847 gold_error_at_location(relinfo, relnum, rela.get_r_offset(),
6848 _("cannot relocate %s in object file"),
6849 reloc_name.c_str());
6850 return true;
6851 }
6852
6853 const Sized_relobj_file<size, big_endian>* object = relinfo->object;
6854
6855 // Pick the value to use for symbols defined in the PLT.
6856 Symbol_value<size> symval;
6857 if (gsym != NULL
6858 && gsym->use_plt_offset(reloc_property->reference_flags()))
6859 {
6860 symval.set_output_value(target->plt_address_for_global(gsym));
6861 psymval = &symval;
6862 }
6863 else if (gsym == NULL && psymval->is_ifunc_symbol())
6864 {
6865 unsigned int r_sym = elfcpp::elf_r_sym<size>(rela.get_r_info());
6866 if (object->local_has_plt_offset(r_sym))
6867 {
6868 symval.set_output_value(target->plt_address_for_local(object, r_sym));
6869 psymval = &symval;
6870 }
6871 }
6872
6873 const elfcpp::Elf_Xword addend = rela.get_r_addend();
6874
6875 // Get the GOT offset if needed.
6876 // For aarch64, the GOT pointer points to the start of the GOT section.
6877 bool have_got_offset = false;
6878 int got_offset = 0;
6879 int got_base = (target->got_ != NULL
6880 ? (target->got_->current_data_size() >= 0x8000
6881 ? 0x8000 : 0)
6882 : 0);
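// Like the _GLOBAL_OFFSET_TABLE_ adjustment above, biasing by 0x8000 when
// the GOT is large gives 16-bit GOT-relative relocations a better chance
// of being in range.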
6883 switch (r_type)
6884 {
6885 case elfcpp::R_AARCH64_MOVW_GOTOFF_G0:
6886 case elfcpp::R_AARCH64_MOVW_GOTOFF_G0_NC:
6887 case elfcpp::R_AARCH64_MOVW_GOTOFF_G1:
6888 case elfcpp::R_AARCH64_MOVW_GOTOFF_G1_NC:
6889 case elfcpp::R_AARCH64_MOVW_GOTOFF_G2:
6890 case elfcpp::R_AARCH64_MOVW_GOTOFF_G2_NC:
6891 case elfcpp::R_AARCH64_MOVW_GOTOFF_G3:
6892 case elfcpp::R_AARCH64_GOTREL64:
6893 case elfcpp::R_AARCH64_GOTREL32:
6894 case elfcpp::R_AARCH64_GOT_LD_PREL19:
6895 case elfcpp::R_AARCH64_LD64_GOTOFF_LO15:
6896 case elfcpp::R_AARCH64_ADR_GOT_PAGE:
6897 case elfcpp::R_AARCH64_LD64_GOT_LO12_NC:
6898 case elfcpp::R_AARCH64_LD64_GOTPAGE_LO15:
6899 if (gsym != NULL)
6900 {
6901 gold_assert(gsym->has_got_offset(GOT_TYPE_STANDARD));
6902 got_offset = gsym->got_offset(GOT_TYPE_STANDARD) - got_base;
6903 }
6904 else
6905 {
6906 unsigned int r_sym = elfcpp::elf_r_sym<size>(rela.get_r_info());
6907 gold_assert(object->local_has_got_offset(r_sym, GOT_TYPE_STANDARD));
6908 got_offset = (object->local_got_offset(r_sym, GOT_TYPE_STANDARD)
6909 - got_base);
6910 }
6911 have_got_offset = true;
6912 break;
6913
6914 default:
6915 break;
6916 }
6917
6918 typename Reloc::Status reloc_status = Reloc::STATUS_OKAY;
6919 typename elfcpp::Elf_types<size>::Elf_Addr value;
6920 switch (r_type)
6921 {
6922 case elfcpp::R_AARCH64_NONE:
6923 break;
6924
6925 case elfcpp::R_AARCH64_ABS64:
6926 if (!parameters->options().apply_dynamic_relocs()
6927 && parameters->options().output_is_position_independent()
6928 && gsym != NULL
6929 && gsym->needs_dynamic_reloc(reloc_property->reference_flags())
6930 && !gsym->can_use_relative_reloc(false))
6931 // We have generated an absolute dynamic relocation, so do not
6932 // apply the relocation statically. (Works around bugs in older
6933 // Android dynamic linkers.)
6934 break;
6935 reloc_status = Reloc::template rela_ua<64>(
6936 view, object, psymval, addend, reloc_property);
6937 break;
6938
6939 case elfcpp::R_AARCH64_ABS32:
6940 if (!parameters->options().apply_dynamic_relocs()
6941 && parameters->options().output_is_position_independent()
6942 && gsym != NULL
6943 && gsym->needs_dynamic_reloc(reloc_property->reference_flags()))
6944 // We have generated an absolute dynamic relocation, so do not
6945 // apply the relocation statically. (Works around bugs in older
6946 // Android dynamic linkers.)
6947 break;
6948 reloc_status = Reloc::template rela_ua<32>(
6949 view, object, psymval, addend, reloc_property);
6950 break;
6951
6952 case elfcpp::R_AARCH64_ABS16:
6953 if (!parameters->options().apply_dynamic_relocs()
6954 && parameters->options().output_is_position_independent()
6955 && gsym != NULL
6956 && gsym->needs_dynamic_reloc(reloc_property->reference_flags()))
6957 // We have generated an absolute dynamic relocation, so do not
6958 // apply the relocation statically. (Works around bugs in older
6959 // Android dynamic linkers.)
6960 break;
6961 reloc_status = Reloc::template rela_ua<16>(
6962 view, object, psymval, addend, reloc_property);
6963 break;
6964
6965 case elfcpp::R_AARCH64_PREL64:
6966 reloc_status = Reloc::template pcrela_ua<64>(
6967 view, object, psymval, addend, address, reloc_property);
6968 break;
6969
6970 case elfcpp::R_AARCH64_PREL32:
6971 reloc_status = Reloc::template pcrela_ua<32>(
6972 view, object, psymval, addend, address, reloc_property);
6973 break;
6974
6975 case elfcpp::R_AARCH64_PREL16:
6976 reloc_status = Reloc::template pcrela_ua<16>(
6977 view, object, psymval, addend, address, reloc_property);
6978 break;
6979
6980 case elfcpp::R_AARCH64_LD_PREL_LO19:
6981 reloc_status = Reloc::template pcrela_general<32>(
6982 view, object, psymval, addend, address, reloc_property);
6983 break;
6984
6985 case elfcpp::R_AARCH64_ADR_PREL_LO21:
6986 reloc_status = Reloc::adr(view, object, psymval, addend,
6987 address, reloc_property);
6988 break;
6989
6990 case elfcpp::R_AARCH64_ADR_PREL_PG_HI21_NC:
6991 case elfcpp::R_AARCH64_ADR_PREL_PG_HI21:
6992 reloc_status = Reloc::adrp(view, object, psymval, addend, address,
6993 reloc_property);
6994 break;
6995
6996 case elfcpp::R_AARCH64_LDST8_ABS_LO12_NC:
6997 case elfcpp::R_AARCH64_LDST16_ABS_LO12_NC:
6998 case elfcpp::R_AARCH64_LDST32_ABS_LO12_NC:
6999 case elfcpp::R_AARCH64_LDST64_ABS_LO12_NC:
7000 case elfcpp::R_AARCH64_LDST128_ABS_LO12_NC:
7001 case elfcpp::R_AARCH64_ADD_ABS_LO12_NC:
7002 reloc_status = Reloc::template rela_general<32>(
7003 view, object, psymval, addend, reloc_property);
7004 break;
7005
7006 case elfcpp::R_AARCH64_CALL26:
7007 if (this->skip_call_tls_get_addr_)
7008 {
7009 // Double check that the TLSGD insn has been optimized away.
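// tls_gd_to_le/tls_ld_to_le rewrote the "bl __tls_get_addr" into an
// "add x0, x0, #:tprel_lo12:", so an ADD (immediate) opcode is expected
// at this point.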
7010 typedef typename elfcpp::Swap<32, big_endian>::Valtype Insntype;
7011 Insntype insn = elfcpp::Swap<32, big_endian>::readval(
7012 reinterpret_cast<Insntype*>(view));
7013 gold_assert((insn & 0xff000000) == 0x91000000);
7014
7015 reloc_status = Reloc::STATUS_OKAY;
7016 this->skip_call_tls_get_addr_ = false;
7017 // Return false to stop further processing of this reloc.
7018 return false;
7019 }
7020 // Fallthrough
7021 case elfcpp::R_AARCH64_JUMP26:
7022 if (Reloc::maybe_apply_stub(r_type, relinfo, rela, view, address,
7023 gsym, psymval, object,
7024 target->stub_group_size_))
7025 break;
7026 // Fallthrough
7027 case elfcpp::R_AARCH64_TSTBR14:
7028 case elfcpp::R_AARCH64_CONDBR19:
7029 reloc_status = Reloc::template pcrela_general<32>(
7030 view, object, psymval, addend, address, reloc_property);
7031 break;
7032
7033 case elfcpp::R_AARCH64_ADR_GOT_PAGE:
7034 gold_assert(have_got_offset);
7035 value = target->got_->address() + got_base + got_offset;
7036 reloc_status = Reloc::adrp(view, value + addend, address);
7037 break;
7038
7039 case elfcpp::R_AARCH64_LD64_GOT_LO12_NC:
7040 gold_assert(have_got_offset);
7041 value = target->got_->address() + got_base + got_offset;
7042 reloc_status = Reloc::template rela_general<32>(
7043 view, value, addend, reloc_property);
7044 break;
7045
7046 case elfcpp::R_AARCH64_TLSGD_ADR_PAGE21:
7047 case elfcpp::R_AARCH64_TLSGD_ADD_LO12_NC:
7048 case elfcpp::R_AARCH64_TLSLD_ADR_PAGE21:
7049 case elfcpp::R_AARCH64_TLSLD_ADD_LO12_NC:
7050 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G1:
7051 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
7052 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_HI12:
7053 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
7054 case elfcpp::R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
7055 case elfcpp::R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
7056 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G2:
7057 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1:
7058 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
7059 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0:
7060 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
7061 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_HI12:
7062 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12:
7063 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
7064 case elfcpp::R_AARCH64_TLSDESC_ADR_PAGE21:
7065 case elfcpp::R_AARCH64_TLSDESC_LD64_LO12:
7066 case elfcpp::R_AARCH64_TLSDESC_ADD_LO12:
7067 case elfcpp::R_AARCH64_TLSDESC_CALL:
7068 reloc_status = relocate_tls(relinfo, target, relnum, rela, r_type,
7069 gsym, psymval, view, address);
7070 break;
7071
7072 // These are dynamic relocations, which are unexpected when linking.
7073 case elfcpp::R_AARCH64_COPY:
7074 case elfcpp::R_AARCH64_GLOB_DAT:
7075 case elfcpp::R_AARCH64_JUMP_SLOT:
7076 case elfcpp::R_AARCH64_RELATIVE:
7077 case elfcpp::R_AARCH64_IRELATIVE:
7078 case elfcpp::R_AARCH64_TLS_DTPREL64:
7079 case elfcpp::R_AARCH64_TLS_DTPMOD64:
7080 case elfcpp::R_AARCH64_TLS_TPREL64:
7081 case elfcpp::R_AARCH64_TLSDESC:
7082 gold_error_at_location(relinfo, relnum, rela.get_r_offset(),
7083 _("unexpected reloc %u in object file"),
7084 r_type);
7085 break;
7086
7087 default:
7088 gold_error_at_location(relinfo, relnum, rela.get_r_offset(),
7089 _("unsupported reloc %s"),
7090 reloc_property->name().c_str());
7091 break;
7092 }
7093
7094 // Report any errors.
7095 switch (reloc_status)
7096 {
7097 case Reloc::STATUS_OKAY:
7098 break;
7099 case Reloc::STATUS_OVERFLOW:
7100 gold_error_at_location(relinfo, relnum, rela.get_r_offset(),
7101 _("relocation overflow in %s"),
7102 reloc_property->name().c_str());
7103 break;
7104 case Reloc::STATUS_BAD_RELOC:
7105 gold_error_at_location(
7106 relinfo,
7107 relnum,
7108 rela.get_r_offset(),
7109 _("unexpected opcode while processing relocation %s"),
7110 reloc_property->name().c_str());
7111 break;
7112 default:
7113 gold_unreachable();
7114 }
7115
7116 return true;
7117 }
7118
7119
7120 template<int size, bool big_endian>
7121 inline
7122 typename AArch64_relocate_functions<size, big_endian>::Status
7123 Target_aarch64<size, big_endian>::Relocate::relocate_tls(
7124 const Relocate_info<size, big_endian>* relinfo,
7125 Target_aarch64<size, big_endian>* target,
7126 size_t relnum,
7127 const elfcpp::Rela<size, big_endian>& rela,
7128 unsigned int r_type, const Sized_symbol<size>* gsym,
7129 const Symbol_value<size>* psymval,
7130 unsigned char* view,
7131 typename elfcpp::Elf_types<size>::Elf_Addr address)
7132 {
7133 typedef AArch64_relocate_functions<size, big_endian> aarch64_reloc_funcs;
7134 typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
7135
7136 Output_segment* tls_segment = relinfo->layout->tls_segment();
7137 const elfcpp::Elf_Xword addend = rela.get_r_addend();
7138 const AArch64_reloc_property* reloc_property =
7139 aarch64_reloc_property_table->get_reloc_property(r_type);
7140 gold_assert(reloc_property != NULL);
7141
7142 const bool is_final = (gsym == NULL
7143 ? !parameters->options().shared()
7144 : gsym->final_value_is_known());
7145 tls::Tls_optimization tlsopt = Target_aarch64<size, big_endian>::
7146 optimize_tls_reloc(is_final, r_type);
7147
7148 Sized_relobj_file<size, big_endian>* object = relinfo->object;
7149 int tls_got_offset_type;
7150 switch (r_type)
7151 {
7152 case elfcpp::R_AARCH64_TLSGD_ADR_PAGE21:
7153 case elfcpp::R_AARCH64_TLSGD_ADD_LO12_NC: // Global-dynamic
7154 {
7155 if (tlsopt == tls::TLSOPT_TO_LE)
7156 {
7157 if (tls_segment == NULL)
7158 {
7159 gold_assert(parameters->errors()->error_count() > 0
7160 || issue_undefined_symbol_error(gsym));
7161 return aarch64_reloc_funcs::STATUS_BAD_RELOC;
7162 }
7163 return tls_gd_to_le(relinfo, target, rela, r_type, view,
7164 psymval);
7165 }
7166 else if (tlsopt == tls::TLSOPT_NONE)
7167 {
7168 tls_got_offset_type = GOT_TYPE_TLS_PAIR;
7169 // First, get the address of the GOT entry.
7170 typename elfcpp::Elf_types<size>::Elf_Addr got_entry_address;
7171 if (gsym != NULL)
7172 {
7173 gold_assert(gsym->has_got_offset(tls_got_offset_type));
7174 got_entry_address = target->got_->address() +
7175 gsym->got_offset(tls_got_offset_type);
7176 }
7177 else
7178 {
7179 unsigned int r_sym = elfcpp::elf_r_sym<size>(rela.get_r_info());
7180 gold_assert(
7181 object->local_has_got_offset(r_sym, tls_got_offset_type));
7182 got_entry_address = target->got_->address() +
7183 object->local_got_offset(r_sym, tls_got_offset_type);
7184 }
7185
7186 // Relocate the address into the adrp/ld or adrp/add pair.
7187 switch (r_type)
7188 {
7189 case elfcpp::R_AARCH64_TLSGD_ADR_PAGE21:
7190 return aarch64_reloc_funcs::adrp(
7191 view, got_entry_address + addend, address);
7192
7193 break;
7194
7195 case elfcpp::R_AARCH64_TLSGD_ADD_LO12_NC:
7196 return aarch64_reloc_funcs::template rela_general<32>(
7197 view, got_entry_address, addend, reloc_property);
7198 break;
7199
7200 default:
7201 gold_unreachable();
7202 }
7203 }
7204 gold_error_at_location(relinfo, relnum, rela.get_r_offset(),
7205 _("unsupported gd_to_ie relaxation on %u"),
7206 r_type);
7207 }
7208 break;
7209
7210 case elfcpp::R_AARCH64_TLSLD_ADR_PAGE21:
7211 case elfcpp::R_AARCH64_TLSLD_ADD_LO12_NC: // Local-dynamic
7212 {
7213 if (tlsopt == tls::TLSOPT_TO_LE)
7214 {
7215 if (tls_segment == NULL)
7216 {
7217 gold_assert(parameters->errors()->error_count() > 0
7218 || issue_undefined_symbol_error(gsym));
7219 return aarch64_reloc_funcs::STATUS_BAD_RELOC;
7220 }
7221 return this->tls_ld_to_le(relinfo, target, rela, r_type, view,
7222 psymval);
7223 }
7224
7225 gold_assert(tlsopt == tls::TLSOPT_NONE);
7226 // Relocate the field with the offset of the GOT entry for
7227 // the module index.
7228 typename elfcpp::Elf_types<size>::Elf_Addr got_entry_address;
7229 got_entry_address = (target->got_mod_index_entry(NULL, NULL, NULL) +
7230 target->got_->address());
7231
7232 switch (r_type)
7233 {
7234 case elfcpp::R_AARCH64_TLSLD_ADR_PAGE21:
7235 return aarch64_reloc_funcs::adrp(
7236 view, got_entry_address + addend, address);
7237 break;
7238
7239 case elfcpp::R_AARCH64_TLSLD_ADD_LO12_NC:
7240 return aarch64_reloc_funcs::template rela_general<32>(
7241 view, got_entry_address, addend, reloc_property);
7242 break;
7243
7244 default:
7245 gold_unreachable();
7246 }
7247 }
7248 break;
7249
7250 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G1:
7251 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
7252 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_HI12:
7253 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC: // Other local-dynamic
7254 {
7255 AArch64_address value = psymval->value(object, 0);
7256 if (tlsopt == tls::TLSOPT_TO_LE)
7257 {
7258 if (tls_segment == NULL)
7259 {
7260 gold_assert(parameters->errors()->error_count() > 0
7261 || issue_undefined_symbol_error(gsym));
7262 return aarch64_reloc_funcs::STATUS_BAD_RELOC;
7263 }
7264 }
7265 switch (r_type)
7266 {
7267 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G1:
7268 return aarch64_reloc_funcs::movnz(view, value + addend,
7269 reloc_property);
7270 break;
7271
7272 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
7273 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_HI12:
7274 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
7275 return aarch64_reloc_funcs::template rela_general<32>(
7276 view, value, addend, reloc_property);
7277 break;
7278
7279 default:
7280 gold_unreachable();
7281 }
7282 // We should never reach here.
7283 }
7284 break;
7285
7286 case elfcpp::R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
7287 case elfcpp::R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC: // Initial-exec
7288 {
7289 if (tlsopt == tls::TLSOPT_TO_LE)
7290 {
7291 if (tls_segment == NULL)
7292 {
7293 gold_assert(parameters->errors()->error_count() > 0
7294 || issue_undefined_symbol_error(gsym));
7295 return aarch64_reloc_funcs::STATUS_BAD_RELOC;
7296 }
7297 return tls_ie_to_le(relinfo, target, rela, r_type, view,
7298 psymval);
7299 }
7300 tls_got_offset_type = GOT_TYPE_TLS_OFFSET;
7301
7302 // First, get the address of the GOT entry.
7303 typename elfcpp::Elf_types<size>::Elf_Addr got_entry_address;
7304 if (gsym != NULL)
7305 {
7306 gold_assert(gsym->has_got_offset(tls_got_offset_type));
7307 got_entry_address = target->got_->address() +
7308 gsym->got_offset(tls_got_offset_type);
7309 }
7310 else
7311 {
7312 unsigned int r_sym = elfcpp::elf_r_sym<size>(rela.get_r_info());
7313 gold_assert(
7314 object->local_has_got_offset(r_sym, tls_got_offset_type));
7315 got_entry_address = target->got_->address() +
7316 object->local_got_offset(r_sym, tls_got_offset_type);
7317 }
7318 // Relocate the address into the adrp/ld or adrp/add pair.
7319 switch (r_type)
7320 {
7321 case elfcpp::R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
7322 return aarch64_reloc_funcs::adrp(view, got_entry_address + addend,
7323 address);
7324 break;
7325 case elfcpp::R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
7326 return aarch64_reloc_funcs::template rela_general<32>(
7327 view, got_entry_address, addend, reloc_property);
7328 default:
7329 gold_unreachable();
7330 }
7331 }
7332 // We shall never reach here.
7333 break;
7334
7335 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G2:
7336 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1:
7337 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
7338 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0:
7339 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
7340 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_HI12:
7341 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12:
7342 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
7343 {
7344 gold_assert(tls_segment != NULL);
7345 AArch64_address value = psymval->value(object, 0);
7346
7347 if (!parameters->options().shared())
7348 {
7349 AArch64_address aligned_tcb_size =
7350 align_address(target->tcb_size(),
7351 tls_segment->maximum_alignment());
7352 value += aligned_tcb_size;
7353 switch (r_type)
7354 {
7355 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G2:
7356 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1:
7357 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0:
7358 return aarch64_reloc_funcs::movnz(view, value + addend,
7359 reloc_property);
7360 default:
7361 return aarch64_reloc_funcs::template
7362 rela_general<32>(view,
7363 value,
7364 addend,
7365 reloc_property);
7366 }
7367 }
7368 else
7369 gold_error(_("%s: unsupported reloc %u "
7370 "in non-static TLSLE mode."),
7371 object->name().c_str(), r_type);
7372 }
7373 break;
7374
7375 case elfcpp::R_AARCH64_TLSDESC_ADR_PAGE21:
7376 case elfcpp::R_AARCH64_TLSDESC_LD64_LO12:
7377 case elfcpp::R_AARCH64_TLSDESC_ADD_LO12:
7378 case elfcpp::R_AARCH64_TLSDESC_CALL:
7379 {
7380 if (tlsopt == tls::TLSOPT_TO_LE)
7381 {
7382 if (tls_segment == NULL)
7383 {
7384 gold_assert(parameters->errors()->error_count() > 0
7385 || issue_undefined_symbol_error(gsym));
7386 return aarch64_reloc_funcs::STATUS_BAD_RELOC;
7387 }
7388 return tls_desc_gd_to_le(relinfo, target, rela, r_type,
7389 view, psymval);
7390 }
7391 else
7392 {
7393 tls_got_offset_type = (tlsopt == tls::TLSOPT_TO_IE
7394 ? GOT_TYPE_TLS_OFFSET
7395 : GOT_TYPE_TLS_DESC);
7396 unsigned int got_tlsdesc_offset = 0;
7397 if (r_type != elfcpp::R_AARCH64_TLSDESC_CALL
7398 && tlsopt == tls::TLSOPT_NONE)
7399 {
7400 // We created GOT entries in the .got.tlsdesc portion of the
7401 // .got.plt section, but the offset stored in the symbol is the
7402 // offset within .got.tlsdesc.
7403 got_tlsdesc_offset = (target->got_->data_size()
7404 + target->got_plt_section()->data_size());
7405 }
7406 typename elfcpp::Elf_types<size>::Elf_Addr got_entry_address;
7407 if (gsym != NULL)
7408 {
7409 gold_assert(gsym->has_got_offset(tls_got_offset_type));
7410 got_entry_address = target->got_->address()
7411 + got_tlsdesc_offset
7412 + gsym->got_offset(tls_got_offset_type);
7413 }
7414 else
7415 {
7416 unsigned int r_sym = elfcpp::elf_r_sym<size>(rela.get_r_info());
7417 gold_assert(
7418 object->local_has_got_offset(r_sym, tls_got_offset_type));
7419 got_entry_address = target->got_->address() +
7420 got_tlsdesc_offset +
7421 object->local_got_offset(r_sym, tls_got_offset_type);
7422 }
7423 if (tlsopt == tls::TLSOPT_TO_IE)
7424 {
7425 return tls_desc_gd_to_ie(relinfo, target, rela, r_type,
7426 view, psymval, got_entry_address,
7427 address);
7428 }
7429
7430 // Now do tlsdesc relocation.
7431 switch (r_type)
7432 {
7433 case elfcpp::R_AARCH64_TLSDESC_ADR_PAGE21:
7434 return aarch64_reloc_funcs::adrp(view,
7435 got_entry_address + addend,
7436 address);
7437 break;
7438 case elfcpp::R_AARCH64_TLSDESC_LD64_LO12:
7439 case elfcpp::R_AARCH64_TLSDESC_ADD_LO12:
7440 return aarch64_reloc_funcs::template rela_general<32>(
7441 view, got_entry_address, addend, reloc_property);
7442 break;
7443 case elfcpp::R_AARCH64_TLSDESC_CALL:
7444 return aarch64_reloc_funcs::STATUS_OKAY;
7445 break;
7446 default:
7447 gold_unreachable();
7448 }
7449 }
7450 }
7451 break;
7452
7453 default:
7454 gold_error(_("%s: unsupported TLS reloc %u."),
7455 object->name().c_str(), r_type);
7456 }
7457 return aarch64_reloc_funcs::STATUS_BAD_RELOC;
7458 } // End of relocate_tls.
7459
7460
7461 template<int size, bool big_endian>
7462 inline
7463 typename AArch64_relocate_functions<size, big_endian>::Status
7464 Target_aarch64<size, big_endian>::Relocate::tls_gd_to_le(
7465 const Relocate_info<size, big_endian>* relinfo,
7466 Target_aarch64<size, big_endian>* target,
7467 const elfcpp::Rela<size, big_endian>& rela,
7468 unsigned int r_type,
7469 unsigned char* view,
7470 const Symbol_value<size>* psymval)
7471 {
7472 typedef AArch64_relocate_functions<size, big_endian> aarch64_reloc_funcs;
7473 typedef typename elfcpp::Swap<32, big_endian>::Valtype Insntype;
7474 typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
7475
7476 Insntype* ip = reinterpret_cast<Insntype*>(view);
7477 Insntype insn1 = elfcpp::Swap<32, big_endian>::readval(ip);
7478 Insntype insn2 = elfcpp::Swap<32, big_endian>::readval(ip + 1);
7479 Insntype insn3 = elfcpp::Swap<32, big_endian>::readval(ip + 2);
7480
7481 if (r_type == elfcpp::R_AARCH64_TLSGD_ADD_LO12_NC)
7482 {
7483 // This is the second reloc of the pair; the rewrite was already done
7484 // when the first reloc was processed, so just sanity-check the add insn.
7485 gold_assert((insn1 & 0xfff00000) == 0x91400000);
7486 return aarch64_reloc_funcs::STATUS_OKAY;
7487 }
7488
7489 // The original sequence is -
7490 // 90000000 adrp x0, 0 <main>
7491 // 91000000 add x0, x0, #0x0
7492 // 94000000 bl 0 <__tls_get_addr>
7493 // optimized to sequence -
7494 // d53bd040 mrs x0, tpidr_el0
7495 // 91400000 add x0, x0, #0x0, lsl #12
7496 // 91000000 add x0, x0, #0x0
7497
7498 // Unlike tls_ie_to_le, we rewrite all 3 insns in one call, when we see the
7499 // first relocation, R_AARCH64_TLSGD_ADR_PAGE21, because we also have to
7500 // rewrite "bl __tls_get_addr", which has no corresponding TLS relocation
7501 // type. So before proceeding we need to make sure the compiler has not
7502 // changed the sequence.
7503 if (!(insn1 == 0x90000000 // adrp x0,0
7504 && insn2 == 0x91000000 // add x0, x0, #0x0
7505 && insn3 == 0x94000000)) // bl 0
7506 {
7507 // Ideally we should give up gd_to_le relaxation and do gd access.
7508 // However the gd_to_le relaxation decision has been made early
7509 // in the scan stage, where we did not allocate any GOT entry for
7510 // this symbol. Therefore we have to exit and report an error now.
7511 gold_error(_("unexpected reloc insn sequence while relaxing "
7512 "tls gd to le for reloc %u."), r_type);
7513 return aarch64_reloc_funcs::STATUS_BAD_RELOC;
7514 }
7515
7516 // Write new insns.
7517 insn1 = 0xd53bd040; // mrs x0, tpidr_el0
7518 insn2 = 0x91400000; // add x0, x0, #0x0, lsl #12
7519 insn3 = 0x91000000; // add x0, x0, #0x0
7520 elfcpp::Swap<32, big_endian>::writeval(ip, insn1);
7521 elfcpp::Swap<32, big_endian>::writeval(ip + 1, insn2);
7522 elfcpp::Swap<32, big_endian>::writeval(ip + 2, insn3);
7523
7524 // Calculate tprel value.
7525 Output_segment* tls_segment = relinfo->layout->tls_segment();
7526 gold_assert(tls_segment != NULL);
7527 AArch64_address value = psymval->value(relinfo->object, 0);
7528 const elfcpp::Elf_Xword addend = rela.get_r_addend();
7529 AArch64_address aligned_tcb_size =
7530 align_address(target->tcb_size(), tls_segment->maximum_alignment());
7531 AArch64_address x = value + aligned_tcb_size;
7532
7533 // After new insns are written, apply TLSLE relocs.
7534 const AArch64_reloc_property* rp1 =
7535 aarch64_reloc_property_table->get_reloc_property(
7536 elfcpp::R_AARCH64_TLSLE_ADD_TPREL_HI12);
7537 const AArch64_reloc_property* rp2 =
7538 aarch64_reloc_property_table->get_reloc_property(
7539 elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12);
7540 gold_assert(rp1 != NULL && rp2 != NULL);
7541
7542 typename aarch64_reloc_funcs::Status s1 =
7543 aarch64_reloc_funcs::template rela_general<32>(view + 4,
7544 x,
7545 addend,
7546 rp1);
7547 if (s1 != aarch64_reloc_funcs::STATUS_OKAY)
7548 return s1;
7549
7550 typename aarch64_reloc_funcs::Status s2 =
7551 aarch64_reloc_funcs::template rela_general<32>(view + 8,
7552 x,
7553 addend,
7554 rp2);
7555
7556 this->skip_call_tls_get_addr_ = true;
7557 return s2;
7558 } // End of tls_gd_to_le
7559
7560
7561 template<int size, bool big_endian>
7562 inline
7563 typename AArch64_relocate_functions<size, big_endian>::Status
7564 Target_aarch64<size, big_endian>::Relocate::tls_ld_to_le(
7565 const Relocate_info<size, big_endian>* relinfo,
7566 Target_aarch64<size, big_endian>* target,
7567 const elfcpp::Rela<size, big_endian>& rela,
7568 unsigned int r_type,
7569 unsigned char* view,
7570 const Symbol_value<size>* psymval)
7571 {
7572 typedef AArch64_relocate_functions<size, big_endian> aarch64_reloc_funcs;
7573 typedef typename elfcpp::Swap<32, big_endian>::Valtype Insntype;
7574 typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
7575
7576 Insntype* ip = reinterpret_cast<Insntype*>(view);
7577 Insntype insn1 = elfcpp::Swap<32, big_endian>::readval(ip);
7578 Insntype insn2 = elfcpp::Swap<32, big_endian>::readval(ip + 1);
7579 Insntype insn3 = elfcpp::Swap<32, big_endian>::readval(ip + 2);
7580
7581 if (r_type == elfcpp::R_AARCH64_TLSLD_ADD_LO12_NC)
7582 {
7583 // This is the second reloc of the pair; the rewrite was already done
7584 // when the first reloc was processed, so just sanity-check the add insn.
7585 gold_assert((insn1 & 0xfff00000) == 0x91400000);
7586 return aarch64_reloc_funcs::STATUS_OKAY;
7587 }
7588
7589 // The original sequence is -
7590 // 90000000 adrp x0, 0 <main>
7591 // 91000000 add x0, x0, #0x0
7592 // 94000000 bl 0 <__tls_get_addr>
7593 // optimized to sequence -
7594 // d53bd040 mrs x0, tpidr_el0
7595 // 91400000 add x0, x0, #0x0, lsl #12
7596 // 91000000 add x0, x0, #0x0
7597
7598 // Unlike tls_ie_to_le, we rewrite all 3 insns in one call, when we see the
7599 // first relocation, R_AARCH64_TLSLD_ADR_PAGE21, because we also have to
7600 // rewrite "bl __tls_get_addr", which has no corresponding TLS relocation
7601 // type. So before proceeding we need to make sure the compiler has not
7602 // changed the sequence.
7603 if (!(insn1 == 0x90000000 // adrp x0,0
7604 && insn2 == 0x91000000 // add x0, x0, #0x0
7605 && insn3 == 0x94000000)) // bl 0
7606 {
7607 // Ideally we should give up ld_to_le relaxation and do ld access.
7608 // However the ld_to_le relaxation decision has been made early
7609 // in the scan stage, where we did not allocate any GOT entry for
7610 // this symbol. Therefore we have to exit and report an error now.
7611 gold_error(_("unexpected reloc insn sequence while relaxing "
7612 "tls ld to le for reloc %u."), r_type);
7613 return aarch64_reloc_funcs::STATUS_BAD_RELOC;
7614 }
7615
7616 // Write new insns.
7617 insn1 = 0xd53bd040; // mrs x0, tpidr_el0
7618 insn2 = 0x91400000; // add x0, x0, #0x0, lsl #12
7619 insn3 = 0x91000000; // add x0, x0, #0x0
7620 elfcpp::Swap<32, big_endian>::writeval(ip, insn1);
7621 elfcpp::Swap<32, big_endian>::writeval(ip + 1, insn2);
7622 elfcpp::Swap<32, big_endian>::writeval(ip + 2, insn3);
7623
7624 // Calculate tprel value.
7625 Output_segment* tls_segment = relinfo->layout->tls_segment();
7626 gold_assert(tls_segment != NULL);
7627 AArch64_address value = psymval->value(relinfo->object, 0);
7628 const elfcpp::Elf_Xword addend = rela.get_r_addend();
7629 AArch64_address aligned_tcb_size =
7630 align_address(target->tcb_size(), tls_segment->maximum_alignment());
7631 AArch64_address x = value + aligned_tcb_size;
7632
7633 // After new insns are written, apply TLSLE relocs.
7634 const AArch64_reloc_property* rp1 =
7635 aarch64_reloc_property_table->get_reloc_property(
7636 elfcpp::R_AARCH64_TLSLE_ADD_TPREL_HI12);
7637 const AArch64_reloc_property* rp2 =
7638 aarch64_reloc_property_table->get_reloc_property(
7639 elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12);
7640 gold_assert(rp1 != NULL && rp2 != NULL);
7641
7642 typename aarch64_reloc_funcs::Status s1 =
7643 aarch64_reloc_funcs::template rela_general<32>(view + 4,
7644 x,
7645 addend,
7646 rp1);
7647 if (s1 != aarch64_reloc_funcs::STATUS_OKAY)
7648 return s1;
7649
7650 typename aarch64_reloc_funcs::Status s2 =
7651 aarch64_reloc_funcs::template rela_general<32>(view + 8,
7652 x,
7653 addend,
7654 rp2);
7655
7656 this->skip_call_tls_get_addr_ = true;
7657 return s2;
7658
7659 } // End of tls_ld_to_le
7660
7661 template<int size, bool big_endian>
7662 inline
7663 typename AArch64_relocate_functions<size, big_endian>::Status
7664 Target_aarch64<size, big_endian>::Relocate::tls_ie_to_le(
7665 const Relocate_info<size, big_endian>* relinfo,
7666 Target_aarch64<size, big_endian>* target,
7667 const elfcpp::Rela<size, big_endian>& rela,
7668 unsigned int r_type,
7669 unsigned char* view,
7670 const Symbol_value<size>* psymval)
7671 {
7672 typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
7673 typedef typename elfcpp::Swap<32, big_endian>::Valtype Insntype;
7674 typedef AArch64_relocate_functions<size, big_endian> aarch64_reloc_funcs;
7675
7676 AArch64_address value = psymval->value(relinfo->object, 0);
7677 Output_segment* tls_segment = relinfo->layout->tls_segment();
7678 AArch64_address aligned_tcb_address =
7679 align_address(target->tcb_size(), tls_segment->maximum_alignment());
7680 const elfcpp::Elf_Xword addend = rela.get_r_addend();
7681 AArch64_address x = value + addend + aligned_tcb_address;
7682 // "x" is the offset to tp, we can only do this if x is within
7683 // range [0, 2^32-1]
7684 if (!(size == 32 || (size == 64 && (static_cast<uint64_t>(x) >> 32) == 0)))
7685 {
7686 gold_error(_("TLS variable referenced by reloc %u is too far from TP."),
7687 r_type);
7688 return aarch64_reloc_funcs::STATUS_BAD_RELOC;
7689 }
7690
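// The initial-exec sequence
//   adrp  xd, :gottprel:var
//   ldr   xd, [xd, #:gottprel_lo12:var]
// is rewritten insn by insn into
//   movz  xd, #((x >> 16) & 0xffff), lsl #16
//   movk  xd, #(x & 0xffff)
// placing the TP offset "x" directly in the destination register.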
7691 Insntype* ip = reinterpret_cast<Insntype*>(view);
7692 Insntype insn = elfcpp::Swap<32, big_endian>::readval(ip);
7693 unsigned int regno;
7694 Insntype newinsn;
7695 if (r_type == elfcpp::R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21)
7696 {
7697 // Generate movz.
7698 regno = (insn & 0x1f);
7699 newinsn = (0xd2a00000 | regno) | (((x >> 16) & 0xffff) << 5);
7700 }
7701 else if (r_type == elfcpp::R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC)
7702 {
7703 // Generate movk.
7704 regno = (insn & 0x1f);
7705 gold_assert(regno == ((insn >> 5) & 0x1f));
7706 newinsn = (0xf2800000 | regno) | ((x & 0xffff) << 5);
7707 }
7708 else
7709 gold_unreachable();
7710
7711 elfcpp::Swap<32, big_endian>::writeval(ip, newinsn);
7712 return aarch64_reloc_funcs::STATUS_OKAY;
7713 } // End of tls_ie_to_le
7714
7715
7716 template<int size, bool big_endian>
7717 inline
7718 typename AArch64_relocate_functions<size, big_endian>::Status
7719 Target_aarch64<size, big_endian>::Relocate::tls_desc_gd_to_le(
7720 const Relocate_info<size, big_endian>* relinfo,
7721 Target_aarch64<size, big_endian>* target,
7722 const elfcpp::Rela<size, big_endian>& rela,
7723 unsigned int r_type,
7724 unsigned char* view,
7725 const Symbol_value<size>* psymval)
7726 {
7727 typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
7728 typedef typename elfcpp::Swap<32, big_endian>::Valtype Insntype;
7729 typedef AArch64_relocate_functions<size, big_endian> aarch64_reloc_funcs;
7730
7731 // TLSDESC-GD sequence is like:
7732 // adrp x0, :tlsdesc:v1
7733 // ldr x1, [x0, #:tlsdesc_lo12:v1]
7734 // add x0, x0, :tlsdesc_lo12:v1
7735 // .tlsdesccall v1
7736 // blr x1
7737 // After desc_gd_to_le optimization, the sequence will be like:
7738 // movz x0, #0x0, lsl #16
7739 // movk x0, #0x10
7740 // nop
7741 // nop
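// The movz carries bits [31:16] of the TP offset and the movk carries
// bits [15:0]; the offset itself ("x") is computed below.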
7742
7743 // Calculate tprel value.
7744 Output_segment* tls_segment = relinfo->layout->tls_segment();
7745 gold_assert(tls_segment != NULL);
7746 Insntype* ip = reinterpret_cast<Insntype*>(view);
7747 const elfcpp::Elf_Xword addend = rela.get_r_addend();
7748 AArch64_address value = psymval->value(relinfo->object, addend);
7749 AArch64_address aligned_tcb_size =
7750 align_address(target->tcb_size(), tls_segment->maximum_alignment());
7751 AArch64_address x = value + aligned_tcb_size;
7752 // x is the offset to tp, we can only do this if x is within range
7753 // [0, 2^32-1]. If x is out of range, fail and exit.
7754 if (size == 64 && (static_cast<uint64_t>(x) >> 32) != 0)
7755 {
7756 gold_error(_("TLS variable referenced by reloc %u is too far from TP; "
7757 "cannot do gd_to_le relaxation."), r_type);
7758 return aarch64_reloc_funcs::STATUS_BAD_RELOC;
7759 }
7760 Insntype newinsn;
7761 switch (r_type)
7762 {
7763 case elfcpp::R_AARCH64_TLSDESC_ADD_LO12:
7764 case elfcpp::R_AARCH64_TLSDESC_CALL:
7765 // Change to nop
7766 newinsn = 0xd503201f;
7767 break;
7768
7769 case elfcpp::R_AARCH64_TLSDESC_ADR_PAGE21:
7770 // Change to movz.
7771 newinsn = 0xd2a00000 | (((x >> 16) & 0xffff) << 5);
7772 break;
7773
7774 case elfcpp::R_AARCH64_TLSDESC_LD64_LO12:
7775 // Change to movk.
7776 newinsn = 0xf2800000 | ((x & 0xffff) << 5);
7777 break;
7778
7779 default:
7780 gold_error(_("unsupported tlsdesc gd_to_le optimization on reloc %u"),
7781 r_type);
7782 gold_unreachable();
7783 }
7784 elfcpp::Swap<32, big_endian>::writeval(ip, newinsn);
7785 return aarch64_reloc_funcs::STATUS_OKAY;
7786 } // End of tls_desc_gd_to_le
7787
7788
7789 template<int size, bool big_endian>
7790 inline
7791 typename AArch64_relocate_functions<size, big_endian>::Status
7792 Target_aarch64<size, big_endian>::Relocate::tls_desc_gd_to_ie(
7793 const Relocate_info<size, big_endian>* /* relinfo */,
7794 Target_aarch64<size, big_endian>* /* target */,
7795 const elfcpp::Rela<size, big_endian>& rela,
7796 unsigned int r_type,
7797 unsigned char* view,
7798 const Symbol_value<size>* /* psymval */,
7799 typename elfcpp::Elf_types<size>::Elf_Addr got_entry_address,
7800 typename elfcpp::Elf_types<size>::Elf_Addr address)
7801 {
7802 typedef typename elfcpp::Swap<32, big_endian>::Valtype Insntype;
7803 typedef AArch64_relocate_functions<size, big_endian> aarch64_reloc_funcs;
7804
7805 // TLSDESC-GD sequence is like:
7806 // adrp x0, :tlsdesc:v1
7807 // ldr x1, [x0, #:tlsdesc_lo12:v1]
7808 // add x0, x0, :tlsdesc_lo12:v1
7809 // .tlsdesccall v1
7810 // blr x1
7811 // After desc_gd_to_ie optimization, the sequence will be like:
7812 // adrp x0, :tlsie:v1
7813 // ldr x0, [x0, :tlsie_lo12:v1]
7814 // nop
7815 // nop
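// Note that the ldr's destination becomes x0 (its Rt field is cleared
// below), and the ldr is then relocated like an
// R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC reloc against the IE GOT entry.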
7816
7817 Insntype* ip = reinterpret_cast<Insntype*>(view);
7818 const elfcpp::Elf_Xword addend = rela.get_r_addend();
7819 Insntype newinsn;
7820 switch (r_type)
7821 {
7822 case elfcpp::R_AARCH64_TLSDESC_ADD_LO12:
7823 case elfcpp::R_AARCH64_TLSDESC_CALL:
7824 // Change to nop
7825 newinsn = 0xd503201f;
7826 elfcpp::Swap<32, big_endian>::writeval(ip, newinsn);
7827 break;
7828
7829 case elfcpp::R_AARCH64_TLSDESC_ADR_PAGE21:
7830 {
7831 return aarch64_reloc_funcs::adrp(view, got_entry_address + addend,
7832 address);
7833 }
7834 break;
7835
7836 case elfcpp::R_AARCH64_TLSDESC_LD64_LO12:
7837 {
7838 // Set ldr target register to be x0.
7839 Insntype insn = elfcpp::Swap<32, big_endian>::readval(ip);
7840 insn &= 0xffffffe0;
7841 elfcpp::Swap<32, big_endian>::writeval(ip, insn);
7842 // Do relocation.
7843 const AArch64_reloc_property* reloc_property =
7844 aarch64_reloc_property_table->get_reloc_property(
7845 elfcpp::R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC);
7846 return aarch64_reloc_funcs::template rela_general<32>(
7847 view, got_entry_address, addend, reloc_property);
7848 }
7849 break;
7850
7851 default:
7852 gold_error(_("unsupported tlsdesc gd_to_ie optimization on reloc %u"),
7853 r_type);
7854 gold_unreachable();
7855 }
7856 return aarch64_reloc_funcs::STATUS_OKAY;
7857 } // End of tls_desc_gd_to_ie
7858
7859 // Relocate section data.
7860
7861 template<int size, bool big_endian>
7862 void
7863 Target_aarch64<size, big_endian>::relocate_section(
7864 const Relocate_info<size, big_endian>* relinfo,
7865 unsigned int sh_type,
7866 const unsigned char* prelocs,
7867 size_t reloc_count,
7868 Output_section* output_section,
7869 bool needs_special_offset_handling,
7870 unsigned char* view,
7871 typename elfcpp::Elf_types<size>::Elf_Addr address,
7872 section_size_type view_size,
7873 const Reloc_symbol_changes* reloc_symbol_changes)
7874 {
7875 typedef Target_aarch64<size, big_endian> Aarch64;
7876 typedef typename Target_aarch64<size, big_endian>::Relocate AArch64_relocate;
7877 typedef gold::Default_classify_reloc<elfcpp::SHT_RELA, size, big_endian>
7878 Classify_reloc;
7879
7880 gold_assert(sh_type == elfcpp::SHT_RELA);
7881
7882 gold::relocate_section<size, big_endian, Aarch64, AArch64_relocate,
7883 gold::Default_comdat_behavior, Classify_reloc>(
7884 relinfo,
7885 this,
7886 prelocs,
7887 reloc_count,
7888 output_section,
7889 needs_special_offset_handling,
7890 view,
7891 address,
7892 view_size,
7893 reloc_symbol_changes);
7894 }
7895
7896 // Scan the relocs during a relocatable link.
7897
7898 template<int size, bool big_endian>
7899 void
7900 Target_aarch64<size, big_endian>::scan_relocatable_relocs(
7901 Symbol_table* symtab,
7902 Layout* layout,
7903 Sized_relobj_file<size, big_endian>* object,
7904 unsigned int data_shndx,
7905 unsigned int sh_type,
7906 const unsigned char* prelocs,
7907 size_t reloc_count,
7908 Output_section* output_section,
7909 bool needs_special_offset_handling,
7910 size_t local_symbol_count,
7911 const unsigned char* plocal_symbols,
7912 Relocatable_relocs* rr)
7913 {
7914 typedef gold::Default_classify_reloc<elfcpp::SHT_RELA, size, big_endian>
7915 Classify_reloc;
7916 typedef gold::Default_scan_relocatable_relocs<Classify_reloc>
7917 Scan_relocatable_relocs;
7918
7919 gold_assert(sh_type == elfcpp::SHT_RELA);
7920
7921 gold::scan_relocatable_relocs<size, big_endian, Scan_relocatable_relocs>(
7922 symtab,
7923 layout,
7924 object,
7925 data_shndx,
7926 prelocs,
7927 reloc_count,
7928 output_section,
7929 needs_special_offset_handling,
7930 local_symbol_count,
7931 plocal_symbols,
7932 rr);
7933 }
7934
7935 // Scan the relocs for --emit-relocs.
7936
7937 template<int size, bool big_endian>
7938 void
7939 Target_aarch64<size, big_endian>::emit_relocs_scan(
7940 Symbol_table* symtab,
7941 Layout* layout,
7942 Sized_relobj_file<size, big_endian>* object,
7943 unsigned int data_shndx,
7944 unsigned int sh_type,
7945 const unsigned char* prelocs,
7946 size_t reloc_count,
7947 Output_section* output_section,
7948 bool needs_special_offset_handling,
7949 size_t local_symbol_count,
7950 const unsigned char* plocal_syms,
7951 Relocatable_relocs* rr)
7952 {
7953 typedef gold::Default_classify_reloc<elfcpp::SHT_RELA, size, big_endian>
7954 Classify_reloc;
7955 typedef gold::Default_emit_relocs_strategy<Classify_reloc>
7956 Emit_relocs_strategy;
7957
7958 gold_assert(sh_type == elfcpp::SHT_RELA);
7959
7960 gold::scan_relocatable_relocs<size, big_endian, Emit_relocs_strategy>(
7961 symtab,
7962 layout,
7963 object,
7964 data_shndx,
7965 prelocs,
7966 reloc_count,
7967 output_section,
7968 needs_special_offset_handling,
7969 local_symbol_count,
7970 plocal_syms,
7971 rr);
7972 }
7973
7974 // Relocate a section during a relocatable link.
7975
7976 template<int size, bool big_endian>
7977 void
7978 Target_aarch64<size, big_endian>::relocate_relocs(
7979 const Relocate_info<size, big_endian>* relinfo,
7980 unsigned int sh_type,
7981 const unsigned char* prelocs,
7982 size_t reloc_count,
7983 Output_section* output_section,
7984 typename elfcpp::Elf_types<size>::Elf_Off offset_in_output_section,
7985 unsigned char* view,
7986 typename elfcpp::Elf_types<size>::Elf_Addr view_address,
7987 section_size_type view_size,
7988 unsigned char* reloc_view,
7989 section_size_type reloc_view_size)
7990 {
7991 typedef gold::Default_classify_reloc<elfcpp::SHT_RELA, size, big_endian>
7992 Classify_reloc;
7993
7994 gold_assert(sh_type == elfcpp::SHT_RELA);
7995
7996 gold::relocate_relocs<size, big_endian, Classify_reloc>(
7997 relinfo,
7998 prelocs,
7999 reloc_count,
8000 output_section,
8001 offset_in_output_section,
8002 view,
8003 view_address,
8004 view_size,
8005 reloc_view,
8006 reloc_view_size);
8007 }
8008
8009
8010 // Return whether this is a 3-insn erratum sequence.
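// A sequence qualifies when INSN2 is a single-register load/store or a
// register-pair store, and INSN3 is a load/store of the "unsigned
// immediate" class whose base register is the destination register of
// INSN1 (the adrp).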
8011
8012 template<int size, bool big_endian>
8013 bool
8014 Target_aarch64<size, big_endian>::is_erratum_843419_sequence(
8015 typename elfcpp::Swap<32,big_endian>::Valtype insn1,
8016 typename elfcpp::Swap<32,big_endian>::Valtype insn2,
8017 typename elfcpp::Swap<32,big_endian>::Valtype insn3)
8018 {
8019 unsigned rt1, rt2;
8020 bool load, pair;
8021
8022 // The 2nd insn is a single-register load or store, or a register-pair
8023 // store.
8024 if (Insn_utilities::aarch64_mem_op_p(insn2, &rt1, &rt2, &pair, &load)
8025 && (!pair || (pair && !load)))
8026 {
8027 // The 3rd insn is a load or store instruction from the "Load/store
8028 // register (unsigned immediate)" encoding class, using Rn as the
8029 // base address register.
8030 if (Insn_utilities::aarch64_ldst_uimm(insn3)
8031 && (Insn_utilities::aarch64_rn(insn3)
8032 == Insn_utilities::aarch64_rd(insn1)))
8033 return true;
8034 }
8035 return false;
8036 }
8037
8038
8039 // Return whether this is a 835769 sequence.
8040 // (Similarly implemented as in elfnn-aarch64.c.)
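// A sequence qualifies when INSN1 is a memory operation and INSN2 is a
// multiply-accumulate; a load whose result feeds the multiply-accumulate
// (a true dependency) is considered safe and is not reported.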
8041
8042 template<int size, bool big_endian>
8043 bool
8044 Target_aarch64<size, big_endian>::is_erratum_835769_sequence(
8045 typename elfcpp::Swap<32,big_endian>::Valtype insn1,
8046 typename elfcpp::Swap<32,big_endian>::Valtype insn2)
8047 {
8048 uint32_t rt;
8049 uint32_t rt2;
8050 uint32_t rn;
8051 uint32_t rm;
8052 uint32_t ra;
8053 bool pair;
8054 bool load;
8055
8056 if (Insn_utilities::aarch64_mlxl(insn2)
8057 && Insn_utilities::aarch64_mem_op_p (insn1, &rt, &rt2, &pair, &load))
8058 {
8059 /* Any SIMD memory op is independent of the subsequent MLA
8060 by definition of the erratum. */
8061 if (Insn_utilities::aarch64_bit(insn1, 26))
8062 return true;
8063
8064 /* If not SIMD, check for integer memory ops and MLA relationship. */
8065 rn = Insn_utilities::aarch64_rn(insn2);
8066 ra = Insn_utilities::aarch64_ra(insn2);
8067 rm = Insn_utilities::aarch64_rm(insn2);
8068
8069 /* If this is a load and there's a true(RAW) dependency, we are safe
8070 and this is not an erratum sequence. */
8071 if (load &&
8072 (rt == rn || rt == rm || rt == ra
8073 || (pair && (rt2 == rn || rt2 == rm || rt2 == ra))))
8074 return false;
8075
8076 /* We conservatively put out stubs for all other cases (including
8077 writebacks). */
8078 return true;
8079 }
8080
8081 return false;
8082 }
8083
8084
8085 // Helper method to create erratum stub for ST_E_843419 and ST_E_835769.
8086
8087 template<int size, bool big_endian>
8088 void
8089 Target_aarch64<size, big_endian>::create_erratum_stub(
8090 AArch64_relobj<size, big_endian>* relobj,
8091 unsigned int shndx,
8092 section_size_type erratum_insn_offset,
8093 Address erratum_address,
8094 typename Insn_utilities::Insntype erratum_insn,
8095 int erratum_type,
8096 unsigned int e843419_adrp_offset)
8097 {
8098 gold_assert(erratum_type == ST_E_843419 || erratum_type == ST_E_835769);
8099 The_stub_table* stub_table = relobj->stub_table(shndx);
8100 gold_assert(stub_table != NULL);
8101 if (stub_table->find_erratum_stub(relobj,
8102 shndx,
8103 erratum_insn_offset) == NULL)
8104 {
8105 const int BPI = AArch64_insn_utilities<big_endian>::BYTES_PER_INSN;
8106 The_erratum_stub* stub;
8107 if (erratum_type == ST_E_835769)
8108 stub = new The_erratum_stub(relobj, erratum_type, shndx,
8109 erratum_insn_offset);
8110 else if (erratum_type == ST_E_843419)
8111 stub = new E843419_stub<size, big_endian>(
8112 relobj, shndx, erratum_insn_offset, e843419_adrp_offset);
8113 else
8114 gold_unreachable();
8115 stub->set_erratum_insn(erratum_insn);
8116 stub->set_erratum_address(erratum_address);
8117 // For errata ST_E_843419 and ST_E_835769, the destination address is
8118 // always the insn immediately after the erratum insn.
8119 stub->set_destination_address(erratum_address + BPI);
8120 stub_table->add_erratum_stub(stub);
8121 }
8122 }
8123
8124
8125 // Scan erratum for section SHNDX range [output_address + span_start,
8126 // output_address + span_end). Note here we do not share the code with
8127 // scan_erratum_843419_span function, because for 843419 we optimize by only
8128 // scanning the last few insns of a page, whereas for 835769, we need to scan
8129 // every insn.
8130
8131 template<int size, bool big_endian>
8132 void
8133 Target_aarch64<size, big_endian>::scan_erratum_835769_span(
8134 AArch64_relobj<size, big_endian>* relobj,
8135 unsigned int shndx,
8136 const section_size_type span_start,
8137 const section_size_type span_end,
8138 unsigned char* input_view,
8139 Address output_address)
8140 {
8141 typedef typename Insn_utilities::Insntype Insntype;
8142
8143 const int BPI = AArch64_insn_utilities<big_endian>::BYTES_PER_INSN;
8144
8145 // Adjust output_address and view to the start of span.
8146 output_address += span_start;
8147 input_view += span_start;
8148
8149 section_size_type span_length = span_end - span_start;
8150 section_size_type offset = 0;
8151 for (offset = 0; offset + BPI < span_length; offset += BPI)
8152 {
8153 Insntype* ip = reinterpret_cast<Insntype*>(input_view + offset);
8154 Insntype insn1 = ip[0];
8155 Insntype insn2 = ip[1];
8156 if (is_erratum_835769_sequence(insn1, insn2))
8157 {
8158 Insntype erratum_insn = insn2;
8159 // "span_start + offset" is the offset for insn1. So for insn2, it is
8160 // "span_start + offset + BPI".
8161 section_size_type erratum_insn_offset = span_start + offset + BPI;
8162 Address erratum_address = output_address + offset + BPI;
8163 gold_info(_("Erratum 835769 found and fixed at \"%s\", "
8164 "section %d, offset 0x%08x."),
8165 relobj->name().c_str(), shndx,
8166 (unsigned int)(span_start + offset));
8167
8168 this->create_erratum_stub(relobj, shndx,
8169 erratum_insn_offset, erratum_address,
8170 erratum_insn, ST_E_835769);
8171 offset += BPI; // Skip mac insn.
8172 }
8173 }
8174 } // End of "Target_aarch64::scan_erratum_835769_span".
8175
8176
8177 // Scan erratum for section SHNDX range
8178 // [output_address + span_start, output_address + span_end).
8179
8180 template<int size, bool big_endian>
8181 void
8182 Target_aarch64<size, big_endian>::scan_erratum_843419_span(
8183 AArch64_relobj<size, big_endian>* relobj,
8184 unsigned int shndx,
8185 const section_size_type span_start,
8186 const section_size_type span_end,
8187 unsigned char* input_view,
8188 Address output_address)
8189 {
8190 typedef typename Insn_utilities::Insntype Insntype;
8191
8192 // Adjust output_address and view to the start of span.
8193 output_address += span_start;
8194 input_view += span_start;
8195
8196 if ((output_address & 0x03) != 0)
8197 return;
8198
8199 section_size_type offset = 0;
8200 section_size_type span_length = span_end - span_start;
8201 // The first (adrp) instruction must sit at page offset 0xff8 or 0xffc.
8202 unsigned int page_offset = output_address & 0xFFF;
8203 // Make sure the starting position, "output_address + offset",
8204 // lands on page offset 0xff8 or 0xffc.
8205 if (page_offset < 0xff8)
8206 offset = 0xff8 - page_offset;
8207 while (offset + 3 * Insn_utilities::BYTES_PER_INSN <= span_length)
8208 {
8209 Insntype* ip = reinterpret_cast<Insntype*>(input_view + offset);
8210 Insntype insn1 = ip[0];
8211 if (Insn_utilities::is_adrp(insn1))
8212 {
8213 Insntype insn2 = ip[1];
8214 Insntype insn3 = ip[2];
8215 Insntype erratum_insn;
8216 unsigned insn_offset;
8217 bool do_report = false;
8218 if (is_erratum_843419_sequence(insn1, insn2, insn3))
8219 {
8220 do_report = true;
8221 erratum_insn = insn3;
8222 insn_offset = 2 * Insn_utilities::BYTES_PER_INSN;
8223 }
8224 else if (offset + 4 * Insn_utilities::BYTES_PER_INSN <= span_length)
8225 {
8226 // Optionally there can be one insn between insn2 and insn3.
8227 Insntype insn_opt = ip[2];
8228 // And insn_opt must not be a branch.
8229 if (!Insn_utilities::aarch64_b(insn_opt)
8230 && !Insn_utilities::aarch64_bl(insn_opt)
8231 && !Insn_utilities::aarch64_blr(insn_opt)
8232 && !Insn_utilities::aarch64_br(insn_opt))
8233 {
8234 // And insn_opt must not write to dest reg in insn1. However
8235 // we do a conservative scan, which means we may fix/report
8236 // more than necessary, but it doesn't hurt.
8237
8238 Insntype insn4 = ip[3];
8239 if (is_erratum_843419_sequence(insn1, insn2, insn4))
8240 {
8241 do_report = true;
8242 erratum_insn = insn4;
8243 insn_offset = 3 * Insn_utilities::BYTES_PER_INSN;
8244 }
8245 }
8246 }
8247 if (do_report)
8248 {
8249 gold_info(_("Erratum 843419 found and fixed at \"%s\", "
8250 "section %d, offset 0x%08x."),
8251 relobj->name().c_str(), shndx,
8252 (unsigned int)(span_start + offset));
8253 unsigned int erratum_insn_offset =
8254 span_start + offset + insn_offset;
8255 Address erratum_address =
8256 output_address + offset + insn_offset;
8257 create_erratum_stub(relobj, shndx,
8258 erratum_insn_offset, erratum_address,
8259 erratum_insn, ST_E_843419,
8260 span_start + offset);
8261 }
8262 }
8263
8264 // Advance to next candidate instruction. We only consider instruction
8265 // sequences starting at a page offset of 0xff8 or 0xffc.
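// From 0xff8 we step 4 bytes to reach 0xffc; from 0xffc we step 0xffc
// bytes (0xffc + 0xffc = 0x1ff8), which is the next page's 0xff8.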
8266 page_offset = (output_address + offset) & 0xfff;
8267 if (page_offset == 0xff8)
8268 offset += 4;
8269 else // (page_offset == 0xffc), we move to next page's 0xff8.
8270 offset += 0xffc;
8271 }
8272 } // End of "Target_aarch64::scan_erratum_843419_span".
8273
8274
8275 // The selector for aarch64 object files.
8276
8277 template<int size, bool big_endian>
8278 class Target_selector_aarch64 : public Target_selector
8279 {
8280 public:
8281 Target_selector_aarch64();
8282
8283 virtual Target*
8284 do_instantiate_target()
8285 { return new Target_aarch64<size, big_endian>(); }
8286 };
8287
8288 template<>
8289 Target_selector_aarch64<32, true>::Target_selector_aarch64()
8290 : Target_selector(elfcpp::EM_AARCH64, 32, true,
8291 "elf32-bigaarch64", "aarch64_elf32_be_vec")
8292 { }
8293
8294 template<>
8295 Target_selector_aarch64<32, false>::Target_selector_aarch64()
8296 : Target_selector(elfcpp::EM_AARCH64, 32, false,
8297 "elf32-littleaarch64", "aarch64_elf32_le_vec")
8298 { }
8299
8300 template<>
8301 Target_selector_aarch64<64, true>::Target_selector_aarch64()
8302 : Target_selector(elfcpp::EM_AARCH64, 64, true,
8303 "elf64-bigaarch64", "aarch64_elf64_be_vec")
8304 { }
8305
8306 template<>
8307 Target_selector_aarch64<64, false>::Target_selector_aarch64()
8308 : Target_selector(elfcpp::EM_AARCH64, 64, false,
8309 "elf64-littleaarch64", "aarch64_elf64_le_vec")
8310 { }
8311
8312 Target_selector_aarch64<32, true> target_selector_aarch64elf32b;
8313 Target_selector_aarch64<32, false> target_selector_aarch64elf32;
8314 Target_selector_aarch64<64, true> target_selector_aarch64elfb;
8315 Target_selector_aarch64<64, false> target_selector_aarch64elf;
8316
8317 } // End anonymous namespace.