[AArch64] Implement branch over stub section.
[binutils-gdb.git] / bfd / elfnn-aarch64.c
1 /* AArch64-specific support for NN-bit ELF.
2 Copyright (C) 2009-2015 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of BFD, the Binary File Descriptor library.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21 /* Notes on implementation:
22
23 Thread Local Store (TLS)
24
25 Overview:
26
27 The implementation currently supports both traditional TLS and TLS
28 descriptors, but only general dynamic (GD).
29
30 For traditional TLS the assembler will present us with code
31 fragments of the form:
32
33 adrp x0, :tlsgd:foo
34 R_AARCH64_TLSGD_ADR_PAGE21(foo)
35 add x0, :tlsgd_lo12:foo
36 R_AARCH64_TLSGD_ADD_LO12_NC(foo)
37 bl __tls_get_addr
38 nop
39
40 For TLS descriptors the assembler will present us with code
41 fragments of the form:
42
43 adrp x0, :tlsdesc:foo R_AARCH64_TLSDESC_ADR_PAGE21(foo)
44 ldr x1, [x0, #:tlsdesc_lo12:foo] R_AARCH64_TLSDESC_LD64_LO12(foo)
45 add x0, x0, #:tlsdesc_lo12:foo R_AARCH64_TLSDESC_ADD_LO12(foo)
46 .tlsdesccall foo
47 blr x1 R_AARCH64_TLSDESC_CALL(foo)
48
49 The relocations R_AARCH64_TLSGD_{ADR_PREL21,ADD_LO12_NC} against foo
50 indicate that foo is thread local and should be accessed via the
51 traditional TLS mechanisms.
52
53 The relocations R_AARCH64_TLSDESC_{ADR_PAGE21,LD64_LO12_NC,ADD_LO12_NC}
54 against foo indicate that 'foo' is thread local and should be accessed
55 via a TLS descriptor mechanism.
56
57 The precise instruction sequence is only relevant from the
58 perspective of linker relaxation which is currently not implemented.
59
60 The static linker must detect that 'foo' is a TLS object and
61 allocate a double GOT entry. The GOT entry must be created for both
62 global and local TLS symbols. Note that this is different from
63 non-TLS local objects, which do not need a GOT entry.
64
65 In the traditional TLS mechanism, the double GOT entry is used to
66 provide the tls_index structure, containing module and offset
67 entries. The static linker places the relocation R_AARCH64_TLS_DTPMOD
68 on the module entry. The loader will subsequently fixup this
69 relocation with the module identity.
70
71 For global traditional TLS symbols the static linker places an
72 R_AARCH64_TLS_DTPREL relocation on the offset entry. The loader
73 will subsequently fixup the offset. For local TLS symbols the static
74 linker fixes up the offset.
75
76 In the TLS descriptor mechanism the double GOT entry is used to
77 provide the descriptor. The static linker places the relocation
78 R_AARCH64_TLSDESC on the first GOT slot. The loader will
79 subsequently fix this up.
80
81 Implementation:
82
83 The handling of TLS symbols is implemented across a number of
84 different backend functions. The following is a top level view of
85 what processing is performed where.
86
87 The TLS implementation maintains state information for each TLS
88 symbol. The state information for local and global symbols is kept
89 in different places. Global symbols use generic BFD structures while
90 local symbols use backend specific structures that are allocated and
91 maintained entirely by the backend.
92
93 The flow:
94
95 elfNN_aarch64_check_relocs()
96
97 This function is invoked for each relocation.
98
99 The TLS relocations R_AARCH64_TLSGD_{ADR_PREL21,ADD_LO12_NC} and
100 R_AARCH64_TLSDESC_{ADR_PAGE21,LD64_LO12_NC,ADD_LO12_NC} are
101 spotted. The local symbol data structures are created once, when
102 the first local symbol is seen.
103
104 The reference count for a symbol is incremented. The GOT type for
105 each symbol is marked as general dynamic.
106
107 elfNN_aarch64_allocate_dynrelocs ()
108
109 For each global with positive reference count we allocate a double
110 GOT slot. For a traditional TLS symbol we allocate space for two
111 relocation entries on the GOT; for a TLS descriptor symbol we
112 allocate space for one relocation on the slot. Record the GOT offset
113 for this symbol.
114
115 elfNN_aarch64_size_dynamic_sections ()
116
117 Iterate over all input BFDs, look in the local symbol data structures
118 constructed earlier for local TLS symbols, and allocate them double
119 GOT slots along with space for a single GOT relocation. Update the
120 local symbol structure to record the GOT offset allocated.
121
122 elfNN_aarch64_relocate_section ()
123
124 Calls elfNN_aarch64_final_link_relocate ()
125
126 Emit the relevant TLS relocations against the GOT for each TLS
127 symbol. For local TLS symbols emit the GOT offset directly. The GOT
128 relocations are emitted only once, the first time a TLS symbol is
129 encountered. The implementation uses the LSB of the GOT offset to
130 flag that the relevant GOT relocations for a symbol have been
131 emitted. All of the TLS code that uses the GOT offset needs to take
132 care to mask out this flag bit before using the offset.
133
134 elfNN_aarch64_final_link_relocate ()
135
136 Fixup the R_AARCH64_TLSGD_{ADR_PREL21, ADD_LO12_NC} relocations. */
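
/* Illustrative only: the LSB-flag convention on GOT offsets described
   above, spelled out as hypothetical helper macros.  The backend
   open-codes these tests; the SKETCH_* names are not part of the
   implementation, and a plain unsigned constant stands in for bfd_vma
   since the bfd headers are only included further down.  */
#define SKETCH_GOT_RELOCS_EMITTED_P(off)    (((off) & 1) != 0)
#define SKETCH_MARK_GOT_RELOCS_EMITTED(off) ((off) | 1)
#define SKETCH_REAL_GOT_OFFSET(off)         ((off) & ~(unsigned long long) 1)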
137
138 #include "sysdep.h"
139 #include "bfd.h"
140 #include "libiberty.h"
141 #include "libbfd.h"
142 #include "bfd_stdint.h"
143 #include "elf-bfd.h"
144 #include "bfdlink.h"
145 #include "objalloc.h"
146 #include "elf/aarch64.h"
147 #include "elfxx-aarch64.h"
148
149 #define ARCH_SIZE NN
150
151 #if ARCH_SIZE == 64
152 #define AARCH64_R(NAME) R_AARCH64_ ## NAME
153 #define AARCH64_R_STR(NAME) "R_AARCH64_" #NAME
154 #define HOWTO64(...) HOWTO (__VA_ARGS__)
155 #define HOWTO32(...) EMPTY_HOWTO (0)
156 #define LOG_FILE_ALIGN 3
157 #endif
158
159 #if ARCH_SIZE == 32
160 #define AARCH64_R(NAME) R_AARCH64_P32_ ## NAME
161 #define AARCH64_R_STR(NAME) "R_AARCH64_P32_" #NAME
162 #define HOWTO64(...) EMPTY_HOWTO (0)
163 #define HOWTO32(...) HOWTO (__VA_ARGS__)
164 #define LOG_FILE_ALIGN 2
165 #endif
166
167 #define IS_AARCH64_TLS_RELOC(R_TYPE) \
168 ((R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21 \
169 || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADR_PREL21 \
170 || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC \
171 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1 \
172 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC \
173 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21 \
174 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC \
175 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC \
176 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19 \
177 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12 \
178 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12 \
179 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC \
180 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2 \
181 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1 \
182 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC \
183 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0 \
184 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC \
185 || (R_TYPE) == BFD_RELOC_AARCH64_TLS_DTPMOD \
186 || (R_TYPE) == BFD_RELOC_AARCH64_TLS_DTPREL \
187 || (R_TYPE) == BFD_RELOC_AARCH64_TLS_TPREL \
188 || IS_AARCH64_TLSDESC_RELOC ((R_TYPE)))
189
190 #define IS_AARCH64_TLSDESC_RELOC(R_TYPE) \
191 ((R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LD_PREL19 \
192 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21 \
193 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21 \
194 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC \
195 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC \
196 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC \
197 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_OFF_G1 \
198 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC \
199 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LDR \
200 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADD \
201 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_CALL \
202 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC)
203
204 #define ELIMINATE_COPY_RELOCS 0
205
206 /* Return size of a relocation entry. HTAB is the bfd's
207 elf_aarch64_link_hash_table. */
208 #define RELOC_SIZE(HTAB) (sizeof (ElfNN_External_Rela))
209
210 /* GOT Entry size - 8 bytes in ELF64 and 4 bytes in ELF32. */
211 #define GOT_ENTRY_SIZE (ARCH_SIZE / 8)
212 #define PLT_ENTRY_SIZE (32)
213 #define PLT_SMALL_ENTRY_SIZE (16)
214 #define PLT_TLSDESC_ENTRY_SIZE (32)
215
216 /* Encoding of the nop instruction. */
217 #define INSN_NOP 0xd503201f
218
219 #define aarch64_compute_jump_table_size(htab) \
220 (((htab)->root.srelplt == NULL) ? 0 \
221 : (htab)->root.srelplt->reloc_count * GOT_ENTRY_SIZE)
222
223 /* If the distance between the PLTGOT and the PLT is < 4GB, these
224 PLT entries are used and the first entry in the procedure linkage
225 table looks like this. Note that the dynamic linker gets &PLTGOT[2]
226 in x16 and needs to work out PLTGOT[1] by using an address of
227 [x16,#-GOT_ENTRY_SIZE]. */
228 static const bfd_byte elfNN_aarch64_small_plt0_entry[PLT_ENTRY_SIZE] =
229 {
230 0xf0, 0x7b, 0xbf, 0xa9, /* stp x16, x30, [sp, #-16]! */
231 0x10, 0x00, 0x00, 0x90, /* adrp x16, (GOT+16) */
232 #if ARCH_SIZE == 64
233 0x11, 0x0A, 0x40, 0xf9, /* ldr x17, [x16, #PLT_GOT+0x10] */
234 0x10, 0x42, 0x00, 0x91, /* add x16, x16,#PLT_GOT+0x10 */
235 #else
236 0x11, 0x0A, 0x40, 0xb9, /* ldr w17, [x16, #PLT_GOT+0x8] */
237 0x10, 0x22, 0x00, 0x11, /* add w16, w16,#PLT_GOT+0x8 */
238 #endif
239 0x20, 0x02, 0x1f, 0xd6, /* br x17 */
240 0x1f, 0x20, 0x03, 0xd5, /* nop */
241 0x1f, 0x20, 0x03, 0xd5, /* nop */
242 0x1f, 0x20, 0x03, 0xd5, /* nop */
243 };
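
/* For reference, the 0x10 (ELF64) and 0x8 (ELF32) displacements in the
   entry above are simply 2 * GOT_ENTRY_SIZE, i.e. the offset of PLTGOT[2]
   (conventionally the lazy-resolution entry point) from the start of the
   PLTGOT.  */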
244
245 /* If the distance between the PLTGOT and the PLT is < 4GB, these
246 PLT entries are used and each per-function entry in the procedure
247 linkage table looks like this. */
248 static const bfd_byte elfNN_aarch64_small_plt_entry[PLT_SMALL_ENTRY_SIZE] =
249 {
250 0x10, 0x00, 0x00, 0x90, /* adrp x16, PLTGOT + n * 8 */
251 #if ARCH_SIZE == 64
252 0x11, 0x02, 0x40, 0xf9, /* ldr x17, [x16, PLTGOT + n * 8] */
253 0x10, 0x02, 0x00, 0x91, /* add x16, x16, :lo12:PLTGOT + n * 8 */
254 #else
255 0x11, 0x02, 0x40, 0xb9, /* ldr w17, [x16, PLTGOT + n * 4] */
256 0x10, 0x02, 0x00, 0x11, /* add w16, w16, :lo12:PLTGOT + n * 4 */
257 #endif
258 0x20, 0x02, 0x1f, 0xd6, /* br x17. */
259 };
260
261 static const bfd_byte
262 elfNN_aarch64_tlsdesc_small_plt_entry[PLT_TLSDESC_ENTRY_SIZE] =
263 {
264 0xe2, 0x0f, 0xbf, 0xa9, /* stp x2, x3, [sp, #-16]! */
265 0x02, 0x00, 0x00, 0x90, /* adrp x2, 0 */
266 0x03, 0x00, 0x00, 0x90, /* adrp x3, 0 */
267 #if ARCH_SIZE == 64
268 0x42, 0x00, 0x40, 0xf9, /* ldr x2, [x2, #0] */
269 0x63, 0x00, 0x00, 0x91, /* add x3, x3, 0 */
270 #else
271 0x42, 0x00, 0x40, 0xb9, /* ldr w2, [x2, #0] */
272 0x63, 0x00, 0x00, 0x11, /* add w3, w3, 0 */
273 #endif
274 0x40, 0x00, 0x1f, 0xd6, /* br x2 */
275 0x1f, 0x20, 0x03, 0xd5, /* nop */
276 0x1f, 0x20, 0x03, 0xd5, /* nop */
277 };
278
279 #define elf_info_to_howto elfNN_aarch64_info_to_howto
280 #define elf_info_to_howto_rel elfNN_aarch64_info_to_howto
281
282 #define AARCH64_ELF_ABI_VERSION 0
283
284 /* In case we're on a 32-bit machine, construct a 64-bit "-1" value. */
285 #define ALL_ONES (~ (bfd_vma) 0)
286
287 /* Indexed by the bfd internal reloc enumerators.
288 Therefore, the table needs to be synced with BFD_RELOC_AARCH64_*
289 in reloc.c. */
290
291 static reloc_howto_type elfNN_aarch64_howto_table[] =
292 {
293 EMPTY_HOWTO (0),
294
295 /* Basic data relocations. */
296
297 #if ARCH_SIZE == 64
298 HOWTO (R_AARCH64_NULL, /* type */
299 0, /* rightshift */
300 3, /* size (0 = byte, 1 = short, 2 = long) */
301 0, /* bitsize */
302 FALSE, /* pc_relative */
303 0, /* bitpos */
304 complain_overflow_dont, /* complain_on_overflow */
305 bfd_elf_generic_reloc, /* special_function */
306 "R_AARCH64_NULL", /* name */
307 FALSE, /* partial_inplace */
308 0, /* src_mask */
309 0, /* dst_mask */
310 FALSE), /* pcrel_offset */
311 #else
312 HOWTO (R_AARCH64_NONE, /* type */
313 0, /* rightshift */
314 3, /* size (0 = byte, 1 = short, 2 = long) */
315 0, /* bitsize */
316 FALSE, /* pc_relative */
317 0, /* bitpos */
318 complain_overflow_dont, /* complain_on_overflow */
319 bfd_elf_generic_reloc, /* special_function */
320 "R_AARCH64_NONE", /* name */
321 FALSE, /* partial_inplace */
322 0, /* src_mask */
323 0, /* dst_mask */
324 FALSE), /* pcrel_offset */
325 #endif
326
327 /* .xword: (S+A) */
328 HOWTO64 (AARCH64_R (ABS64), /* type */
329 0, /* rightshift */
330 4, /* size (4 = long long) */
331 64, /* bitsize */
332 FALSE, /* pc_relative */
333 0, /* bitpos */
334 complain_overflow_unsigned, /* complain_on_overflow */
335 bfd_elf_generic_reloc, /* special_function */
336 AARCH64_R_STR (ABS64), /* name */
337 FALSE, /* partial_inplace */
338 ALL_ONES, /* src_mask */
339 ALL_ONES, /* dst_mask */
340 FALSE), /* pcrel_offset */
341
342 /* .word: (S+A) */
343 HOWTO (AARCH64_R (ABS32), /* type */
344 0, /* rightshift */
345 2, /* size (0 = byte, 1 = short, 2 = long) */
346 32, /* bitsize */
347 FALSE, /* pc_relative */
348 0, /* bitpos */
349 complain_overflow_unsigned, /* complain_on_overflow */
350 bfd_elf_generic_reloc, /* special_function */
351 AARCH64_R_STR (ABS32), /* name */
352 FALSE, /* partial_inplace */
353 0xffffffff, /* src_mask */
354 0xffffffff, /* dst_mask */
355 FALSE), /* pcrel_offset */
356
357 /* .half: (S+A) */
358 HOWTO (AARCH64_R (ABS16), /* type */
359 0, /* rightshift */
360 1, /* size (0 = byte, 1 = short, 2 = long) */
361 16, /* bitsize */
362 FALSE, /* pc_relative */
363 0, /* bitpos */
364 complain_overflow_unsigned, /* complain_on_overflow */
365 bfd_elf_generic_reloc, /* special_function */
366 AARCH64_R_STR (ABS16), /* name */
367 FALSE, /* partial_inplace */
368 0xffff, /* src_mask */
369 0xffff, /* dst_mask */
370 FALSE), /* pcrel_offset */
371
372 /* .xword: (S+A-P) */
373 HOWTO64 (AARCH64_R (PREL64), /* type */
374 0, /* rightshift */
375 4, /* size (4 = long long) */
376 64, /* bitsize */
377 TRUE, /* pc_relative */
378 0, /* bitpos */
379 complain_overflow_signed, /* complain_on_overflow */
380 bfd_elf_generic_reloc, /* special_function */
381 AARCH64_R_STR (PREL64), /* name */
382 FALSE, /* partial_inplace */
383 ALL_ONES, /* src_mask */
384 ALL_ONES, /* dst_mask */
385 TRUE), /* pcrel_offset */
386
387 /* .word: (S+A-P) */
388 HOWTO (AARCH64_R (PREL32), /* type */
389 0, /* rightshift */
390 2, /* size (0 = byte, 1 = short, 2 = long) */
391 32, /* bitsize */
392 TRUE, /* pc_relative */
393 0, /* bitpos */
394 complain_overflow_signed, /* complain_on_overflow */
395 bfd_elf_generic_reloc, /* special_function */
396 AARCH64_R_STR (PREL32), /* name */
397 FALSE, /* partial_inplace */
398 0xffffffff, /* src_mask */
399 0xffffffff, /* dst_mask */
400 TRUE), /* pcrel_offset */
401
402 /* .half: (S+A-P) */
403 HOWTO (AARCH64_R (PREL16), /* type */
404 0, /* rightshift */
405 1, /* size (0 = byte, 1 = short, 2 = long) */
406 16, /* bitsize */
407 TRUE, /* pc_relative */
408 0, /* bitpos */
409 complain_overflow_signed, /* complain_on_overflow */
410 bfd_elf_generic_reloc, /* special_function */
411 AARCH64_R_STR (PREL16), /* name */
412 FALSE, /* partial_inplace */
413 0xffff, /* src_mask */
414 0xffff, /* dst_mask */
415 TRUE), /* pcrel_offset */
416
417 /* Group relocations to create a 16, 32, 48 or 64 bit
418 unsigned data or abs address inline. */
419
420 /* MOVZ: ((S+A) >> 0) & 0xffff */
421 HOWTO (AARCH64_R (MOVW_UABS_G0), /* type */
422 0, /* rightshift */
423 2, /* size (0 = byte, 1 = short, 2 = long) */
424 16, /* bitsize */
425 FALSE, /* pc_relative */
426 0, /* bitpos */
427 complain_overflow_unsigned, /* complain_on_overflow */
428 bfd_elf_generic_reloc, /* special_function */
429 AARCH64_R_STR (MOVW_UABS_G0), /* name */
430 FALSE, /* partial_inplace */
431 0xffff, /* src_mask */
432 0xffff, /* dst_mask */
433 FALSE), /* pcrel_offset */
434
435 /* MOVK: ((S+A) >> 0) & 0xffff [no overflow check] */
436 HOWTO (AARCH64_R (MOVW_UABS_G0_NC), /* type */
437 0, /* rightshift */
438 2, /* size (0 = byte, 1 = short, 2 = long) */
439 16, /* bitsize */
440 FALSE, /* pc_relative */
441 0, /* bitpos */
442 complain_overflow_dont, /* complain_on_overflow */
443 bfd_elf_generic_reloc, /* special_function */
444 AARCH64_R_STR (MOVW_UABS_G0_NC), /* name */
445 FALSE, /* partial_inplace */
446 0xffff, /* src_mask */
447 0xffff, /* dst_mask */
448 FALSE), /* pcrel_offset */
449
450 /* MOVZ: ((S+A) >> 16) & 0xffff */
451 HOWTO (AARCH64_R (MOVW_UABS_G1), /* type */
452 16, /* rightshift */
453 2, /* size (0 = byte, 1 = short, 2 = long) */
454 16, /* bitsize */
455 FALSE, /* pc_relative */
456 0, /* bitpos */
457 complain_overflow_unsigned, /* complain_on_overflow */
458 bfd_elf_generic_reloc, /* special_function */
459 AARCH64_R_STR (MOVW_UABS_G1), /* name */
460 FALSE, /* partial_inplace */
461 0xffff, /* src_mask */
462 0xffff, /* dst_mask */
463 FALSE), /* pcrel_offset */
464
465 /* MOVK: ((S+A) >> 16) & 0xffff [no overflow check] */
466 HOWTO64 (AARCH64_R (MOVW_UABS_G1_NC), /* type */
467 16, /* rightshift */
468 2, /* size (0 = byte, 1 = short, 2 = long) */
469 16, /* bitsize */
470 FALSE, /* pc_relative */
471 0, /* bitpos */
472 complain_overflow_dont, /* complain_on_overflow */
473 bfd_elf_generic_reloc, /* special_function */
474 AARCH64_R_STR (MOVW_UABS_G1_NC), /* name */
475 FALSE, /* partial_inplace */
476 0xffff, /* src_mask */
477 0xffff, /* dst_mask */
478 FALSE), /* pcrel_offset */
479
480 /* MOVZ: ((S+A) >> 32) & 0xffff */
481 HOWTO64 (AARCH64_R (MOVW_UABS_G2), /* type */
482 32, /* rightshift */
483 2, /* size (0 = byte, 1 = short, 2 = long) */
484 16, /* bitsize */
485 FALSE, /* pc_relative */
486 0, /* bitpos */
487 complain_overflow_unsigned, /* complain_on_overflow */
488 bfd_elf_generic_reloc, /* special_function */
489 AARCH64_R_STR (MOVW_UABS_G2), /* name */
490 FALSE, /* partial_inplace */
491 0xffff, /* src_mask */
492 0xffff, /* dst_mask */
493 FALSE), /* pcrel_offset */
494
495 /* MOVK: ((S+A) >> 32) & 0xffff [no overflow check] */
496 HOWTO64 (AARCH64_R (MOVW_UABS_G2_NC), /* type */
497 32, /* rightshift */
498 2, /* size (0 = byte, 1 = short, 2 = long) */
499 16, /* bitsize */
500 FALSE, /* pc_relative */
501 0, /* bitpos */
502 complain_overflow_dont, /* complain_on_overflow */
503 bfd_elf_generic_reloc, /* special_function */
504 AARCH64_R_STR (MOVW_UABS_G2_NC), /* name */
505 FALSE, /* partial_inplace */
506 0xffff, /* src_mask */
507 0xffff, /* dst_mask */
508 FALSE), /* pcrel_offset */
509
510 /* MOVZ: ((S+A) >> 48) & 0xffff */
511 HOWTO64 (AARCH64_R (MOVW_UABS_G3), /* type */
512 48, /* rightshift */
513 2, /* size (0 = byte, 1 = short, 2 = long) */
514 16, /* bitsize */
515 FALSE, /* pc_relative */
516 0, /* bitpos */
517 complain_overflow_unsigned, /* complain_on_overflow */
518 bfd_elf_generic_reloc, /* special_function */
519 AARCH64_R_STR (MOVW_UABS_G3), /* name */
520 FALSE, /* partial_inplace */
521 0xffff, /* src_mask */
522 0xffff, /* dst_mask */
523 FALSE), /* pcrel_offset */
524
525 /* Group relocations to create high part of a 16, 32, 48 or 64 bit
526 signed data or abs address inline. Will change instruction
527 to MOVN or MOVZ depending on sign of calculated value. */
528
529 /* MOV[ZN]: ((S+A) >> 0) & 0xffff */
530 HOWTO (AARCH64_R (MOVW_SABS_G0), /* type */
531 0, /* rightshift */
532 2, /* size (0 = byte, 1 = short, 2 = long) */
533 16, /* bitsize */
534 FALSE, /* pc_relative */
535 0, /* bitpos */
536 complain_overflow_signed, /* complain_on_overflow */
537 bfd_elf_generic_reloc, /* special_function */
538 AARCH64_R_STR (MOVW_SABS_G0), /* name */
539 FALSE, /* partial_inplace */
540 0xffff, /* src_mask */
541 0xffff, /* dst_mask */
542 FALSE), /* pcrel_offset */
543
544 /* MOV[ZN]: ((S+A) >> 16) & 0xffff */
545 HOWTO64 (AARCH64_R (MOVW_SABS_G1), /* type */
546 16, /* rightshift */
547 2, /* size (0 = byte, 1 = short, 2 = long) */
548 16, /* bitsize */
549 FALSE, /* pc_relative */
550 0, /* bitpos */
551 complain_overflow_signed, /* complain_on_overflow */
552 bfd_elf_generic_reloc, /* special_function */
553 AARCH64_R_STR (MOVW_SABS_G1), /* name */
554 FALSE, /* partial_inplace */
555 0xffff, /* src_mask */
556 0xffff, /* dst_mask */
557 FALSE), /* pcrel_offset */
558
559 /* MOV[ZN]: ((S+A) >> 32) & 0xffff */
560 HOWTO64 (AARCH64_R (MOVW_SABS_G2), /* type */
561 32, /* rightshift */
562 2, /* size (0 = byte, 1 = short, 2 = long) */
563 16, /* bitsize */
564 FALSE, /* pc_relative */
565 0, /* bitpos */
566 complain_overflow_signed, /* complain_on_overflow */
567 bfd_elf_generic_reloc, /* special_function */
568 AARCH64_R_STR (MOVW_SABS_G2), /* name */
569 FALSE, /* partial_inplace */
570 0xffff, /* src_mask */
571 0xffff, /* dst_mask */
572 FALSE), /* pcrel_offset */
573
574 /* Relocations to generate 19, 21 and 33 bit PC-relative load/store
575 addresses: PG(x) is (x & ~0xfff). */
576
577 /* LD-lit: ((S+A-P) >> 2) & 0x7ffff */
578 HOWTO (AARCH64_R (LD_PREL_LO19), /* type */
579 2, /* rightshift */
580 2, /* size (0 = byte, 1 = short, 2 = long) */
581 19, /* bitsize */
582 TRUE, /* pc_relative */
583 0, /* bitpos */
584 complain_overflow_signed, /* complain_on_overflow */
585 bfd_elf_generic_reloc, /* special_function */
586 AARCH64_R_STR (LD_PREL_LO19), /* name */
587 FALSE, /* partial_inplace */
588 0x7ffff, /* src_mask */
589 0x7ffff, /* dst_mask */
590 TRUE), /* pcrel_offset */
591
592 /* ADR: (S+A-P) & 0x1fffff */
593 HOWTO (AARCH64_R (ADR_PREL_LO21), /* type */
594 0, /* rightshift */
595 2, /* size (0 = byte, 1 = short, 2 = long) */
596 21, /* bitsize */
597 TRUE, /* pc_relative */
598 0, /* bitpos */
599 complain_overflow_signed, /* complain_on_overflow */
600 bfd_elf_generic_reloc, /* special_function */
601 AARCH64_R_STR (ADR_PREL_LO21), /* name */
602 FALSE, /* partial_inplace */
603 0x1fffff, /* src_mask */
604 0x1fffff, /* dst_mask */
605 TRUE), /* pcrel_offset */
606
607 /* ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
608 HOWTO (AARCH64_R (ADR_PREL_PG_HI21), /* type */
609 12, /* rightshift */
610 2, /* size (0 = byte, 1 = short, 2 = long) */
611 21, /* bitsize */
612 TRUE, /* pc_relative */
613 0, /* bitpos */
614 complain_overflow_signed, /* complain_on_overflow */
615 bfd_elf_generic_reloc, /* special_function */
616 AARCH64_R_STR (ADR_PREL_PG_HI21), /* name */
617 FALSE, /* partial_inplace */
618 0x1fffff, /* src_mask */
619 0x1fffff, /* dst_mask */
620 TRUE), /* pcrel_offset */
621
622 /* ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff [no overflow check] */
623 HOWTO64 (AARCH64_R (ADR_PREL_PG_HI21_NC), /* type */
624 12, /* rightshift */
625 2, /* size (0 = byte, 1 = short, 2 = long) */
626 21, /* bitsize */
627 TRUE, /* pc_relative */
628 0, /* bitpos */
629 complain_overflow_dont, /* complain_on_overflow */
630 bfd_elf_generic_reloc, /* special_function */
631 AARCH64_R_STR (ADR_PREL_PG_HI21_NC), /* name */
632 FALSE, /* partial_inplace */
633 0x1fffff, /* src_mask */
634 0x1fffff, /* dst_mask */
635 TRUE), /* pcrel_offset */
636
637 /* ADD: (S+A) & 0xfff [no overflow check] */
638 HOWTO (AARCH64_R (ADD_ABS_LO12_NC), /* type */
639 0, /* rightshift */
640 2, /* size (0 = byte, 1 = short, 2 = long) */
641 12, /* bitsize */
642 FALSE, /* pc_relative */
643 10, /* bitpos */
644 complain_overflow_dont, /* complain_on_overflow */
645 bfd_elf_generic_reloc, /* special_function */
646 AARCH64_R_STR (ADD_ABS_LO12_NC), /* name */
647 FALSE, /* partial_inplace */
648 0x3ffc00, /* src_mask */
649 0x3ffc00, /* dst_mask */
650 FALSE), /* pcrel_offset */
651
652 /* LD/ST8: (S+A) & 0xfff */
653 HOWTO (AARCH64_R (LDST8_ABS_LO12_NC), /* type */
654 0, /* rightshift */
655 2, /* size (0 = byte, 1 = short, 2 = long) */
656 12, /* bitsize */
657 FALSE, /* pc_relative */
658 0, /* bitpos */
659 complain_overflow_dont, /* complain_on_overflow */
660 bfd_elf_generic_reloc, /* special_function */
661 AARCH64_R_STR (LDST8_ABS_LO12_NC), /* name */
662 FALSE, /* partial_inplace */
663 0xfff, /* src_mask */
664 0xfff, /* dst_mask */
665 FALSE), /* pcrel_offset */
666
667 /* Relocations for control-flow instructions. */
668
669 /* TBZ/NZ: ((S+A-P) >> 2) & 0x3fff */
670 HOWTO (AARCH64_R (TSTBR14), /* type */
671 2, /* rightshift */
672 2, /* size (0 = byte, 1 = short, 2 = long) */
673 14, /* bitsize */
674 TRUE, /* pc_relative */
675 0, /* bitpos */
676 complain_overflow_signed, /* complain_on_overflow */
677 bfd_elf_generic_reloc, /* special_function */
678 AARCH64_R_STR (TSTBR14), /* name */
679 FALSE, /* partial_inplace */
680 0x3fff, /* src_mask */
681 0x3fff, /* dst_mask */
682 TRUE), /* pcrel_offset */
683
684 /* B.cond: ((S+A-P) >> 2) & 0x7ffff */
685 HOWTO (AARCH64_R (CONDBR19), /* type */
686 2, /* rightshift */
687 2, /* size (0 = byte, 1 = short, 2 = long) */
688 19, /* bitsize */
689 TRUE, /* pc_relative */
690 0, /* bitpos */
691 complain_overflow_signed, /* complain_on_overflow */
692 bfd_elf_generic_reloc, /* special_function */
693 AARCH64_R_STR (CONDBR19), /* name */
694 FALSE, /* partial_inplace */
695 0x7ffff, /* src_mask */
696 0x7ffff, /* dst_mask */
697 TRUE), /* pcrel_offset */
698
699 /* B: ((S+A-P) >> 2) & 0x3ffffff */
700 HOWTO (AARCH64_R (JUMP26), /* type */
701 2, /* rightshift */
702 2, /* size (0 = byte, 1 = short, 2 = long) */
703 26, /* bitsize */
704 TRUE, /* pc_relative */
705 0, /* bitpos */
706 complain_overflow_signed, /* complain_on_overflow */
707 bfd_elf_generic_reloc, /* special_function */
708 AARCH64_R_STR (JUMP26), /* name */
709 FALSE, /* partial_inplace */
710 0x3ffffff, /* src_mask */
711 0x3ffffff, /* dst_mask */
712 TRUE), /* pcrel_offset */
713
714 /* BL: ((S+A-P) >> 2) & 0x3ffffff */
715 HOWTO (AARCH64_R (CALL26), /* type */
716 2, /* rightshift */
717 2, /* size (0 = byte, 1 = short, 2 = long) */
718 26, /* bitsize */
719 TRUE, /* pc_relative */
720 0, /* bitpos */
721 complain_overflow_signed, /* complain_on_overflow */
722 bfd_elf_generic_reloc, /* special_function */
723 AARCH64_R_STR (CALL26), /* name */
724 FALSE, /* partial_inplace */
725 0x3ffffff, /* src_mask */
726 0x3ffffff, /* dst_mask */
727 TRUE), /* pcrel_offset */
728
729 /* LD/ST16: (S+A) & 0xffe */
730 HOWTO (AARCH64_R (LDST16_ABS_LO12_NC), /* type */
731 1, /* rightshift */
732 2, /* size (0 = byte, 1 = short, 2 = long) */
733 12, /* bitsize */
734 FALSE, /* pc_relative */
735 0, /* bitpos */
736 complain_overflow_dont, /* complain_on_overflow */
737 bfd_elf_generic_reloc, /* special_function */
738 AARCH64_R_STR (LDST16_ABS_LO12_NC), /* name */
739 FALSE, /* partial_inplace */
740 0xffe, /* src_mask */
741 0xffe, /* dst_mask */
742 FALSE), /* pcrel_offset */
743
744 /* LD/ST32: (S+A) & 0xffc */
745 HOWTO (AARCH64_R (LDST32_ABS_LO12_NC), /* type */
746 2, /* rightshift */
747 2, /* size (0 = byte, 1 = short, 2 = long) */
748 12, /* bitsize */
749 FALSE, /* pc_relative */
750 0, /* bitpos */
751 complain_overflow_dont, /* complain_on_overflow */
752 bfd_elf_generic_reloc, /* special_function */
753 AARCH64_R_STR (LDST32_ABS_LO12_NC), /* name */
754 FALSE, /* partial_inplace */
755 0xffc, /* src_mask */
756 0xffc, /* dst_mask */
757 FALSE), /* pcrel_offset */
758
759 /* LD/ST64: (S+A) & 0xff8 */
760 HOWTO (AARCH64_R (LDST64_ABS_LO12_NC), /* type */
761 3, /* rightshift */
762 2, /* size (0 = byte, 1 = short, 2 = long) */
763 12, /* bitsize */
764 FALSE, /* pc_relative */
765 0, /* bitpos */
766 complain_overflow_dont, /* complain_on_overflow */
767 bfd_elf_generic_reloc, /* special_function */
768 AARCH64_R_STR (LDST64_ABS_LO12_NC), /* name */
769 FALSE, /* partial_inplace */
770 0xff8, /* src_mask */
771 0xff8, /* dst_mask */
772 FALSE), /* pcrel_offset */
773
774 /* LD/ST128: (S+A) & 0xff0 */
775 HOWTO (AARCH64_R (LDST128_ABS_LO12_NC), /* type */
776 4, /* rightshift */
777 2, /* size (0 = byte, 1 = short, 2 = long) */
778 12, /* bitsize */
779 FALSE, /* pc_relative */
780 0, /* bitpos */
781 complain_overflow_dont, /* complain_on_overflow */
782 bfd_elf_generic_reloc, /* special_function */
783 AARCH64_R_STR (LDST128_ABS_LO12_NC), /* name */
784 FALSE, /* partial_inplace */
785 0xff0, /* src_mask */
786 0xff0, /* dst_mask */
787 FALSE), /* pcrel_offset */
788
789 /* Set a load-literal immediate field to bits
790 0x1FFFFC of G(S)-P */
791 HOWTO (AARCH64_R (GOT_LD_PREL19), /* type */
792 2, /* rightshift */
793 2, /* size (0 = byte,1 = short,2 = long) */
794 19, /* bitsize */
795 TRUE, /* pc_relative */
796 0, /* bitpos */
797 complain_overflow_signed, /* complain_on_overflow */
798 bfd_elf_generic_reloc, /* special_function */
799 AARCH64_R_STR (GOT_LD_PREL19), /* name */
800 FALSE, /* partial_inplace */
801 0xffffe0, /* src_mask */
802 0xffffe0, /* dst_mask */
803 TRUE), /* pcrel_offset */
804
805 /* Get to the page for the GOT entry for the symbol
806 (G(S) - P) using an ADRP instruction. */
807 HOWTO (AARCH64_R (ADR_GOT_PAGE), /* type */
808 12, /* rightshift */
809 2, /* size (0 = byte, 1 = short, 2 = long) */
810 21, /* bitsize */
811 TRUE, /* pc_relative */
812 0, /* bitpos */
813 complain_overflow_dont, /* complain_on_overflow */
814 bfd_elf_generic_reloc, /* special_function */
815 AARCH64_R_STR (ADR_GOT_PAGE), /* name */
816 FALSE, /* partial_inplace */
817 0x1fffff, /* src_mask */
818 0x1fffff, /* dst_mask */
819 TRUE), /* pcrel_offset */
820
821 /* LD64: GOT offset G(S) & 0xff8 */
822 HOWTO64 (AARCH64_R (LD64_GOT_LO12_NC), /* type */
823 3, /* rightshift */
824 2, /* size (0 = byte, 1 = short, 2 = long) */
825 12, /* bitsize */
826 FALSE, /* pc_relative */
827 0, /* bitpos */
828 complain_overflow_dont, /* complain_on_overflow */
829 bfd_elf_generic_reloc, /* special_function */
830 AARCH64_R_STR (LD64_GOT_LO12_NC), /* name */
831 FALSE, /* partial_inplace */
832 0xff8, /* src_mask */
833 0xff8, /* dst_mask */
834 FALSE), /* pcrel_offset */
835
836 /* LD32: GOT offset G(S) & 0xffc */
837 HOWTO32 (AARCH64_R (LD32_GOT_LO12_NC), /* type */
838 2, /* rightshift */
839 2, /* size (0 = byte, 1 = short, 2 = long) */
840 12, /* bitsize */
841 FALSE, /* pc_relative */
842 0, /* bitpos */
843 complain_overflow_dont, /* complain_on_overflow */
844 bfd_elf_generic_reloc, /* special_function */
845 AARCH64_R_STR (LD32_GOT_LO12_NC), /* name */
846 FALSE, /* partial_inplace */
847 0xffc, /* src_mask */
848 0xffc, /* dst_mask */
849 FALSE), /* pcrel_offset */
850
851 /* Get to the page for the GOT entry for the symbol
852 (G(S) - P) using an ADRP instruction. */
853 HOWTO (AARCH64_R (TLSGD_ADR_PAGE21), /* type */
854 12, /* rightshift */
855 2, /* size (0 = byte, 1 = short, 2 = long) */
856 21, /* bitsize */
857 TRUE, /* pc_relative */
858 0, /* bitpos */
859 complain_overflow_dont, /* complain_on_overflow */
860 bfd_elf_generic_reloc, /* special_function */
861 AARCH64_R_STR (TLSGD_ADR_PAGE21), /* name */
862 FALSE, /* partial_inplace */
863 0x1fffff, /* src_mask */
864 0x1fffff, /* dst_mask */
865 TRUE), /* pcrel_offset */
866
867 HOWTO (AARCH64_R (TLSGD_ADR_PREL21), /* type */
868 0, /* rightshift */
869 2, /* size (0 = byte, 1 = short, 2 = long) */
870 21, /* bitsize */
871 TRUE, /* pc_relative */
872 0, /* bitpos */
873 complain_overflow_dont, /* complain_on_overflow */
874 bfd_elf_generic_reloc, /* special_function */
875 AARCH64_R_STR (TLSGD_ADR_PREL21), /* name */
876 FALSE, /* partial_inplace */
877 0x1fffff, /* src_mask */
878 0x1fffff, /* dst_mask */
879 TRUE), /* pcrel_offset */
880
881 /* ADD: GOT offset G(S) & 0xfff [no overflow check] */
882 HOWTO (AARCH64_R (TLSGD_ADD_LO12_NC), /* type */
883 0, /* rightshift */
884 2, /* size (0 = byte, 1 = short, 2 = long) */
885 12, /* bitsize */
886 FALSE, /* pc_relative */
887 0, /* bitpos */
888 complain_overflow_dont, /* complain_on_overflow */
889 bfd_elf_generic_reloc, /* special_function */
890 AARCH64_R_STR (TLSGD_ADD_LO12_NC), /* name */
891 FALSE, /* partial_inplace */
892 0xfff, /* src_mask */
893 0xfff, /* dst_mask */
894 FALSE), /* pcrel_offset */
895
896 HOWTO64 (AARCH64_R (TLSIE_MOVW_GOTTPREL_G1), /* type */
897 16, /* rightshift */
898 2, /* size (0 = byte, 1 = short, 2 = long) */
899 16, /* bitsize */
900 FALSE, /* pc_relative */
901 0, /* bitpos */
902 complain_overflow_dont, /* complain_on_overflow */
903 bfd_elf_generic_reloc, /* special_function */
904 AARCH64_R_STR (TLSIE_MOVW_GOTTPREL_G1), /* name */
905 FALSE, /* partial_inplace */
906 0xffff, /* src_mask */
907 0xffff, /* dst_mask */
908 FALSE), /* pcrel_offset */
909
910 HOWTO64 (AARCH64_R (TLSIE_MOVW_GOTTPREL_G0_NC), /* type */
911 0, /* rightshift */
912 2, /* size (0 = byte, 1 = short, 2 = long) */
913 16, /* bitsize */
914 FALSE, /* pc_relative */
915 0, /* bitpos */
916 complain_overflow_dont, /* complain_on_overflow */
917 bfd_elf_generic_reloc, /* special_function */
918 AARCH64_R_STR (TLSIE_MOVW_GOTTPREL_G0_NC), /* name */
919 FALSE, /* partial_inplace */
920 0xffff, /* src_mask */
921 0xffff, /* dst_mask */
922 FALSE), /* pcrel_offset */
923
924 HOWTO (AARCH64_R (TLSIE_ADR_GOTTPREL_PAGE21), /* type */
925 12, /* rightshift */
926 2, /* size (0 = byte, 1 = short, 2 = long) */
927 21, /* bitsize */
928 FALSE, /* pc_relative */
929 0, /* bitpos */
930 complain_overflow_dont, /* complain_on_overflow */
931 bfd_elf_generic_reloc, /* special_function */
932 AARCH64_R_STR (TLSIE_ADR_GOTTPREL_PAGE21), /* name */
933 FALSE, /* partial_inplace */
934 0x1fffff, /* src_mask */
935 0x1fffff, /* dst_mask */
936 FALSE), /* pcrel_offset */
937
938 HOWTO64 (AARCH64_R (TLSIE_LD64_GOTTPREL_LO12_NC), /* type */
939 3, /* rightshift */
940 2, /* size (0 = byte, 1 = short, 2 = long) */
941 12, /* bitsize */
942 FALSE, /* pc_relative */
943 0, /* bitpos */
944 complain_overflow_dont, /* complain_on_overflow */
945 bfd_elf_generic_reloc, /* special_function */
946 AARCH64_R_STR (TLSIE_LD64_GOTTPREL_LO12_NC), /* name */
947 FALSE, /* partial_inplace */
948 0xff8, /* src_mask */
949 0xff8, /* dst_mask */
950 FALSE), /* pcrel_offset */
951
952 HOWTO32 (AARCH64_R (TLSIE_LD32_GOTTPREL_LO12_NC), /* type */
953 2, /* rightshift */
954 2, /* size (0 = byte, 1 = short, 2 = long) */
955 12, /* bitsize */
956 FALSE, /* pc_relative */
957 0, /* bitpos */
958 complain_overflow_dont, /* complain_on_overflow */
959 bfd_elf_generic_reloc, /* special_function */
960 AARCH64_R_STR (TLSIE_LD32_GOTTPREL_LO12_NC), /* name */
961 FALSE, /* partial_inplace */
962 0xffc, /* src_mask */
963 0xffc, /* dst_mask */
964 FALSE), /* pcrel_offset */
965
966 HOWTO (AARCH64_R (TLSIE_LD_GOTTPREL_PREL19), /* type */
967 2, /* rightshift */
968 2, /* size (0 = byte, 1 = short, 2 = long) */
969 19, /* bitsize */
970 FALSE, /* pc_relative */
971 0, /* bitpos */
972 complain_overflow_dont, /* complain_on_overflow */
973 bfd_elf_generic_reloc, /* special_function */
974 AARCH64_R_STR (TLSIE_LD_GOTTPREL_PREL19), /* name */
975 FALSE, /* partial_inplace */
976 0x1ffffc, /* src_mask */
977 0x1ffffc, /* dst_mask */
978 FALSE), /* pcrel_offset */
979
980 HOWTO64 (AARCH64_R (TLSLE_MOVW_TPREL_G2), /* type */
981 32, /* rightshift */
982 2, /* size (0 = byte, 1 = short, 2 = long) */
983 16, /* bitsize */
984 FALSE, /* pc_relative */
985 0, /* bitpos */
986 complain_overflow_unsigned, /* complain_on_overflow */
987 bfd_elf_generic_reloc, /* special_function */
988 AARCH64_R_STR (TLSLE_MOVW_TPREL_G2), /* name */
989 FALSE, /* partial_inplace */
990 0xffff, /* src_mask */
991 0xffff, /* dst_mask */
992 FALSE), /* pcrel_offset */
993
994 HOWTO (AARCH64_R (TLSLE_MOVW_TPREL_G1), /* type */
995 16, /* rightshift */
996 2, /* size (0 = byte, 1 = short, 2 = long) */
997 16, /* bitsize */
998 FALSE, /* pc_relative */
999 0, /* bitpos */
1000 complain_overflow_dont, /* complain_on_overflow */
1001 bfd_elf_generic_reloc, /* special_function */
1002 AARCH64_R_STR (TLSLE_MOVW_TPREL_G1), /* name */
1003 FALSE, /* partial_inplace */
1004 0xffff, /* src_mask */
1005 0xffff, /* dst_mask */
1006 FALSE), /* pcrel_offset */
1007
1008 HOWTO64 (AARCH64_R (TLSLE_MOVW_TPREL_G1_NC), /* type */
1009 16, /* rightshift */
1010 2, /* size (0 = byte, 1 = short, 2 = long) */
1011 16, /* bitsize */
1012 FALSE, /* pc_relative */
1013 0, /* bitpos */
1014 complain_overflow_dont, /* complain_on_overflow */
1015 bfd_elf_generic_reloc, /* special_function */
1016 AARCH64_R_STR (TLSLE_MOVW_TPREL_G1_NC), /* name */
1017 FALSE, /* partial_inplace */
1018 0xffff, /* src_mask */
1019 0xffff, /* dst_mask */
1020 FALSE), /* pcrel_offset */
1021
1022 HOWTO (AARCH64_R (TLSLE_MOVW_TPREL_G0), /* type */
1023 0, /* rightshift */
1024 2, /* size (0 = byte, 1 = short, 2 = long) */
1025 16, /* bitsize */
1026 FALSE, /* pc_relative */
1027 0, /* bitpos */
1028 complain_overflow_dont, /* complain_on_overflow */
1029 bfd_elf_generic_reloc, /* special_function */
1030 AARCH64_R_STR (TLSLE_MOVW_TPREL_G0), /* name */
1031 FALSE, /* partial_inplace */
1032 0xffff, /* src_mask */
1033 0xffff, /* dst_mask */
1034 FALSE), /* pcrel_offset */
1035
1036 HOWTO (AARCH64_R (TLSLE_MOVW_TPREL_G0_NC), /* type */
1037 0, /* rightshift */
1038 2, /* size (0 = byte, 1 = short, 2 = long) */
1039 16, /* bitsize */
1040 FALSE, /* pc_relative */
1041 0, /* bitpos */
1042 complain_overflow_dont, /* complain_on_overflow */
1043 bfd_elf_generic_reloc, /* special_function */
1044 AARCH64_R_STR (TLSLE_MOVW_TPREL_G0_NC), /* name */
1045 FALSE, /* partial_inplace */
1046 0xffff, /* src_mask */
1047 0xffff, /* dst_mask */
1048 FALSE), /* pcrel_offset */
1049
1050 HOWTO (AARCH64_R (TLSLE_ADD_TPREL_HI12), /* type */
1051 12, /* rightshift */
1052 2, /* size (0 = byte, 1 = short, 2 = long) */
1053 12, /* bitsize */
1054 FALSE, /* pc_relative */
1055 0, /* bitpos */
1056 complain_overflow_unsigned, /* complain_on_overflow */
1057 bfd_elf_generic_reloc, /* special_function */
1058 AARCH64_R_STR (TLSLE_ADD_TPREL_HI12), /* name */
1059 FALSE, /* partial_inplace */
1060 0xfff, /* src_mask */
1061 0xfff, /* dst_mask */
1062 FALSE), /* pcrel_offset */
1063
1064 HOWTO (AARCH64_R (TLSLE_ADD_TPREL_LO12), /* type */
1065 0, /* rightshift */
1066 2, /* size (0 = byte, 1 = short, 2 = long) */
1067 12, /* bitsize */
1068 FALSE, /* pc_relative */
1069 0, /* bitpos */
1070 complain_overflow_dont, /* complain_on_overflow */
1071 bfd_elf_generic_reloc, /* special_function */
1072 AARCH64_R_STR (TLSLE_ADD_TPREL_LO12), /* name */
1073 FALSE, /* partial_inplace */
1074 0xfff, /* src_mask */
1075 0xfff, /* dst_mask */
1076 FALSE), /* pcrel_offset */
1077
1078 HOWTO (AARCH64_R (TLSLE_ADD_TPREL_LO12_NC), /* type */
1079 0, /* rightshift */
1080 2, /* size (0 = byte, 1 = short, 2 = long) */
1081 12, /* bitsize */
1082 FALSE, /* pc_relative */
1083 0, /* bitpos */
1084 complain_overflow_dont, /* complain_on_overflow */
1085 bfd_elf_generic_reloc, /* special_function */
1086 AARCH64_R_STR (TLSLE_ADD_TPREL_LO12_NC), /* name */
1087 FALSE, /* partial_inplace */
1088 0xfff, /* src_mask */
1089 0xfff, /* dst_mask */
1090 FALSE), /* pcrel_offset */
1091
1092 HOWTO (AARCH64_R (TLSDESC_LD_PREL19), /* type */
1093 2, /* rightshift */
1094 2, /* size (0 = byte, 1 = short, 2 = long) */
1095 19, /* bitsize */
1096 TRUE, /* pc_relative */
1097 0, /* bitpos */
1098 complain_overflow_dont, /* complain_on_overflow */
1099 bfd_elf_generic_reloc, /* special_function */
1100 AARCH64_R_STR (TLSDESC_LD_PREL19), /* name */
1101 FALSE, /* partial_inplace */
1102 0x0ffffe0, /* src_mask */
1103 0x0ffffe0, /* dst_mask */
1104 TRUE), /* pcrel_offset */
1105
1106 HOWTO (AARCH64_R (TLSDESC_ADR_PREL21), /* type */
1107 0, /* rightshift */
1108 2, /* size (0 = byte, 1 = short, 2 = long) */
1109 21, /* bitsize */
1110 TRUE, /* pc_relative */
1111 0, /* bitpos */
1112 complain_overflow_dont, /* complain_on_overflow */
1113 bfd_elf_generic_reloc, /* special_function */
1114 AARCH64_R_STR (TLSDESC_ADR_PREL21), /* name */
1115 FALSE, /* partial_inplace */
1116 0x1fffff, /* src_mask */
1117 0x1fffff, /* dst_mask */
1118 TRUE), /* pcrel_offset */
1119
1120 /* Get to the page for the GOT entry for the symbol
1121 (G(S) - P) using an ADRP instruction. */
1122 HOWTO (AARCH64_R (TLSDESC_ADR_PAGE21), /* type */
1123 12, /* rightshift */
1124 2, /* size (0 = byte, 1 = short, 2 = long) */
1125 21, /* bitsize */
1126 TRUE, /* pc_relative */
1127 0, /* bitpos */
1128 complain_overflow_dont, /* complain_on_overflow */
1129 bfd_elf_generic_reloc, /* special_function */
1130 AARCH64_R_STR (TLSDESC_ADR_PAGE21), /* name */
1131 FALSE, /* partial_inplace */
1132 0x1fffff, /* src_mask */
1133 0x1fffff, /* dst_mask */
1134 TRUE), /* pcrel_offset */
1135
1136 /* LD64: GOT offset G(S) & 0xff8. */
1137 HOWTO64 (AARCH64_R (TLSDESC_LD64_LO12_NC), /* type */
1138 3, /* rightshift */
1139 2, /* size (0 = byte, 1 = short, 2 = long) */
1140 12, /* bitsize */
1141 FALSE, /* pc_relative */
1142 0, /* bitpos */
1143 complain_overflow_dont, /* complain_on_overflow */
1144 bfd_elf_generic_reloc, /* special_function */
1145 AARCH64_R_STR (TLSDESC_LD64_LO12_NC), /* name */
1146 FALSE, /* partial_inplace */
1147 0xff8, /* src_mask */
1148 0xff8, /* dst_mask */
1149 FALSE), /* pcrel_offset */
1150
1151 /* LD32: GOT offset G(S) & 0xffc. */
1152 HOWTO32 (AARCH64_R (TLSDESC_LD32_LO12_NC), /* type */
1153 2, /* rightshift */
1154 2, /* size (0 = byte, 1 = short, 2 = long) */
1155 12, /* bitsize */
1156 FALSE, /* pc_relative */
1157 0, /* bitpos */
1158 complain_overflow_dont, /* complain_on_overflow */
1159 bfd_elf_generic_reloc, /* special_function */
1160 AARCH64_R_STR (TLSDESC_LD32_LO12_NC), /* name */
1161 FALSE, /* partial_inplace */
1162 0xffc, /* src_mask */
1163 0xffc, /* dst_mask */
1164 FALSE), /* pcrel_offset */
1165
1166 /* ADD: GOT offset G(S) & 0xfff. */
1167 HOWTO (AARCH64_R (TLSDESC_ADD_LO12_NC), /* type */
1168 0, /* rightshift */
1169 2, /* size (0 = byte, 1 = short, 2 = long) */
1170 12, /* bitsize */
1171 FALSE, /* pc_relative */
1172 0, /* bitpos */
1173 complain_overflow_dont, /* complain_on_overflow */
1174 bfd_elf_generic_reloc, /* special_function */
1175 AARCH64_R_STR (TLSDESC_ADD_LO12_NC), /* name */
1176 FALSE, /* partial_inplace */
1177 0xfff, /* src_mask */
1178 0xfff, /* dst_mask */
1179 FALSE), /* pcrel_offset */
1180
1181 HOWTO64 (AARCH64_R (TLSDESC_OFF_G1), /* type */
1182 16, /* rightshift */
1183 2, /* size (0 = byte, 1 = short, 2 = long) */
1184 12, /* bitsize */
1185 FALSE, /* pc_relative */
1186 0, /* bitpos */
1187 complain_overflow_dont, /* complain_on_overflow */
1188 bfd_elf_generic_reloc, /* special_function */
1189 AARCH64_R_STR (TLSDESC_OFF_G1), /* name */
1190 FALSE, /* partial_inplace */
1191 0xffff, /* src_mask */
1192 0xffff, /* dst_mask */
1193 FALSE), /* pcrel_offset */
1194
1195 HOWTO64 (AARCH64_R (TLSDESC_OFF_G0_NC), /* type */
1196 0, /* rightshift */
1197 2, /* size (0 = byte, 1 = short, 2 = long) */
1198 12, /* bitsize */
1199 FALSE, /* pc_relative */
1200 0, /* bitpos */
1201 complain_overflow_dont, /* complain_on_overflow */
1202 bfd_elf_generic_reloc, /* special_function */
1203 AARCH64_R_STR (TLSDESC_OFF_G0_NC), /* name */
1204 FALSE, /* partial_inplace */
1205 0xffff, /* src_mask */
1206 0xffff, /* dst_mask */
1207 FALSE), /* pcrel_offset */
1208
1209 HOWTO64 (AARCH64_R (TLSDESC_LDR), /* type */
1210 0, /* rightshift */
1211 2, /* size (0 = byte, 1 = short, 2 = long) */
1212 12, /* bitsize */
1213 FALSE, /* pc_relative */
1214 0, /* bitpos */
1215 complain_overflow_dont, /* complain_on_overflow */
1216 bfd_elf_generic_reloc, /* special_function */
1217 AARCH64_R_STR (TLSDESC_LDR), /* name */
1218 FALSE, /* partial_inplace */
1219 0x0, /* src_mask */
1220 0x0, /* dst_mask */
1221 FALSE), /* pcrel_offset */
1222
1223 HOWTO64 (AARCH64_R (TLSDESC_ADD), /* type */
1224 0, /* rightshift */
1225 2, /* size (0 = byte, 1 = short, 2 = long) */
1226 12, /* bitsize */
1227 FALSE, /* pc_relative */
1228 0, /* bitpos */
1229 complain_overflow_dont, /* complain_on_overflow */
1230 bfd_elf_generic_reloc, /* special_function */
1231 AARCH64_R_STR (TLSDESC_ADD), /* name */
1232 FALSE, /* partial_inplace */
1233 0x0, /* src_mask */
1234 0x0, /* dst_mask */
1235 FALSE), /* pcrel_offset */
1236
1237 HOWTO (AARCH64_R (TLSDESC_CALL), /* type */
1238 0, /* rightshift */
1239 2, /* size (0 = byte, 1 = short, 2 = long) */
1240 0, /* bitsize */
1241 FALSE, /* pc_relative */
1242 0, /* bitpos */
1243 complain_overflow_dont, /* complain_on_overflow */
1244 bfd_elf_generic_reloc, /* special_function */
1245 AARCH64_R_STR (TLSDESC_CALL), /* name */
1246 FALSE, /* partial_inplace */
1247 0x0, /* src_mask */
1248 0x0, /* dst_mask */
1249 FALSE), /* pcrel_offset */
1250
1251 HOWTO (AARCH64_R (COPY), /* type */
1252 0, /* rightshift */
1253 2, /* size (0 = byte, 1 = short, 2 = long) */
1254 64, /* bitsize */
1255 FALSE, /* pc_relative */
1256 0, /* bitpos */
1257 complain_overflow_bitfield, /* complain_on_overflow */
1258 bfd_elf_generic_reloc, /* special_function */
1259 AARCH64_R_STR (COPY), /* name */
1260 TRUE, /* partial_inplace */
1261 0xffffffff, /* src_mask */
1262 0xffffffff, /* dst_mask */
1263 FALSE), /* pcrel_offset */
1264
1265 HOWTO (AARCH64_R (GLOB_DAT), /* type */
1266 0, /* rightshift */
1267 2, /* size (0 = byte, 1 = short, 2 = long) */
1268 64, /* bitsize */
1269 FALSE, /* pc_relative */
1270 0, /* bitpos */
1271 complain_overflow_bitfield, /* complain_on_overflow */
1272 bfd_elf_generic_reloc, /* special_function */
1273 AARCH64_R_STR (GLOB_DAT), /* name */
1274 TRUE, /* partial_inplace */
1275 0xffffffff, /* src_mask */
1276 0xffffffff, /* dst_mask */
1277 FALSE), /* pcrel_offset */
1278
1279 HOWTO (AARCH64_R (JUMP_SLOT), /* type */
1280 0, /* rightshift */
1281 2, /* size (0 = byte, 1 = short, 2 = long) */
1282 64, /* bitsize */
1283 FALSE, /* pc_relative */
1284 0, /* bitpos */
1285 complain_overflow_bitfield, /* complain_on_overflow */
1286 bfd_elf_generic_reloc, /* special_function */
1287 AARCH64_R_STR (JUMP_SLOT), /* name */
1288 TRUE, /* partial_inplace */
1289 0xffffffff, /* src_mask */
1290 0xffffffff, /* dst_mask */
1291 FALSE), /* pcrel_offset */
1292
1293 HOWTO (AARCH64_R (RELATIVE), /* type */
1294 0, /* rightshift */
1295 2, /* size (0 = byte, 1 = short, 2 = long) */
1296 64, /* bitsize */
1297 FALSE, /* pc_relative */
1298 0, /* bitpos */
1299 complain_overflow_bitfield, /* complain_on_overflow */
1300 bfd_elf_generic_reloc, /* special_function */
1301 AARCH64_R_STR (RELATIVE), /* name */
1302 TRUE, /* partial_inplace */
1303 ALL_ONES, /* src_mask */
1304 ALL_ONES, /* dst_mask */
1305 FALSE), /* pcrel_offset */
1306
1307 HOWTO (AARCH64_R (TLS_DTPMOD), /* type */
1308 0, /* rightshift */
1309 2, /* size (0 = byte, 1 = short, 2 = long) */
1310 64, /* bitsize */
1311 FALSE, /* pc_relative */
1312 0, /* bitpos */
1313 complain_overflow_dont, /* complain_on_overflow */
1314 bfd_elf_generic_reloc, /* special_function */
1315 #if ARCH_SIZE == 64
1316 AARCH64_R_STR (TLS_DTPMOD64), /* name */
1317 #else
1318 AARCH64_R_STR (TLS_DTPMOD), /* name */
1319 #endif
1320 FALSE, /* partial_inplace */
1321 0, /* src_mask */
1322 ALL_ONES, /* dst_mask */
1323 FALSE), /* pcrel_offset */
1324
1325 HOWTO (AARCH64_R (TLS_DTPREL), /* type */
1326 0, /* rightshift */
1327 2, /* size (0 = byte, 1 = short, 2 = long) */
1328 64, /* bitsize */
1329 FALSE, /* pc_relative */
1330 0, /* bitpos */
1331 complain_overflow_dont, /* complain_on_overflow */
1332 bfd_elf_generic_reloc, /* special_function */
1333 #if ARCH_SIZE == 64
1334 AARCH64_R_STR (TLS_DTPREL64), /* name */
1335 #else
1336 AARCH64_R_STR (TLS_DTPREL), /* name */
1337 #endif
1338 FALSE, /* partial_inplace */
1339 0, /* src_mask */
1340 ALL_ONES, /* dst_mask */
1341 FALSE), /* pcrel_offset */
1342
1343 HOWTO (AARCH64_R (TLS_TPREL), /* type */
1344 0, /* rightshift */
1345 2, /* size (0 = byte, 1 = short, 2 = long) */
1346 64, /* bitsize */
1347 FALSE, /* pc_relative */
1348 0, /* bitpos */
1349 complain_overflow_dont, /* complain_on_overflow */
1350 bfd_elf_generic_reloc, /* special_function */
1351 #if ARCH_SIZE == 64
1352 AARCH64_R_STR (TLS_TPREL64), /* name */
1353 #else
1354 AARCH64_R_STR (TLS_TPREL), /* name */
1355 #endif
1356 FALSE, /* partial_inplace */
1357 0, /* src_mask */
1358 ALL_ONES, /* dst_mask */
1359 FALSE), /* pcrel_offset */
1360
1361 HOWTO (AARCH64_R (TLSDESC), /* type */
1362 0, /* rightshift */
1363 2, /* size (0 = byte, 1 = short, 2 = long) */
1364 64, /* bitsize */
1365 FALSE, /* pc_relative */
1366 0, /* bitpos */
1367 complain_overflow_dont, /* complain_on_overflow */
1368 bfd_elf_generic_reloc, /* special_function */
1369 AARCH64_R_STR (TLSDESC), /* name */
1370 FALSE, /* partial_inplace */
1371 0, /* src_mask */
1372 ALL_ONES, /* dst_mask */
1373 FALSE), /* pcrel_offset */
1374
1375 HOWTO (AARCH64_R (IRELATIVE), /* type */
1376 0, /* rightshift */
1377 2, /* size (0 = byte, 1 = short, 2 = long) */
1378 64, /* bitsize */
1379 FALSE, /* pc_relative */
1380 0, /* bitpos */
1381 complain_overflow_bitfield, /* complain_on_overflow */
1382 bfd_elf_generic_reloc, /* special_function */
1383 AARCH64_R_STR (IRELATIVE), /* name */
1384 FALSE, /* partial_inplace */
1385 0, /* src_mask */
1386 ALL_ONES, /* dst_mask */
1387 FALSE), /* pcrel_offset */
1388
1389 EMPTY_HOWTO (0),
1390 };
1391
1392 static reloc_howto_type elfNN_aarch64_howto_none =
1393 HOWTO (R_AARCH64_NONE, /* type */
1394 0, /* rightshift */
1395 3, /* size (0 = byte, 1 = short, 2 = long) */
1396 0, /* bitsize */
1397 FALSE, /* pc_relative */
1398 0, /* bitpos */
1399 complain_overflow_dont,/* complain_on_overflow */
1400 bfd_elf_generic_reloc, /* special_function */
1401 "R_AARCH64_NONE", /* name */
1402 FALSE, /* partial_inplace */
1403 0, /* src_mask */
1404 0, /* dst_mask */
1405 FALSE); /* pcrel_offset */
1406
1407 /* Given HOWTO, return the bfd internal relocation enumerator. */
1408
1409 static bfd_reloc_code_real_type
1410 elfNN_aarch64_bfd_reloc_from_howto (reloc_howto_type *howto)
1411 {
1412 const int size
1413 = (int) ARRAY_SIZE (elfNN_aarch64_howto_table);
1414 const ptrdiff_t offset
1415 = howto - elfNN_aarch64_howto_table;
1416
1417 if (offset > 0 && offset < size - 1)
1418 return BFD_RELOC_AARCH64_RELOC_START + offset;
1419
1420 if (howto == &elfNN_aarch64_howto_none)
1421 return BFD_RELOC_AARCH64_NONE;
1422
1423 return BFD_RELOC_AARCH64_RELOC_START;
1424 }
1425
1426 /* Given R_TYPE, return the bfd internal relocation enumerator. */
1427
1428 static bfd_reloc_code_real_type
1429 elfNN_aarch64_bfd_reloc_from_type (unsigned int r_type)
1430 {
1431 static bfd_boolean initialized_p = FALSE;
1432 /* Indexed by R_TYPE, values are offsets in the howto_table. */
1433 static unsigned int offsets[R_AARCH64_end];
1434
1435 if (initialized_p == FALSE)
1436 {
1437 unsigned int i;
1438
1439 for (i = 1; i < ARRAY_SIZE (elfNN_aarch64_howto_table) - 1; ++i)
1440 if (elfNN_aarch64_howto_table[i].type != 0)
1441 offsets[elfNN_aarch64_howto_table[i].type] = i;
1442
1443 initialized_p = TRUE;
1444 }
1445
1446 if (r_type == R_AARCH64_NONE || r_type == R_AARCH64_NULL)
1447 return BFD_RELOC_AARCH64_NONE;
1448
1449 /* PR 17512: file: b371e70a. */
1450 if (r_type >= R_AARCH64_end)
1451 {
1452 _bfd_error_handler (_("Invalid AArch64 reloc number: %d"), r_type);
1453 bfd_set_error (bfd_error_bad_value);
1454 return BFD_RELOC_AARCH64_NONE;
1455 }
1456
1457 return BFD_RELOC_AARCH64_RELOC_START + offsets[r_type];
1458 }
1459
1460 struct elf_aarch64_reloc_map
1461 {
1462 bfd_reloc_code_real_type from;
1463 bfd_reloc_code_real_type to;
1464 };
1465
1466 /* Map bfd generic reloc to AArch64-specific reloc. */
1467 static const struct elf_aarch64_reloc_map elf_aarch64_reloc_map[] =
1468 {
1469 {BFD_RELOC_NONE, BFD_RELOC_AARCH64_NONE},
1470
1471 /* Basic data relocations. */
1472 {BFD_RELOC_CTOR, BFD_RELOC_AARCH64_NN},
1473 {BFD_RELOC_64, BFD_RELOC_AARCH64_64},
1474 {BFD_RELOC_32, BFD_RELOC_AARCH64_32},
1475 {BFD_RELOC_16, BFD_RELOC_AARCH64_16},
1476 {BFD_RELOC_64_PCREL, BFD_RELOC_AARCH64_64_PCREL},
1477 {BFD_RELOC_32_PCREL, BFD_RELOC_AARCH64_32_PCREL},
1478 {BFD_RELOC_16_PCREL, BFD_RELOC_AARCH64_16_PCREL},
1479 };
1480
1481 /* Given the bfd internal relocation enumerator in CODE, return the
1482 corresponding howto entry. */
1483
1484 static reloc_howto_type *
1485 elfNN_aarch64_howto_from_bfd_reloc (bfd_reloc_code_real_type code)
1486 {
1487 unsigned int i;
1488
1489 /* Convert bfd generic reloc to AArch64-specific reloc. */
1490 if (code < BFD_RELOC_AARCH64_RELOC_START
1491 || code > BFD_RELOC_AARCH64_RELOC_END)
1492 for (i = 0; i < ARRAY_SIZE (elf_aarch64_reloc_map); i++)
1493 if (elf_aarch64_reloc_map[i].from == code)
1494 {
1495 code = elf_aarch64_reloc_map[i].to;
1496 break;
1497 }
1498
1499 if (code > BFD_RELOC_AARCH64_RELOC_START
1500 && code < BFD_RELOC_AARCH64_RELOC_END)
1501 if (elfNN_aarch64_howto_table[code - BFD_RELOC_AARCH64_RELOC_START].type)
1502 return &elfNN_aarch64_howto_table[code - BFD_RELOC_AARCH64_RELOC_START];
1503
1504 if (code == BFD_RELOC_AARCH64_NONE)
1505 return &elfNN_aarch64_howto_none;
1506
1507 return NULL;
1508 }
1509
1510 static reloc_howto_type *
1511 elfNN_aarch64_howto_from_type (unsigned int r_type)
1512 {
1513 bfd_reloc_code_real_type val;
1514 reloc_howto_type *howto;
1515
1516 #if ARCH_SIZE == 32
1517 if (r_type > 256)
1518 {
1519 bfd_set_error (bfd_error_bad_value);
1520 return NULL;
1521 }
1522 #endif
1523
1524 if (r_type == R_AARCH64_NONE)
1525 return &elfNN_aarch64_howto_none;
1526
1527 val = elfNN_aarch64_bfd_reloc_from_type (r_type);
1528 howto = elfNN_aarch64_howto_from_bfd_reloc (val);
1529
1530 if (howto != NULL)
1531 return howto;
1532
1533 bfd_set_error (bfd_error_bad_value);
1534 return NULL;
1535 }
1536
1537 static void
1538 elfNN_aarch64_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED, arelent *bfd_reloc,
1539 Elf_Internal_Rela *elf_reloc)
1540 {
1541 unsigned int r_type;
1542
1543 r_type = ELFNN_R_TYPE (elf_reloc->r_info);
1544 bfd_reloc->howto = elfNN_aarch64_howto_from_type (r_type);
1545 }
1546
1547 static reloc_howto_type *
1548 elfNN_aarch64_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1549 bfd_reloc_code_real_type code)
1550 {
1551 reloc_howto_type *howto = elfNN_aarch64_howto_from_bfd_reloc (code);
1552
1553 if (howto != NULL)
1554 return howto;
1555
1556 bfd_set_error (bfd_error_bad_value);
1557 return NULL;
1558 }
1559
1560 static reloc_howto_type *
1561 elfNN_aarch64_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1562 const char *r_name)
1563 {
1564 unsigned int i;
1565
1566 for (i = 1; i < ARRAY_SIZE (elfNN_aarch64_howto_table) - 1; ++i)
1567 if (elfNN_aarch64_howto_table[i].name != NULL
1568 && strcasecmp (elfNN_aarch64_howto_table[i].name, r_name) == 0)
1569 return &elfNN_aarch64_howto_table[i];
1570
1571 return NULL;
1572 }
1573
1574 #define TARGET_LITTLE_SYM aarch64_elfNN_le_vec
1575 #define TARGET_LITTLE_NAME "elfNN-littleaarch64"
1576 #define TARGET_BIG_SYM aarch64_elfNN_be_vec
1577 #define TARGET_BIG_NAME "elfNN-bigaarch64"
1578
1579 /* The linker script knows the section names for placement.
1580 The entry_names are used to do simple name mangling on the stubs.
1581 Given a function name, and its type, the stub can be found. The
1582 name can be changed. The only requirement is that the %s be present. */
1583 #define STUB_ENTRY_NAME "__%s_veneer"
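
/* A minimal sketch (not the routine the linker actually uses) of how a
   stub entry name can be formed from STUB_ENTRY_NAME: for a function
   "foo" this yields "__foo_veneer".  The helper name is hypothetical and
   the libc string routines are assumed to be visible via sysdep.h.  */

static char *
sketch_make_stub_entry_name (const char *sym_name)
{
  /* sizeof (STUB_ENTRY_NAME) already counts the trailing NUL plus the
     two "%s" characters that the symbol name replaces.  */
  size_t len = strlen (sym_name) + sizeof (STUB_ENTRY_NAME);
  char *stub_name = (char *) bfd_malloc (len);

  if (stub_name != NULL)
    snprintf (stub_name, len, STUB_ENTRY_NAME, sym_name);
  return stub_name;
}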
1584
1585 /* The name of the dynamic interpreter. This is put in the .interp
1586 section. */
1587 #define ELF_DYNAMIC_INTERPRETER "/lib/ld.so.1"
1588
1589 #define AARCH64_MAX_FWD_BRANCH_OFFSET \
1590 (((1 << 25) - 1) << 2)
1591 #define AARCH64_MAX_BWD_BRANCH_OFFSET \
1592 (-((1 << 25) << 2))
1593
1594 #define AARCH64_MAX_ADRP_IMM ((1 << 20) - 1)
1595 #define AARCH64_MIN_ADRP_IMM (-(1 << 20))
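
/* Worked out: a B/BL instruction has a signed 26-bit word offset, so the
   branch limits above are +((1 << 25) - 1) * 4 = +0x7fffffc (just under
   +128MB) and -(1 << 25) * 4 = -0x8000000 (-128MB).  An ADRP covers a
   signed 21-bit page offset, i.e. roughly +/-4GB in 4KB pages.  */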
1596
1597 static int
1598 aarch64_valid_for_adrp_p (bfd_vma value, bfd_vma place)
1599 {
1600 bfd_signed_vma offset = (bfd_signed_vma) (PG (value) - PG (place)) >> 12;
1601 return offset <= AARCH64_MAX_ADRP_IMM && offset >= AARCH64_MIN_ADRP_IMM;
1602 }
1603
1604 static int
1605 aarch64_valid_branch_p (bfd_vma value, bfd_vma place)
1606 {
1607 bfd_signed_vma offset = (bfd_signed_vma) (value - place);
1608 return (offset <= AARCH64_MAX_FWD_BRANCH_OFFSET
1609 && offset >= AARCH64_MAX_BWD_BRANCH_OFFSET);
1610 }
1611
1612 static const uint32_t aarch64_adrp_branch_stub [] =
1613 {
1614 0x90000010, /* adrp ip0, X */
1615 /* R_AARCH64_ADR_HI21_PCREL(X) */
1616 0x91000210, /* add ip0, ip0, :lo12:X */
1617 /* R_AARCH64_ADD_ABS_LO12_NC(X) */
1618 0xd61f0200, /* br ip0 */
1619 };
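
/* A sketch, for illustration only, of how the two relocations noted in
   aarch64_adrp_branch_stub would be resolved once the stub's address is
   known: the ADRP gets the page delta and the ADD gets the low 12 bits
   of the destination.  The helper name is hypothetical; the backend
   resolves these through its normal relocation machinery.  */

static void
sketch_patch_adrp_branch_stub (uint32_t *insns, bfd_vma dest, bfd_vma place)
{
  bfd_vma pages = (PG (dest) - PG (place)) >> 12;

  /* ADRP: immlo lives in bits [30:29], immhi in bits [23:5].  */
  insns[0] |= ((pages & 0x3) << 29) | (((pages >> 2) & 0x7ffff) << 5);
  /* ADD (immediate): imm12 lives in bits [21:10].  */
  insns[1] |= (dest & 0xfff) << 10;
}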
1620
1621 static const uint32_t aarch64_long_branch_stub[] =
1622 {
1623 #if ARCH_SIZE == 64
1624 0x58000090, /* ldr ip0, 1f */
1625 #else
1626 0x18000090, /* ldr wip0, 1f */
1627 #endif
1628 0x10000011, /* adr ip1, #0 */
1629 0x8b110210, /* add ip0, ip0, ip1 */
1630 0xd61f0200, /* br ip0 */
1631 0x00000000, /* 1: .xword or .word
1632 R_AARCH64_PRELNN(X) + 12
1633 */
1634 0x00000000,
1635 };
1636
1637 static const uint32_t aarch64_erratum_835769_stub[] =
1638 {
1639 0x00000000, /* Placeholder for multiply accumulate. */
1640 0x14000000, /* b <label> */
1641 };
1642
1643 /* Section name for stubs is the associated section name plus this
1644 string. */
1645 #define STUB_SUFFIX ".stub"
1646
1647 enum elf_aarch64_stub_type
1648 {
1649 aarch64_stub_none,
1650 aarch64_stub_adrp_branch,
1651 aarch64_stub_long_branch,
1652 aarch64_stub_erratum_835769_veneer,
1653 };
1654
1655 struct elf_aarch64_stub_hash_entry
1656 {
1657 /* Base hash table entry structure. */
1658 struct bfd_hash_entry root;
1659
1660 /* The stub section. */
1661 asection *stub_sec;
1662
1663 /* Offset within stub_sec of the beginning of this stub. */
1664 bfd_vma stub_offset;
1665
1666 /* Given the symbol's value and its section we can determine its final
1667 value when building the stubs (so the stub knows where to jump). */
1668 bfd_vma target_value;
1669 asection *target_section;
1670
1671 enum elf_aarch64_stub_type stub_type;
1672
1673 /* The symbol table entry, if any, that this was derived from. */
1674 struct elf_aarch64_link_hash_entry *h;
1675
1676 /* Destination symbol type. */
1677 unsigned char st_type;
1678
1679 /* Where this stub is being called from, or, in the case of combined
1680 stub sections, the first input section in the group. */
1681 asection *id_sec;
1682
1683 /* The name for the local symbol at the start of this stub. The
1684 stub name in the hash table has to be unique; this does not, so
1685 it can be friendlier. */
1686 char *output_name;
1687
1688 /* The instruction which caused this stub to be generated (only valid for
1689 erratum 835769 workaround stubs at present). */
1690 uint32_t veneered_insn;
1691 };
1692
1693 /* Used to build a map of a section. This is required for mixed-endian
1694 code/data. */
1695
1696 typedef struct elf_elf_section_map
1697 {
1698 bfd_vma vma;
1699 char type;
1700 }
1701 elf_aarch64_section_map;
1702
1703
1704 typedef struct _aarch64_elf_section_data
1705 {
1706 struct bfd_elf_section_data elf;
1707 unsigned int mapcount;
1708 unsigned int mapsize;
1709 elf_aarch64_section_map *map;
1710 }
1711 _aarch64_elf_section_data;
1712
1713 #define elf_aarch64_section_data(sec) \
1714 ((_aarch64_elf_section_data *) elf_section_data (sec))
1715
1716 /* The size of the thread control block, which is defined to be two pointers. */
1717 #define TCB_SIZE (ARCH_SIZE/8)*2
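/* With ARCH_SIZE == 64 this evaluates to 16 bytes (two 8-byte pointers),
   and to 8 bytes for a 32-bit target.  The AArch64 TLS layout places the
   thread control block at the thread pointer with the static TLS area
   following it, so thread-pointer relative offsets are biased by
   TCB_SIZE.  */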
1718
1719 struct elf_aarch64_local_symbol
1720 {
1721 unsigned int got_type;
1722 bfd_signed_vma got_refcount;
1723 bfd_vma got_offset;
1724
1725 /* Offset of the GOTPLT entry reserved for the TLS descriptor. The
1726 offset is from the end of the jump table and reserved entries
1727 within the PLTGOT.
1728
1729 The magic value (bfd_vma) -1 indicates that an offset has not been
1730 allocated. */
1731 bfd_vma tlsdesc_got_jump_table_offset;
1732 };
1733
1734 struct elf_aarch64_obj_tdata
1735 {
1736 struct elf_obj_tdata root;
1737
1738 /* Local symbol descriptors. */
1739 struct elf_aarch64_local_symbol *locals;
1740
1741 /* Zero to warn when linking objects with incompatible enum sizes. */
1742 int no_enum_size_warning;
1743
1744 /* Zero to warn when linking objects with incompatible wchar_t sizes. */
1745 int no_wchar_size_warning;
1746 };
1747
1748 #define elf_aarch64_tdata(bfd) \
1749 ((struct elf_aarch64_obj_tdata *) (bfd)->tdata.any)
1750
1751 #define elf_aarch64_locals(bfd) (elf_aarch64_tdata (bfd)->locals)
1752
1753 #define is_aarch64_elf(bfd) \
1754 (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
1755 && elf_tdata (bfd) != NULL \
1756 && elf_object_id (bfd) == AARCH64_ELF_DATA)
1757
1758 static bfd_boolean
1759 elfNN_aarch64_mkobject (bfd *abfd)
1760 {
1761 return bfd_elf_allocate_object (abfd, sizeof (struct elf_aarch64_obj_tdata),
1762 AARCH64_ELF_DATA);
1763 }
1764
1765 #define elf_aarch64_hash_entry(ent) \
1766 ((struct elf_aarch64_link_hash_entry *)(ent))
1767
1768 #define GOT_UNKNOWN 0
1769 #define GOT_NORMAL 1
1770 #define GOT_TLS_GD 2
1771 #define GOT_TLS_IE 4
1772 #define GOT_TLSDESC_GD 8
1773
1774 #define GOT_TLS_GD_ANY_P(type) ((type & GOT_TLS_GD) || (type & GOT_TLSDESC_GD))
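/* Note that got_type is a bit mask, so a single symbol may require more
   than one kind of GOT entry at once, e.g. both a general dynamic and an
   initial exec TLS entry.  GOT_TLS_GD_ANY_P is true for either flavour of
   general dynamic access: traditional (GOT_TLS_GD) or descriptor based
   (GOT_TLSDESC_GD).  */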
1775
1776 /* AArch64 ELF linker hash entry. */
1777 struct elf_aarch64_link_hash_entry
1778 {
1779 struct elf_link_hash_entry root;
1780
1781 /* Track dynamic relocs copied for this symbol. */
1782 struct elf_dyn_relocs *dyn_relocs;
1783
1784 /* Since PLT entries have variable size, we need to record the
1785 index into .got.plt instead of recomputing it from the PLT
1786 offset. */
1787 bfd_signed_vma plt_got_offset;
1788
1789 /* Bit mask representing the type of GOT entry (or entries), if any,
1790 required by this symbol. */
1791 unsigned int got_type;
1792
1793 /* A pointer to the most recently used stub hash entry against this
1794 symbol. */
1795 struct elf_aarch64_stub_hash_entry *stub_cache;
1796
1797 /* Offset of the GOTPLT entry reserved for the TLS descriptor. The offset
1798 is from the end of the jump table and reserved entries within the PLTGOT.
1799
1800 The magic value (bfd_vma) -1 indicates that an offset has not
1801 been allocated. */
1802 bfd_vma tlsdesc_got_jump_table_offset;
1803 };
1804
1805 static unsigned int
1806 elfNN_aarch64_symbol_got_type (struct elf_link_hash_entry *h,
1807 bfd *abfd,
1808 unsigned long r_symndx)
1809 {
1810 if (h)
1811 return elf_aarch64_hash_entry (h)->got_type;
1812
1813 if (! elf_aarch64_locals (abfd))
1814 return GOT_UNKNOWN;
1815
1816 return elf_aarch64_locals (abfd)[r_symndx].got_type;
1817 }
1818
1819 /* Get the AArch64 elf linker hash table from a link_info structure. */
1820 #define elf_aarch64_hash_table(info) \
1821 ((struct elf_aarch64_link_hash_table *) ((info)->hash))
1822
1823 #define aarch64_stub_hash_lookup(table, string, create, copy) \
1824 ((struct elf_aarch64_stub_hash_entry *) \
1825 bfd_hash_lookup ((table), (string), (create), (copy)))
1826
1827 /* AArch64 ELF linker hash table. */
1828 struct elf_aarch64_link_hash_table
1829 {
1830 /* The main hash table. */
1831 struct elf_link_hash_table root;
1832
1833 /* Nonzero to force PIC branch veneers. */
1834 int pic_veneer;
1835
1836 /* Fix erratum 835769. */
1837 int fix_erratum_835769;
1838
1839 /* The number of bytes in the initial entry in the PLT. */
1840 bfd_size_type plt_header_size;
1841
1842 /* The number of bytes in the subsequent PLT entries. */
1843 bfd_size_type plt_entry_size;
1844
1845 /* Short-cuts to get to dynamic linker sections. */
1846 asection *sdynbss;
1847 asection *srelbss;
1848
1849 /* Small local sym cache. */
1850 struct sym_cache sym_cache;
1851
1852 /* For convenience in allocate_dynrelocs. */
1853 bfd *obfd;
1854
1855 /* The amount of space used by the reserved portion of the sgotplt
1856 section, plus whatever space is used by the jump slots. */
1857 bfd_vma sgotplt_jump_table_size;
1858
1859 /* The stub hash table. */
1860 struct bfd_hash_table stub_hash_table;
1861
1862 /* Linker stub bfd. */
1863 bfd *stub_bfd;
1864
1865 /* Linker call-backs. */
1866 asection *(*add_stub_section) (const char *, asection *);
1867 void (*layout_sections_again) (void);
1868
1869 /* Array to keep track of which stub sections have been created, and
1870 information on stub grouping. */
1871 struct map_stub
1872 {
1873 /* This is the section to which stubs in the group will be
1874 attached. */
1875 asection *link_sec;
1876 /* The stub section. */
1877 asection *stub_sec;
1878 } *stub_group;
1879
1880 /* Assorted information used by elfNN_aarch64_size_stubs. */
1881 unsigned int bfd_count;
1882 int top_index;
1883 asection **input_list;
1884
1885 /* The offset into splt of the PLT entry for the TLS descriptor
1886 resolver. Special values are 0, if not necessary (or not found
1887 to be necessary yet), and -1 if needed but not determined
1888 yet. */
1889 bfd_vma tlsdesc_plt;
1890
1891 /* The GOT offset for the lazy trampoline. Communicated to the
1892 loader via DT_TLSDESC_GOT. The magic value (bfd_vma) -1
1893 indicates an offset is not allocated. */
1894 bfd_vma dt_tlsdesc_got;
1895
1896 /* Used by local STT_GNU_IFUNC symbols. */
1897 htab_t loc_hash_table;
1898 void * loc_hash_memory;
1899 };
1900
1901 /* Create an entry in an AArch64 ELF linker hash table. */
1902
1903 static struct bfd_hash_entry *
1904 elfNN_aarch64_link_hash_newfunc (struct bfd_hash_entry *entry,
1905 struct bfd_hash_table *table,
1906 const char *string)
1907 {
1908 struct elf_aarch64_link_hash_entry *ret =
1909 (struct elf_aarch64_link_hash_entry *) entry;
1910
1911 /* Allocate the structure if it has not already been allocated by a
1912 subclass. */
1913 if (ret == NULL)
1914 ret = bfd_hash_allocate (table,
1915 sizeof (struct elf_aarch64_link_hash_entry));
1916 if (ret == NULL)
1917 return (struct bfd_hash_entry *) ret;
1918
1919 /* Call the allocation method of the superclass. */
1920 ret = ((struct elf_aarch64_link_hash_entry *)
1921 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret,
1922 table, string));
1923 if (ret != NULL)
1924 {
1925 ret->dyn_relocs = NULL;
1926 ret->got_type = GOT_UNKNOWN;
1927 ret->plt_got_offset = (bfd_vma) - 1;
1928 ret->stub_cache = NULL;
1929 ret->tlsdesc_got_jump_table_offset = (bfd_vma) - 1;
1930 }
1931
1932 return (struct bfd_hash_entry *) ret;
1933 }
1934
1935 /* Initialize an entry in the stub hash table. */
1936
1937 static struct bfd_hash_entry *
1938 stub_hash_newfunc (struct bfd_hash_entry *entry,
1939 struct bfd_hash_table *table, const char *string)
1940 {
1941 /* Allocate the structure if it has not already been allocated by a
1942 subclass. */
1943 if (entry == NULL)
1944 {
1945 entry = bfd_hash_allocate (table,
1946 sizeof (struct
1947 elf_aarch64_stub_hash_entry));
1948 if (entry == NULL)
1949 return entry;
1950 }
1951
1952 /* Call the allocation method of the superclass. */
1953 entry = bfd_hash_newfunc (entry, table, string);
1954 if (entry != NULL)
1955 {
1956 struct elf_aarch64_stub_hash_entry *eh;
1957
1958 /* Initialize the local fields. */
1959 eh = (struct elf_aarch64_stub_hash_entry *) entry;
1960 eh->stub_sec = NULL;
1961 eh->stub_offset = 0;
1962 eh->target_value = 0;
1963 eh->target_section = NULL;
1964 eh->stub_type = aarch64_stub_none;
1965 eh->h = NULL;
1966 eh->id_sec = NULL;
1967 }
1968
1969 return entry;
1970 }
1971
1972 /* Compute a hash of a local hash entry. We use elf_link_hash_entry
1973 for local symbols so that we can handle local STT_GNU_IFUNC symbols
1974 as global symbols. We reuse indx and dynstr_index for the local
1975 symbol hash since they aren't used by global symbols in this backend. */
1976
1977 static hashval_t
1978 elfNN_aarch64_local_htab_hash (const void *ptr)
1979 {
1980 struct elf_link_hash_entry *h
1981 = (struct elf_link_hash_entry *) ptr;
1982 return ELF_LOCAL_SYMBOL_HASH (h->indx, h->dynstr_index);
1983 }
1984
1985 /* Compare local hash entries. */
1986
1987 static int
1988 elfNN_aarch64_local_htab_eq (const void *ptr1, const void *ptr2)
1989 {
1990 struct elf_link_hash_entry *h1
1991 = (struct elf_link_hash_entry *) ptr1;
1992 struct elf_link_hash_entry *h2
1993 = (struct elf_link_hash_entry *) ptr2;
1994
1995 return h1->indx == h2->indx && h1->dynstr_index == h2->dynstr_index;
1996 }
1997
1998 /* Find and/or create a hash entry for a local symbol. */
1999
2000 static struct elf_link_hash_entry *
2001 elfNN_aarch64_get_local_sym_hash (struct elf_aarch64_link_hash_table *htab,
2002 bfd *abfd, const Elf_Internal_Rela *rel,
2003 bfd_boolean create)
2004 {
2005 struct elf_aarch64_link_hash_entry e, *ret;
2006 asection *sec = abfd->sections;
2007 hashval_t h = ELF_LOCAL_SYMBOL_HASH (sec->id,
2008 ELFNN_R_SYM (rel->r_info));
2009 void **slot;
2010
2011 e.root.indx = sec->id;
2012 e.root.dynstr_index = ELFNN_R_SYM (rel->r_info);
2013 slot = htab_find_slot_with_hash (htab->loc_hash_table, &e, h,
2014 create ? INSERT : NO_INSERT);
2015
2016 if (!slot)
2017 return NULL;
2018
2019 if (*slot)
2020 {
2021 ret = (struct elf_aarch64_link_hash_entry *) *slot;
2022 return &ret->root;
2023 }
2024
2025 ret = (struct elf_aarch64_link_hash_entry *)
2026 objalloc_alloc ((struct objalloc *) htab->loc_hash_memory,
2027 sizeof (struct elf_aarch64_link_hash_entry));
2028 if (ret)
2029 {
2030 memset (ret, 0, sizeof (*ret));
2031 ret->root.indx = sec->id;
2032 ret->root.dynstr_index = ELFNN_R_SYM (rel->r_info);
2033 ret->root.dynindx = -1;
2034 *slot = ret;
2035 }
2036 return &ret->root;
2037 }
2038
2039 /* Copy the extra info we tack onto an elf_link_hash_entry. */
2040
2041 static void
2042 elfNN_aarch64_copy_indirect_symbol (struct bfd_link_info *info,
2043 struct elf_link_hash_entry *dir,
2044 struct elf_link_hash_entry *ind)
2045 {
2046 struct elf_aarch64_link_hash_entry *edir, *eind;
2047
2048 edir = (struct elf_aarch64_link_hash_entry *) dir;
2049 eind = (struct elf_aarch64_link_hash_entry *) ind;
2050
2051 if (eind->dyn_relocs != NULL)
2052 {
2053 if (edir->dyn_relocs != NULL)
2054 {
2055 struct elf_dyn_relocs **pp;
2056 struct elf_dyn_relocs *p;
2057
2058 /* Add reloc counts against the indirect sym to the direct sym
2059 list. Merge any entries against the same section. */
2060 for (pp = &eind->dyn_relocs; (p = *pp) != NULL;)
2061 {
2062 struct elf_dyn_relocs *q;
2063
2064 for (q = edir->dyn_relocs; q != NULL; q = q->next)
2065 if (q->sec == p->sec)
2066 {
2067 q->pc_count += p->pc_count;
2068 q->count += p->count;
2069 *pp = p->next;
2070 break;
2071 }
2072 if (q == NULL)
2073 pp = &p->next;
2074 }
2075 *pp = edir->dyn_relocs;
2076 }
2077
2078 edir->dyn_relocs = eind->dyn_relocs;
2079 eind->dyn_relocs = NULL;
2080 }
2081
2082 if (ind->root.type == bfd_link_hash_indirect)
2083 {
2084 /* Copy over PLT info. */
2085 if (dir->got.refcount <= 0)
2086 {
2087 edir->got_type = eind->got_type;
2088 eind->got_type = GOT_UNKNOWN;
2089 }
2090 }
2091
2092 _bfd_elf_link_hash_copy_indirect (info, dir, ind);
2093 }
2094
2095 /* Destroy an AArch64 elf linker hash table. */
2096
2097 static void
2098 elfNN_aarch64_link_hash_table_free (bfd *obfd)
2099 {
2100 struct elf_aarch64_link_hash_table *ret
2101 = (struct elf_aarch64_link_hash_table *) obfd->link.hash;
2102
2103 if (ret->loc_hash_table)
2104 htab_delete (ret->loc_hash_table);
2105 if (ret->loc_hash_memory)
2106 objalloc_free ((struct objalloc *) ret->loc_hash_memory);
2107
2108 bfd_hash_table_free (&ret->stub_hash_table);
2109 _bfd_elf_link_hash_table_free (obfd);
2110 }
2111
2112 /* Create an AArch64 elf linker hash table. */
2113
2114 static struct bfd_link_hash_table *
2115 elfNN_aarch64_link_hash_table_create (bfd *abfd)
2116 {
2117 struct elf_aarch64_link_hash_table *ret;
2118 bfd_size_type amt = sizeof (struct elf_aarch64_link_hash_table);
2119
2120 ret = bfd_zmalloc (amt);
2121 if (ret == NULL)
2122 return NULL;
2123
2124 if (!_bfd_elf_link_hash_table_init
2125 (&ret->root, abfd, elfNN_aarch64_link_hash_newfunc,
2126 sizeof (struct elf_aarch64_link_hash_entry), AARCH64_ELF_DATA))
2127 {
2128 free (ret);
2129 return NULL;
2130 }
2131
2132 ret->plt_header_size = PLT_ENTRY_SIZE;
2133 ret->plt_entry_size = PLT_SMALL_ENTRY_SIZE;
2134 ret->obfd = abfd;
2135 ret->dt_tlsdesc_got = (bfd_vma) - 1;
2136
2137 if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc,
2138 sizeof (struct elf_aarch64_stub_hash_entry)))
2139 {
2140 _bfd_elf_link_hash_table_free (abfd);
2141 return NULL;
2142 }
2143
2144 ret->loc_hash_table = htab_try_create (1024,
2145 elfNN_aarch64_local_htab_hash,
2146 elfNN_aarch64_local_htab_eq,
2147 NULL);
2148 ret->loc_hash_memory = objalloc_create ();
2149 if (!ret->loc_hash_table || !ret->loc_hash_memory)
2150 {
2151 elfNN_aarch64_link_hash_table_free (abfd);
2152 return NULL;
2153 }
2154 ret->root.root.hash_table_free = elfNN_aarch64_link_hash_table_free;
2155
2156 return &ret->root.root;
2157 }
2158
2159 static bfd_boolean
2160 aarch64_relocate (unsigned int r_type, bfd *input_bfd, asection *input_section,
2161 bfd_vma offset, bfd_vma value)
2162 {
2163 reloc_howto_type *howto;
2164 bfd_vma place;
2165
2166 howto = elfNN_aarch64_howto_from_type (r_type);
2167 place = (input_section->output_section->vma + input_section->output_offset
2168 + offset);
2169
2170 r_type = elfNN_aarch64_bfd_reloc_from_type (r_type);
2171 value = _bfd_aarch64_elf_resolve_relocation (r_type, place, value, 0, FALSE);
2172 return _bfd_aarch64_elf_put_addend (input_bfd,
2173 input_section->contents + offset, r_type,
2174 howto, value);
2175 }
2176
2177 static enum elf_aarch64_stub_type
2178 aarch64_select_branch_stub (bfd_vma value, bfd_vma place)
2179 {
2180 if (aarch64_valid_for_adrp_p (value, place))
2181 return aarch64_stub_adrp_branch;
2182 return aarch64_stub_long_branch;
2183 }
2184
2185 /* Determine the type of stub needed, if any, for a call. */
2186
2187 static enum elf_aarch64_stub_type
2188 aarch64_type_of_stub (struct bfd_link_info *info,
2189 asection *input_sec,
2190 const Elf_Internal_Rela *rel,
2191 unsigned char st_type,
2192 struct elf_aarch64_link_hash_entry *hash,
2193 bfd_vma destination)
2194 {
2195 bfd_vma location;
2196 bfd_signed_vma branch_offset;
2197 unsigned int r_type;
2198 struct elf_aarch64_link_hash_table *globals;
2199 enum elf_aarch64_stub_type stub_type = aarch64_stub_none;
2200 bfd_boolean via_plt_p;
2201
2202 if (st_type != STT_FUNC)
2203 return stub_type;
2204
2205 globals = elf_aarch64_hash_table (info);
2206 via_plt_p = (globals->root.splt != NULL && hash != NULL
2207 && hash->root.plt.offset != (bfd_vma) - 1);
2208
2209 if (via_plt_p)
2210 return stub_type;
2211
2212 /* Determine where the call point is. */
2213 location = (input_sec->output_offset
2214 + input_sec->output_section->vma + rel->r_offset);
2215
2216 branch_offset = (bfd_signed_vma) (destination - location);
2217
2218 r_type = ELFNN_R_TYPE (rel->r_info);
2219
2220 /* We don't want to redirect any old unconditional jump in this way,
2221 only one which is being used for a sibcall, where it is
2222 acceptable for the IP0 and IP1 registers to be clobbered. */
2223 if ((r_type == AARCH64_R (CALL26) || r_type == AARCH64_R (JUMP26))
2224 && (branch_offset > AARCH64_MAX_FWD_BRANCH_OFFSET
2225 || branch_offset < AARCH64_MAX_BWD_BRANCH_OFFSET))
2226 {
2227 stub_type = aarch64_stub_long_branch;
2228 }
2229
2230 return stub_type;
2231 }
2232
2233 /* Build a name for an entry in the stub hash table. */
2234
2235 static char *
2236 elfNN_aarch64_stub_name (const asection *input_section,
2237 const asection *sym_sec,
2238 const struct elf_aarch64_link_hash_entry *hash,
2239 const Elf_Internal_Rela *rel)
2240 {
2241 char *stub_name;
2242 bfd_size_type len;
2243
2244 if (hash)
2245 {
2246 len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 16 + 1;
2247 stub_name = bfd_malloc (len);
2248 if (stub_name != NULL)
2249 snprintf (stub_name, len, "%08x_%s+%" BFD_VMA_FMT "x",
2250 (unsigned int) input_section->id,
2251 hash->root.root.root.string,
2252 rel->r_addend);
2253 }
2254 else
2255 {
2256 len = 8 + 1 + 8 + 1 + 8 + 1 + 16 + 1;
2257 stub_name = bfd_malloc (len);
2258 if (stub_name != NULL)
2259 snprintf (stub_name, len, "%08x_%x:%x+%" BFD_VMA_FMT "x",
2260 (unsigned int) input_section->id,
2261 (unsigned int) sym_sec->id,
2262 (unsigned int) ELFNN_R_SYM (rel->r_info),
2263 rel->r_addend);
2264 }
2265
2266 return stub_name;
2267 }
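/* For illustration only: a call to a global "printf" from a group whose
   id_sec has section id 0x12 and a zero addend produces the key
   "00000012_printf+0", while a local symbol produces a key of the form
   "<id_sec id>_<sym_sec id>:<symbol index>+<addend>", all in hex.  */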
2268
2269 /* Look up an entry in the stub hash. Stub entries are cached because
2270 creating the stub name takes a bit of time. */
2271
2272 static struct elf_aarch64_stub_hash_entry *
2273 elfNN_aarch64_get_stub_entry (const asection *input_section,
2274 const asection *sym_sec,
2275 struct elf_link_hash_entry *hash,
2276 const Elf_Internal_Rela *rel,
2277 struct elf_aarch64_link_hash_table *htab)
2278 {
2279 struct elf_aarch64_stub_hash_entry *stub_entry;
2280 struct elf_aarch64_link_hash_entry *h =
2281 (struct elf_aarch64_link_hash_entry *) hash;
2282 const asection *id_sec;
2283
2284 if ((input_section->flags & SEC_CODE) == 0)
2285 return NULL;
2286
2287 /* If this input section is part of a group of sections sharing one
2288 stub section, then use the id of the first section in the group.
2289 Stub names need to include a section id, as there may well be
2290 more than one stub used to reach say, printf, and we need to
2291 distinguish between them. */
2292 id_sec = htab->stub_group[input_section->id].link_sec;
2293
2294 if (h != NULL && h->stub_cache != NULL
2295 && h->stub_cache->h == h && h->stub_cache->id_sec == id_sec)
2296 {
2297 stub_entry = h->stub_cache;
2298 }
2299 else
2300 {
2301 char *stub_name;
2302
2303 stub_name = elfNN_aarch64_stub_name (id_sec, sym_sec, h, rel);
2304 if (stub_name == NULL)
2305 return NULL;
2306
2307 stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table,
2308 stub_name, FALSE, FALSE);
2309 if (h != NULL)
2310 h->stub_cache = stub_entry;
2311
2312 free (stub_name);
2313 }
2314
2315 return stub_entry;
2316 }
2317
2318
2319 /* Create a stub section. */
2320
2321 static asection *
2322 _bfd_aarch64_create_stub_section (asection *section,
2323 struct elf_aarch64_link_hash_table *htab)
2324 {
2325 size_t namelen;
2326 bfd_size_type len;
2327 char *s_name;
2328
2329 namelen = strlen (section->name);
2330 len = namelen + sizeof (STUB_SUFFIX);
2331 s_name = bfd_alloc (htab->stub_bfd, len);
2332 if (s_name == NULL)
2333 return NULL;
2334
2335 memcpy (s_name, section->name, namelen);
2336 memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
2337 return (*htab->add_stub_section) (s_name, section);
2338 }
2339
2340
2341 /* Find or create a stub section for a link section.
2342
2343 Find or create the stub section used to collect stubs attached to
2344 the specified link section. */
2345
2346 static asection *
2347 _bfd_aarch64_get_stub_for_link_section (asection *link_section,
2348 struct elf_aarch64_link_hash_table *htab)
2349 {
2350 if (htab->stub_group[link_section->id].stub_sec == NULL)
2351 htab->stub_group[link_section->id].stub_sec
2352 = _bfd_aarch64_create_stub_section (link_section, htab);
2353 return htab->stub_group[link_section->id].stub_sec;
2354 }
2355
2356
2357 /* Find or create a stub section in the stub group for an input
2358 section. */
2359
2360 static asection *
2361 _bfd_aarch64_create_or_find_stub_sec (asection *section,
2362 struct elf_aarch64_link_hash_table *htab)
2363 {
2364 asection *link_sec = htab->stub_group[section->id].link_sec;
2365 return _bfd_aarch64_get_stub_for_link_section (link_sec, htab);
2366 }
2367
2368
2369 /* Add a new stub entry in the stub group associated with an input
2370 section to the stub hash. Not all fields of the new stub entry are
2371 initialised. */
2372
2373 static struct elf_aarch64_stub_hash_entry *
2374 _bfd_aarch64_add_stub_entry_in_group (const char *stub_name,
2375 asection *section,
2376 struct elf_aarch64_link_hash_table *htab)
2377 {
2378 asection *link_sec;
2379 asection *stub_sec;
2380 struct elf_aarch64_stub_hash_entry *stub_entry;
2381
2382 link_sec = htab->stub_group[section->id].link_sec;
2383 stub_sec = _bfd_aarch64_create_or_find_stub_sec (section, htab);
2384
2385 /* Enter this entry into the linker stub hash table. */
2386 stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table, stub_name,
2387 TRUE, FALSE);
2388 if (stub_entry == NULL)
2389 {
2390 (*_bfd_error_handler) (_("%s: cannot create stub entry %s"),
2391 section->owner, stub_name);
2392 return NULL;
2393 }
2394
2395 stub_entry->stub_sec = stub_sec;
2396 stub_entry->stub_offset = 0;
2397 stub_entry->id_sec = link_sec;
2398
2399 return stub_entry;
2400 }
2401
2402 static bfd_boolean
2403 aarch64_build_one_stub (struct bfd_hash_entry *gen_entry,
2404 void *in_arg ATTRIBUTE_UNUSED)
2405 {
2406 struct elf_aarch64_stub_hash_entry *stub_entry;
2407 asection *stub_sec;
2408 bfd *stub_bfd;
2409 bfd_byte *loc;
2410 bfd_vma sym_value;
2411 bfd_vma veneered_insn_loc;
2412 bfd_vma veneer_entry_loc;
2413 bfd_signed_vma branch_offset = 0;
2414 unsigned int template_size;
2415 const uint32_t *template;
2416 unsigned int i;
2417
2418 /* Massage our args to the form they really have. */
2419 stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;
2420
2421 stub_sec = stub_entry->stub_sec;
2422
2423 /* Make a note of the offset within the stubs for this entry. */
2424 stub_entry->stub_offset = stub_sec->size;
2425 loc = stub_sec->contents + stub_entry->stub_offset;
2426
2427 stub_bfd = stub_sec->owner;
2428
2429 /* This is the address of the stub destination. */
2430 sym_value = (stub_entry->target_value
2431 + stub_entry->target_section->output_offset
2432 + stub_entry->target_section->output_section->vma);
2433
2434 if (stub_entry->stub_type == aarch64_stub_long_branch)
2435 {
2436 bfd_vma place = (stub_entry->stub_offset + stub_sec->output_section->vma
2437 + stub_sec->output_offset);
2438
2439 /* See if we can relax the stub. */
2440 if (aarch64_valid_for_adrp_p (sym_value, place))
2441 stub_entry->stub_type = aarch64_select_branch_stub (sym_value, place);
2442 }
2443
2444 switch (stub_entry->stub_type)
2445 {
2446 case aarch64_stub_adrp_branch:
2447 template = aarch64_adrp_branch_stub;
2448 template_size = sizeof (aarch64_adrp_branch_stub);
2449 break;
2450 case aarch64_stub_long_branch:
2451 template = aarch64_long_branch_stub;
2452 template_size = sizeof (aarch64_long_branch_stub);
2453 break;
2454 case aarch64_stub_erratum_835769_veneer:
2455 template = aarch64_erratum_835769_stub;
2456 template_size = sizeof (aarch64_erratum_835769_stub);
2457 break;
2458 default:
2459 abort ();
2460 }
2461
2462 for (i = 0; i < (template_size / sizeof template[0]); i++)
2463 {
2464 bfd_putl32 (template[i], loc);
2465 loc += 4;
2466 }
2467
2468 template_size = (template_size + 7) & ~7;
2469 stub_sec->size += template_size;
2470
2471 switch (stub_entry->stub_type)
2472 {
2473 case aarch64_stub_adrp_branch:
2474 if (aarch64_relocate (AARCH64_R (ADR_PREL_PG_HI21), stub_bfd, stub_sec,
2475 stub_entry->stub_offset, sym_value))
2476 /* The stub would not have been relaxed if the offset was out
2477 of range. */
2478 BFD_FAIL ();
2479
2480 _bfd_final_link_relocate
2481 (elfNN_aarch64_howto_from_type (AARCH64_R (ADD_ABS_LO12_NC)),
2482 stub_bfd,
2483 stub_sec,
2484 stub_sec->contents,
2485 stub_entry->stub_offset + 4,
2486 sym_value,
2487 0);
2488 break;
2489
2490 case aarch64_stub_long_branch:
2491 /* We want the value relative to the address 12 bytes back from the
2492 value itself. */
2493 _bfd_final_link_relocate (elfNN_aarch64_howto_from_type
2494 (AARCH64_R (PRELNN)), stub_bfd, stub_sec,
2495 stub_sec->contents,
2496 stub_entry->stub_offset + 16,
2497 sym_value + 12, 0);
2498 break;
2499
2500 case aarch64_stub_erratum_835769_veneer:
2501 veneered_insn_loc = stub_entry->target_section->output_section->vma
2502 + stub_entry->target_section->output_offset
2503 + stub_entry->target_value;
2504 veneer_entry_loc = stub_entry->stub_sec->output_section->vma
2505 + stub_entry->stub_sec->output_offset
2506 + stub_entry->stub_offset;
2507 branch_offset = veneered_insn_loc - veneer_entry_loc;
2508 branch_offset >>= 2;
2509 branch_offset &= 0x3ffffff;
2510 bfd_putl32 (stub_entry->veneered_insn,
2511 stub_sec->contents + stub_entry->stub_offset);
2512 bfd_putl32 (template[1] | branch_offset,
2513 stub_sec->contents + stub_entry->stub_offset + 4);
2514 break;
2515
2516 default:
2517 abort ();
2518 }
2519
2520 return TRUE;
2521 }
2522
2523 /* As above, but don't actually build the stub. Just bump offset so
2524 we know stub section sizes. */
2525
2526 static bfd_boolean
2527 aarch64_size_one_stub (struct bfd_hash_entry *gen_entry,
2528 void *in_arg ATTRIBUTE_UNUSED)
2529 {
2530 struct elf_aarch64_stub_hash_entry *stub_entry;
2531 int size;
2532
2533 /* Massage our args to the form they really have. */
2534 stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;
2535
2536 switch (stub_entry->stub_type)
2537 {
2538 case aarch64_stub_adrp_branch:
2539 size = sizeof (aarch64_adrp_branch_stub);
2540 break;
2541 case aarch64_stub_long_branch:
2542 size = sizeof (aarch64_long_branch_stub);
2543 break;
2544 case aarch64_stub_erratum_835769_veneer:
2545 size = sizeof (aarch64_erratum_835769_stub);
2546 break;
2547 default:
2548 abort ();
2549 }
2550
2551 size = (size + 7) & ~7;
2552 stub_entry->stub_sec->size += size;
2553 return TRUE;
2554 }
2555
2556 /* External entry points for sizing and building linker stubs. */
2557
2558 /* Set up various things so that we can make a list of input sections
2559 for each output section included in the link. Returns -1 on error,
2560 0 when no stubs will be needed, and 1 on success. */
2561
2562 int
2563 elfNN_aarch64_setup_section_lists (bfd *output_bfd,
2564 struct bfd_link_info *info)
2565 {
2566 bfd *input_bfd;
2567 unsigned int bfd_count;
2568 int top_id, top_index;
2569 asection *section;
2570 asection **input_list, **list;
2571 bfd_size_type amt;
2572 struct elf_aarch64_link_hash_table *htab =
2573 elf_aarch64_hash_table (info);
2574
2575 if (!is_elf_hash_table (htab))
2576 return 0;
2577
2578 /* Count the number of input BFDs and find the top input section id. */
2579 for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0;
2580 input_bfd != NULL; input_bfd = input_bfd->link.next)
2581 {
2582 bfd_count += 1;
2583 for (section = input_bfd->sections;
2584 section != NULL; section = section->next)
2585 {
2586 if (top_id < section->id)
2587 top_id = section->id;
2588 }
2589 }
2590 htab->bfd_count = bfd_count;
2591
2592 amt = sizeof (struct map_stub) * (top_id + 1);
2593 htab->stub_group = bfd_zmalloc (amt);
2594 if (htab->stub_group == NULL)
2595 return -1;
2596
2597 /* We can't use output_bfd->section_count here to find the top output
2598 section index as some sections may have been removed, and
2599 _bfd_strip_section_from_output doesn't renumber the indices. */
2600 for (section = output_bfd->sections, top_index = 0;
2601 section != NULL; section = section->next)
2602 {
2603 if (top_index < section->index)
2604 top_index = section->index;
2605 }
2606
2607 htab->top_index = top_index;
2608 amt = sizeof (asection *) * (top_index + 1);
2609 input_list = bfd_malloc (amt);
2610 htab->input_list = input_list;
2611 if (input_list == NULL)
2612 return -1;
2613
2614 /* For sections we aren't interested in, mark their entries with a
2615 value we can check later. */
2616 list = input_list + top_index;
2617 do
2618 *list = bfd_abs_section_ptr;
2619 while (list-- != input_list);
2620
2621 for (section = output_bfd->sections;
2622 section != NULL; section = section->next)
2623 {
2624 if ((section->flags & SEC_CODE) != 0)
2625 input_list[section->index] = NULL;
2626 }
2627
2628 return 1;
2629 }
2630
2631 /* Used by elfNN_aarch64_next_input_section and group_sections. */
2632 #define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
2633
2634 /* The linker repeatedly calls this function for each input section,
2635 in the order that input sections are linked into output sections.
2636 Build lists of input sections to determine groupings between which
2637 we may insert linker stubs. */
2638
2639 void
2640 elfNN_aarch64_next_input_section (struct bfd_link_info *info, asection *isec)
2641 {
2642 struct elf_aarch64_link_hash_table *htab =
2643 elf_aarch64_hash_table (info);
2644
2645 if (isec->output_section->index <= htab->top_index)
2646 {
2647 asection **list = htab->input_list + isec->output_section->index;
2648
2649 if (*list != bfd_abs_section_ptr)
2650 {
2651 /* Steal the link_sec pointer for our list. */
2652 /* This happens to make the list in reverse order,
2653 which is what we want. */
2654 PREV_SEC (isec) = *list;
2655 *list = isec;
2656 }
2657 }
2658 }
2659
2660 /* See whether we can group stub sections together. Grouping stub
2661 sections may result in fewer stubs. More importantly, we need to
2662 put all .init* and .fini* stubs at the beginning of the .init or
2663 .fini output sections respectively, because glibc splits the
2664 _init and _fini functions into multiple parts. Putting a stub in
2665 the middle of a function is not a good idea. */
2666
2667 static void
2668 group_sections (struct elf_aarch64_link_hash_table *htab,
2669 bfd_size_type stub_group_size,
2670 bfd_boolean stubs_always_before_branch)
2671 {
2672 asection **list = htab->input_list + htab->top_index;
2673
2674 do
2675 {
2676 asection *tail = *list;
2677
2678 if (tail == bfd_abs_section_ptr)
2679 continue;
2680
2681 while (tail != NULL)
2682 {
2683 asection *curr;
2684 asection *prev;
2685 bfd_size_type total;
2686
2687 curr = tail;
2688 total = tail->size;
2689 while ((prev = PREV_SEC (curr)) != NULL
2690 && ((total += curr->output_offset - prev->output_offset)
2691 < stub_group_size))
2692 curr = prev;
2693
2694 /* OK, the size from the start of CURR to the end is less
2695 than stub_group_size and thus can be handled by one stub
2696 section. (Or the tail section is itself larger than
2697 stub_group_size, in which case we may be toast.)
2698 We should really be keeping track of the total size of
2699 stubs added here, as stubs contribute to the final output
2700 section size. */
2701 do
2702 {
2703 prev = PREV_SEC (tail);
2704 /* Set up this stub group. */
2705 htab->stub_group[tail->id].link_sec = curr;
2706 }
2707 while (tail != curr && (tail = prev) != NULL);
2708
2709 /* But wait, there's more! Input sections up to stub_group_size
2710 bytes before the stub section can be handled by it too. */
2711 if (!stubs_always_before_branch)
2712 {
2713 total = 0;
2714 while (prev != NULL
2715 && ((total += tail->output_offset - prev->output_offset)
2716 < stub_group_size))
2717 {
2718 tail = prev;
2719 prev = PREV_SEC (tail);
2720 htab->stub_group[tail->id].link_sec = curr;
2721 }
2722 }
2723 tail = prev;
2724 }
2725 }
2726 while (list-- != htab->input_list);
2727
2728 free (htab->input_list);
2729 }
2730
2731 #undef PREV_SEC
2732
2733 #define AARCH64_BITS(x, pos, n) (((x) >> (pos)) & ((1 << (n)) - 1))
2734
2735 #define AARCH64_RT(insn) AARCH64_BITS (insn, 0, 5)
2736 #define AARCH64_RT2(insn) AARCH64_BITS (insn, 10, 5)
2737 #define AARCH64_RA(insn) AARCH64_BITS (insn, 10, 5)
2738 #define AARCH64_RD(insn) AARCH64_BITS (insn, 0, 5)
2739 #define AARCH64_RN(insn) AARCH64_BITS (insn, 5, 5)
2740 #define AARCH64_RM(insn) AARCH64_BITS (insn, 16, 5)
2741
2742 #define AARCH64_MAC(insn) (((insn) & 0xff000000) == 0x9b000000)
2743 #define AARCH64_BIT(insn, n) AARCH64_BITS (insn, n, 1)
2744 #define AARCH64_OP31(insn) AARCH64_BITS (insn, 21, 3)
2745 #define AARCH64_ZR 0x1f
2746
2747 /* All ld/st ops. See C4-182 of the ARM ARM. The encoding space for
2748 LD_PCREL, LDST_RO, LDST_UI and LDST_UIMM covers prefetch ops. */
2749
2750 #define AARCH64_LD(insn) (AARCH64_BIT (insn, 22) == 1)
2751 #define AARCH64_LDST(insn) (((insn) & 0x0a000000) == 0x08000000)
2752 #define AARCH64_LDST_EX(insn) (((insn) & 0x3f000000) == 0x08000000)
2753 #define AARCH64_LDST_PCREL(insn) (((insn) & 0x3b000000) == 0x18000000)
2754 #define AARCH64_LDST_NAP(insn) (((insn) & 0x3b800000) == 0x28000000)
2755 #define AARCH64_LDSTP_PI(insn) (((insn) & 0x3b800000) == 0x28800000)
2756 #define AARCH64_LDSTP_O(insn) (((insn) & 0x3b800000) == 0x29000000)
2757 #define AARCH64_LDSTP_PRE(insn) (((insn) & 0x3b800000) == 0x29800000)
2758 #define AARCH64_LDST_UI(insn) (((insn) & 0x3b200c00) == 0x38000000)
2759 #define AARCH64_LDST_PIIMM(insn) (((insn) & 0x3b200c00) == 0x38000400)
2760 #define AARCH64_LDST_U(insn) (((insn) & 0x3b200c00) == 0x38000800)
2761 #define AARCH64_LDST_PREIMM(insn) (((insn) & 0x3b200c00) == 0x38000c00)
2762 #define AARCH64_LDST_RO(insn) (((insn) & 0x3b200c00) == 0x38200800)
2763 #define AARCH64_LDST_UIMM(insn) (((insn) & 0x3b000000) == 0x39000000)
2764 #define AARCH64_LDST_SIMD_M(insn) (((insn) & 0xbfbf0000) == 0x0c000000)
2765 #define AARCH64_LDST_SIMD_M_PI(insn) (((insn) & 0xbfa00000) == 0x0c800000)
2766 #define AARCH64_LDST_SIMD_S(insn) (((insn) & 0xbf9f0000) == 0x0d000000)
2767 #define AARCH64_LDST_SIMD_S_PI(insn) (((insn) & 0xbf800000) == 0x0d800000)
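/* Worked example for the decode macros above: 0xf9400020 is
   "ldr x0, [x1]".  AARCH64_LDST and AARCH64_LDST_UIMM both match it,
   AARCH64_LD reports a load (bit 22 is set), and AARCH64_RT and
   AARCH64_RN extract Rt = 0 and Rn = 1 respectively.  */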
2768
2769 /* Classify INSN as a load/store if it is indeed one.
2770
2771 Return TRUE if INSN is a LD/ST instruction, otherwise return FALSE.
2772
2773 For scalar LD/ST instructions PAIR is FALSE, RT is returned and RT2
2774 is set equal to RT.
2775
2776 For LD/ST pair instructions PAIR is TRUE, RT and RT2 are returned.
2777
2778 */
2779
2780 static bfd_boolean
2781 aarch64_mem_op_p (uint32_t insn, unsigned int *rt, unsigned int *rt2,
2782 bfd_boolean *pair, bfd_boolean *load)
2783 {
2784 uint32_t opcode;
2785 unsigned int r;
2786 uint32_t opc = 0;
2787 uint32_t v = 0;
2788 uint32_t opc_v = 0;
2789
2790 /* Bail out quickly if INSN doesn't fall into the load-store
2791 encoding space. */
2792 if (!AARCH64_LDST (insn))
2793 return FALSE;
2794
2795 *pair = FALSE;
2796 *load = FALSE;
2797 if (AARCH64_LDST_EX (insn))
2798 {
2799 *rt = AARCH64_RT (insn);
2800 *rt2 = *rt;
2801 if (AARCH64_BIT (insn, 21) == 1)
2802 {
2803 *pair = TRUE;
2804 *rt2 = AARCH64_RT2 (insn);
2805 }
2806 *load = AARCH64_LD (insn);
2807 return TRUE;
2808 }
2809 else if (AARCH64_LDST_NAP (insn)
2810 || AARCH64_LDSTP_PI (insn)
2811 || AARCH64_LDSTP_O (insn)
2812 || AARCH64_LDSTP_PRE (insn))
2813 {
2814 *pair = TRUE;
2815 *rt = AARCH64_RT (insn);
2816 *rt2 = AARCH64_RT2 (insn);
2817 *load = AARCH64_LD (insn);
2818 return TRUE;
2819 }
2820 else if (AARCH64_LDST_PCREL (insn)
2821 || AARCH64_LDST_UI (insn)
2822 || AARCH64_LDST_PIIMM (insn)
2823 || AARCH64_LDST_U (insn)
2824 || AARCH64_LDST_PREIMM (insn)
2825 || AARCH64_LDST_RO (insn)
2826 || AARCH64_LDST_UIMM (insn))
2827 {
2828 *rt = AARCH64_RT (insn);
2829 *rt2 = *rt;
2830 if (AARCH64_LDST_PCREL (insn))
2831 *load = TRUE;
2832 opc = AARCH64_BITS (insn, 22, 2);
2833 v = AARCH64_BIT (insn, 26);
2834 opc_v = opc | (v << 2);
2835 *load = (opc_v == 1 || opc_v == 2 || opc_v == 3
2836 || opc_v == 5 || opc_v == 7);
2837 return TRUE;
2838 }
2839 else if (AARCH64_LDST_SIMD_M (insn)
2840 || AARCH64_LDST_SIMD_M_PI (insn))
2841 {
2842 *rt = AARCH64_RT (insn);
2843 *load = AARCH64_BIT (insn, 22);
2844 opcode = (insn >> 12) & 0xf;
2845 switch (opcode)
2846 {
2847 case 0:
2848 case 2:
2849 *rt2 = *rt + 3;
2850 break;
2851
2852 case 4:
2853 case 6:
2854 *rt2 = *rt + 2;
2855 break;
2856
2857 case 7:
2858 *rt2 = *rt;
2859 break;
2860
2861 case 8:
2862 case 10:
2863 *rt2 = *rt + 1;
2864 break;
2865
2866 default:
2867 return FALSE;
2868 }
2869 return TRUE;
2870 }
2871 else if (AARCH64_LDST_SIMD_S (insn)
2872 || AARCH64_LDST_SIMD_S_PI (insn))
2873 {
2874 *rt = AARCH64_RT (insn);
2875 r = (insn >> 21) & 1;
2876 *load = AARCH64_BIT (insn, 22);
2877 opcode = (insn >> 13) & 0x7;
2878 switch (opcode)
2879 {
2880 case 0:
2881 case 2:
2882 case 4:
2883 *rt2 = *rt + r;
2884 break;
2885
2886 case 1:
2887 case 3:
2888 case 5:
2889 *rt2 = *rt + (r == 0 ? 2 : 3);
2890 break;
2891
2892 case 6:
2893 *rt2 = *rt + r;
2894 break;
2895
2896 case 7:
2897 *rt2 = *rt + (r == 0 ? 2 : 3);
2898 break;
2899
2900 default:
2901 return FALSE;
2902 }
2903 return TRUE;
2904 }
2905
2906 return FALSE;
2907 }
2908
2909 /* Return TRUE if INSN is multiply-accumulate. */
2910
2911 static bfd_boolean
2912 aarch64_mlxl_p (uint32_t insn)
2913 {
2914 uint32_t op31 = AARCH64_OP31 (insn);
2915
2916 if (AARCH64_MAC (insn)
2917 && (op31 == 0 || op31 == 1 || op31 == 5)
2918 /* Exclude MUL instructions which are encoded as a multiply-accumulate
2919 with RA = XZR. */
2920 && AARCH64_RA (insn) != AARCH64_ZR)
2921 return TRUE;
2922
2923 return FALSE;
2924 }
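/* For instance, "madd x0, x1, x2, x3" satisfies all three tests: it is in
   the 64-bit multiply-accumulate class, op31 is 0 and Ra is x3.  A plain
   "mul x0, x1, x2" is the MADD alias with Ra = xzr and is therefore
   deliberately excluded by the check above.  */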
2925
2926 /* Some early revisions of the Cortex-A53 have an erratum (835769) whereby
2927 it is possible for a 64-bit multiply-accumulate instruction to generate an
2928 incorrect result. The details are quite complex and hard to
2929 determine statically, since branches in the code may exist in some
2930 circumstances, but all cases end with a memory (load, store, or
2931 prefetch) instruction followed immediately by the multiply-accumulate
2932 operation. We employ a linker patching technique, by moving the potentially
2933 affected multiply-accumulate instruction into a patch region and replacing
2934 the original instruction with a branch to the patch. This function checks
2935 if INSN_1 is the memory operation followed by a multiply-accumulate
2936 operation (INSN_2). Return TRUE if an erratum sequence is found, FALSE
2937 if INSN_1 and INSN_2 are safe. */
2938
2939 static bfd_boolean
2940 aarch64_erratum_sequence (uint32_t insn_1, uint32_t insn_2)
2941 {
2942 uint32_t rt;
2943 uint32_t rt2;
2944 uint32_t rn;
2945 uint32_t rm;
2946 uint32_t ra;
2947 bfd_boolean pair;
2948 bfd_boolean load;
2949
2950 if (aarch64_mlxl_p (insn_2)
2951 && aarch64_mem_op_p (insn_1, &rt, &rt2, &pair, &load))
2952 {
2953 /* Any SIMD memory op is independent of the subsequent MLA
2954 by definition of the erratum. */
2955 if (AARCH64_BIT (insn_1, 26))
2956 return TRUE;
2957
2958 /* If not SIMD, check for integer memory ops and MLA relationship. */
2959 rn = AARCH64_RN (insn_2);
2960 ra = AARCH64_RA (insn_2);
2961 rm = AARCH64_RM (insn_2);
2962
2963 /* If this is a load and there's a true (RAW) dependency, we are safe
2964 and this is not an erratum sequence. */
2965 if (load
2966     && (rt == rn || rt == rm || rt == ra
2967         || (pair && (rt2 == rn || rt2 == rm || rt2 == ra))))
2968 return FALSE;
2969
2970 /* We conservatively put out stubs for all other cases (including
2971 writebacks). */
2972 return TRUE;
2973 }
2974
2975 return FALSE;
2976 }
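/* An example of a sequence that the function above flags (hypothetical
   operands):

     ldr  x0, [x2]             integer load, Rt is x0
     madd x1, x3, x4, x5       64-bit multiply-accumulate reading none of
                               the load's destination registers

   Since the multiply-accumulate does not consume x0 there is no RAW
   dependency, so the pair is conservatively treated as an erratum 835769
   sequence and the madd will be moved into a veneer.  */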
2977
2978 /* Used to order a list of mapping symbols by address. */
2979
2980 static int
2981 elf_aarch64_compare_mapping (const void *a, const void *b)
2982 {
2983 const elf_aarch64_section_map *amap = (const elf_aarch64_section_map *) a;
2984 const elf_aarch64_section_map *bmap = (const elf_aarch64_section_map *) b;
2985
2986 if (amap->vma > bmap->vma)
2987 return 1;
2988 else if (amap->vma < bmap->vma)
2989 return -1;
2990 else if (amap->type > bmap->type)
2991 /* Ensure results do not depend on the host qsort for objects with
2992 multiple mapping symbols at the same address by sorting on type
2993 after vma. */
2994 return 1;
2995 else if (amap->type < bmap->type)
2996 return -1;
2997 else
2998 return 0;
2999 }
3000
3001
3002 static char *
3003 _bfd_aarch64_erratum_835769_stub_name (unsigned num_fixes)
3004 {
  char *stub_name = (char *) bfd_malloc
    (strlen ("__erratum_835769_veneer_") + 16);
  if (stub_name != NULL)
    sprintf (stub_name, "__erratum_835769_veneer_%d", num_fixes);
  return stub_name;
3009 }
3010
3011 /* Scan for Cortex-A53 erratum 835769 sequences.
3012
3013 Return FALSE on abnormal termination, TRUE otherwise. */
3014
3015 static bfd_boolean
3016 _bfd_aarch64_erratum_835769_scan (bfd *input_bfd,
3017 struct bfd_link_info *info,
3018 unsigned int *num_fixes_p)
3019 {
3020 asection *section;
3021 struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info);
3022 unsigned int num_fixes = *num_fixes_p;
3023
3024 if (htab == NULL)
3025 return TRUE;
3026
3027 for (section = input_bfd->sections;
3028 section != NULL;
3029 section = section->next)
3030 {
3031 bfd_byte *contents = NULL;
3032 struct _aarch64_elf_section_data *sec_data;
3033 unsigned int span;
3034
3035 if (elf_section_type (section) != SHT_PROGBITS
3036 || (elf_section_flags (section) & SHF_EXECINSTR) == 0
3037 || (section->flags & SEC_EXCLUDE) != 0
3038 || (section->sec_info_type == SEC_INFO_TYPE_JUST_SYMS)
3039 || (section->output_section == bfd_abs_section_ptr))
3040 continue;
3041
3042 if (elf_section_data (section)->this_hdr.contents != NULL)
3043 contents = elf_section_data (section)->this_hdr.contents;
3044 else if (! bfd_malloc_and_get_section (input_bfd, section, &contents))
3045 return FALSE;
3046
3047 sec_data = elf_aarch64_section_data (section);
3048
3049 qsort (sec_data->map, sec_data->mapcount,
3050 sizeof (elf_aarch64_section_map), elf_aarch64_compare_mapping);
3051
3052 for (span = 0; span < sec_data->mapcount; span++)
3053 {
3054 unsigned int span_start = sec_data->map[span].vma;
3055 unsigned int span_end = ((span == sec_data->mapcount - 1)
3056 ? sec_data->map[0].vma + section->size
3057 : sec_data->map[span + 1].vma);
3058 unsigned int i;
3059 char span_type = sec_data->map[span].type;
3060
3061 if (span_type == 'd')
3062 continue;
3063
3064 for (i = span_start; i + 4 < span_end; i += 4)
3065 {
3066 uint32_t insn_1 = bfd_getl32 (contents + i);
3067 uint32_t insn_2 = bfd_getl32 (contents + i + 4);
3068
3069 if (aarch64_erratum_sequence (insn_1, insn_2))
3070 {
3071 struct elf_aarch64_stub_hash_entry *stub_entry;
3072 char *stub_name = _bfd_aarch64_erratum_835769_stub_name (num_fixes);
3073 if (! stub_name)
3074 return FALSE;
3075
3076 stub_entry = _bfd_aarch64_add_stub_entry_in_group (stub_name,
3077 section,
3078 htab);
3079 if (! stub_entry)
3080 return FALSE;
3081
3082 stub_entry->stub_type = aarch64_stub_erratum_835769_veneer;
3083 stub_entry->target_section = section;
3084 stub_entry->target_value = i + 4;
3085 stub_entry->veneered_insn = insn_2;
3086 stub_entry->output_name = stub_name;
3087 num_fixes++;
3088 }
3089 }
3090 }
3091 if (elf_section_data (section)->this_hdr.contents == NULL)
3092 free (contents);
3093 }
3094
3095 *num_fixes_p = num_fixes;
3096
3097 return TRUE;
3098 }
3099
3100
3101 /* Resize all stub sections. */
3102
3103 static void
3104 _bfd_aarch64_resize_stubs (struct elf_aarch64_link_hash_table *htab)
3105 {
3106 asection *section;
3107
3108 /* OK, we've added some stubs. Find out the new size of the
3109 stub sections. */
3110 for (section = htab->stub_bfd->sections;
3111 section != NULL; section = section->next)
3112 {
3113 /* Ignore non-stub sections. */
3114 if (!strstr (section->name, STUB_SUFFIX))
3115 continue;
3116 section->size = 0;
3117 }
3118
3119 bfd_hash_traverse (&htab->stub_hash_table, aarch64_size_one_stub, htab);
3120
3121 for (section = htab->stub_bfd->sections;
3122 section != NULL; section = section->next)
3123 {
3124 if (!strstr (section->name, STUB_SUFFIX))
3125 continue;
3126
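      /* Leave room for the branch which elfNN_aarch64_build_stubs places
         at the start of each non-empty stub section so that execution
         falling through into the section jumps over the stubs.  */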
3127 if (section->size)
3128 section->size += 4;
3129 }
3130 }
3131
3132 /* Determine and set the size of the stub section for a final link.
3133
3134 The basic idea here is to examine all the relocations looking for
3135 PC-relative calls to a target that is unreachable with a "bl"
3136 instruction. */
3137
3138 bfd_boolean
3139 elfNN_aarch64_size_stubs (bfd *output_bfd,
3140 bfd *stub_bfd,
3141 struct bfd_link_info *info,
3142 bfd_signed_vma group_size,
3143 asection * (*add_stub_section) (const char *,
3144 asection *),
3145 void (*layout_sections_again) (void))
3146 {
3147 bfd_size_type stub_group_size;
3148 bfd_boolean stubs_always_before_branch;
3149 bfd_boolean stub_changed = FALSE;
3150 struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info);
3151 unsigned int num_erratum_835769_fixes = 0;
3152
3153 /* Propagate mach to stub bfd, because it may not have been
3154 finalized when we created stub_bfd. */
3155 bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
3156 bfd_get_mach (output_bfd));
3157
3158 /* Stash our params away. */
3159 htab->stub_bfd = stub_bfd;
3160 htab->add_stub_section = add_stub_section;
3161 htab->layout_sections_again = layout_sections_again;
3162 stubs_always_before_branch = group_size < 0;
3163 if (group_size < 0)
3164 stub_group_size = -group_size;
3165 else
3166 stub_group_size = group_size;
3167
3168 if (stub_group_size == 1)
3169 {
3170 /* Default values. */
3171 /* AArch64 branch range is +-128MB. The value used is 1MB less. */
3172 stub_group_size = 127 * 1024 * 1024;
3173 }
3174
3175 group_sections (htab, stub_group_size, stubs_always_before_branch);
3176
3177 if (htab->fix_erratum_835769)
3178 {
3179 bfd *input_bfd;
3180
3181 for (input_bfd = info->input_bfds;
3182 input_bfd != NULL; input_bfd = input_bfd->link.next)
3183 if (!_bfd_aarch64_erratum_835769_scan (input_bfd, info,
3184 &num_erratum_835769_fixes))
3185 return FALSE;
3186
3187 stub_changed = TRUE;
3188 }
3189
3190 while (1)
3191 {
3192 bfd *input_bfd;
3193
3194 for (input_bfd = info->input_bfds;
3195 input_bfd != NULL; input_bfd = input_bfd->link.next)
3196 {
3197 Elf_Internal_Shdr *symtab_hdr;
3198 asection *section;
3199 Elf_Internal_Sym *local_syms = NULL;
3200
3201 /* We'll need the symbol table in a second. */
3202 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
3203 if (symtab_hdr->sh_info == 0)
3204 continue;
3205
3206 /* Walk over each section attached to the input bfd. */
3207 for (section = input_bfd->sections;
3208 section != NULL; section = section->next)
3209 {
3210 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
3211
3212 /* If there aren't any relocs, then there's nothing more
3213 to do. */
3214 if ((section->flags & SEC_RELOC) == 0
3215 || section->reloc_count == 0
3216 || (section->flags & SEC_CODE) == 0)
3217 continue;
3218
3219 /* If this section is a link-once section that will be
3220 discarded, then don't create any stubs. */
3221 if (section->output_section == NULL
3222 || section->output_section->owner != output_bfd)
3223 continue;
3224
3225 /* Get the relocs. */
3226 internal_relocs
3227 = _bfd_elf_link_read_relocs (input_bfd, section, NULL,
3228 NULL, info->keep_memory);
3229 if (internal_relocs == NULL)
3230 goto error_ret_free_local;
3231
3232 /* Now examine each relocation. */
3233 irela = internal_relocs;
3234 irelaend = irela + section->reloc_count;
3235 for (; irela < irelaend; irela++)
3236 {
3237 unsigned int r_type, r_indx;
3238 enum elf_aarch64_stub_type stub_type;
3239 struct elf_aarch64_stub_hash_entry *stub_entry;
3240 asection *sym_sec;
3241 bfd_vma sym_value;
3242 bfd_vma destination;
3243 struct elf_aarch64_link_hash_entry *hash;
3244 const char *sym_name;
3245 char *stub_name;
3246 const asection *id_sec;
3247 unsigned char st_type;
3248 bfd_size_type len;
3249
3250 r_type = ELFNN_R_TYPE (irela->r_info);
3251 r_indx = ELFNN_R_SYM (irela->r_info);
3252
3253 if (r_type >= (unsigned int) R_AARCH64_end)
3254 {
3255 bfd_set_error (bfd_error_bad_value);
3256 error_ret_free_internal:
3257 if (elf_section_data (section)->relocs == NULL)
3258 free (internal_relocs);
3259 goto error_ret_free_local;
3260 }
3261
3262 /* Only look for stubs on unconditional branch and
3263 branch and link instructions. */
3264 if (r_type != (unsigned int) AARCH64_R (CALL26)
3265 && r_type != (unsigned int) AARCH64_R (JUMP26))
3266 continue;
3267
3268 /* Now determine the call target, its name, value,
3269 section. */
3270 sym_sec = NULL;
3271 sym_value = 0;
3272 destination = 0;
3273 hash = NULL;
3274 sym_name = NULL;
3275 if (r_indx < symtab_hdr->sh_info)
3276 {
3277 /* It's a local symbol. */
3278 Elf_Internal_Sym *sym;
3279 Elf_Internal_Shdr *hdr;
3280
3281 if (local_syms == NULL)
3282 {
3283 local_syms
3284 = (Elf_Internal_Sym *) symtab_hdr->contents;
3285 if (local_syms == NULL)
3286 local_syms
3287 = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
3288 symtab_hdr->sh_info, 0,
3289 NULL, NULL, NULL);
3290 if (local_syms == NULL)
3291 goto error_ret_free_internal;
3292 }
3293
3294 sym = local_syms + r_indx;
3295 hdr = elf_elfsections (input_bfd)[sym->st_shndx];
3296 sym_sec = hdr->bfd_section;
3297 if (!sym_sec)
3298 /* This is an undefined symbol. It can never
3299 be resolved. */
3300 continue;
3301
3302 if (ELF_ST_TYPE (sym->st_info) != STT_SECTION)
3303 sym_value = sym->st_value;
3304 destination = (sym_value + irela->r_addend
3305 + sym_sec->output_offset
3306 + sym_sec->output_section->vma);
3307 st_type = ELF_ST_TYPE (sym->st_info);
3308 sym_name
3309 = bfd_elf_string_from_elf_section (input_bfd,
3310 symtab_hdr->sh_link,
3311 sym->st_name);
3312 }
3313 else
3314 {
3315 int e_indx;
3316
3317 e_indx = r_indx - symtab_hdr->sh_info;
3318 hash = ((struct elf_aarch64_link_hash_entry *)
3319 elf_sym_hashes (input_bfd)[e_indx]);
3320
3321 while (hash->root.root.type == bfd_link_hash_indirect
3322 || hash->root.root.type == bfd_link_hash_warning)
3323 hash = ((struct elf_aarch64_link_hash_entry *)
3324 hash->root.root.u.i.link);
3325
3326 if (hash->root.root.type == bfd_link_hash_defined
3327 || hash->root.root.type == bfd_link_hash_defweak)
3328 {
3329 struct elf_aarch64_link_hash_table *globals =
3330 elf_aarch64_hash_table (info);
3331 sym_sec = hash->root.root.u.def.section;
3332 sym_value = hash->root.root.u.def.value;
3333 /* For a destination in a shared library,
3334 use the PLT stub as target address to
3335 decide whether a branch stub is
3336 needed. */
3337 if (globals->root.splt != NULL && hash != NULL
3338 && hash->root.plt.offset != (bfd_vma) - 1)
3339 {
3340 sym_sec = globals->root.splt;
3341 sym_value = hash->root.plt.offset;
3342 if (sym_sec->output_section != NULL)
3343 destination = (sym_value
3344 + sym_sec->output_offset
3345 + sym_sec->output_section->vma);
3347 }
3348 else if (sym_sec->output_section != NULL)
3349 destination = (sym_value + irela->r_addend
3350 + sym_sec->output_offset
3351 + sym_sec->output_section->vma);
3352 }
3353 else if (hash->root.root.type == bfd_link_hash_undefined
3354 || (hash->root.root.type
3355 == bfd_link_hash_undefweak))
3356 {
3357 /* For a shared library, use the PLT stub as
3358 target address to decide whether a long
3359 branch stub is needed.
3360 For absolute code, they cannot be handled. */
3361 struct elf_aarch64_link_hash_table *globals =
3362 elf_aarch64_hash_table (info);
3363
3364 if (globals->root.splt != NULL && hash != NULL
3365 && hash->root.plt.offset != (bfd_vma) - 1)
3366 {
3367 sym_sec = globals->root.splt;
3368 sym_value = hash->root.plt.offset;
3369 if (sym_sec->output_section != NULL)
3370 destination = (sym_value
3371 + sym_sec->output_offset
3372 + sym_sec->output_section->vma);
3374 }
3375 else
3376 continue;
3377 }
3378 else
3379 {
3380 bfd_set_error (bfd_error_bad_value);
3381 goto error_ret_free_internal;
3382 }
3383 st_type = ELF_ST_TYPE (hash->root.type);
3384 sym_name = hash->root.root.root.string;
3385 }
3386
3387 /* Determine what (if any) linker stub is needed. */
3388 stub_type = aarch64_type_of_stub
3389 (info, section, irela, st_type, hash, destination);
3390 if (stub_type == aarch64_stub_none)
3391 continue;
3392
3393 /* Support for grouping stub sections. */
3394 id_sec = htab->stub_group[section->id].link_sec;
3395
3396 /* Get the name of this stub. */
3397 stub_name = elfNN_aarch64_stub_name (id_sec, sym_sec, hash,
3398 irela);
3399 if (!stub_name)
3400 goto error_ret_free_internal;
3401
3402 stub_entry =
3403 aarch64_stub_hash_lookup (&htab->stub_hash_table,
3404 stub_name, FALSE, FALSE);
3405 if (stub_entry != NULL)
3406 {
3407 /* The proper stub has already been created. */
3408 free (stub_name);
3409 continue;
3410 }
3411
3412 stub_entry = _bfd_aarch64_add_stub_entry_in_group
3413 (stub_name, section, htab);
3414 if (stub_entry == NULL)
3415 {
3416 free (stub_name);
3417 goto error_ret_free_internal;
3418 }
3419
3420 stub_entry->target_value = sym_value;
3421 stub_entry->target_section = sym_sec;
3422 stub_entry->stub_type = stub_type;
3423 stub_entry->h = hash;
3424 stub_entry->st_type = st_type;
3425
3426 if (sym_name == NULL)
3427 sym_name = "unnamed";
3428 len = sizeof (STUB_ENTRY_NAME) + strlen (sym_name);
3429 stub_entry->output_name = bfd_alloc (htab->stub_bfd, len);
3430 if (stub_entry->output_name == NULL)
3431 {
3432 free (stub_name);
3433 goto error_ret_free_internal;
3434 }
3435
3436 snprintf (stub_entry->output_name, len, STUB_ENTRY_NAME,
3437 sym_name);
3438
3439 stub_changed = TRUE;
3440 }
3441
3442 /* We're done with the internal relocs, free them. */
3443 if (elf_section_data (section)->relocs == NULL)
3444 free (internal_relocs);
3445 }
3446 }
3447
3448 if (!stub_changed)
3449 break;
3450
3451 _bfd_aarch64_resize_stubs (htab);
3452
3453 /* Ask the linker to do its stuff. */
3454 (*htab->layout_sections_again) ();
3455 stub_changed = FALSE;
3456 }
3457
3458 return TRUE;
3459
3460 error_ret_free_local:
3461 return FALSE;
3462 }
3463
3464 /* Build all the stubs associated with the current output file. The
3465 stubs are kept in a hash table attached to the main linker hash
3466 table. We also set up the .plt entries for statically linked PIC
3467 functions here. This function is called via aarch64_elf_finish in the
3468 linker. */
3469
3470 bfd_boolean
3471 elfNN_aarch64_build_stubs (struct bfd_link_info *info)
3472 {
3473 asection *stub_sec;
3474 struct bfd_hash_table *table;
3475 struct elf_aarch64_link_hash_table *htab;
3476
3477 htab = elf_aarch64_hash_table (info);
3478
3479 for (stub_sec = htab->stub_bfd->sections;
3480 stub_sec != NULL; stub_sec = stub_sec->next)
3481 {
3482 bfd_size_type size;
3483
3484 /* Ignore non-stub sections. */
3485 if (!strstr (stub_sec->name, STUB_SUFFIX))
3486 continue;
3487
3488 /* Allocate memory to hold the linker stubs. */
3489 size = stub_sec->size;
3490 stub_sec->contents = bfd_zalloc (htab->stub_bfd, size);
3491 if (stub_sec->contents == NULL && size != 0)
3492 return FALSE;
3493 stub_sec->size = 0;
3494
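      /* Emit an unconditional branch (B, opcode 0x14000000 with imm26 in
         units of 4 bytes) at the start of the stub section; it branches
         over the whole section so that code falling through from the
         preceding text skips the stubs.  The extra word was accounted for
         in _bfd_aarch64_resize_stubs.  */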
3495 bfd_putl32 (0x14000000 | (size >> 2), stub_sec->contents);
3496 stub_sec->size += 4;
3497 }
3498
3499 /* Build the stubs as directed by the stub hash table. */
3500 table = &htab->stub_hash_table;
3501 bfd_hash_traverse (table, aarch64_build_one_stub, info);
3502
3503 return TRUE;
3504 }
3505
3506
3507 /* Add an entry to the code/data map for section SEC. */
3508
3509 static void
3510 elfNN_aarch64_section_map_add (asection *sec, char type, bfd_vma vma)
3511 {
3512 struct _aarch64_elf_section_data *sec_data =
3513 elf_aarch64_section_data (sec);
3514 unsigned int newidx;
3515
3516 if (sec_data->map == NULL)
3517 {
3518 sec_data->map = bfd_malloc (sizeof (elf_aarch64_section_map));
3519 sec_data->mapcount = 0;
3520 sec_data->mapsize = 1;
3521 }
3522
3523 newidx = sec_data->mapcount++;
3524
3525 if (sec_data->mapcount > sec_data->mapsize)
3526 {
3527 sec_data->mapsize *= 2;
3528 sec_data->map = bfd_realloc_or_free
3529 (sec_data->map, sec_data->mapsize * sizeof (elf_aarch64_section_map));
3530 }
3531
3532 if (sec_data->map)
3533 {
3534 sec_data->map[newidx].vma = vma;
3535 sec_data->map[newidx].type = type;
3536 }
3537 }
3538
3539
3540 /* Initialise maps of insn/data for input BFDs. */
3541 void
3542 bfd_elfNN_aarch64_init_maps (bfd *abfd)
3543 {
3544 Elf_Internal_Sym *isymbuf;
3545 Elf_Internal_Shdr *hdr;
3546 unsigned int i, localsyms;
3547
3548 /* Make sure that we are dealing with an AArch64 elf binary. */
3549 if (!is_aarch64_elf (abfd))
3550 return;
3551
3552 if ((abfd->flags & DYNAMIC) != 0)
3553 return;
3554
3555 hdr = &elf_symtab_hdr (abfd);
3556 localsyms = hdr->sh_info;
3557
3558 /* Obtain a buffer full of symbols for this BFD. The hdr->sh_info field
3559 should contain the number of local symbols, which should come before any
3560 global symbols. Mapping symbols are always local. */
3561 isymbuf = bfd_elf_get_elf_syms (abfd, hdr, localsyms, 0, NULL, NULL, NULL);
3562
3563 /* No internal symbols read? Skip this BFD. */
3564 if (isymbuf == NULL)
3565 return;
3566
3567 for (i = 0; i < localsyms; i++)
3568 {
3569 Elf_Internal_Sym *isym = &isymbuf[i];
3570 asection *sec = bfd_section_from_elf_index (abfd, isym->st_shndx);
3571 const char *name;
3572
3573 if (sec != NULL && ELF_ST_BIND (isym->st_info) == STB_LOCAL)
3574 {
3575 name = bfd_elf_string_from_elf_section (abfd,
3576 hdr->sh_link,
3577 isym->st_name);
3578
3579 if (bfd_is_aarch64_special_symbol_name
3580 (name, BFD_AARCH64_SPECIAL_SYM_TYPE_MAP))
3581 elfNN_aarch64_section_map_add (sec, name[1], isym->st_value);
3582 }
3583 }
3584 }
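/* For example, assemblers emit the local mapping symbol "$x" at the
   start of a run of A64 instructions and "$d" at the start of literal
   data within a code section; the character after '$' is what
   elfNN_aarch64_section_map_add records as the map type.  */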
3585
3586 /* Set option values needed during linking. */
3587 void
3588 bfd_elfNN_aarch64_set_options (struct bfd *output_bfd,
3589 struct bfd_link_info *link_info,
3590 int no_enum_warn,
3591 int no_wchar_warn, int pic_veneer,
3592 int fix_erratum_835769)
3593 {
3594 struct elf_aarch64_link_hash_table *globals;
3595
3596 globals = elf_aarch64_hash_table (link_info);
3597 globals->pic_veneer = pic_veneer;
3598 globals->fix_erratum_835769 = fix_erratum_835769;
3599
3600 BFD_ASSERT (is_aarch64_elf (output_bfd));
3601 elf_aarch64_tdata (output_bfd)->no_enum_size_warning = no_enum_warn;
3602 elf_aarch64_tdata (output_bfd)->no_wchar_size_warning = no_wchar_warn;
3603 }
3604
3605 static bfd_vma
3606 aarch64_calculate_got_entry_vma (struct elf_link_hash_entry *h,
3607 struct elf_aarch64_link_hash_table
3608 *globals, struct bfd_link_info *info,
3609 bfd_vma value, bfd *output_bfd,
3610 bfd_boolean *unresolved_reloc_p)
3611 {
3612 bfd_vma off = (bfd_vma) - 1;
3613 asection *basegot = globals->root.sgot;
3614 bfd_boolean dyn = globals->root.dynamic_sections_created;
3615
3616 if (h != NULL)
3617 {
3618 BFD_ASSERT (basegot != NULL);
3619 off = h->got.offset;
3620 BFD_ASSERT (off != (bfd_vma) - 1);
3621 if (!WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
3622 || (info->shared
3623 && SYMBOL_REFERENCES_LOCAL (info, h))
3624 || (ELF_ST_VISIBILITY (h->other)
3625 && h->root.type == bfd_link_hash_undefweak))
3626 {
3627 /* This is actually a static link, or it is a -Bsymbolic link
3628 and the symbol is defined locally. We must initialize this
3629 entry in the global offset table. Since the offset must
3630 always be a multiple of 8 (4 in the case of ILP32), we use
3631 the least significant bit to record whether we have
3632 initialized it already.
3633 When doing a dynamic link, we create a .rel(a).got relocation
3634 entry to initialize the value. This is done in the
3635 finish_dynamic_symbol routine. */
3636 if ((off & 1) != 0)
3637 off &= ~1;
3638 else
3639 {
3640 bfd_put_NN (output_bfd, value, basegot->contents + off);
3641 h->got.offset |= 1;
3642 }
3643 }
3644 else
3645 *unresolved_reloc_p = FALSE;
3646
3647 off = off + basegot->output_section->vma + basegot->output_offset;
3648 }
3649
3650 return off;
3651 }
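/* Worked example of the low-bit tagging used above: GOT offsets are
   multiples of 8 (4 for ILP32), so an entry at offset 0x18 is recorded
   as 0x19 once its contents have been written; masking with ~1
   recovers the real offset while avoiding a second initialization.  */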
3652
3653 /* Change R_TYPE to a more efficient access model where possible,
3654 return the new reloc type. */
3655
3656 static bfd_reloc_code_real_type
3657 aarch64_tls_transition_without_check (bfd_reloc_code_real_type r_type,
3658 struct elf_link_hash_entry *h)
3659 {
3660 bfd_boolean is_local = h == NULL;
3661
3662 switch (r_type)
3663 {
3664 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
3665 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
3666 return (is_local
3667 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1
3668 : BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21);
3669
3670 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
3671 return (is_local
3672 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC
3673 : r_type);
3674
3675 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
3676 return (is_local
3677 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1
3678 : BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19);
3679
3680 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
3681 case BFD_RELOC_AARCH64_TLSDESC_LDNN_LO12_NC:
3682 return (is_local
3683 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC
3684 : BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC);
3685
3686 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
3687 return is_local ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1 : r_type;
3688
3689 case BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC:
3690 return is_local ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC : r_type;
3691
3692 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
3693 return r_type;
3694
3695 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
3696 return (is_local
3697 ? BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12
3698 : BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19);
3699
3700 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
3701 case BFD_RELOC_AARCH64_TLSDESC_CALL:
3702 /* Instructions with these relocations will become NOPs. */
3703 return BFD_RELOC_AARCH64_NONE;
3704
3705 default:
3706 break;
3707 }
3708
3709 return r_type;
3710 }
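/* For example, BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21 becomes
   TLSLE_MOVW_TPREL_G1 when the symbol is local (GD->LE) and
   TLSIE_ADR_GOTTPREL_PAGE21 otherwise (GD->IE); the companion
   TLSGD_ADD_LO12_NC is retyped to the matching G0_NC or LO12_NC form.
   The instruction rewriting itself is done in elfNN_aarch64_tls_relax
   below.  */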
3711
3712 static unsigned int
3713 aarch64_reloc_got_type (bfd_reloc_code_real_type r_type)
3714 {
3715 switch (r_type)
3716 {
3717 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
3718 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
3719 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
3720 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
3721 return GOT_NORMAL;
3722
3723 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
3724 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
3725 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
3726 return GOT_TLS_GD;
3727
3728 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
3729 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
3730 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
3731 case BFD_RELOC_AARCH64_TLSDESC_CALL:
3732 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
3733 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
3734 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
3735 return GOT_TLSDESC_GD;
3736
3737 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
3738 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
3739 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
3740 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
3741 return GOT_TLS_IE;
3742
3743 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
3744 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
3745 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
3746 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
3747 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
3748 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
3749 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
3750 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
3751 return GOT_UNKNOWN;
3752
3753 default:
3754 break;
3755 }
3756 return GOT_UNKNOWN;
3757 }
3758
3759 static bfd_boolean
3760 aarch64_can_relax_tls (bfd *input_bfd,
3761 struct bfd_link_info *info,
3762 bfd_reloc_code_real_type r_type,
3763 struct elf_link_hash_entry *h,
3764 unsigned long r_symndx)
3765 {
3766 unsigned int symbol_got_type;
3767 unsigned int reloc_got_type;
3768
3769 if (! IS_AARCH64_TLS_RELOC (r_type))
3770 return FALSE;
3771
3772 symbol_got_type = elfNN_aarch64_symbol_got_type (h, input_bfd, r_symndx);
3773 reloc_got_type = aarch64_reloc_got_type (r_type);
3774
3775 if (symbol_got_type == GOT_TLS_IE && GOT_TLS_GD_ANY_P (reloc_got_type))
3776 return TRUE;
3777
3778 if (info->shared)
3779 return FALSE;
3780
3781 if (h && h->root.type == bfd_link_hash_undefweak)
3782 return FALSE;
3783
3784 return TRUE;
3785 }
3786
3787 /* Given the relocation code R_TYPE, return the relaxed bfd reloc
3788 enumerator. */
3789
3790 static bfd_reloc_code_real_type
3791 aarch64_tls_transition (bfd *input_bfd,
3792 struct bfd_link_info *info,
3793 unsigned int r_type,
3794 struct elf_link_hash_entry *h,
3795 unsigned long r_symndx)
3796 {
3797 bfd_reloc_code_real_type bfd_r_type
3798 = elfNN_aarch64_bfd_reloc_from_type (r_type);
3799
3800 if (! aarch64_can_relax_tls (input_bfd, info, bfd_r_type, h, r_symndx))
3801 return bfd_r_type;
3802
3803 return aarch64_tls_transition_without_check (bfd_r_type, h);
3804 }
3805
3806 /* Return the base VMA address which should be subtracted from real addresses
3807 when resolving the R_AARCH64_TLS_DTPREL relocation. */
3808
3809 static bfd_vma
3810 dtpoff_base (struct bfd_link_info *info)
3811 {
3812 /* If tls_sec is NULL, we should have signalled an error already. */
3813 BFD_ASSERT (elf_hash_table (info)->tls_sec != NULL);
3814 return elf_hash_table (info)->tls_sec->vma;
3815 }
3816
3817 /* Return the base VMA address which should be subtracted from real addresses
3818 when resolving R_AARCH64_TLS_GOTTPREL64 relocations. */
3819
3820 static bfd_vma
3821 tpoff_base (struct bfd_link_info *info)
3822 {
3823 struct elf_link_hash_table *htab = elf_hash_table (info);
3824
3825 /* If tls_sec is NULL, we should have signalled an error already. */
3826 BFD_ASSERT (htab->tls_sec != NULL);
3827
3828 bfd_vma base = align_power ((bfd_vma) TCB_SIZE,
3829 htab->tls_sec->alignment_power);
3830 return htab->tls_sec->vma - base;
3831 }
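/* A minimal sketch, assuming a 16-byte TCB (standing in for the
   TCB_SIZE constant used above), of how this base turns a symbol
   address into its offset from the thread pointer; the helper name is
   hypothetical and unused.  */

static ATTRIBUTE_UNUSED bfd_vma
example_tprel_offset (bfd_vma sym_vma, bfd_vma tls_sec_vma,
                      unsigned int tls_align_power)
{
  /* TP points the rounded-up TCB size below the start of the TLS
     block, so the TPREL offset is that rounded TCB plus the symbol's
     offset within the block.  */
  bfd_vma rounded_tcb = align_power ((bfd_vma) 16, tls_align_power);
  return rounded_tcb + (sym_vma - tls_sec_vma);
}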
3832
3833 static bfd_vma *
3834 symbol_got_offset_ref (bfd *input_bfd, struct elf_link_hash_entry *h,
3835 unsigned long r_symndx)
3836 {
3837 /* Calculate the address of the GOT entry for symbol
3838 referred to in h. */
3839 if (h != NULL)
3840 return &h->got.offset;
3841 else
3842 {
3843 /* local symbol */
3844 struct elf_aarch64_local_symbol *l;
3845
3846 l = elf_aarch64_locals (input_bfd);
3847 return &l[r_symndx].got_offset;
3848 }
3849 }
3850
3851 static void
3852 symbol_got_offset_mark (bfd *input_bfd, struct elf_link_hash_entry *h,
3853 unsigned long r_symndx)
3854 {
3855 bfd_vma *p;
3856 p = symbol_got_offset_ref (input_bfd, h, r_symndx);
3857 *p |= 1;
3858 }
3859
3860 static int
3861 symbol_got_offset_mark_p (bfd *input_bfd, struct elf_link_hash_entry *h,
3862 unsigned long r_symndx)
3863 {
3864 bfd_vma value;
3865 value = * symbol_got_offset_ref (input_bfd, h, r_symndx);
3866 return value & 1;
3867 }
3868
3869 static bfd_vma
3870 symbol_got_offset (bfd *input_bfd, struct elf_link_hash_entry *h,
3871 unsigned long r_symndx)
3872 {
3873 bfd_vma value;
3874 value = * symbol_got_offset_ref (input_bfd, h, r_symndx);
3875 value &= ~1;
3876 return value;
3877 }
3878
3879 static bfd_vma *
3880 symbol_tlsdesc_got_offset_ref (bfd *input_bfd, struct elf_link_hash_entry *h,
3881 unsigned long r_symndx)
3882 {
3883 /* Calculate the address of the GOT entry for symbol
3884 referred to in h. */
3885 if (h != NULL)
3886 {
3887 struct elf_aarch64_link_hash_entry *eh;
3888 eh = (struct elf_aarch64_link_hash_entry *) h;
3889 return &eh->tlsdesc_got_jump_table_offset;
3890 }
3891 else
3892 {
3893 /* local symbol */
3894 struct elf_aarch64_local_symbol *l;
3895
3896 l = elf_aarch64_locals (input_bfd);
3897 return &l[r_symndx].tlsdesc_got_jump_table_offset;
3898 }
3899 }
3900
3901 static void
3902 symbol_tlsdesc_got_offset_mark (bfd *input_bfd, struct elf_link_hash_entry *h,
3903 unsigned long r_symndx)
3904 {
3905 bfd_vma *p;
3906 p = symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx);
3907 *p |= 1;
3908 }
3909
3910 static int
3911 symbol_tlsdesc_got_offset_mark_p (bfd *input_bfd,
3912 struct elf_link_hash_entry *h,
3913 unsigned long r_symndx)
3914 {
3915 bfd_vma value;
3916 value = * symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx);
3917 return value & 1;
3918 }
3919
3920 static bfd_vma
3921 symbol_tlsdesc_got_offset (bfd *input_bfd, struct elf_link_hash_entry *h,
3922 unsigned long r_symndx)
3923 {
3924 bfd_vma value;
3925 value = * symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx);
3926 value &= ~1;
3927 return value;
3928 }
3929
3930 /* Data for make_branch_to_erratum_835769_stub(). */
3931
3932 struct erratum_835769_branch_to_stub_data
3933 {
3934 asection *output_section;
3935 bfd_byte *contents;
3936 };
3937
3938 /* Helper to insert branches to erratum 835769 stubs in the right
3939 places for a particular section. */
3940
3941 static bfd_boolean
3942 make_branch_to_erratum_835769_stub (struct bfd_hash_entry *gen_entry,
3943 void *in_arg)
3944 {
3945 struct elf_aarch64_stub_hash_entry *stub_entry;
3946 struct erratum_835769_branch_to_stub_data *data;
3947 bfd_byte *contents;
3948 unsigned long branch_insn = 0;
3949 bfd_vma veneered_insn_loc, veneer_entry_loc;
3950 bfd_signed_vma branch_offset;
3951 unsigned int target;
3952 bfd *abfd;
3953
3954 stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;
3955 data = (struct erratum_835769_branch_to_stub_data *) in_arg;
3956
3957 if (stub_entry->target_section != data->output_section
3958 || stub_entry->stub_type != aarch64_stub_erratum_835769_veneer)
3959 return TRUE;
3960
3961 contents = data->contents;
3962 veneered_insn_loc = stub_entry->target_section->output_section->vma
3963 + stub_entry->target_section->output_offset
3964 + stub_entry->target_value;
3965 veneer_entry_loc = stub_entry->stub_sec->output_section->vma
3966 + stub_entry->stub_sec->output_offset
3967 + stub_entry->stub_offset;
3968 branch_offset = veneer_entry_loc - veneered_insn_loc;
3969
3970 abfd = stub_entry->target_section->owner;
3971 if (!aarch64_valid_branch_p (veneer_entry_loc, veneered_insn_loc))
3972 (*_bfd_error_handler)
3973 (_("%B: error: Erratum 835769 stub out "
3974 "of range (input file too large)"), abfd);
3975
3976 target = stub_entry->target_value;
3977 branch_insn = 0x14000000;
3978 branch_offset >>= 2;
3979 branch_offset &= 0x3ffffff;
3980 branch_insn |= branch_offset;
3981 bfd_putl32 (branch_insn, &contents[target]);
3982
3983 return TRUE;
3984 }
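/* For illustration: a veneered instruction at 0x400100 with its veneer
   placed at 0x4100f0 gives branch_offset = 0xfff0 bytes; shifted right
   by 2 and masked to 26 bits this yields imm26 = 0x3ffc, which is
   OR'ed into the B opcode 0x14000000 written over the original
   instruction.  */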
3985
3986 static bfd_boolean
3987 elfNN_aarch64_write_section (bfd *output_bfd ATTRIBUTE_UNUSED,
3988 struct bfd_link_info *link_info,
3989 asection *sec,
3990 bfd_byte *contents)
3991
3992 {
3993 struct elf_aarch64_link_hash_table *globals =
3994 elf_aarch64_hash_table (link_info);
3995
3996 if (globals == NULL)
3997 return FALSE;
3998
3999 /* Fix code to point to erratum 835769 stubs. */
4000 if (globals->fix_erratum_835769)
4001 {
4002 struct erratum_835769_branch_to_stub_data data;
4003
4004 data.output_section = sec;
4005 data.contents = contents;
4006 bfd_hash_traverse (&globals->stub_hash_table,
4007 make_branch_to_erratum_835769_stub, &data);
4008 }
4009
4010 return FALSE;
4011 }
4012
4013 /* Perform a relocation as part of a final link. */
4014 static bfd_reloc_status_type
4015 elfNN_aarch64_final_link_relocate (reloc_howto_type *howto,
4016 bfd *input_bfd,
4017 bfd *output_bfd,
4018 asection *input_section,
4019 bfd_byte *contents,
4020 Elf_Internal_Rela *rel,
4021 bfd_vma value,
4022 struct bfd_link_info *info,
4023 asection *sym_sec,
4024 struct elf_link_hash_entry *h,
4025 bfd_boolean *unresolved_reloc_p,
4026 bfd_boolean save_addend,
4027 bfd_vma *saved_addend,
4028 Elf_Internal_Sym *sym)
4029 {
4030 Elf_Internal_Shdr *symtab_hdr;
4031 unsigned int r_type = howto->type;
4032 bfd_reloc_code_real_type bfd_r_type
4033 = elfNN_aarch64_bfd_reloc_from_howto (howto);
4034 bfd_reloc_code_real_type new_bfd_r_type;
4035 unsigned long r_symndx;
4036 bfd_byte *hit_data = contents + rel->r_offset;
4037 bfd_vma place;
4038 bfd_signed_vma signed_addend;
4039 struct elf_aarch64_link_hash_table *globals;
4040 bfd_boolean weak_undef_p;
4041
4042 globals = elf_aarch64_hash_table (info);
4043
4044 symtab_hdr = &elf_symtab_hdr (input_bfd);
4045
4046 BFD_ASSERT (is_aarch64_elf (input_bfd));
4047
4048 r_symndx = ELFNN_R_SYM (rel->r_info);
4049
4050 /* It is possible to have linker relaxations on some TLS access
4051 models. Update our information here. */
4052 new_bfd_r_type = aarch64_tls_transition (input_bfd, info, r_type, h, r_symndx);
4053 if (new_bfd_r_type != bfd_r_type)
4054 {
4055 bfd_r_type = new_bfd_r_type;
4056 howto = elfNN_aarch64_howto_from_bfd_reloc (bfd_r_type);
4057 BFD_ASSERT (howto != NULL);
4058 r_type = howto->type;
4059 }
4060
4061 place = input_section->output_section->vma
4062 + input_section->output_offset + rel->r_offset;
4063
4064 /* Get addend, accumulating the addend for consecutive relocs
4065 which refer to the same offset. */
4066 signed_addend = saved_addend ? *saved_addend : 0;
4067 signed_addend += rel->r_addend;
4068
4069 weak_undef_p = (h ? h->root.type == bfd_link_hash_undefweak
4070 : bfd_is_und_section (sym_sec));
4071
4072 /* Since an STT_GNU_IFUNC symbol must go through the PLT, we handle
4073 it here if it is defined in a non-shared object. */
4074 if (h != NULL
4075 && h->type == STT_GNU_IFUNC
4076 && h->def_regular)
4077 {
4078 asection *plt;
4079 const char *name;
4080 asection *base_got;
4081 bfd_vma off;
4082
4083 if ((input_section->flags & SEC_ALLOC) == 0
4084 || h->plt.offset == (bfd_vma) -1)
4085 abort ();
4086
4087 /* STT_GNU_IFUNC symbol must go through PLT. */
4088 plt = globals->root.splt ? globals->root.splt : globals->root.iplt;
4089 value = (plt->output_section->vma + plt->output_offset + h->plt.offset);
4090
4091 switch (bfd_r_type)
4092 {
4093 default:
4094 if (h->root.root.string)
4095 name = h->root.root.string;
4096 else
4097 name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym,
4098 NULL);
4099 (*_bfd_error_handler)
4100 (_("%B: relocation %s against STT_GNU_IFUNC "
4101 "symbol `%s' isn't handled by %s"), input_bfd,
4102 howto->name, name, __FUNCTION__);
4103 bfd_set_error (bfd_error_bad_value);
4104 return FALSE;
4105
4106 case BFD_RELOC_AARCH64_NN:
4107 if (rel->r_addend != 0)
4108 {
4109 if (h->root.root.string)
4110 name = h->root.root.string;
4111 else
4112 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
4113 sym, NULL);
4114 (*_bfd_error_handler)
4115 (_("%B: relocation %s against STT_GNU_IFUNC "
4116 "symbol `%s' has non-zero addend: %d"),
4117 input_bfd, howto->name, name, rel->r_addend);
4118 bfd_set_error (bfd_error_bad_value);
4119 return FALSE;
4120 }
4121
4122 /* Generate dynamic relocation only when there is a
4123 non-GOT reference in a shared object. */
4124 if (info->shared && h->non_got_ref)
4125 {
4126 Elf_Internal_Rela outrel;
4127 asection *sreloc;
4128
4129 /* Need a dynamic relocation to get the real function
4130 address. */
4131 outrel.r_offset = _bfd_elf_section_offset (output_bfd,
4132 info,
4133 input_section,
4134 rel->r_offset);
4135 if (outrel.r_offset == (bfd_vma) -1
4136 || outrel.r_offset == (bfd_vma) -2)
4137 abort ();
4138
4139 outrel.r_offset += (input_section->output_section->vma
4140 + input_section->output_offset);
4141
4142 if (h->dynindx == -1
4143 || h->forced_local
4144 || info->executable)
4145 {
4146 /* This symbol is resolved locally. */
4147 outrel.r_info = ELFNN_R_INFO (0, AARCH64_R (IRELATIVE));
4148 outrel.r_addend = (h->root.u.def.value
4149 + h->root.u.def.section->output_section->vma
4150 + h->root.u.def.section->output_offset);
4151 }
4152 else
4153 {
4154 outrel.r_info = ELFNN_R_INFO (h->dynindx, r_type);
4155 outrel.r_addend = 0;
4156 }
4157
4158 sreloc = globals->root.irelifunc;
4159 elf_append_rela (output_bfd, sreloc, &outrel);
4160
4161 /* If this reloc is against an external symbol, we
4162 do not want to fiddle with the addend. Otherwise,
4163 we need to include the symbol value so that it
4164 becomes an addend for the dynamic reloc. For an
4165 internal symbol, we have updated the addend. */
4166 return bfd_reloc_ok;
4167 }
4168 /* FALLTHROUGH */
4169 case BFD_RELOC_AARCH64_JUMP26:
4170 case BFD_RELOC_AARCH64_CALL26:
4171 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
4172 signed_addend,
4173 weak_undef_p);
4174 return _bfd_aarch64_elf_put_addend (input_bfd, hit_data, bfd_r_type,
4175 howto, value);
4176 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
4177 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
4178 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
4179 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
4180 base_got = globals->root.sgot;
4181 off = h->got.offset;
4182
4183 if (base_got == NULL)
4184 abort ();
4185
4186 if (off == (bfd_vma) -1)
4187 {
4188 bfd_vma plt_index;
4189
4190 /* We can't use h->got.offset here to save state, or
4191 even just remember the offset, as finish_dynamic_symbol
4192 would use that as the offset into .got. */
4193
4194 if (globals->root.splt != NULL)
4195 {
4196 plt_index = ((h->plt.offset - globals->plt_header_size) /
4197 globals->plt_entry_size);
4198 off = (plt_index + 3) * GOT_ENTRY_SIZE;
4199 base_got = globals->root.sgotplt;
4200 }
4201 else
4202 {
4203 plt_index = h->plt.offset / globals->plt_entry_size;
4204 off = plt_index * GOT_ENTRY_SIZE;
4205 base_got = globals->root.igotplt;
4206 }
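/* For example, with 8-byte GOT entries the first three .got.plt slots
   are reserved for the dynamic linker, so PLT entry 0 resolves through
   .got.plt + 0x18 and entry 1 through .got.plt + 0x20, whereas
   .igotplt has no reserved header and IFUNC PLT entry 0 uses
   offset 0.  */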
4207
4208 if (h->dynindx == -1
4209 || h->forced_local
4210 || info->symbolic)
4211 {
4212 /* This references the local definition. We must
4213 initialize this entry in the global offset table.
4214 Since the offset must always be a multiple of 8,
4215 we use the least significant bit to record
4216 whether we have initialized it already.
4217
4218 When doing a dynamic link, we create a .rela.got
4219 relocation entry to initialize the value. This
4220 is done in the finish_dynamic_symbol routine. */
4221 if ((off & 1) != 0)
4222 off &= ~1;
4223 else
4224 {
4225 bfd_put_NN (output_bfd, value,
4226 base_got->contents + off);
4227 /* Note that this is harmless as -1 | 1 still is -1. */
4228 h->got.offset |= 1;
4229 }
4230 }
4231 value = (base_got->output_section->vma
4232 + base_got->output_offset + off);
4233 }
4234 else
4235 value = aarch64_calculate_got_entry_vma (h, globals, info,
4236 value, output_bfd,
4237 unresolved_reloc_p);
4238 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
4239 0, weak_undef_p);
4240 return _bfd_aarch64_elf_put_addend (input_bfd, hit_data, bfd_r_type, howto, value);
4241 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
4242 case BFD_RELOC_AARCH64_ADD_LO12:
4243 break;
4244 }
4245 }
4246
4247 switch (bfd_r_type)
4248 {
4249 case BFD_RELOC_AARCH64_NONE:
4250 case BFD_RELOC_AARCH64_TLSDESC_CALL:
4251 *unresolved_reloc_p = FALSE;
4252 return bfd_reloc_ok;
4253
4254 case BFD_RELOC_AARCH64_NN:
4255
4256 /* When generating a shared object or relocatable executable, these
4257 relocations are copied into the output file to be resolved at
4258 run time. */
4259 if (((info->shared == TRUE) || globals->root.is_relocatable_executable)
4260 && (input_section->flags & SEC_ALLOC)
4261 && (h == NULL
4262 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
4263 || h->root.type != bfd_link_hash_undefweak))
4264 {
4265 Elf_Internal_Rela outrel;
4266 bfd_byte *loc;
4267 bfd_boolean skip, relocate;
4268 asection *sreloc;
4269
4270 *unresolved_reloc_p = FALSE;
4271
4272 skip = FALSE;
4273 relocate = FALSE;
4274
4275 outrel.r_addend = signed_addend;
4276 outrel.r_offset =
4277 _bfd_elf_section_offset (output_bfd, info, input_section,
4278 rel->r_offset);
4279 if (outrel.r_offset == (bfd_vma) - 1)
4280 skip = TRUE;
4281 else if (outrel.r_offset == (bfd_vma) - 2)
4282 {
4283 skip = TRUE;
4284 relocate = TRUE;
4285 }
4286
4287 outrel.r_offset += (input_section->output_section->vma
4288 + input_section->output_offset);
4289
4290 if (skip)
4291 memset (&outrel, 0, sizeof outrel);
4292 else if (h != NULL
4293 && h->dynindx != -1
4294 && (!info->shared || !SYMBOLIC_BIND (info, h) || !h->def_regular))
4295 outrel.r_info = ELFNN_R_INFO (h->dynindx, r_type);
4296 else
4297 {
4298 int symbol;
4299
4300 /* On SVR4-ish systems, the dynamic loader cannot
4301 relocate the text and data segments independently,
4302 so the symbol does not matter. */
4303 symbol = 0;
4304 outrel.r_info = ELFNN_R_INFO (symbol, AARCH64_R (RELATIVE));
4305 outrel.r_addend += value;
4306 }
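/* For illustration: an absolute pointer in .data to a locally-bound
   symbol becomes an AARCH64_R (RELATIVE) reloc whose addend carries
   the final value, while a reference to a preemptible symbol is
   emitted against its dynamic symbol index so the loader resolves it
   at run time.  */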
4307
4308 sreloc = elf_section_data (input_section)->sreloc;
4309 if (sreloc == NULL || sreloc->contents == NULL)
4310 return bfd_reloc_notsupported;
4311
4312 loc = sreloc->contents + sreloc->reloc_count++ * RELOC_SIZE (globals);
4313 bfd_elfNN_swap_reloca_out (output_bfd, &outrel, loc);
4314
4315 if (sreloc->reloc_count * RELOC_SIZE (globals) > sreloc->size)
4316 {
4317 /* Sanity check that we have previously allocated
4318 sufficient space in the relocation section for the
4319 number of relocations we actually want to emit. */
4320 abort ();
4321 }
4322
4323 /* If this reloc is against an external symbol, we do not want to
4324 fiddle with the addend. Otherwise, we need to include the symbol
4325 value so that it becomes an addend for the dynamic reloc. */
4326 if (!relocate)
4327 return bfd_reloc_ok;
4328
4329 return _bfd_final_link_relocate (howto, input_bfd, input_section,
4330 contents, rel->r_offset, value,
4331 signed_addend);
4332 }
4333 else
4334 value += signed_addend;
4335 break;
4336
4337 case BFD_RELOC_AARCH64_JUMP26:
4338 case BFD_RELOC_AARCH64_CALL26:
4339 {
4340 asection *splt = globals->root.splt;
4341 bfd_boolean via_plt_p =
4342 splt != NULL && h != NULL && h->plt.offset != (bfd_vma) - 1;
4343
4344 /* A call to an undefined weak symbol is converted to a jump to
4345 the next instruction unless a PLT entry will be created.
4346 The jump to the next instruction is optimized as a NOP.
4347 Do the same for local undefined symbols. */
4348 if (weak_undef_p && ! via_plt_p)
4349 {
4350 bfd_putl32 (INSN_NOP, hit_data);
4351 return bfd_reloc_ok;
4352 }
4353
4354 /* If the call goes through a PLT entry, make sure to
4355 check distance to the right destination address. */
4356 if (via_plt_p)
4357 {
4358 value = (splt->output_section->vma
4359 + splt->output_offset + h->plt.offset);
4360 *unresolved_reloc_p = FALSE;
4361 }
4362
4363 /* If the target symbol is global and marked as a function, the
4364 relocation applies to a function call or a tail call. In this
4365 situation we can veneer out-of-range branches. The veneers
4366 use IP0 and IP1, hence they cannot be used for arbitrary
4367 out-of-range branches that occur within the body of a function. */
4368 if (h && h->type == STT_FUNC)
4369 {
4370 /* Check if a stub has to be inserted because the destination
4371 is too far away. */
4372 if (! aarch64_valid_branch_p (value, place))
4373 {
4374 /* The target is out of reach, so redirect the branch to
4375 the local stub for this function. */
4376 struct elf_aarch64_stub_hash_entry *stub_entry;
4377 stub_entry = elfNN_aarch64_get_stub_entry (input_section,
4378 sym_sec, h,
4379 rel, globals);
4380 if (stub_entry != NULL)
4381 value = (stub_entry->stub_offset
4382 + stub_entry->stub_sec->output_offset
4383 + stub_entry->stub_sec->output_section->vma);
4384 }
4385 }
4386 }
4387 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
4388 signed_addend, weak_undef_p);
4389 break;
4390
4391 case BFD_RELOC_AARCH64_16:
4392 #if ARCH_SIZE == 64
4393 case BFD_RELOC_AARCH64_32:
4394 #endif
4395 case BFD_RELOC_AARCH64_ADD_LO12:
4396 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
4397 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
4398 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
4399 case BFD_RELOC_AARCH64_BRANCH19:
4400 case BFD_RELOC_AARCH64_LD_LO19_PCREL:
4401 case BFD_RELOC_AARCH64_LDST8_LO12:
4402 case BFD_RELOC_AARCH64_LDST16_LO12:
4403 case BFD_RELOC_AARCH64_LDST32_LO12:
4404 case BFD_RELOC_AARCH64_LDST64_LO12:
4405 case BFD_RELOC_AARCH64_LDST128_LO12:
4406 case BFD_RELOC_AARCH64_MOVW_G0_S:
4407 case BFD_RELOC_AARCH64_MOVW_G1_S:
4408 case BFD_RELOC_AARCH64_MOVW_G2_S:
4409 case BFD_RELOC_AARCH64_MOVW_G0:
4410 case BFD_RELOC_AARCH64_MOVW_G0_NC:
4411 case BFD_RELOC_AARCH64_MOVW_G1:
4412 case BFD_RELOC_AARCH64_MOVW_G1_NC:
4413 case BFD_RELOC_AARCH64_MOVW_G2:
4414 case BFD_RELOC_AARCH64_MOVW_G2_NC:
4415 case BFD_RELOC_AARCH64_MOVW_G3:
4416 case BFD_RELOC_AARCH64_16_PCREL:
4417 case BFD_RELOC_AARCH64_32_PCREL:
4418 case BFD_RELOC_AARCH64_64_PCREL:
4419 case BFD_RELOC_AARCH64_TSTBR14:
4420 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
4421 signed_addend, weak_undef_p);
4422 break;
4423
4424 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
4425 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
4426 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
4427 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
4428 if (globals->root.sgot == NULL)
4429 BFD_ASSERT (h != NULL);
4430
4431 if (h != NULL)
4432 {
4433 value = aarch64_calculate_got_entry_vma (h, globals, info, value,
4434 output_bfd,
4435 unresolved_reloc_p);
4436 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
4437 0, weak_undef_p);
4438 }
4439 break;
4440
4441 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
4442 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
4443 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
4444 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
4445 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
4446 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
4447 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
4448 if (globals->root.sgot == NULL)
4449 return bfd_reloc_notsupported;
4450
4451 value = (symbol_got_offset (input_bfd, h, r_symndx)
4452 + globals->root.sgot->output_section->vma
4453 + globals->root.sgot->output_offset);
4454
4455 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
4456 0, weak_undef_p);
4457 *unresolved_reloc_p = FALSE;
4458 break;
4459
4460 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
4461 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
4462 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
4463 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
4464 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
4465 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
4466 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
4467 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
4468 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
4469 signed_addend - tpoff_base (info),
4470 weak_undef_p);
4471 *unresolved_reloc_p = FALSE;
4472 break;
4473
4474 case BFD_RELOC_AARCH64_TLSDESC_ADD:
4475 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
4476 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
4477 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
4478 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
4479 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
4480 case BFD_RELOC_AARCH64_TLSDESC_LDR:
4481 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
4482 if (globals->root.sgot == NULL)
4483 return bfd_reloc_notsupported;
4484 value = (symbol_tlsdesc_got_offset (input_bfd, h, r_symndx)
4485 + globals->root.sgotplt->output_section->vma
4486 + globals->root.sgotplt->output_offset
4487 + globals->sgotplt_jump_table_size);
4488
4489 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
4490 0, weak_undef_p);
4491 *unresolved_reloc_p = FALSE;
4492 break;
4493
4494 default:
4495 return bfd_reloc_notsupported;
4496 }
4497
4498 if (saved_addend)
4499 *saved_addend = value;
4500
4501 /* Only apply the final relocation in a sequence. */
4502 if (save_addend)
4503 return bfd_reloc_continue;
4504
4505 return _bfd_aarch64_elf_put_addend (input_bfd, hit_data, bfd_r_type,
4506 howto, value);
4507 }
4508
4509 /* Handle TLS relaxations. Relaxing is possible for symbols that use
4510 R_AARCH64_TLSDESC_{ADR_PAGE21, LD64_LO12_NC, ADD_LO12_NC} during a static
4511 link.
4512
4513 Return bfd_reloc_ok if we're done, bfd_reloc_continue if the caller
4514 is to then call final_link_relocate. Return other values in the
4515 case of error. */
4516
4517 static bfd_reloc_status_type
4518 elfNN_aarch64_tls_relax (struct elf_aarch64_link_hash_table *globals,
4519 bfd *input_bfd, bfd_byte *contents,
4520 Elf_Internal_Rela *rel, struct elf_link_hash_entry *h)
4521 {
4522 bfd_boolean is_local = h == NULL;
4523 unsigned int r_type = ELFNN_R_TYPE (rel->r_info);
4524 unsigned long insn;
4525
4526 BFD_ASSERT (globals && input_bfd && contents && rel);
4527
4528 switch (elfNN_aarch64_bfd_reloc_from_type (r_type))
4529 {
4530 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
4531 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
4532 if (is_local)
4533 {
4534 /* GD->LE relaxation:
4535 adrp x0, :tlsgd:var => movz x0, :tprel_g1:var
4536 or
4537 adrp x0, :tlsdesc:var => movz x0, :tprel_g1:var
4538 */
4539 bfd_putl32 (0xd2a00000, contents + rel->r_offset);
4540 return bfd_reloc_continue;
4541 }
4542 else
4543 {
4544 /* GD->IE relaxation:
4545 adrp x0, :tlsgd:var => adrp x0, :gottprel:var
4546 or
4547 adrp x0, :tlsdesc:var => adrp x0, :gottprel:var
4548 */
4549 return bfd_reloc_continue;
4550 }
4551
4552 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
4553 BFD_ASSERT (0);
4554 break;
4555
4556 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
4557 if (is_local)
4558 {
4559 /* Tiny TLSDESC->LE relaxation:
4560 ldr x1, :tlsdesc:var => movz x0, #:tprel_g1:var
4561 adr x0, :tlsdesc:var => movk x0, #:tprel_g0_nc:var
4562 .tlsdesccall var
4563 blr x1 => nop
4564 */
4565 BFD_ASSERT (ELFNN_R_TYPE (rel[1].r_info) == AARCH64_R (TLSDESC_ADR_PREL21));
4566 BFD_ASSERT (ELFNN_R_TYPE (rel[2].r_info) == AARCH64_R (TLSDESC_CALL));
4567
4568 rel[1].r_info = ELFNN_R_INFO (ELFNN_R_SYM (rel->r_info),
4569 AARCH64_R (TLSLE_MOVW_TPREL_G0_NC));
4570 rel[2].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
4571
4572 bfd_putl32 (0xd2a00000, contents + rel->r_offset);
4573 bfd_putl32 (0xf2800000, contents + rel->r_offset + 4);
4574 bfd_putl32 (INSN_NOP, contents + rel->r_offset + 8);
4575 return bfd_reloc_continue;
4576 }
4577 else
4578 {
4579 /* Tiny TLSDESC->IE relaxation:
4580 ldr x1, :tlsdesc:var => ldr x0, :gottprel:var
4581 adr x0, :tlsdesc:var => nop
4582 .tlsdesccall var
4583 blr x1 => nop
4584 */
4585 BFD_ASSERT (ELFNN_R_TYPE (rel[1].r_info) == AARCH64_R (TLSDESC_ADR_PREL21));
4586 BFD_ASSERT (ELFNN_R_TYPE (rel[2].r_info) == AARCH64_R (TLSDESC_CALL));
4587
4588 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
4589 rel[2].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
4590
4591 bfd_putl32 (0x58000000, contents + rel->r_offset);
4592 bfd_putl32 (INSN_NOP, contents + rel->r_offset + 4);
4593 bfd_putl32 (INSN_NOP, contents + rel->r_offset + 8);
4594 return bfd_reloc_continue;
4595 }
4596
4597 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
4598 if (is_local)
4599 {
4600 /* Tiny GD->LE relaxation:
4601 adr x0, :tlsgd:var => mrs x1, tpidr_el0
4602 bl __tls_get_addr => add x0, x1, #:tprel_hi12:x, lsl #12
4603 nop => add x0, x0, #:tprel_lo12_nc:x
4604 */
4605
4606 /* First kill the tls_get_addr reloc on the bl instruction. */
4607 BFD_ASSERT (rel->r_offset + 4 == rel[1].r_offset);
4608
4609 bfd_putl32 (0xd53bd041, contents + rel->r_offset + 0);
4610 bfd_putl32 (0x91400020, contents + rel->r_offset + 4);
4611 bfd_putl32 (0x91000000, contents + rel->r_offset + 8);
4612
4613 rel[1].r_info = ELFNN_R_INFO (ELFNN_R_SYM (rel->r_info),
4614 AARCH64_R (TLSLE_ADD_TPREL_LO12_NC));
4615 rel[1].r_offset = rel->r_offset + 8;
4616
4617 /* Move the current relocation to the second instruction in
4618 the sequence. */
4619 rel->r_offset += 4;
4620 rel->r_info = ELFNN_R_INFO (ELFNN_R_SYM (rel->r_info),
4621 AARCH64_R (TLSLE_ADD_TPREL_HI12));
4622 return bfd_reloc_continue;
4623 }
4624 else
4625 {
4626 /* Tiny GD->IE relaxation:
4627 adr x0, :tlsgd:var => ldr x0, :gottprel:var
4628 bl __tls_get_addr => mrs x1, tpidr_el0
4629 nop => add x0, x0, x1
4630 */
4631
4632 /* First kill the tls_get_addr reloc on the bl instruction. */
4633 BFD_ASSERT (rel->r_offset + 4 == rel[1].r_offset);
4634 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
4635
4636 bfd_putl32 (0x58000000, contents + rel->r_offset);
4637 bfd_putl32 (0xd53bd041, contents + rel->r_offset + 4);
4638 bfd_putl32 (0x8b000020, contents + rel->r_offset + 8);
4639 return bfd_reloc_continue;
4640 }
4641
4642 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
4643 return bfd_reloc_continue;
4644
4645 case BFD_RELOC_AARCH64_TLSDESC_LDNN_LO12_NC:
4646 if (is_local)
4647 {
4648 /* GD->LE relaxation:
4649 ldr xd, [x0, #:tlsdesc_lo12:var] => movk x0, :tprel_g0_nc:var
4650 */
4651 bfd_putl32 (0xf2800000, contents + rel->r_offset);
4652 return bfd_reloc_continue;
4653 }
4654 else
4655 {
4656 /* GD->IE relaxation:
4657 ldr xd, [x0, #:tlsdesc_lo12:var] => ldr x0, [x0, #:gottprel_lo12:var]
4658 */
4659 insn = bfd_getl32 (contents + rel->r_offset);
4660 insn &= 0xffffffe0;
4661 bfd_putl32 (insn, contents + rel->r_offset);
4662 return bfd_reloc_continue;
4663 }
4664
4665 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
4666 if (is_local)
4667 {
4668 /* GD->LE relaxation
4669 add x0, #:tlsgd_lo12:var => movk x0, :tprel_g0_nc:var
4670 bl __tls_get_addr => mrs x1, tpidr_el0
4671 nop => add x0, x1, x0
4672 */
4673
4674 /* First kill the tls_get_addr reloc on the bl instruction. */
4675 BFD_ASSERT (rel->r_offset + 4 == rel[1].r_offset);
4676 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
4677
4678 bfd_putl32 (0xf2800000, contents + rel->r_offset);
4679 bfd_putl32 (0xd53bd041, contents + rel->r_offset + 4);
4680 bfd_putl32 (0x8b000020, contents + rel->r_offset + 8);
4681 return bfd_reloc_continue;
4682 }
4683 else
4684 {
4685 /* GD->IE relaxation
4686 ADD x0, #:tlsgd_lo12:var => ldr x0, [x0, #:gottprel_lo12:var]
4687 BL __tls_get_addr => mrs x1, tpidr_el0
4688 R_AARCH64_CALL26
4689 NOP => add x0, x1, x0
4690 */
4691
4692 BFD_ASSERT (ELFNN_R_TYPE (rel[1].r_info) == AARCH64_R (CALL26));
4693
4694 /* Remove the relocation on the BL instruction. */
4695 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
4696
4697 bfd_putl32 (0xf9400000, contents + rel->r_offset);
4698
4699 /* We choose to fix up the BL and NOP instructions using the
4700 offset from the second relocation to allow flexibility in
4701 scheduling instructions between the ADD and BL. */
4702 bfd_putl32 (0xd53bd041, contents + rel[1].r_offset);
4703 bfd_putl32 (0x8b000020, contents + rel[1].r_offset + 4);
4704 return bfd_reloc_continue;
4705 }
4706
4707 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
4708 case BFD_RELOC_AARCH64_TLSDESC_CALL:
4709 /* GD->IE/LE relaxation:
4710 add x0, x0, #:tlsdesc_lo12:var => nop
4711 blr xd => nop
4712 */
4713 bfd_putl32 (INSN_NOP, contents + rel->r_offset);
4714 return bfd_reloc_ok;
4715
4716 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
4717 /* IE->LE relaxation:
4718 adrp xd, :gottprel:var => movz xd, :tprel_g1:var
4719 */
4720 if (is_local)
4721 {
4722 insn = bfd_getl32 (contents + rel->r_offset);
4723 bfd_putl32 (0xd2a00000 | (insn & 0x1f), contents + rel->r_offset);
4724 }
4725 return bfd_reloc_continue;
4726
4727 case BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC:
4728 /* IE->LE relaxation:
4729 ldr xd, [xm, #:gottprel_lo12:var] => movk xd, :tprel_g0_nc:var
4730 */
4731 if (is_local)
4732 {
4733 insn = bfd_getl32 (contents + rel->r_offset);
4734 bfd_putl32 (0xf2800000 | (insn & 0x1f), contents + rel->r_offset);
4735 }
4736 return bfd_reloc_continue;
4737
4738 default:
4739 return bfd_reloc_continue;
4740 }
4741
4742 return bfd_reloc_ok;
4743 }
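/* A minimal sketch, illustrative only, of how a relaxed
   MOVZ :tprel_g1: instruction written above ends up carrying its
   immediate once the retyped relocation is applied: bits 31:16 of the
   thread-pointer offset land in the imm16 field (bits 20:5).  The
   helper name is hypothetical.  */

static ATTRIBUTE_UNUSED unsigned int
example_movz_tprel_g1 (bfd_vma tprel)
{
  /* 0xd2a00000 is MOVZ Xd, #imm16, LSL #16 with Rd == x0, as written
     by the GD->LE relaxations above.  */
  return 0xd2a00000 | (((tprel >> 16) & 0xffff) << 5);
}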
4744
4745 /* Relocate an AArch64 ELF section. */
4746
4747 static bfd_boolean
4748 elfNN_aarch64_relocate_section (bfd *output_bfd,
4749 struct bfd_link_info *info,
4750 bfd *input_bfd,
4751 asection *input_section,
4752 bfd_byte *contents,
4753 Elf_Internal_Rela *relocs,
4754 Elf_Internal_Sym *local_syms,
4755 asection **local_sections)
4756 {
4757 Elf_Internal_Shdr *symtab_hdr;
4758 struct elf_link_hash_entry **sym_hashes;
4759 Elf_Internal_Rela *rel;
4760 Elf_Internal_Rela *relend;
4761 const char *name;
4762 struct elf_aarch64_link_hash_table *globals;
4763 bfd_boolean save_addend = FALSE;
4764 bfd_vma addend = 0;
4765
4766 globals = elf_aarch64_hash_table (info);
4767
4768 symtab_hdr = &elf_symtab_hdr (input_bfd);
4769 sym_hashes = elf_sym_hashes (input_bfd);
4770
4771 rel = relocs;
4772 relend = relocs + input_section->reloc_count;
4773 for (; rel < relend; rel++)
4774 {
4775 unsigned int r_type;
4776 bfd_reloc_code_real_type bfd_r_type;
4777 bfd_reloc_code_real_type relaxed_bfd_r_type;
4778 reloc_howto_type *howto;
4779 unsigned long r_symndx;
4780 Elf_Internal_Sym *sym;
4781 asection *sec;
4782 struct elf_link_hash_entry *h;
4783 bfd_vma relocation;
4784 bfd_reloc_status_type r;
4785 arelent bfd_reloc;
4786 char sym_type;
4787 bfd_boolean unresolved_reloc = FALSE;
4788 char *error_message = NULL;
4789
4790 r_symndx = ELFNN_R_SYM (rel->r_info);
4791 r_type = ELFNN_R_TYPE (rel->r_info);
4792
4793 bfd_reloc.howto = elfNN_aarch64_howto_from_type (r_type);
4794 howto = bfd_reloc.howto;
4795
4796 if (howto == NULL)
4797 {
4798 (*_bfd_error_handler)
4799 (_("%B: unrecognized relocation (0x%x) in section `%A'"),
4800 input_bfd, input_section, r_type);
4801 return FALSE;
4802 }
4803 bfd_r_type = elfNN_aarch64_bfd_reloc_from_howto (howto);
4804
4805 h = NULL;
4806 sym = NULL;
4807 sec = NULL;
4808
4809 if (r_symndx < symtab_hdr->sh_info)
4810 {
4811 sym = local_syms + r_symndx;
4812 sym_type = ELFNN_ST_TYPE (sym->st_info);
4813 sec = local_sections[r_symndx];
4814
4815 /* An object file might have a reference to a local
4816 undefined symbol. This is a daft object file, but we
4817 should at least do something about it. */
4818 if (r_type != R_AARCH64_NONE && r_type != R_AARCH64_NULL
4819 && bfd_is_und_section (sec)
4820 && ELF_ST_BIND (sym->st_info) != STB_WEAK)
4821 {
4822 if (!info->callbacks->undefined_symbol
4823 (info, bfd_elf_string_from_elf_section
4824 (input_bfd, symtab_hdr->sh_link, sym->st_name),
4825 input_bfd, input_section, rel->r_offset, TRUE))
4826 return FALSE;
4827 }
4828
4829 relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
4830
4831 /* Relocate against local STT_GNU_IFUNC symbol. */
4832 if (!info->relocatable
4833 && ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC)
4834 {
4835 h = elfNN_aarch64_get_local_sym_hash (globals, input_bfd,
4836 rel, FALSE);
4837 if (h == NULL)
4838 abort ();
4839
4840 /* Set STT_GNU_IFUNC symbol value. */
4841 h->root.u.def.value = sym->st_value;
4842 h->root.u.def.section = sec;
4843 }
4844 }
4845 else
4846 {
4847 bfd_boolean warned, ignored;
4848
4849 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
4850 r_symndx, symtab_hdr, sym_hashes,
4851 h, sec, relocation,
4852 unresolved_reloc, warned, ignored);
4853
4854 sym_type = h->type;
4855 }
4856
4857 if (sec != NULL && discarded_section (sec))
4858 RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
4859 rel, 1, relend, howto, 0, contents);
4860
4861 if (info->relocatable)
4862 continue;
4863
4864 if (h != NULL)
4865 name = h->root.root.string;
4866 else
4867 {
4868 name = (bfd_elf_string_from_elf_section
4869 (input_bfd, symtab_hdr->sh_link, sym->st_name));
4870 if (name == NULL || *name == '\0')
4871 name = bfd_section_name (input_bfd, sec);
4872 }
4873
4874 if (r_symndx != 0
4875 && r_type != R_AARCH64_NONE
4876 && r_type != R_AARCH64_NULL
4877 && (h == NULL
4878 || h->root.type == bfd_link_hash_defined
4879 || h->root.type == bfd_link_hash_defweak)
4880 && IS_AARCH64_TLS_RELOC (bfd_r_type) != (sym_type == STT_TLS))
4881 {
4882 (*_bfd_error_handler)
4883 ((sym_type == STT_TLS
4884 ? _("%B(%A+0x%lx): %s used with TLS symbol %s")
4885 : _("%B(%A+0x%lx): %s used with non-TLS symbol %s")),
4886 input_bfd,
4887 input_section, (long) rel->r_offset, howto->name, name);
4888 }
4889
4890 /* We relax only if we can see that there can be a valid transition
4891 from one reloc type to another.
4892 We call elfNN_aarch64_final_link_relocate unless we're completely
4893 done, i.e., the relaxation produced the final output we want. */
4894
4895 relaxed_bfd_r_type = aarch64_tls_transition (input_bfd, info, r_type,
4896 h, r_symndx);
4897 if (relaxed_bfd_r_type != bfd_r_type)
4898 {
4899 bfd_r_type = relaxed_bfd_r_type;
4900 howto = elfNN_aarch64_howto_from_bfd_reloc (bfd_r_type);
4901 BFD_ASSERT (howto != NULL);
4902 r_type = howto->type;
4903 r = elfNN_aarch64_tls_relax (globals, input_bfd, contents, rel, h);
4904 unresolved_reloc = 0;
4905 }
4906 else
4907 r = bfd_reloc_continue;
4908
4909 /* There may be multiple consecutive relocations for the
4910 same offset. In that case we are supposed to treat the
4911 output of each relocation as the addend for the next. */
4912 if (rel + 1 < relend
4913 && rel->r_offset == rel[1].r_offset
4914 && ELFNN_R_TYPE (rel[1].r_info) != R_AARCH64_NONE
4915 && ELFNN_R_TYPE (rel[1].r_info) != R_AARCH64_NULL)
4916 save_addend = TRUE;
4917 else
4918 save_addend = FALSE;
4919
4920 if (r == bfd_reloc_continue)
4921 r = elfNN_aarch64_final_link_relocate (howto, input_bfd, output_bfd,
4922 input_section, contents, rel,
4923 relocation, info, sec,
4924 h, &unresolved_reloc,
4925 save_addend, &addend, sym);
4926
4927 switch (elfNN_aarch64_bfd_reloc_from_type (r_type))
4928 {
4929 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
4930 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
4931 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
4932 if (! symbol_got_offset_mark_p (input_bfd, h, r_symndx))
4933 {
4934 bfd_boolean need_relocs = FALSE;
4935 bfd_byte *loc;
4936 int indx;
4937 bfd_vma off;
4938
4939 off = symbol_got_offset (input_bfd, h, r_symndx);
4940 indx = h && h->dynindx != -1 ? h->dynindx : 0;
4941
4942 need_relocs =
4943 (info->shared || indx != 0) &&
4944 (h == NULL
4945 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
4946 || h->root.type != bfd_link_hash_undefweak);
4947
4948 BFD_ASSERT (globals->root.srelgot != NULL);
4949
4950 if (need_relocs)
4951 {
4952 Elf_Internal_Rela rela;
4953 rela.r_info = ELFNN_R_INFO (indx, AARCH64_R (TLS_DTPMOD));
4954 rela.r_addend = 0;
4955 rela.r_offset = globals->root.sgot->output_section->vma +
4956 globals->root.sgot->output_offset + off;
4957
4958
4959 loc = globals->root.srelgot->contents;
4960 loc += globals->root.srelgot->reloc_count++
4961 * RELOC_SIZE (htab);
4962 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
4963
4964 if (indx == 0)
4965 {
4966 bfd_put_NN (output_bfd,
4967 relocation - dtpoff_base (info),
4968 globals->root.sgot->contents + off
4969 + GOT_ENTRY_SIZE);
4970 }
4971 else
4972 {
4973 /* This TLS symbol is global. We emit a
4974 relocation to fix up the TLS offset at load
4975 time. */
4976 rela.r_info =
4977 ELFNN_R_INFO (indx, AARCH64_R (TLS_DTPREL));
4978 rela.r_addend = 0;
4979 rela.r_offset =
4980 (globals->root.sgot->output_section->vma
4981 + globals->root.sgot->output_offset + off
4982 + GOT_ENTRY_SIZE);
4983
4984 loc = globals->root.srelgot->contents;
4985 loc += globals->root.srelgot->reloc_count++
4986 * RELOC_SIZE (globals);
4987 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
4988 bfd_put_NN (output_bfd, (bfd_vma) 0,
4989 globals->root.sgot->contents + off
4990 + GOT_ENTRY_SIZE);
4991 }
4992 }
4993 else
4994 {
4995 bfd_put_NN (output_bfd, (bfd_vma) 1,
4996 globals->root.sgot->contents + off);
4997 bfd_put_NN (output_bfd,
4998 relocation - dtpoff_base (info),
4999 globals->root.sgot->contents + off
5000 + GOT_ENTRY_SIZE);
5001 }
5002
5003 symbol_got_offset_mark (input_bfd, h, r_symndx);
5004 }
5005 break;
5006
5007 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
5008 case BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC:
5009 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
5010 if (! symbol_got_offset_mark_p (input_bfd, h, r_symndx))
5011 {
5012 bfd_boolean need_relocs = FALSE;
5013 bfd_byte *loc;
5014 int indx;
5015 bfd_vma off;
5016
5017 off = symbol_got_offset (input_bfd, h, r_symndx);
5018
5019 indx = h && h->dynindx != -1 ? h->dynindx : 0;
5020
5021 need_relocs =
5022 (info->shared || indx != 0) &&
5023 (h == NULL
5024 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
5025 || h->root.type != bfd_link_hash_undefweak);
5026
5027 BFD_ASSERT (globals->root.srelgot != NULL);
5028
5029 if (need_relocs)
5030 {
5031 Elf_Internal_Rela rela;
5032
5033 if (indx == 0)
5034 rela.r_addend = relocation - dtpoff_base (info);
5035 else
5036 rela.r_addend = 0;
5037
5038 rela.r_info = ELFNN_R_INFO (indx, AARCH64_R (TLS_TPREL));
5039 rela.r_offset = globals->root.sgot->output_section->vma +
5040 globals->root.sgot->output_offset + off;
5041
5042 loc = globals->root.srelgot->contents;
5043 loc += globals->root.srelgot->reloc_count++
5044 * RELOC_SIZE (htab);
5045
5046 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
5047
5048 bfd_put_NN (output_bfd, rela.r_addend,
5049 globals->root.sgot->contents + off);
5050 }
5051 else
5052 bfd_put_NN (output_bfd, relocation - tpoff_base (info),
5053 globals->root.sgot->contents + off);
5054
5055 symbol_got_offset_mark (input_bfd, h, r_symndx);
5056 }
5057 break;
5058
5059 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
5060 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
5061 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
5062 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
5063 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
5064 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
5065 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
5066 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
5067 break;
5068
5069 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
5070 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
5071 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
5072 case BFD_RELOC_AARCH64_TLSDESC_LDNN_LO12_NC:
5073 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
5074 if (! symbol_tlsdesc_got_offset_mark_p (input_bfd, h, r_symndx))
5075 {
5076 bfd_boolean need_relocs = FALSE;
5077 int indx = h && h->dynindx != -1 ? h->dynindx : 0;
5078 bfd_vma off = symbol_tlsdesc_got_offset (input_bfd, h, r_symndx);
5079
5080 need_relocs = (h == NULL
5081 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
5082 || h->root.type != bfd_link_hash_undefweak);
5083
5084 BFD_ASSERT (globals->root.srelgot != NULL);
5085 BFD_ASSERT (globals->root.sgot != NULL);
5086
5087 if (need_relocs)
5088 {
5089 bfd_byte *loc;
5090 Elf_Internal_Rela rela;
5091 rela.r_info = ELFNN_R_INFO (indx, AARCH64_R (TLSDESC));
5092
5093 rela.r_addend = 0;
5094 rela.r_offset = (globals->root.sgotplt->output_section->vma
5095 + globals->root.sgotplt->output_offset
5096 + off + globals->sgotplt_jump_table_size);
5097
5098 if (indx == 0)
5099 rela.r_addend = relocation - dtpoff_base (info);
5100
5101 /* Allocate the next available slot in the PLT reloc
5102 section to hold our R_AARCH64_TLSDESC; the next
5103 available slot is determined from reloc_count,
5104 which we step. Note that reloc_count was
5105 artificially moved down while allocating slots for
5106 real PLT relocs, so that all of the PLT relocs
5107 will fit above the initial reloc_count and the
5108 extra stuff will fit below. */
5109 loc = globals->root.srelplt->contents;
5110 loc += globals->root.srelplt->reloc_count++
5111 * RELOC_SIZE (globals);
5112
5113 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
5114
5115 bfd_put_NN (output_bfd, (bfd_vma) 0,
5116 globals->root.sgotplt->contents + off +
5117 globals->sgotplt_jump_table_size);
5118 bfd_put_NN (output_bfd, (bfd_vma) 0,
5119 globals->root.sgotplt->contents + off +
5120 globals->sgotplt_jump_table_size +
5121 GOT_ENTRY_SIZE);
5122 }
5123
5124 symbol_tlsdesc_got_offset_mark (input_bfd, h, r_symndx);
5125 }
5126 break;
5127 default:
5128 break;
5129 }
5130
5131 if (!save_addend)
5132 addend = 0;
5133
5134
5135 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
5136 because such sections are not SEC_ALLOC and thus ld.so will
5137 not process them. */
5138 if (unresolved_reloc
5139 && !((input_section->flags & SEC_DEBUGGING) != 0
5140 && h->def_dynamic)
5141 && _bfd_elf_section_offset (output_bfd, info, input_section,
5142 rel->r_offset) != (bfd_vma) - 1)
5143 {
5144 (*_bfd_error_handler)
5145 (_
5146 ("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"),
5147 input_bfd, input_section, (long) rel->r_offset, howto->name,
5148 h->root.root.string);
5149 return FALSE;
5150 }
5151
5152 if (r != bfd_reloc_ok && r != bfd_reloc_continue)
5153 {
5154 switch (r)
5155 {
5156 case bfd_reloc_overflow:
5157 /* If the overflowing reloc was to an undefined symbol,
5158 we have already printed one error message and there
5159 is no point complaining again. */
5160 if ((!h ||
5161 h->root.type != bfd_link_hash_undefined)
5162 && (!((*info->callbacks->reloc_overflow)
5163 (info, (h ? &h->root : NULL), name, howto->name,
5164 (bfd_vma) 0, input_bfd, input_section,
5165 rel->r_offset))))
5166 return FALSE;
5167 break;
5168
5169 case bfd_reloc_undefined:
5170 if (!((*info->callbacks->undefined_symbol)
5171 (info, name, input_bfd, input_section,
5172 rel->r_offset, TRUE)))
5173 return FALSE;
5174 break;
5175
5176 case bfd_reloc_outofrange:
5177 error_message = _("out of range");
5178 goto common_error;
5179
5180 case bfd_reloc_notsupported:
5181 error_message = _("unsupported relocation");
5182 goto common_error;
5183
5184 case bfd_reloc_dangerous:
5185 /* error_message should already be set. */
5186 goto common_error;
5187
5188 default:
5189 error_message = _("unknown error");
5190 /* Fall through. */
5191
5192 common_error:
5193 BFD_ASSERT (error_message != NULL);
5194 if (!((*info->callbacks->reloc_dangerous)
5195 (info, error_message, input_bfd, input_section,
5196 rel->r_offset)))
5197 return FALSE;
5198 break;
5199 }
5200 }
5201 }
5202
5203 return TRUE;
5204 }
5205
5206 /* Set the right machine number. */
5207
5208 static bfd_boolean
5209 elfNN_aarch64_object_p (bfd *abfd)
5210 {
5211 #if ARCH_SIZE == 32
5212 bfd_default_set_arch_mach (abfd, bfd_arch_aarch64, bfd_mach_aarch64_ilp32);
5213 #else
5214 bfd_default_set_arch_mach (abfd, bfd_arch_aarch64, bfd_mach_aarch64);
5215 #endif
5216 return TRUE;
5217 }
5218
5219 /* Function to keep AArch64 specific flags in the ELF header. */
5220
5221 static bfd_boolean
5222 elfNN_aarch64_set_private_flags (bfd *abfd, flagword flags)
5223 {
5224 if (elf_flags_init (abfd) && elf_elfheader (abfd)->e_flags != flags)
5225 {
5226 }
5227 else
5228 {
5229 elf_elfheader (abfd)->e_flags = flags;
5230 elf_flags_init (abfd) = TRUE;
5231 }
5232
5233 return TRUE;
5234 }
5235
5236 /* Merge backend specific data from an object file to the output
5237 object file when linking. */
5238
5239 static bfd_boolean
5240 elfNN_aarch64_merge_private_bfd_data (bfd *ibfd, bfd *obfd)
5241 {
5242 flagword out_flags;
5243 flagword in_flags;
5244 bfd_boolean flags_compatible = TRUE;
5245 asection *sec;
5246
5247 /* Check if we have the same endianness. */
5248 if (!_bfd_generic_verify_endian_match (ibfd, obfd))
5249 return FALSE;
5250
5251 if (!is_aarch64_elf (ibfd) || !is_aarch64_elf (obfd))
5252 return TRUE;
5253
5254 /* The input BFD must have had its flags initialised. */
5255 /* The following seems bogus to me -- The flags are initialized in
5256 the assembler but I don't think an elf_flags_init field is
5257 written into the object. */
5258 /* BFD_ASSERT (elf_flags_init (ibfd)); */
5259
5260 in_flags = elf_elfheader (ibfd)->e_flags;
5261 out_flags = elf_elfheader (obfd)->e_flags;
5262
5263 if (!elf_flags_init (obfd))
5264 {
5265 /* If the input is the default architecture and had the default
5266 flags then do not bother setting the flags for the output
5267 architecture, instead allow future merges to do this. If no
5268 future merges ever set these flags then they will retain their
5269 uninitialised values, which, surprise surprise, correspond
5270 to the default values. */
5271 if (bfd_get_arch_info (ibfd)->the_default
5272 && elf_elfheader (ibfd)->e_flags == 0)
5273 return TRUE;
5274
5275 elf_flags_init (obfd) = TRUE;
5276 elf_elfheader (obfd)->e_flags = in_flags;
5277
5278 if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
5279 && bfd_get_arch_info (obfd)->the_default)
5280 return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd),
5281 bfd_get_mach (ibfd));
5282
5283 return TRUE;
5284 }
5285
5286 /* Identical flags must be compatible. */
5287 if (in_flags == out_flags)
5288 return TRUE;
5289
5290 /* Check to see if the input BFD actually contains any sections. If
5291 not, its flags may not have been initialised either, but it
5292 cannot actually cause any incompatibility. Do not short-circuit
5293 dynamic objects; their section list may be emptied by
5294 elf_link_add_object_symbols.
5295
5296 Also check to see if there are no code sections in the input.
5297 In this case there is no need to check for code-specific flags.
5298 XXX - do we need to worry about floating-point format compatibility
5299 in data sections? */
5300 if (!(ibfd->flags & DYNAMIC))
5301 {
5302 bfd_boolean null_input_bfd = TRUE;
5303 bfd_boolean only_data_sections = TRUE;
5304
5305 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
5306 {
5307 if ((bfd_get_section_flags (ibfd, sec)
5308 & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
5309 == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
5310 only_data_sections = FALSE;
5311
5312 null_input_bfd = FALSE;
5313 break;
5314 }
5315
5316 if (null_input_bfd || only_data_sections)
5317 return TRUE;
5318 }
5319
5320 return flags_compatible;
5321 }
5322
5323 /* Display the flags field. */
5324
5325 static bfd_boolean
5326 elfNN_aarch64_print_private_bfd_data (bfd *abfd, void *ptr)
5327 {
5328 FILE *file = (FILE *) ptr;
5329 unsigned long flags;
5330
5331 BFD_ASSERT (abfd != NULL && ptr != NULL);
5332
5333 /* Print normal ELF private data. */
5334 _bfd_elf_print_private_bfd_data (abfd, ptr);
5335
5336 flags = elf_elfheader (abfd)->e_flags;
5337 /* Ignore init flag - it may not be set, despite the flags field
5338 containing valid data. */
5339
5340 /* xgettext:c-format */
5341 fprintf (file, _("private flags = %lx:"), elf_elfheader (abfd)->e_flags);
5342
5343 if (flags)
5344 fprintf (file, _("<Unrecognised flag bits set>"));
5345
5346 fputc ('\n', file);
5347
5348 return TRUE;
5349 }
5350
5351 /* Update the got entry reference counts for the section being removed. */
5352
5353 static bfd_boolean
5354 elfNN_aarch64_gc_sweep_hook (bfd *abfd,
5355 struct bfd_link_info *info,
5356 asection *sec,
5357 const Elf_Internal_Rela * relocs)
5358 {
5359 struct elf_aarch64_link_hash_table *htab;
5360 Elf_Internal_Shdr *symtab_hdr;
5361 struct elf_link_hash_entry **sym_hashes;
5362 struct elf_aarch64_local_symbol *locals;
5363 const Elf_Internal_Rela *rel, *relend;
5364
5365 if (info->relocatable)
5366 return TRUE;
5367
5368 htab = elf_aarch64_hash_table (info);
5369
5370 if (htab == NULL)
5371 return FALSE;
5372
5373 elf_section_data (sec)->local_dynrel = NULL;
5374
5375 symtab_hdr = &elf_symtab_hdr (abfd);
5376 sym_hashes = elf_sym_hashes (abfd);
5377
5378 locals = elf_aarch64_locals (abfd);
5379
5380 relend = relocs + sec->reloc_count;
5381 for (rel = relocs; rel < relend; rel++)
5382 {
5383 unsigned long r_symndx;
5384 unsigned int r_type;
5385 struct elf_link_hash_entry *h = NULL;
5386
5387 r_symndx = ELFNN_R_SYM (rel->r_info);
5388
5389 if (r_symndx >= symtab_hdr->sh_info)
5390 {
5391
5392 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
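/* Follow indirect and warning links to get at the real symbol. */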
5393 while (h->root.type == bfd_link_hash_indirect
5394 || h->root.type == bfd_link_hash_warning)
5395 h = (struct elf_link_hash_entry *) h->root.u.i.link;
5396 }
5397 else
5398 {
5399 Elf_Internal_Sym *isym;
5400
5401 /* A local symbol. */
5402 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
5403 abfd, r_symndx);
5404
5405 /* Check relocation against local STT_GNU_IFUNC symbol. */
5406 if (isym != NULL
5407 && ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
5408 {
5409 h = elfNN_aarch64_get_local_sym_hash (htab, abfd, rel, FALSE);
5410 if (h == NULL)
5411 abort ();
5412 }
5413 }
5414
5415 if (h)
5416 {
5417 struct elf_aarch64_link_hash_entry *eh;
5418 struct elf_dyn_relocs **pp;
5419 struct elf_dyn_relocs *p;
5420
5421 eh = (struct elf_aarch64_link_hash_entry *) h;
5422
5423 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; pp = &p->next)
5424 if (p->sec == sec)
5425 {
5426 /* Everything must go for SEC. */
5427 *pp = p->next;
5428 break;
5429 }
5430 }
5431
5432 r_type = ELFNN_R_TYPE (rel->r_info);
5433 switch (aarch64_tls_transition (abfd, info, r_type, h, r_symndx))
5434 {
5435 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
5436 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
5437 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
5438 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
5439 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
5440 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
5441 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
5442 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
5443 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
5444 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
5445 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
5446 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
5447 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
5448 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
5449 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
5450 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
5451 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
5452 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
5453 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
5454 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
5455 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
5456 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
5457 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
5458 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
5459 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
5460 if (h != NULL)
5461 {
5462 if (h->got.refcount > 0)
5463 h->got.refcount -= 1;
5464
5465 if (h->type == STT_GNU_IFUNC)
5466 {
5467 if (h->plt.refcount > 0)
5468 h->plt.refcount -= 1;
5469 }
5470 }
5471 else if (locals != NULL)
5472 {
5473 if (locals[r_symndx].got_refcount > 0)
5474 locals[r_symndx].got_refcount -= 1;
5475 }
5476 break;
5477
5478 case BFD_RELOC_AARCH64_CALL26:
5479 case BFD_RELOC_AARCH64_JUMP26:
5480 /* If this is a local symbol then we resolve it
5481 directly without creating a PLT entry. */
5482 if (h == NULL)
5483 continue;
5484
5485 if (h->plt.refcount > 0)
5486 h->plt.refcount -= 1;
5487 break;
5488
5489 case BFD_RELOC_AARCH64_MOVW_G0_NC:
5490 case BFD_RELOC_AARCH64_MOVW_G1_NC:
5491 case BFD_RELOC_AARCH64_MOVW_G2_NC:
5492 case BFD_RELOC_AARCH64_MOVW_G3:
5493 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
5494 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
5495 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
5496 case BFD_RELOC_AARCH64_NN:
5497 if (h != NULL && info->executable)
5498 {
5499 if (h->plt.refcount > 0)
5500 h->plt.refcount -= 1;
5501 }
5502 break;
5503
5504 default:
5505 break;
5506 }
5507 }
5508
5509 return TRUE;
5510 }
5511
5512 /* Adjust a symbol defined by a dynamic object and referenced by a
5513 regular object. The current definition is in some section of the
5514 dynamic object, but we're not including those sections. We have to
5515 change the definition to something the rest of the link can
5516 understand. */
5517
5518 static bfd_boolean
5519 elfNN_aarch64_adjust_dynamic_symbol (struct bfd_link_info *info,
5520 struct elf_link_hash_entry *h)
5521 {
5522 struct elf_aarch64_link_hash_table *htab;
5523 asection *s;
5524
5525 /* If this is a function, put it in the procedure linkage table. We
5526 will fill in the contents of the procedure linkage table later,
5527 when we know the address of the .got section. */
5528 if (h->type == STT_FUNC || h->type == STT_GNU_IFUNC || h->needs_plt)
5529 {
5530 if (h->plt.refcount <= 0
5531 || (h->type != STT_GNU_IFUNC
5532 && (SYMBOL_CALLS_LOCAL (info, h)
5533 || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
5534 && h->root.type == bfd_link_hash_undefweak))))
5535 {
5536 /* This case can occur if we saw a CALL26 reloc in
5537 an input file, but the symbol wasn't referred to
5538 by a dynamic object or all references were
5539 garbage collected. In which case we can end up
5540 resolving the calls directly, without a PLT entry. */
5541 h->plt.offset = (bfd_vma) - 1;
5542 h->needs_plt = 0;
5543 }
5544
5545 return TRUE;
5546 }
5547 else
5548 /* It's possible that we incorrectly decided a .plt reloc was
5549 needed for a pc-relative reloc to a non-function sym in
5550 check_relocs. We can't decide accurately between function and
5551 non-function syms in check_relocs; objects loaded later in
5552 the link may change h->type. So fix it now. */
5553 h->plt.offset = (bfd_vma) - 1;
5554
5555
5556 /* If this is a weak symbol, and there is a real definition, the
5557 processor independent code will have arranged for us to see the
5558 real definition first, and we can just use the same value. */
5559 if (h->u.weakdef != NULL)
5560 {
5561 BFD_ASSERT (h->u.weakdef->root.type == bfd_link_hash_defined
5562 || h->u.weakdef->root.type == bfd_link_hash_defweak);
5563 h->root.u.def.section = h->u.weakdef->root.u.def.section;
5564 h->root.u.def.value = h->u.weakdef->root.u.def.value;
5565 if (ELIMINATE_COPY_RELOCS || info->nocopyreloc)
5566 h->non_got_ref = h->u.weakdef->non_got_ref;
5567 return TRUE;
5568 }
5569
5570 /* If we are creating a shared library, we must presume that the
5571 only references to the symbol are via the global offset table.
5572 For such cases we need not do anything here; the relocations will
5573 be handled correctly by relocate_section. */
5574 if (info->shared)
5575 return TRUE;
5576
5577 /* If there are no references to this symbol that do not use the
5578 GOT, we don't need to generate a copy reloc. */
5579 if (!h->non_got_ref)
5580 return TRUE;
5581
5582 /* If -z nocopyreloc was given, we won't generate them either. */
5583 if (info->nocopyreloc)
5584 {
5585 h->non_got_ref = 0;
5586 return TRUE;
5587 }
5588
5589 /* We must allocate the symbol in our .dynbss section, which will
5590 become part of the .bss section of the executable. There will be
5591 an entry for this symbol in the .dynsym section. The dynamic
5592 object will contain position independent code, so all references
5593 from the dynamic object to this symbol will go through the global
5594 offset table. The dynamic linker will use the .dynsym entry to
5595 determine the address it must put in the global offset table, so
5596 both the dynamic object and the regular object will refer to the
5597 same memory location for the variable. */
5598
5599 htab = elf_aarch64_hash_table (info);
5600
5601 /* We must generate an R_AARCH64_COPY reloc to tell the dynamic linker
5602 to copy the initial value out of the dynamic object and into the
5603 runtime process image. */
5604 if ((h->root.u.def.section->flags & SEC_ALLOC) != 0 && h->size != 0)
5605 {
5606 htab->srelbss->size += RELOC_SIZE (htab);
5607 h->needs_copy = 1;
5608 }
5609
5610 s = htab->sdynbss;
5611
5612 return _bfd_elf_adjust_dynamic_copy (info, h, s);
5613
5614 }
5615
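/* Make sure ABFD has an array of NUMBER zero-initialised local symbol
   bookkeeping entries, allocating it on first use. */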
5616 static bfd_boolean
5617 elfNN_aarch64_allocate_local_symbols (bfd *abfd, unsigned number)
5618 {
5619 struct elf_aarch64_local_symbol *locals;
5620 locals = elf_aarch64_locals (abfd);
5621 if (locals == NULL)
5622 {
5623 locals = (struct elf_aarch64_local_symbol *)
5624 bfd_zalloc (abfd, number * sizeof (struct elf_aarch64_local_symbol));
5625 if (locals == NULL)
5626 return FALSE;
5627 elf_aarch64_locals (abfd) = locals;
5628 }
5629 return TRUE;
5630 }
5631
5632 /* Create the .got section to hold the global offset table. */
5633
5634 static bfd_boolean
5635 aarch64_elf_create_got_section (bfd *abfd, struct bfd_link_info *info)
5636 {
5637 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
5638 flagword flags;
5639 asection *s;
5640 struct elf_link_hash_entry *h;
5641 struct elf_link_hash_table *htab = elf_hash_table (info);
5642
5643 /* This function may be called more than once. */
5644 s = bfd_get_linker_section (abfd, ".got");
5645 if (s != NULL)
5646 return TRUE;
5647
5648 flags = bed->dynamic_sec_flags;
5649
5650 s = bfd_make_section_anyway_with_flags (abfd,
5651 (bed->rela_plts_and_copies_p
5652 ? ".rela.got" : ".rel.got"),
5653 (bed->dynamic_sec_flags
5654 | SEC_READONLY));
5655 if (s == NULL
5656 || ! bfd_set_section_alignment (abfd, s, bed->s->log_file_align))
5657 return FALSE;
5658 htab->srelgot = s;
5659
5660 s = bfd_make_section_anyway_with_flags (abfd, ".got", flags);
5661 if (s == NULL
5662 || !bfd_set_section_alignment (abfd, s, bed->s->log_file_align))
5663 return FALSE;
5664 htab->sgot = s;
5665 htab->sgot->size += GOT_ENTRY_SIZE;
5666
5667 if (bed->want_got_sym)
5668 {
5669 /* Define the symbol _GLOBAL_OFFSET_TABLE_ at the start of the .got
5670 (or .got.plt) section. We don't do this in the linker script
5671 because we don't want to define the symbol if we are not creating
5672 a global offset table. */
5673 h = _bfd_elf_define_linkage_sym (abfd, info, s,
5674 "_GLOBAL_OFFSET_TABLE_");
5675 elf_hash_table (info)->hgot = h;
5676 if (h == NULL)
5677 return FALSE;
5678 }
5679
5680 if (bed->want_got_plt)
5681 {
5682 s = bfd_make_section_anyway_with_flags (abfd, ".got.plt", flags);
5683 if (s == NULL
5684 || !bfd_set_section_alignment (abfd, s,
5685 bed->s->log_file_align))
5686 return FALSE;
5687 htab->sgotplt = s;
5688 }
5689
5690 /* The first bit of the global offset table is the header. */
5691 s->size += bed->got_header_size;
5692
5693 return TRUE;
5694 }
5695
5696 /* Look through the relocs for a section during the first phase. */
5697
5698 static bfd_boolean
5699 elfNN_aarch64_check_relocs (bfd *abfd, struct bfd_link_info *info,
5700 asection *sec, const Elf_Internal_Rela *relocs)
5701 {
5702 Elf_Internal_Shdr *symtab_hdr;
5703 struct elf_link_hash_entry **sym_hashes;
5704 const Elf_Internal_Rela *rel;
5705 const Elf_Internal_Rela *rel_end;
5706 asection *sreloc;
5707
5708 struct elf_aarch64_link_hash_table *htab;
5709
5710 if (info->relocatable)
5711 return TRUE;
5712
5713 BFD_ASSERT (is_aarch64_elf (abfd));
5714
5715 htab = elf_aarch64_hash_table (info);
5716 sreloc = NULL;
5717
5718 symtab_hdr = &elf_symtab_hdr (abfd);
5719 sym_hashes = elf_sym_hashes (abfd);
5720
5721 rel_end = relocs + sec->reloc_count;
5722 for (rel = relocs; rel < rel_end; rel++)
5723 {
5724 struct elf_link_hash_entry *h;
5725 unsigned long r_symndx;
5726 unsigned int r_type;
5727 bfd_reloc_code_real_type bfd_r_type;
5728 Elf_Internal_Sym *isym;
5729
5730 r_symndx = ELFNN_R_SYM (rel->r_info);
5731 r_type = ELFNN_R_TYPE (rel->r_info);
5732
5733 if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr))
5734 {
5735 (*_bfd_error_handler) (_("%B: bad symbol index: %d"), abfd,
5736 r_symndx);
5737 return FALSE;
5738 }
5739
5740 if (r_symndx < symtab_hdr->sh_info)
5741 {
5742 /* A local symbol. */
5743 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
5744 abfd, r_symndx);
5745 if (isym == NULL)
5746 return FALSE;
5747
5748 /* Check relocation against local STT_GNU_IFUNC symbol. */
5749 if (ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
5750 {
5751 h = elfNN_aarch64_get_local_sym_hash (htab, abfd, rel,
5752 TRUE);
5753 if (h == NULL)
5754 return FALSE;
5755
5756 /* Fake a STT_GNU_IFUNC symbol. */
5757 h->type = STT_GNU_IFUNC;
5758 h->def_regular = 1;
5759 h->ref_regular = 1;
5760 h->forced_local = 1;
5761 h->root.type = bfd_link_hash_defined;
5762 }
5763 else
5764 h = NULL;
5765 }
5766 else
5767 {
5768 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
5769 while (h->root.type == bfd_link_hash_indirect
5770 || h->root.type == bfd_link_hash_warning)
5771 h = (struct elf_link_hash_entry *) h->root.u.i.link;
5772
5773 /* PR15323, ref flags aren't set for references in the same
5774 object. */
5775 h->root.non_ir_ref = 1;
5776 }
5777
5778 /* Could be done earlier, if h were already available. */
5779 bfd_r_type = aarch64_tls_transition (abfd, info, r_type, h, r_symndx);
5780
5781 if (h != NULL)
5782 {
5783 /* Create the ifunc sections for static executables. If we
5784 never see an indirect function symbol nor are we building
5785 a static executable, those sections will be empty and
5786 won't appear in output. */
5787 switch (bfd_r_type)
5788 {
5789 default:
5790 break;
5791
5792 case BFD_RELOC_AARCH64_NN:
5793 case BFD_RELOC_AARCH64_CALL26:
5794 case BFD_RELOC_AARCH64_JUMP26:
5795 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
5796 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
5797 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
5798 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
5799 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
5800 case BFD_RELOC_AARCH64_ADD_LO12:
5801 if (htab->root.dynobj == NULL)
5802 htab->root.dynobj = abfd;
5803 if (!_bfd_elf_create_ifunc_sections (htab->root.dynobj, info))
5804 return FALSE;
5805 break;
5806 }
5807
5808 /* It is referenced by a non-shared object. */
5809 h->ref_regular = 1;
5810 h->root.non_ir_ref = 1;
5811 }
5812
5813 switch (bfd_r_type)
5814 {
5815 case BFD_RELOC_AARCH64_NN:
5816
5817 /* We don't need to handle relocs into sections not going into
5818 the "real" output. */
5819 if ((sec->flags & SEC_ALLOC) == 0)
5820 break;
5821
5822 if (h != NULL)
5823 {
5824 if (!info->shared)
5825 h->non_got_ref = 1;
5826
5827 h->plt.refcount += 1;
5828 h->pointer_equality_needed = 1;
5829 }
5830
5831 /* No need to do anything if we're not creating a shared
5832 object. */
5833 if (! info->shared)
5834 break;
5835
5836 {
5837 struct elf_dyn_relocs *p;
5838 struct elf_dyn_relocs **head;
5839
5840 /* We must copy these reloc types into the output file.
5841 Create a reloc section in dynobj and make room for
5842 this reloc. */
5843 if (sreloc == NULL)
5844 {
5845 if (htab->root.dynobj == NULL)
5846 htab->root.dynobj = abfd;
5847
5848 sreloc = _bfd_elf_make_dynamic_reloc_section
5849 (sec, htab->root.dynobj, LOG_FILE_ALIGN, abfd, /*rela? */ TRUE);
5850
5851 if (sreloc == NULL)
5852 return FALSE;
5853 }
5854
5855 /* If this is a global symbol, we count the number of
5856 relocations we need for this symbol. */
5857 if (h != NULL)
5858 {
5859 struct elf_aarch64_link_hash_entry *eh;
5860 eh = (struct elf_aarch64_link_hash_entry *) h;
5861 head = &eh->dyn_relocs;
5862 }
5863 else
5864 {
5865 /* Track dynamic relocs needed for local syms too.
5866 We really need local syms available to do this
5867 easily. Oh well. */
5868
5869 asection *s;
5870 void **vpp;
5871
5872 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
5873 abfd, r_symndx);
5874 if (isym == NULL)
5875 return FALSE;
5876
5877 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
5878 if (s == NULL)
5879 s = sec;
5880
5881 /* Beware of type punned pointers vs strict aliasing
5882 rules. */
5883 vpp = &(elf_section_data (s)->local_dynrel);
5884 head = (struct elf_dyn_relocs **) vpp;
5885 }
5886
5887 p = *head;
5888 if (p == NULL || p->sec != sec)
5889 {
5890 bfd_size_type amt = sizeof *p;
5891 p = ((struct elf_dyn_relocs *)
5892 bfd_zalloc (htab->root.dynobj, amt));
5893 if (p == NULL)
5894 return FALSE;
5895 p->next = *head;
5896 *head = p;
5897 p->sec = sec;
5898 }
5899
5900 p->count += 1;
5901
5902 }
5903 break;
5904
5905 /* RR: We probably want to keep a consistency check that
5906 there are no dangling GOT_PAGE relocs. */
5907 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
5908 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
5909 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
5910 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
5911 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
5912 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
5913 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
5914 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
5915 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
5916 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
5917 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
5918 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
5919 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
5920 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
5921 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
5922 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
5923 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
5924 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
5925 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
5926 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
5927 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
5928 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
5929 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
5930 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
5931 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
5932 {
5933 unsigned got_type;
5934 unsigned old_got_type;
5935
5936 got_type = aarch64_reloc_got_type (bfd_r_type);
5937
5938 if (h)
5939 {
5940 h->got.refcount += 1;
5941 old_got_type = elf_aarch64_hash_entry (h)->got_type;
5942 }
5943 else
5944 {
5945 struct elf_aarch64_local_symbol *locals;
5946
5947 if (!elfNN_aarch64_allocate_local_symbols
5948 (abfd, symtab_hdr->sh_info))
5949 return FALSE;
5950
5951 locals = elf_aarch64_locals (abfd);
5952 BFD_ASSERT (r_symndx < symtab_hdr->sh_info);
5953 locals[r_symndx].got_refcount += 1;
5954 old_got_type = locals[r_symndx].got_type;
5955 }
5956
5957 /* If a variable is accessed with both general dynamic TLS
5958 methods, two slots may be created. */
5959 if (GOT_TLS_GD_ANY_P (old_got_type) && GOT_TLS_GD_ANY_P (got_type))
5960 got_type |= old_got_type;
5961
5962 /* We will already have issued an error message if there
5963 is a TLS/non-TLS mismatch, based on the symbol type.
5964 So just combine any TLS types needed. */
5965 if (old_got_type != GOT_UNKNOWN && old_got_type != GOT_NORMAL
5966 && got_type != GOT_NORMAL)
5967 got_type |= old_got_type;
5968
5969 /* If the symbol is accessed by both IE and GD methods, we
5970 are able to relax. Turn off the GD flag, without
5971 messing up with any other kind of TLS types that may be
5972 involved. */
5973 if ((got_type & GOT_TLS_IE) && GOT_TLS_GD_ANY_P (got_type))
5974 got_type &= ~ (GOT_TLSDESC_GD | GOT_TLS_GD);
5975
5976 if (old_got_type != got_type)
5977 {
5978 if (h != NULL)
5979 elf_aarch64_hash_entry (h)->got_type = got_type;
5980 else
5981 {
5982 struct elf_aarch64_local_symbol *locals;
5983 locals = elf_aarch64_locals (abfd);
5984 BFD_ASSERT (r_symndx < symtab_hdr->sh_info);
5985 locals[r_symndx].got_type = got_type;
5986 }
5987 }
5988
5989 if (htab->root.dynobj == NULL)
5990 htab->root.dynobj = abfd;
5991 if (! aarch64_elf_create_got_section (htab->root.dynobj, info))
5992 return FALSE;
5993 break;
5994 }
5995
5996 case BFD_RELOC_AARCH64_MOVW_G0_NC:
5997 case BFD_RELOC_AARCH64_MOVW_G1_NC:
5998 case BFD_RELOC_AARCH64_MOVW_G2_NC:
5999 case BFD_RELOC_AARCH64_MOVW_G3:
6000 if (info->shared)
6001 {
6002 int howto_index = bfd_r_type - BFD_RELOC_AARCH64_RELOC_START;
6003 (*_bfd_error_handler)
6004 (_("%B: relocation %s against `%s' can not be used when making "
6005 "a shared object; recompile with -fPIC"),
6006 abfd, elfNN_aarch64_howto_table[howto_index].name,
6007 (h) ? h->root.root.string : "a local symbol");
6008 bfd_set_error (bfd_error_bad_value);
6009 return FALSE;
6010 }
6011
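/* Fall through. */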
6012 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
6013 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
6014 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
6015 if (h != NULL && info->executable)
6016 {
6017 /* If this reloc is in a read-only section, we might
6018 need a copy reloc. We can't check reliably at this
6019 stage whether the section is read-only, as input
6020 sections have not yet been mapped to output sections.
6021 Tentatively set the flag for now, and correct in
6022 adjust_dynamic_symbol. */
6023 h->non_got_ref = 1;
6024 h->plt.refcount += 1;
6025 h->pointer_equality_needed = 1;
6026 }
6027 /* FIXME: RR - we need to handle these in shared libraries
6028 and essentially bomb out, as these are non-PIC
6029 relocations in shared libraries. */
6030 break;
6031
6032 case BFD_RELOC_AARCH64_CALL26:
6033 case BFD_RELOC_AARCH64_JUMP26:
6034 /* If this is a local symbol then we resolve it
6035 directly without creating a PLT entry. */
6036 if (h == NULL)
6037 continue;
6038
6039 h->needs_plt = 1;
6040 if (h->plt.refcount <= 0)
6041 h->plt.refcount = 1;
6042 else
6043 h->plt.refcount += 1;
6044 break;
6045
6046 default:
6047 break;
6048 }
6049 }
6050
6051 return TRUE;
6052 }
6053
6054 /* Treat mapping symbols as special target symbols. */
6055
6056 static bfd_boolean
6057 elfNN_aarch64_is_target_special_symbol (bfd *abfd ATTRIBUTE_UNUSED,
6058 asymbol *sym)
6059 {
6060 return bfd_is_aarch64_special_symbol_name (sym->name,
6061 BFD_AARCH64_SPECIAL_SYM_TYPE_ANY);
6062 }
6063
6064 /* This is a copy of elf_find_function () from elf.c except that
6065 AArch64 mapping symbols are ignored when looking for function names. */
6066
6067 static bfd_boolean
6068 aarch64_elf_find_function (bfd *abfd ATTRIBUTE_UNUSED,
6069 asymbol **symbols,
6070 asection *section,
6071 bfd_vma offset,
6072 const char **filename_ptr,
6073 const char **functionname_ptr)
6074 {
6075 const char *filename = NULL;
6076 asymbol *func = NULL;
6077 bfd_vma low_func = 0;
6078 asymbol **p;
6079
6080 for (p = symbols; *p != NULL; p++)
6081 {
6082 elf_symbol_type *q;
6083
6084 q = (elf_symbol_type *) * p;
6085
6086 switch (ELF_ST_TYPE (q->internal_elf_sym.st_info))
6087 {
6088 default:
6089 break;
6090 case STT_FILE:
6091 filename = bfd_asymbol_name (&q->symbol);
6092 break;
6093 case STT_FUNC:
6094 case STT_NOTYPE:
6095 /* Skip mapping symbols. */
6096 if ((q->symbol.flags & BSF_LOCAL)
6097 && (bfd_is_aarch64_special_symbol_name
6098 (q->symbol.name, BFD_AARCH64_SPECIAL_SYM_TYPE_ANY)))
6099 continue;
6101 if (bfd_get_section (&q->symbol) == section
6102 && q->symbol.value >= low_func && q->symbol.value <= offset)
6103 {
6104 func = (asymbol *) q;
6105 low_func = q->symbol.value;
6106 }
6107 break;
6108 }
6109 }
6110
6111 if (func == NULL)
6112 return FALSE;
6113
6114 if (filename_ptr)
6115 *filename_ptr = filename;
6116 if (functionname_ptr)
6117 *functionname_ptr = bfd_asymbol_name (func);
6118
6119 return TRUE;
6120 }
6121
6122
6123 /* Find the nearest line to a particular section and offset, for error
6124 reporting. This code is a duplicate of the code in elf.c, except
6125 that it uses aarch64_elf_find_function. */
6126
6127 static bfd_boolean
6128 elfNN_aarch64_find_nearest_line (bfd *abfd,
6129 asymbol **symbols,
6130 asection *section,
6131 bfd_vma offset,
6132 const char **filename_ptr,
6133 const char **functionname_ptr,
6134 unsigned int *line_ptr,
6135 unsigned int *discriminator_ptr)
6136 {
6137 bfd_boolean found = FALSE;
6138
6139 if (_bfd_dwarf2_find_nearest_line (abfd, symbols, NULL, section, offset,
6140 filename_ptr, functionname_ptr,
6141 line_ptr, discriminator_ptr,
6142 dwarf_debug_sections, 0,
6143 &elf_tdata (abfd)->dwarf2_find_line_info))
6144 {
6145 if (!*functionname_ptr)
6146 aarch64_elf_find_function (abfd, symbols, section, offset,
6147 *filename_ptr ? NULL : filename_ptr,
6148 functionname_ptr);
6149
6150 return TRUE;
6151 }
6152
6153 /* Skip _bfd_dwarf1_find_nearest_line since no known AArch64
6154 toolchain uses DWARF1. */
6155
6156 if (!_bfd_stab_section_find_nearest_line (abfd, symbols, section, offset,
6157 &found, filename_ptr,
6158 functionname_ptr, line_ptr,
6159 &elf_tdata (abfd)->line_info))
6160 return FALSE;
6161
6162 if (found && (*functionname_ptr || *line_ptr))
6163 return TRUE;
6164
6165 if (symbols == NULL)
6166 return FALSE;
6167
6168 if (!aarch64_elf_find_function (abfd, symbols, section, offset,
6169 filename_ptr, functionname_ptr))
6170 return FALSE;
6171
6172 *line_ptr = 0;
6173 return TRUE;
6174 }
6175
6176 static bfd_boolean
6177 elfNN_aarch64_find_inliner_info (bfd *abfd,
6178 const char **filename_ptr,
6179 const char **functionname_ptr,
6180 unsigned int *line_ptr)
6181 {
6182 bfd_boolean found;
6183 found = _bfd_dwarf2_find_inliner_info
6184 (abfd, filename_ptr,
6185 functionname_ptr, line_ptr, &elf_tdata (abfd)->dwarf2_find_line_info);
6186 return found;
6187 }
6188
6189
6190 static void
6191 elfNN_aarch64_post_process_headers (bfd *abfd,
6192 struct bfd_link_info *link_info)
6193 {
6194 Elf_Internal_Ehdr *i_ehdrp; /* ELF file header, internal form. */
6195
6196 i_ehdrp = elf_elfheader (abfd);
6197 i_ehdrp->e_ident[EI_ABIVERSION] = AARCH64_ELF_ABI_VERSION;
6198
6199 _bfd_elf_post_process_headers (abfd, link_info);
6200 }
6201
6202 static enum elf_reloc_type_class
6203 elfNN_aarch64_reloc_type_class (const struct bfd_link_info *info ATTRIBUTE_UNUSED,
6204 const asection *rel_sec ATTRIBUTE_UNUSED,
6205 const Elf_Internal_Rela *rela)
6206 {
6207 switch ((int) ELFNN_R_TYPE (rela->r_info))
6208 {
6209 case AARCH64_R (RELATIVE):
6210 return reloc_class_relative;
6211 case AARCH64_R (JUMP_SLOT):
6212 return reloc_class_plt;
6213 case AARCH64_R (COPY):
6214 return reloc_class_copy;
6215 default:
6216 return reloc_class_normal;
6217 }
6218 }
6219
6220 /* Handle an AArch64 specific section when reading an object file. This is
6221 called when bfd_section_from_shdr finds a section with an unknown
6222 type. */
6223
6224 static bfd_boolean
6225 elfNN_aarch64_section_from_shdr (bfd *abfd,
6226 Elf_Internal_Shdr *hdr,
6227 const char *name, int shindex)
6228 {
6229 /* There ought to be a place to keep ELF backend specific flags, but
6230 at the moment there isn't one. We just keep track of the
6231 sections by their name, instead. Fortunately, the ABI gives
6232 names for all the AArch64 specific sections, so we will probably get
6233 away with this. */
6234 switch (hdr->sh_type)
6235 {
6236 case SHT_AARCH64_ATTRIBUTES:
6237 break;
6238
6239 default:
6240 return FALSE;
6241 }
6242
6243 if (!_bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
6244 return FALSE;
6245
6246 return TRUE;
6247 }
6248
6249 /* A structure used to record a list of sections, independently
6250 of the next and prev fields in the asection structure. */
6251 typedef struct section_list
6252 {
6253 asection *sec;
6254 struct section_list *next;
6255 struct section_list *prev;
6256 }
6257 section_list;
6258
6259 /* Unfortunately we need to keep a list of sections for which
6260 an _aarch64_elf_section_data structure has been allocated. This
6261 is because it is possible for functions like elfNN_aarch64_write_section
6262 to be called on a section which has had an elf_data_structure
6263 allocated for it (and so the used_by_bfd field is valid) but
6264 for which the AArch64 extended version of this structure - the
6265 _aarch64_elf_section_data structure - has not been allocated. */
6266 static section_list *sections_with_aarch64_elf_section_data = NULL;
6267
6268 static void
6269 record_section_with_aarch64_elf_section_data (asection *sec)
6270 {
6271 struct section_list *entry;
6272
6273 entry = bfd_malloc (sizeof (*entry));
6274 if (entry == NULL)
6275 return;
6276 entry->sec = sec;
6277 entry->next = sections_with_aarch64_elf_section_data;
6278 entry->prev = NULL;
6279 if (entry->next != NULL)
6280 entry->next->prev = entry;
6281 sections_with_aarch64_elf_section_data = entry;
6282 }
6283
6284 static struct section_list *
6285 find_aarch64_elf_section_entry (asection *sec)
6286 {
6287 struct section_list *entry;
6288 static struct section_list *last_entry = NULL;
6289
6290 /* This is a short cut for the typical case where the sections are added
6291 to the sections_with_aarch64_elf_section_data list in forward order and
6292 then looked up here in backwards order. This makes a real difference
6293 to the ld-srec/sec64k.exp linker test. */
6294 entry = sections_with_aarch64_elf_section_data;
6295 if (last_entry != NULL)
6296 {
6297 if (last_entry->sec == sec)
6298 entry = last_entry;
6299 else if (last_entry->next != NULL && last_entry->next->sec == sec)
6300 entry = last_entry->next;
6301 }
6302
6303 for (; entry; entry = entry->next)
6304 if (entry->sec == sec)
6305 break;
6306
6307 if (entry)
6308 /* Record the entry prior to this one - it is the entry we are
6309 most likely to want to locate next time. Also this way if we
6310 have been called from
6311 unrecord_section_with_aarch64_elf_section_data () we will not
6312 be caching a pointer that is about to be freed. */
6313 last_entry = entry->prev;
6314
6315 return entry;
6316 }
6317
6318 static void
6319 unrecord_section_with_aarch64_elf_section_data (asection *sec)
6320 {
6321 struct section_list *entry;
6322
6323 entry = find_aarch64_elf_section_entry (sec);
6324
6325 if (entry)
6326 {
6327 if (entry->prev != NULL)
6328 entry->prev->next = entry->next;
6329 if (entry->next != NULL)
6330 entry->next->prev = entry->prev;
6331 if (entry == sections_with_aarch64_elf_section_data)
6332 sections_with_aarch64_elf_section_data = entry->next;
6333 free (entry);
6334 }
6335 }
6336
6337
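/* State passed around while outputting mapping and stub symbols for
   linker-generated sections. */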
6338 typedef struct
6339 {
6340 void *finfo;
6341 struct bfd_link_info *info;
6342 asection *sec;
6343 int sec_shndx;
6344 int (*func) (void *, const char *, Elf_Internal_Sym *,
6345 asection *, struct elf_link_hash_entry *);
6346 } output_arch_syminfo;
6347
6348 enum map_symbol_type
6349 {
6350 AARCH64_MAP_INSN,
6351 AARCH64_MAP_DATA
6352 };
6353
6354
6355 /* Output a single mapping symbol. */
6356
6357 static bfd_boolean
6358 elfNN_aarch64_output_map_sym (output_arch_syminfo *osi,
6359 enum map_symbol_type type, bfd_vma offset)
6360 {
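/* Mapping symbol names, indexed by enum map_symbol_type: "$x" marks the
   start of a sequence of A64 instructions, "$d" the start of data. */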
6361 static const char *names[2] = { "$x", "$d" };
6362 Elf_Internal_Sym sym;
6363
6364 sym.st_value = (osi->sec->output_section->vma
6365 + osi->sec->output_offset + offset);
6366 sym.st_size = 0;
6367 sym.st_other = 0;
6368 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
6369 sym.st_shndx = osi->sec_shndx;
6370 return osi->func (osi->finfo, names[type], &sym, osi->sec, NULL) == 1;
6371 }
6372
6373
6374
6375 /* Output mapping symbols for PLT entries associated with H. */
6376
6377 static bfd_boolean
6378 elfNN_aarch64_output_plt_map (struct elf_link_hash_entry *h, void *inf)
6379 {
6380 output_arch_syminfo *osi = (output_arch_syminfo *) inf;
6381 bfd_vma addr;
6382
6383 if (h->root.type == bfd_link_hash_indirect)
6384 return TRUE;
6385
6386 if (h->root.type == bfd_link_hash_warning)
6387 /* When warning symbols are created, they **replace** the "real"
6388 entry in the hash table, thus we never get to see the real
6389 symbol in a hash traversal. So look at it now. */
6390 h = (struct elf_link_hash_entry *) h->root.u.i.link;
6391
6392 if (h->plt.offset == (bfd_vma) - 1)
6393 return TRUE;
6394
6395 addr = h->plt.offset;
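/* Only the entry at offset 32 -- the first PLT entry after the 32-byte
   PLT header -- gets a $x mapping symbol. */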
6396 if (addr == 32)
6397 {
6398 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
6399 return FALSE;
6400 }
6401 return TRUE;
6402 }
6403
6404
6405 /* Output a single local symbol for a generated stub. */
6406
6407 static bfd_boolean
6408 elfNN_aarch64_output_stub_sym (output_arch_syminfo *osi, const char *name,
6409 bfd_vma offset, bfd_vma size)
6410 {
6411 Elf_Internal_Sym sym;
6412
6413 sym.st_value = (osi->sec->output_section->vma
6414 + osi->sec->output_offset + offset);
6415 sym.st_size = size;
6416 sym.st_other = 0;
6417 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
6418 sym.st_shndx = osi->sec_shndx;
6419 return osi->func (osi->finfo, name, &sym, osi->sec, NULL) == 1;
6420 }
6421
6422 static bfd_boolean
6423 aarch64_map_one_stub (struct bfd_hash_entry *gen_entry, void *in_arg)
6424 {
6425 struct elf_aarch64_stub_hash_entry *stub_entry;
6426 asection *stub_sec;
6427 bfd_vma addr;
6428 char *stub_name;
6429 output_arch_syminfo *osi;
6430
6431 /* Massage our args to the form they really have. */
6432 stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;
6433 osi = (output_arch_syminfo *) in_arg;
6434
6435 stub_sec = stub_entry->stub_sec;
6436
6437 /* Ensure this stub is attached to the current section being
6438 processed. */
6439 if (stub_sec != osi->sec)
6440 return TRUE;
6441
6442 addr = (bfd_vma) stub_entry->stub_offset;
6443
6444 stub_name = stub_entry->output_name;
6445
6446 switch (stub_entry->stub_type)
6447 {
6448 case aarch64_stub_adrp_branch:
6449 if (!elfNN_aarch64_output_stub_sym (osi, stub_name, addr,
6450 sizeof (aarch64_adrp_branch_stub)))
6451 return FALSE;
6452 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
6453 return FALSE;
6454 break;
6455 case aarch64_stub_long_branch:
6456 if (!elfNN_aarch64_output_stub_sym
6457 (osi, stub_name, addr, sizeof (aarch64_long_branch_stub)))
6458 return FALSE;
6459 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
6460 return FALSE;
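/* The first 16 bytes of the long-branch stub are instructions; the
   branch target literal follows, hence the $d at offset 16. */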
6461 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_DATA, addr + 16))
6462 return FALSE;
6463 break;
6464 case aarch64_stub_erratum_835769_veneer:
6465 if (!elfNN_aarch64_output_stub_sym (osi, stub_name, addr,
6466 sizeof (aarch64_erratum_835769_stub)))
6467 return FALSE;
6468 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
6469 return FALSE;
6470 break;
6471 default:
6472 abort ();
6473 }
6474
6475 return TRUE;
6476 }
6477
6478 /* Output mapping symbols for linker generated sections. */
6479
6480 static bfd_boolean
6481 elfNN_aarch64_output_arch_local_syms (bfd *output_bfd,
6482 struct bfd_link_info *info,
6483 void *finfo,
6484 int (*func) (void *, const char *,
6485 Elf_Internal_Sym *,
6486 asection *,
6487 struct elf_link_hash_entry
6488 *))
6489 {
6490 output_arch_syminfo osi;
6491 struct elf_aarch64_link_hash_table *htab;
6492
6493 htab = elf_aarch64_hash_table (info);
6494
6495 osi.finfo = finfo;
6496 osi.info = info;
6497 osi.func = func;
6498
6499 /* Long-call stubs. */
6500 if (htab->stub_bfd && htab->stub_bfd->sections)
6501 {
6502 asection *stub_sec;
6503
6504 for (stub_sec = htab->stub_bfd->sections;
6505 stub_sec != NULL; stub_sec = stub_sec->next)
6506 {
6507 /* Ignore non-stub sections. */
6508 if (!strstr (stub_sec->name, STUB_SUFFIX))
6509 continue;
6510
6511 osi.sec = stub_sec;
6512
6513 osi.sec_shndx = _bfd_elf_section_from_bfd_section
6514 (output_bfd, osi.sec->output_section);
6515
6516 /* The first instruction in a stub is always a branch. */
6517 if (!elfNN_aarch64_output_map_sym (&osi, AARCH64_MAP_INSN, 0))
6518 return FALSE;
6519
6520 bfd_hash_traverse (&htab->stub_hash_table, aarch64_map_one_stub,
6521 &osi);
6522 }
6523 }
6524
6525 /* Finally, output mapping symbols for the PLT. */
6526 if (!htab->root.splt || htab->root.splt->size == 0)
6527 return TRUE;
6528
6529 /* For now live without mapping symbols for the plt. */
6530 osi.sec_shndx = _bfd_elf_section_from_bfd_section
6531 (output_bfd, htab->root.splt->output_section);
6532 osi.sec = htab->root.splt;
6533
6534 elf_link_hash_traverse (&htab->root, elfNN_aarch64_output_plt_map,
6535 (void *) &osi);
6536
6537 return TRUE;
6538
6539 }
6540
6541 /* Allocate target specific section data. */
6542
6543 static bfd_boolean
6544 elfNN_aarch64_new_section_hook (bfd *abfd, asection *sec)
6545 {
6546 if (!sec->used_by_bfd)
6547 {
6548 _aarch64_elf_section_data *sdata;
6549 bfd_size_type amt = sizeof (*sdata);
6550
6551 sdata = bfd_zalloc (abfd, amt);
6552 if (sdata == NULL)
6553 return FALSE;
6554 sec->used_by_bfd = sdata;
6555 }
6556
6557 record_section_with_aarch64_elf_section_data (sec);
6558
6559 return _bfd_elf_new_section_hook (abfd, sec);
6560 }
6561
6562
6563 static void
6564 unrecord_section_via_map_over_sections (bfd *abfd ATTRIBUTE_UNUSED,
6565 asection *sec,
6566 void *ignore ATTRIBUTE_UNUSED)
6567 {
6568 unrecord_section_with_aarch64_elf_section_data (sec);
6569 }
6570
6571 static bfd_boolean
6572 elfNN_aarch64_close_and_cleanup (bfd *abfd)
6573 {
6574 if (abfd->sections)
6575 bfd_map_over_sections (abfd,
6576 unrecord_section_via_map_over_sections, NULL);
6577
6578 return _bfd_elf_close_and_cleanup (abfd);
6579 }
6580
6581 static bfd_boolean
6582 elfNN_aarch64_bfd_free_cached_info (bfd *abfd)
6583 {
6584 if (abfd->sections)
6585 bfd_map_over_sections (abfd,
6586 unrecord_section_via_map_over_sections, NULL);
6587
6588 return _bfd_free_cached_info (abfd);
6589 }
6590
6591 /* Create dynamic sections. This is different from the ARM backend in that
6592 the got, plt, gotplt and their relocation sections are all created in the
6593 standard part of the bfd elf backend. */
6594
6595 static bfd_boolean
6596 elfNN_aarch64_create_dynamic_sections (bfd *dynobj,
6597 struct bfd_link_info *info)
6598 {
6599 struct elf_aarch64_link_hash_table *htab;
6600
6601 /* We need to create .got section. */
6602 if (!aarch64_elf_create_got_section (dynobj, info))
6603 return FALSE;
6604
6605 if (!_bfd_elf_create_dynamic_sections (dynobj, info))
6606 return FALSE;
6607
6608 htab = elf_aarch64_hash_table (info);
6609 htab->sdynbss = bfd_get_linker_section (dynobj, ".dynbss");
6610 if (!info->shared)
6611 htab->srelbss = bfd_get_linker_section (dynobj, ".rela.bss");
6612
6613 if (!htab->sdynbss || (!info->shared && !htab->srelbss))
6614 abort ();
6615
6616 return TRUE;
6617 }
6618
6619
6620 /* Allocate space in .plt, .got and associated reloc sections for
6621 dynamic relocs. */
6622
6623 static bfd_boolean
6624 elfNN_aarch64_allocate_dynrelocs (struct elf_link_hash_entry *h, void *inf)
6625 {
6626 struct bfd_link_info *info;
6627 struct elf_aarch64_link_hash_table *htab;
6628 struct elf_aarch64_link_hash_entry *eh;
6629 struct elf_dyn_relocs *p;
6630
6631 /* An example of a bfd_link_hash_indirect symbol is a versioned
6632 symbol. For example: __gxx_personality_v0(bfd_link_hash_indirect)
6633 -> __gxx_personality_v0(bfd_link_hash_defined)
6634
6635 There is no need to process bfd_link_hash_indirect symbols here
6636 because we will also be presented with the concrete instance of
6637 the symbol and elfNN_aarch64_copy_indirect_symbol () will have been
6638 called to copy all relevant data from the generic to the concrete
6639 symbol instance.
6640 */
6641 if (h->root.type == bfd_link_hash_indirect)
6642 return TRUE;
6643
6644 if (h->root.type == bfd_link_hash_warning)
6645 h = (struct elf_link_hash_entry *) h->root.u.i.link;
6646
6647 info = (struct bfd_link_info *) inf;
6648 htab = elf_aarch64_hash_table (info);
6649
6650 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle it
6651 here if it is defined and referenced in a non-shared object. */
6652 if (h->type == STT_GNU_IFUNC
6653 && h->def_regular)
6654 return TRUE;
6655 else if (htab->root.dynamic_sections_created && h->plt.refcount > 0)
6656 {
6657 /* Make sure this symbol is output as a dynamic symbol.
6658 Undefined weak syms won't yet be marked as dynamic. */
6659 if (h->dynindx == -1 && !h->forced_local)
6660 {
6661 if (!bfd_elf_link_record_dynamic_symbol (info, h))
6662 return FALSE;
6663 }
6664
6665 if (info->shared || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
6666 {
6667 asection *s = htab->root.splt;
6668
6669 /* If this is the first .plt entry, make room for the special
6670 first entry. */
6671 if (s->size == 0)
6672 s->size += htab->plt_header_size;
6673
6674 h->plt.offset = s->size;
6675
6676 /* If this symbol is not defined in a regular file, and we are
6677 not generating a shared library, then set the symbol to this
6678 location in the .plt. This is required to make function
6679 pointers compare as equal between the normal executable and
6680 the shared library. */
6681 if (!info->shared && !h->def_regular)
6682 {
6683 h->root.u.def.section = s;
6684 h->root.u.def.value = h->plt.offset;
6685 }
6686
6687 /* Make room for this entry. For now we only create the
6688 small model PLT entries. We later need to find a way
6689 of relaxing into these from the large model PLT entries. */
6690 s->size += PLT_SMALL_ENTRY_SIZE;
6691
6692 /* We also need to make an entry in the .got.plt section, which
6693 will be placed in the .got section by the linker script. */
6694 htab->root.sgotplt->size += GOT_ENTRY_SIZE;
6695
6696 /* We also need to make an entry in the .rela.plt section. */
6697 htab->root.srelplt->size += RELOC_SIZE (htab);
6698
6699 /* We need to ensure that all GOT entries that serve the PLT
6700 are consecutive with the special GOT slots [0] [1] and
6701 [2]. Any additional relocations, such as
6702 R_AARCH64_TLSDESC, must be placed after the PLT related
6703 entries. We abuse the reloc_count such that during
6704 sizing we adjust reloc_count to indicate the number of
6705 PLT related reserved entries. In subsequent phases when
6706 filling in the contents of the reloc entries, PLT related
6707 entries are placed by computing their PLT index (0
6708 .. reloc_count), while other non-PLT relocs are placed
6709 at the slot indicated by reloc_count, and reloc_count is
6710 updated. */
6711
6712 htab->root.srelplt->reloc_count++;
6713 }
6714 else
6715 {
6716 h->plt.offset = (bfd_vma) - 1;
6717 h->needs_plt = 0;
6718 }
6719 }
6720 else
6721 {
6722 h->plt.offset = (bfd_vma) - 1;
6723 h->needs_plt = 0;
6724 }
6725
6726 eh = (struct elf_aarch64_link_hash_entry *) h;
6727 eh->tlsdesc_got_jump_table_offset = (bfd_vma) - 1;
6728
6729 if (h->got.refcount > 0)
6730 {
6731 bfd_boolean dyn;
6732 unsigned got_type = elf_aarch64_hash_entry (h)->got_type;
6733
6734 h->got.offset = (bfd_vma) - 1;
6735
6736 dyn = htab->root.dynamic_sections_created;
6737
6738 /* Make sure this symbol is output as a dynamic symbol.
6739 Undefined weak syms won't yet be marked as dynamic. */
6740 if (dyn && h->dynindx == -1 && !h->forced_local)
6741 {
6742 if (!bfd_elf_link_record_dynamic_symbol (info, h))
6743 return FALSE;
6744 }
6745
6746 if (got_type == GOT_UNKNOWN)
6747 {
6748 }
6749 else if (got_type == GOT_NORMAL)
6750 {
6751 h->got.offset = htab->root.sgot->size;
6752 htab->root.sgot->size += GOT_ENTRY_SIZE;
6753 if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
6754 || h->root.type != bfd_link_hash_undefweak)
6755 && (info->shared
6756 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
6757 {
6758 htab->root.srelgot->size += RELOC_SIZE (htab);
6759 }
6760 }
6761 else
6762 {
6763 int indx;
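/* Reserve GOT space according to the TLS access models seen for this
   symbol: a TLS descriptor takes a pair of .got.plt slots, traditional
   GD a pair of .got slots, and IE a single .got slot. */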
6764 if (got_type & GOT_TLSDESC_GD)
6765 {
6766 eh->tlsdesc_got_jump_table_offset =
6767 (htab->root.sgotplt->size
6768 - aarch64_compute_jump_table_size (htab));
6769 htab->root.sgotplt->size += GOT_ENTRY_SIZE * 2;
6770 h->got.offset = (bfd_vma) - 2;
6771 }
6772
6773 if (got_type & GOT_TLS_GD)
6774 {
6775 h->got.offset = htab->root.sgot->size;
6776 htab->root.sgot->size += GOT_ENTRY_SIZE * 2;
6777 }
6778
6779 if (got_type & GOT_TLS_IE)
6780 {
6781 h->got.offset = htab->root.sgot->size;
6782 htab->root.sgot->size += GOT_ENTRY_SIZE;
6783 }
6784
6785 indx = h && h->dynindx != -1 ? h->dynindx : 0;
6786 if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
6787 || h->root.type != bfd_link_hash_undefweak)
6788 && (info->shared
6789 || indx != 0
6790 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
6791 {
6792 if (got_type & GOT_TLSDESC_GD)
6793 {
6794 htab->root.srelplt->size += RELOC_SIZE (htab);
6795 /* Note reloc_count not incremented here! We have
6796 already adjusted reloc_count for this relocation
6797 type. */
6798
6799 /* TLSDESC PLT is now needed, but not yet determined. */
6800 htab->tlsdesc_plt = (bfd_vma) - 1;
6801 }
6802
6803 if (got_type & GOT_TLS_GD)
6804 htab->root.srelgot->size += RELOC_SIZE (htab) * 2;
6805
6806 if (got_type & GOT_TLS_IE)
6807 htab->root.srelgot->size += RELOC_SIZE (htab);
6808 }
6809 }
6810 }
6811 else
6812 {
6813 h->got.offset = (bfd_vma) - 1;
6814 }
6815
6816 if (eh->dyn_relocs == NULL)
6817 return TRUE;
6818
6819 /* In the shared -Bsymbolic case, discard space allocated for
6820 dynamic pc-relative relocs against symbols which turn out to be
6821 defined in regular objects. For the normal shared case, discard
6822 space for pc-relative relocs that have become local due to symbol
6823 visibility changes. */
6824
6825 if (info->shared)
6826 {
6827 /* Relocs that use pc_count are those that appear on a call
6828 insn, or certain REL relocs that can be generated via assembly.
6829 We want calls to protected symbols to resolve directly to the
6830 function rather than going via the plt. If people want
6831 function pointer comparisons to work as expected then they
6832 should avoid writing weird assembly. */
6833 if (SYMBOL_CALLS_LOCAL (info, h))
6834 {
6835 struct elf_dyn_relocs **pp;
6836
6837 for (pp = &eh->dyn_relocs; (p = *pp) != NULL;)
6838 {
6839 p->count -= p->pc_count;
6840 p->pc_count = 0;
6841 if (p->count == 0)
6842 *pp = p->next;
6843 else
6844 pp = &p->next;
6845 }
6846 }
6847
6848 /* Also discard relocs on undefined weak syms with non-default
6849 visibility. */
6850 if (eh->dyn_relocs != NULL && h->root.type == bfd_link_hash_undefweak)
6851 {
6852 if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
6853 eh->dyn_relocs = NULL;
6854
6855 /* Make sure undefined weak symbols are output as a dynamic
6856 symbol in PIEs. */
6857 else if (h->dynindx == -1
6858 && !h->forced_local
6859 && !bfd_elf_link_record_dynamic_symbol (info, h))
6860 return FALSE;
6861 }
6862
6863 }
6864 else if (ELIMINATE_COPY_RELOCS)
6865 {
6866 /* For the non-shared case, discard space for relocs against
6867 symbols which turn out to need copy relocs or are not
6868 dynamic. */
6869
6870 if (!h->non_got_ref
6871 && ((h->def_dynamic
6872 && !h->def_regular)
6873 || (htab->root.dynamic_sections_created
6874 && (h->root.type == bfd_link_hash_undefweak
6875 || h->root.type == bfd_link_hash_undefined))))
6876 {
6877 /* Make sure this symbol is output as a dynamic symbol.
6878 Undefined weak syms won't yet be marked as dynamic. */
6879 if (h->dynindx == -1
6880 && !h->forced_local
6881 && !bfd_elf_link_record_dynamic_symbol (info, h))
6882 return FALSE;
6883
6884 /* If that succeeded, we know we'll be keeping all the
6885 relocs. */
6886 if (h->dynindx != -1)
6887 goto keep;
6888 }
6889
6890 eh->dyn_relocs = NULL;
6891
6892 keep:;
6893 }
6894
6895 /* Finally, allocate space. */
6896 for (p = eh->dyn_relocs; p != NULL; p = p->next)
6897 {
6898 asection *sreloc;
6899
6900 sreloc = elf_section_data (p->sec)->sreloc;
6901
6902 BFD_ASSERT (sreloc != NULL);
6903
6904 sreloc->size += p->count * RELOC_SIZE (htab);
6905 }
6906
6907 return TRUE;
6908 }
6909
6910 /* Allocate space in .plt, .got and associated reloc sections for
6911 ifunc dynamic relocs. */
6912
6913 static bfd_boolean
6914 elfNN_aarch64_allocate_ifunc_dynrelocs (struct elf_link_hash_entry *h,
6915 void *inf)
6916 {
6917 struct bfd_link_info *info;
6918 struct elf_aarch64_link_hash_table *htab;
6919 struct elf_aarch64_link_hash_entry *eh;
6920
6921 /* An example of a bfd_link_hash_indirect symbol is a versioned
6922 symbol. For example: __gxx_personality_v0(bfd_link_hash_indirect)
6923 -> __gxx_personality_v0(bfd_link_hash_defined)
6924
6925 There is no need to process bfd_link_hash_indirect symbols here
6926 because we will also be presented with the concrete instance of
6927 the symbol and elfNN_aarch64_copy_indirect_symbol () will have been
6928 called to copy all relevant data from the generic to the concrete
6929 symbol instance.
6930 */
6931 if (h->root.type == bfd_link_hash_indirect)
6932 return TRUE;
6933
6934 if (h->root.type == bfd_link_hash_warning)
6935 h = (struct elf_link_hash_entry *) h->root.u.i.link;
6936
6937 info = (struct bfd_link_info *) inf;
6938 htab = elf_aarch64_hash_table (info);
6939
6940 eh = (struct elf_aarch64_link_hash_entry *) h;
6941
6942 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle it
6943 here if it is defined and referenced in a non-shared object. */
6944 if (h->type == STT_GNU_IFUNC
6945 && h->def_regular)
6946 return _bfd_elf_allocate_ifunc_dyn_relocs (info, h,
6947 &eh->dyn_relocs,
6948 htab->plt_entry_size,
6949 htab->plt_header_size,
6950 GOT_ENTRY_SIZE);
6951 return TRUE;
6952 }
6953
6954 /* Allocate space in .plt, .got and associated reloc sections for
6955 local dynamic relocs. */
6956
6957 static bfd_boolean
6958 elfNN_aarch64_allocate_local_dynrelocs (void **slot, void *inf)
6959 {
6960 struct elf_link_hash_entry *h
6961 = (struct elf_link_hash_entry *) *slot;
6962
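/* The local hash table only holds the entries faked up for local
   STT_GNU_IFUNC symbols, so anything else here is an internal error. */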
6963 if (h->type != STT_GNU_IFUNC
6964 || !h->def_regular
6965 || !h->ref_regular
6966 || !h->forced_local
6967 || h->root.type != bfd_link_hash_defined)
6968 abort ();
6969
6970 return elfNN_aarch64_allocate_dynrelocs (h, inf);
6971 }
6972
6973 /* Allocate space in .plt, .got and associated reloc sections for
6974 local ifunc dynamic relocs. */
6975
6976 static bfd_boolean
6977 elfNN_aarch64_allocate_local_ifunc_dynrelocs (void **slot, void *inf)
6978 {
6979 struct elf_link_hash_entry *h
6980 = (struct elf_link_hash_entry *) *slot;
6981
6982 if (h->type != STT_GNU_IFUNC
6983 || !h->def_regular
6984 || !h->ref_regular
6985 || !h->forced_local
6986 || h->root.type != bfd_link_hash_defined)
6987 abort ();
6988
6989 return elfNN_aarch64_allocate_ifunc_dynrelocs (h, inf);
6990 }
6991
6992 /* This is the most important function of all. Innocuously named
6993 though! */
6994 static bfd_boolean
6995 elfNN_aarch64_size_dynamic_sections (bfd *output_bfd ATTRIBUTE_UNUSED,
6996 struct bfd_link_info *info)
6997 {
6998 struct elf_aarch64_link_hash_table *htab;
6999 bfd *dynobj;
7000 asection *s;
7001 bfd_boolean relocs;
7002 bfd *ibfd;
7003
7004 htab = elf_aarch64_hash_table (info);
7005 dynobj = htab->root.dynobj;
7006
7007 BFD_ASSERT (dynobj != NULL);
7008
7009 if (htab->root.dynamic_sections_created)
7010 {
7011 if (info->executable)
7012 {
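/* Fill .interp with the name of the dynamic linker that the
   executable is to be loaded with. */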
7013 s = bfd_get_linker_section (dynobj, ".interp");
7014 if (s == NULL)
7015 abort ();
7016 s->size = sizeof ELF_DYNAMIC_INTERPRETER;
7017 s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER;
7018 }
7019 }
7020
7021 /* Set up .got offsets for local syms, and space for local dynamic
7022 relocs. */
7023 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
7024 {
7025 struct elf_aarch64_local_symbol *locals = NULL;
7026 Elf_Internal_Shdr *symtab_hdr;
7027 asection *srel;
7028 unsigned int i;
7029
7030 if (!is_aarch64_elf (ibfd))
7031 continue;
7032
7033 for (s = ibfd->sections; s != NULL; s = s->next)
7034 {
7035 struct elf_dyn_relocs *p;
7036
7037 for (p = (struct elf_dyn_relocs *)
7038 (elf_section_data (s)->local_dynrel); p != NULL; p = p->next)
7039 {
7040 if (!bfd_is_abs_section (p->sec)
7041 && bfd_is_abs_section (p->sec->output_section))
7042 {
7043 /* Input section has been discarded, either because
7044 it is a copy of a linkonce section or due to
7045 linker script /DISCARD/, so we'll be discarding
7046 the relocs too. */
7047 }
7048 else if (p->count != 0)
7049 {
7050 srel = elf_section_data (p->sec)->sreloc;
7051 srel->size += p->count * RELOC_SIZE (htab);
7052 if ((p->sec->output_section->flags & SEC_READONLY) != 0)
7053 info->flags |= DF_TEXTREL;
7054 }
7055 }
7056 }
7057
7058 locals = elf_aarch64_locals (ibfd);
7059 if (!locals)
7060 continue;
7061
7062 symtab_hdr = &elf_symtab_hdr (ibfd);
7063 srel = htab->root.srelgot;
7064 for (i = 0; i < symtab_hdr->sh_info; i++)
7065 {
7066 locals[i].got_offset = (bfd_vma) - 1;
7067 locals[i].tlsdesc_got_jump_table_offset = (bfd_vma) - 1;
7068 if (locals[i].got_refcount > 0)
7069 {
7070 unsigned got_type = locals[i].got_type;
7071 if (got_type & GOT_TLSDESC_GD)
7072 {
7073 locals[i].tlsdesc_got_jump_table_offset =
7074 (htab->root.sgotplt->size
7075 - aarch64_compute_jump_table_size (htab));
7076 htab->root.sgotplt->size += GOT_ENTRY_SIZE * 2;
7077 locals[i].got_offset = (bfd_vma) - 2;
7078 }
7079
7080 if (got_type & GOT_TLS_GD)
7081 {
7082 locals[i].got_offset = htab->root.sgot->size;
7083 htab->root.sgot->size += GOT_ENTRY_SIZE * 2;
7084 }
7085
7086 if (got_type & GOT_TLS_IE)
7087 {
7088 locals[i].got_offset = htab->root.sgot->size;
7089 htab->root.sgot->size += GOT_ENTRY_SIZE;
7090 }
7091
7092 if (got_type == GOT_UNKNOWN)
7093 {
7094 }
7095
7096 if (got_type == GOT_NORMAL)
7097 {
7098 }
7099
7100 if (info->shared)
7101 {
7102 if (got_type & GOT_TLSDESC_GD)
7103 {
7104 htab->root.srelplt->size += RELOC_SIZE (htab);
7105 /* Note RELOC_COUNT not incremented here! */
7106 htab->tlsdesc_plt = (bfd_vma) - 1;
7107 }
7108
7109 if (got_type & GOT_TLS_GD)
7110 htab->root.srelgot->size += RELOC_SIZE (htab) * 2;
7111
7112 if (got_type & GOT_TLS_IE)
7113 htab->root.srelgot->size += RELOC_SIZE (htab);
7114 }
7115 }
7116 else
7117 {
7118 locals[i].got_refcount = (bfd_vma) - 1;
7119 }
7120 }
7121 }
7122
7123
7124 /* Allocate global sym .plt and .got entries, and space for global
7125 sym dynamic relocs. */
7126 elf_link_hash_traverse (&htab->root, elfNN_aarch64_allocate_dynrelocs,
7127 info);
7128
7129 /* Allocate global ifunc sym .plt and .got entries, and space for global
7130 ifunc sym dynamic relocs. */
7131 elf_link_hash_traverse (&htab->root, elfNN_aarch64_allocate_ifunc_dynrelocs,
7132 info);
7133
7134 /* Allocate .plt and .got entries, and space for local symbols. */
7135 htab_traverse (htab->loc_hash_table,
7136 elfNN_aarch64_allocate_local_dynrelocs,
7137 info);
7138
7139 /* Allocate .plt and .got entries, and space for local ifunc symbols. */
7140 htab_traverse (htab->loc_hash_table,
7141 elfNN_aarch64_allocate_local_ifunc_dynrelocs,
7142 info);
7143
7144 /* For every jump slot reserved in the sgotplt, reloc_count is
7145 incremented. However, when we reserve space for TLS descriptors,
7146 it's not incremented, so in order to compute the space reserved
7147 for them, it suffices to multiply the reloc count by the jump
7148 slot size. */
7149
7150 if (htab->root.srelplt)
7151 htab->sgotplt_jump_table_size = aarch64_compute_jump_table_size (htab);
7152
7153 if (htab->tlsdesc_plt)
7154 {
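/* If no ordinary PLT entries were needed, still make room for the
   PLT header before placing the TLSDESC resolver entry. */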
7155 if (htab->root.splt->size == 0)
7156 htab->root.splt->size += PLT_ENTRY_SIZE;
7157
7158 htab->tlsdesc_plt = htab->root.splt->size;
7159 htab->root.splt->size += PLT_TLSDESC_ENTRY_SIZE;
7160
7161 /* If we're not using lazy TLS relocations, don't generate the
7162 GOT entry required. */
7163 if (!(info->flags & DF_BIND_NOW))
7164 {
7165 htab->dt_tlsdesc_got = htab->root.sgot->size;
7166 htab->root.sgot->size += GOT_ENTRY_SIZE;
7167 }
7168 }
7169
7170 /* Initialise mapping symbol information, used later to distinguish
7171 between code and data while scanning for erratum 835769. */
7172 if (htab->fix_erratum_835769)
7173 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
7174 {
7175 if (!is_aarch64_elf (ibfd))
7176 continue;
7177 bfd_elfNN_aarch64_init_maps (ibfd);
7178 }
7179
7180 /* We now have determined the sizes of the various dynamic sections.
7181 Allocate memory for them. */
7182 relocs = FALSE;
7183 for (s = dynobj->sections; s != NULL; s = s->next)
7184 {
7185 if ((s->flags & SEC_LINKER_CREATED) == 0)
7186 continue;
7187
7188 if (s == htab->root.splt
7189 || s == htab->root.sgot
7190 || s == htab->root.sgotplt
7191 || s == htab->root.iplt
7192 || s == htab->root.igotplt || s == htab->sdynbss)
7193 {
7194 /* Strip this section if we don't need it; see the
7195 comment below. */
7196 }
7197 else if (CONST_STRNEQ (bfd_get_section_name (dynobj, s), ".rela"))
7198 {
7199 if (s->size != 0 && s != htab->root.srelplt)
7200 relocs = TRUE;
7201
7202 /* We use the reloc_count field as a counter if we need
7203 to copy relocs into the output file. */
7204 if (s != htab->root.srelplt)
7205 s->reloc_count = 0;
7206 }
7207 else
7208 {
7209 /* It's not one of our sections, so don't allocate space. */
7210 continue;
7211 }
7212
7213 if (s->size == 0)
7214 {
7215 /* If we don't need this section, strip it from the
7216 output file. This is mostly to handle .rela.bss and
7217 .rela.plt. We must create both sections in
7218 create_dynamic_sections, because they must be created
7219 before the linker maps input sections to output
7220 sections. The linker does that before
7221 adjust_dynamic_symbol is called, and it is that
7222 function which decides whether anything needs to go
7223 into these sections. */
7224
7225 s->flags |= SEC_EXCLUDE;
7226 continue;
7227 }
7228
7229 if ((s->flags & SEC_HAS_CONTENTS) == 0)
7230 continue;
7231
7232 /* Allocate memory for the section contents. We use bfd_zalloc
7233 here in case unused entries are not reclaimed before the
7234 section's contents are written out. This should not happen,
7235 but this way if it does, we get an R_AARCH64_NONE reloc instead
7236 of garbage. */
7237 s->contents = (bfd_byte *) bfd_zalloc (dynobj, s->size);
7238 if (s->contents == NULL)
7239 return FALSE;
7240 }
7241
7242 if (htab->root.dynamic_sections_created)
7243 {
7244 /* Add some entries to the .dynamic section. We fill in the
7245 values later, in elfNN_aarch64_finish_dynamic_sections, but we
7246 must add the entries now so that we get the correct size for
7247 the .dynamic section. The DT_DEBUG entry is filled in by the
7248 dynamic linker and used by the debugger. */
7249 #define add_dynamic_entry(TAG, VAL) \
7250 _bfd_elf_add_dynamic_entry (info, TAG, VAL)
7251
7252 if (info->executable)
7253 {
7254 if (!add_dynamic_entry (DT_DEBUG, 0))
7255 return FALSE;
7256 }
7257
7258 if (htab->root.splt->size != 0)
7259 {
7260 if (!add_dynamic_entry (DT_PLTGOT, 0)
7261 || !add_dynamic_entry (DT_PLTRELSZ, 0)
7262 || !add_dynamic_entry (DT_PLTREL, DT_RELA)
7263 || !add_dynamic_entry (DT_JMPREL, 0))
7264 return FALSE;
7265
7266 if (htab->tlsdesc_plt
7267 && (!add_dynamic_entry (DT_TLSDESC_PLT, 0)
7268 || !add_dynamic_entry (DT_TLSDESC_GOT, 0)))
7269 return FALSE;
7270 }
7271
7272 if (relocs)
7273 {
7274 if (!add_dynamic_entry (DT_RELA, 0)
7275 || !add_dynamic_entry (DT_RELASZ, 0)
7276 || !add_dynamic_entry (DT_RELAENT, RELOC_SIZE (htab)))
7277 return FALSE;
7278
7279 /* If any dynamic relocs apply to a read-only section,
7280 then we need a DT_TEXTREL entry. */
7281 if ((info->flags & DF_TEXTREL) != 0)
7282 {
7283 if (!add_dynamic_entry (DT_TEXTREL, 0))
7284 return FALSE;
7285 }
7286 }
7287 }
7288 #undef add_dynamic_entry
7289
7290 return TRUE;
7291 }
7292
7293 static inline void
7294 elf_aarch64_update_plt_entry (bfd *output_bfd,
7295 bfd_reloc_code_real_type r_type,
7296 bfd_byte *plt_entry, bfd_vma value)
7297 {
7298 reloc_howto_type *howto = elfNN_aarch64_howto_from_bfd_reloc (r_type);
7299
7300 _bfd_aarch64_elf_put_addend (output_bfd, plt_entry, r_type, howto, value);
7301 }
7302
7303 static void
7304 elfNN_aarch64_create_small_pltn_entry (struct elf_link_hash_entry *h,
7305 struct elf_aarch64_link_hash_table
7306 *htab, bfd *output_bfd,
7307 struct bfd_link_info *info)
7308 {
7309 bfd_byte *plt_entry;
7310 bfd_vma plt_index;
7311 bfd_vma got_offset;
7312 bfd_vma gotplt_entry_address;
7313 bfd_vma plt_entry_address;
7314 Elf_Internal_Rela rela;
7315 bfd_byte *loc;
7316 asection *plt, *gotplt, *relplt;
7317
7318 /* When building a static executable, use .iplt, .igot.plt and
7319 .rela.iplt sections for STT_GNU_IFUNC symbols. */
7320 if (htab->root.splt != NULL)
7321 {
7322 plt = htab->root.splt;
7323 gotplt = htab->root.sgotplt;
7324 relplt = htab->root.srelplt;
7325 }
7326 else
7327 {
7328 plt = htab->root.iplt;
7329 gotplt = htab->root.igotplt;
7330 relplt = htab->root.irelplt;
7331 }
7332
7333 /* Get the index in the procedure linkage table which
7334 corresponds to this symbol. This is the index of this symbol
7335 in all the symbols for which we are making plt entries. The
7336 first entry in the procedure linkage table is reserved.
7337
7338 Get the offset into the .got table of the entry that
7339 corresponds to this function. Each .got entry is GOT_ENTRY_SIZE
7340 bytes. The first three are reserved for the dynamic linker.
7341
7342 For static executables, we don't reserve anything. */
7343
7344 if (plt == htab->root.splt)
7345 {
7346 plt_index = (h->plt.offset - htab->plt_header_size) / htab->plt_entry_size;
7347 got_offset = (plt_index + 3) * GOT_ENTRY_SIZE;
7348 }
7349 else
7350 {
7351 plt_index = h->plt.offset / htab->plt_entry_size;
7352 got_offset = plt_index * GOT_ENTRY_SIZE;
7353 }
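/* A worked example, assuming the ELF64 small PLT sizes (32-byte PLT
   header, 16-byte PLT entries, 8-byte GOT entries): the first PLTn
   entry has h->plt.offset == 32, giving plt_index 0 and
   got_offset == (0 + 3) * 8 == 24, i.e. the slot immediately after
   the three reserved .got.plt entries.  */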
7354
7355 plt_entry = plt->contents + h->plt.offset;
7356 plt_entry_address = plt->output_section->vma
7357 + plt->output_offset + h->plt.offset;
7358 gotplt_entry_address = gotplt->output_section->vma +
7359 gotplt->output_offset + got_offset;
7360
7361 /* Copy in the boiler-plate for the PLTn entry. */
7362 memcpy (plt_entry, elfNN_aarch64_small_plt_entry, PLT_SMALL_ENTRY_SIZE);
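/* For ELF64 the small PLTn stub copied in above has the form
     adrp x16, PLT_GOT + n * 8
     ldr  x17, [x16, #:lo12:PLT_GOT + n * 8]
     add  x16, x16, #:lo12:PLT_GOT + n * 8
     br   x17
   and, as with PLT0, the ELF32 form differs slightly because of the
   different GOT entry size.  */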
7363
7364 /* Fill in the top 21 bits for this: ADRP x16, PLT_GOT + n * 8.
7365 ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
7366 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADR_HI21_PCREL,
7367 plt_entry,
7368 PG (gotplt_entry_address) -
7369 PG (plt_entry_address));
7370
7371 /* Fill in the lo12 bits for the load from the pltgot. */
7372 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_LDSTNN_LO12,
7373 plt_entry + 4,
7374 PG_OFFSET (gotplt_entry_address));
7375
7376 /* Fill in the lo12 bits for the add from the pltgot entry. */
7377 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADD_LO12,
7378 plt_entry + 8,
7379 PG_OFFSET (gotplt_entry_address));
7380
7381 /* All the GOTPLT Entries are essentially initialized to PLT0. */
7382 bfd_put_NN (output_bfd,
7383 plt->output_section->vma + plt->output_offset,
7384 gotplt->contents + got_offset);
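/* Pointing the .got.plt slot back at PLT0 gives lazy binding its
   initial state: the first call through PLTn falls into PLT0 and so
   into the dynamic linker's resolver, which then rewrites the slot
   with the real function address for subsequent calls.  */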
7385
7386 rela.r_offset = gotplt_entry_address;
7387
7388 if (h->dynindx == -1
7389 || ((info->executable
7390 || ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
7391 && h->def_regular
7392 && h->type == STT_GNU_IFUNC))
7393 {
7394 /* If an STT_GNU_IFUNC symbol is locally defined, generate
7395 R_AARCH64_IRELATIVE instead of R_AARCH64_JUMP_SLOT. */
7396 rela.r_info = ELFNN_R_INFO (0, AARCH64_R (IRELATIVE));
7397 rela.r_addend = (h->root.u.def.value
7398 + h->root.u.def.section->output_section->vma
7399 + h->root.u.def.section->output_offset);
7400 }
7401 else
7402 {
7403 /* Fill in the entry in the .rela.plt section. */
7404 rela.r_info = ELFNN_R_INFO (h->dynindx, AARCH64_R (JUMP_SLOT));
7405 rela.r_addend = 0;
7406 }
7407
7408 /* Compute the relocation entry to use based on the PLT index and do
7409 not adjust reloc_count; it has already been adjusted to account
7410 for this entry. */
7411 loc = relplt->contents + plt_index * RELOC_SIZE (htab);
7412 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
7413 }
7414
7415 /* Size sections even though they're not dynamic. We use this hook to
7416 set up _TLS_MODULE_BASE_, if needed. */
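/* _TLS_MODULE_BASE_ is conventionally a hidden STT_TLS symbol defined
   at offset 0 of the module's TLS segment, which is what the code
   below arranges; TLS code sequences can then use it as a common
   base address.  */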
7417
7418 static bfd_boolean
7419 elfNN_aarch64_always_size_sections (bfd *output_bfd,
7420 struct bfd_link_info *info)
7421 {
7422 asection *tls_sec;
7423
7424 if (info->relocatable)
7425 return TRUE;
7426
7427 tls_sec = elf_hash_table (info)->tls_sec;
7428
7429 if (tls_sec)
7430 {
7431 struct elf_link_hash_entry *tlsbase;
7432
7433 tlsbase = elf_link_hash_lookup (elf_hash_table (info),
7434 "_TLS_MODULE_BASE_", TRUE, TRUE, FALSE);
7435
7436 if (tlsbase)
7437 {
7438 struct bfd_link_hash_entry *h = NULL;
7439 const struct elf_backend_data *bed =
7440 get_elf_backend_data (output_bfd);
7441
7442 if (!(_bfd_generic_link_add_one_symbol
7443 (info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL,
7444 tls_sec, 0, NULL, FALSE, bed->collect, &h)))
7445 return FALSE;
7446
7447 tlsbase->type = STT_TLS;
7448 tlsbase = (struct elf_link_hash_entry *) h;
7449 tlsbase->def_regular = 1;
7450 tlsbase->other = STV_HIDDEN;
7451 (*bed->elf_backend_hide_symbol) (info, tlsbase, TRUE);
7452 }
7453 }
7454
7455 return TRUE;
7456 }
7457
7458 /* Finish up dynamic symbol handling. We set the contents of various
7459 dynamic sections here. */
7460 static bfd_boolean
7461 elfNN_aarch64_finish_dynamic_symbol (bfd *output_bfd,
7462 struct bfd_link_info *info,
7463 struct elf_link_hash_entry *h,
7464 Elf_Internal_Sym *sym)
7465 {
7466 struct elf_aarch64_link_hash_table *htab;
7467 htab = elf_aarch64_hash_table (info);
7468
7469 if (h->plt.offset != (bfd_vma) - 1)
7470 {
7471 asection *plt, *gotplt, *relplt;
7472
7473 /* This symbol has an entry in the procedure linkage table. Set
7474 it up. */
7475
7476 /* When building a static executable, use .iplt, .igot.plt and
7477 .rela.iplt sections for STT_GNU_IFUNC symbols. */
7478 if (htab->root.splt != NULL)
7479 {
7480 plt = htab->root.splt;
7481 gotplt = htab->root.sgotplt;
7482 relplt = htab->root.srelplt;
7483 }
7484 else
7485 {
7486 plt = htab->root.iplt;
7487 gotplt = htab->root.igotplt;
7488 relplt = htab->root.irelplt;
7489 }
7490
7491 /* This symbol has an entry in the procedure linkage table. Set
7492 it up. */
7493 if ((h->dynindx == -1
7494 && !((h->forced_local || info->executable)
7495 && h->def_regular
7496 && h->type == STT_GNU_IFUNC))
7497 || plt == NULL
7498 || gotplt == NULL
7499 || relplt == NULL)
7500 abort ();
7501
7502 elfNN_aarch64_create_small_pltn_entry (h, htab, output_bfd, info);
7503 if (!h->def_regular)
7504 {
7505 /* Mark the symbol as undefined, rather than as defined in
7506 the .plt section. */
7507 sym->st_shndx = SHN_UNDEF;
7508 /* If the symbol is weak we need to clear the value.
7509 Otherwise, the PLT entry would provide a definition for
7510 the symbol even if the symbol wasn't defined anywhere,
7511 and so the symbol's address would never be NULL. Leave the value if
7512 there were any relocations where pointer equality matters
7513 (this is a clue for the dynamic linker, to make function
7514 pointer comparisons work between an application and shared
7515 library). */
7516 if (!h->ref_regular_nonweak || !h->pointer_equality_needed)
7517 sym->st_value = 0;
7518 }
7519 }
7520
7521 if (h->got.offset != (bfd_vma) - 1
7522 && elf_aarch64_hash_entry (h)->got_type == GOT_NORMAL)
7523 {
7524 Elf_Internal_Rela rela;
7525 bfd_byte *loc;
7526
7527 /* This symbol has an entry in the global offset table. Set it
7528 up. */
7529 if (htab->root.sgot == NULL || htab->root.srelgot == NULL)
7530 abort ();
7531
7532 rela.r_offset = (htab->root.sgot->output_section->vma
7533 + htab->root.sgot->output_offset
7534 + (h->got.offset & ~(bfd_vma) 1));
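/* The low bit of h->got.offset is used as a flag elsewhere in this
   backend (the BFD_ASSERTs below check it), so it is masked off here
   when forming the GOT entry's address.  */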
7535
7536 if (h->def_regular
7537 && h->type == STT_GNU_IFUNC)
7538 {
7539 if (info->shared)
7540 {
7541 /* Generate R_AARCH64_GLOB_DAT. */
7542 goto do_glob_dat;
7543 }
7544 else
7545 {
7546 asection *plt;
7547
7548 if (!h->pointer_equality_needed)
7549 abort ();
7550
7551 /* For a non-shared object we can't use .got.plt, which holds the
7552 real function address, if we need pointer equality. Instead we
7553 load the GOT entry with the address of the PLT entry. */
7554 plt = htab->root.splt ? htab->root.splt : htab->root.iplt;
7555 bfd_put_NN (output_bfd, (plt->output_section->vma
7556 + plt->output_offset
7557 + h->plt.offset),
7558 htab->root.sgot->contents
7559 + (h->got.offset & ~(bfd_vma) 1));
7560 return TRUE;
7561 }
7562 }
7563 else if (info->shared && SYMBOL_REFERENCES_LOCAL (info, h))
7564 {
7565 if (!h->def_regular)
7566 return FALSE;
7567
7568 BFD_ASSERT ((h->got.offset & 1) != 0);
7569 rela.r_info = ELFNN_R_INFO (0, AARCH64_R (RELATIVE));
7570 rela.r_addend = (h->root.u.def.value
7571 + h->root.u.def.section->output_section->vma
7572 + h->root.u.def.section->output_offset);
7573 }
7574 else
7575 {
7576 do_glob_dat:
7577 BFD_ASSERT ((h->got.offset & 1) == 0);
7578 bfd_put_NN (output_bfd, (bfd_vma) 0,
7579 htab->root.sgot->contents + h->got.offset);
7580 rela.r_info = ELFNN_R_INFO (h->dynindx, AARCH64_R (GLOB_DAT));
7581 rela.r_addend = 0;
7582 }
7583
7584 loc = htab->root.srelgot->contents;
7585 loc += htab->root.srelgot->reloc_count++ * RELOC_SIZE (htab);
7586 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
7587 }
7588
7589 if (h->needs_copy)
7590 {
7591 Elf_Internal_Rela rela;
7592 bfd_byte *loc;
7593
7594 /* This symbol needs a copy reloc. Set it up. */
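/* At run time an R_AARCH64_COPY reloc tells the dynamic linker to copy
   the initial value of the datum from the shared object into the space
   reserved for it in the executable (the location computed below).  */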
7595
7596 if (h->dynindx == -1
7597 || (h->root.type != bfd_link_hash_defined
7598 && h->root.type != bfd_link_hash_defweak)
7599 || htab->srelbss == NULL)
7600 abort ();
7601
7602 rela.r_offset = (h->root.u.def.value
7603 + h->root.u.def.section->output_section->vma
7604 + h->root.u.def.section->output_offset);
7605 rela.r_info = ELFNN_R_INFO (h->dynindx, AARCH64_R (COPY));
7606 rela.r_addend = 0;
7607 loc = htab->srelbss->contents;
7608 loc += htab->srelbss->reloc_count++ * RELOC_SIZE (htab);
7609 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
7610 }
7611
7612 /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute. SYM may
7613 be NULL for local symbols. */
7614 if (sym != NULL
7615 && (h == elf_hash_table (info)->hdynamic
7616 || h == elf_hash_table (info)->hgot))
7617 sym->st_shndx = SHN_ABS;
7618
7619 return TRUE;
7620 }
7621
7622 /* Finish up local dynamic symbol handling. We set the contents of
7623 various dynamic sections here. */
7624
7625 static bfd_boolean
7626 elfNN_aarch64_finish_local_dynamic_symbol (void **slot, void *inf)
7627 {
7628 struct elf_link_hash_entry *h
7629 = (struct elf_link_hash_entry *) *slot;
7630 struct bfd_link_info *info
7631 = (struct bfd_link_info *) inf;
7632
7633 return elfNN_aarch64_finish_dynamic_symbol (info->output_bfd,
7634 info, h, NULL);
7635 }
7636
7637 static void
7638 elfNN_aarch64_init_small_plt0_entry (bfd *output_bfd ATTRIBUTE_UNUSED,
7639 struct elf_aarch64_link_hash_table
7640 *htab)
7641 {
7642 /* Fill in PLT0. FIXME:RR Note this doesn't distinguish between
7643 small and large PLTs and at the moment just generates
7644 the small PLT. */
7645
7646 /* PLT0 of the small PLT looks like this in ELF64 -
7647 stp x16, x30, [sp, #-16]! // Save the reloc and lr on stack.
7648 adrp x16, PLT_GOT + 16 // Get the page base of the GOTPLT
7649 ldr x17, [x16, #:lo12:PLT_GOT+16] // Load the address of the
7650 // symbol resolver
7651 add x16, x16, #:lo12:PLT_GOT+16 // Load the lo12 bits of the
7652 // GOTPLT entry for this.
7653 br x17
7654 PLT0 will be slightly different in ELF32 due to the different GOT
7655 entry size.
7656 */
7657 bfd_vma plt_got_2nd_ent; /* Address of GOT[2]. */
7658 bfd_vma plt_base;
7659
7660
7661 memcpy (htab->root.splt->contents, elfNN_aarch64_small_plt0_entry,
7662 PLT_ENTRY_SIZE);
7663 elf_section_data (htab->root.splt->output_section)->this_hdr.sh_entsize =
7664 PLT_ENTRY_SIZE;
7665
7666 plt_got_2nd_ent = (htab->root.sgotplt->output_section->vma
7667 + htab->root.sgotplt->output_offset
7668 + GOT_ENTRY_SIZE * 2);
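/* The first three .got.plt entries are reserved: they are zeroed when
   the dynamic sections are finished and filled in by the dynamic
   linker at run time, and PLT0 loads the resolver address from the
   third of them, hence the GOT_ENTRY_SIZE * 2 offset just computed.  */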
7669
7670 plt_base = htab->root.splt->output_section->vma +
7671 htab->root.splt->output_offset;
7672
7673 /* Fill in the top 21 bits for this: ADRP x16, PLT_GOT + n * 8.
7674 ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
7675 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADR_HI21_PCREL,
7676 htab->root.splt->contents + 4,
7677 PG (plt_got_2nd_ent) - PG (plt_base + 4));
7678
7679 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_LDSTNN_LO12,
7680 htab->root.splt->contents + 8,
7681 PG_OFFSET (plt_got_2nd_ent));
7682
7683 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADD_LO12,
7684 htab->root.splt->contents + 12,
7685 PG_OFFSET (plt_got_2nd_ent));
7686 }
7687
7688 static bfd_boolean
7689 elfNN_aarch64_finish_dynamic_sections (bfd *output_bfd,
7690 struct bfd_link_info *info)
7691 {
7692 struct elf_aarch64_link_hash_table *htab;
7693 bfd *dynobj;
7694 asection *sdyn;
7695
7696 htab = elf_aarch64_hash_table (info);
7697 dynobj = htab->root.dynobj;
7698 sdyn = bfd_get_linker_section (dynobj, ".dynamic");
7699
7700 if (htab->root.dynamic_sections_created)
7701 {
7702 ElfNN_External_Dyn *dyncon, *dynconend;
7703
7704 if (sdyn == NULL || htab->root.sgot == NULL)
7705 abort ();
7706
7707 dyncon = (ElfNN_External_Dyn *) sdyn->contents;
7708 dynconend = (ElfNN_External_Dyn *) (sdyn->contents + sdyn->size);
7709 for (; dyncon < dynconend; dyncon++)
7710 {
7711 Elf_Internal_Dyn dyn;
7712 asection *s;
7713
7714 bfd_elfNN_swap_dyn_in (dynobj, dyncon, &dyn);
7715
7716 switch (dyn.d_tag)
7717 {
7718 default:
7719 continue;
7720
7721 case DT_PLTGOT:
7722 s = htab->root.sgotplt;
7723 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
7724 break;
7725
7726 case DT_JMPREL:
7727 dyn.d_un.d_ptr = htab->root.srelplt->output_section->vma;
7728 break;
7729
7730 case DT_PLTRELSZ:
7731 s = htab->root.srelplt;
7732 dyn.d_un.d_val = s->size;
7733 break;
7734
7735 case DT_RELASZ:
7736 /* The procedure linkage table relocs (DT_JMPREL) should
7737 not be included in the overall relocs (DT_RELA).
7738 Therefore, we override the DT_RELASZ entry here to
7739 make it not include the JMPREL relocs. Since the
7740 linker script arranges for .rela.plt to follow all
7741 other relocation sections, we don't have to worry
7742 about changing the DT_RELA entry. */
7743 if (htab->root.srelplt != NULL)
7744 {
7745 s = htab->root.srelplt;
7746 dyn.d_un.d_val -= s->size;
7747 }
7748 break;
7749
7750 case DT_TLSDESC_PLT:
7751 s = htab->root.splt;
7752 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
7753 + htab->tlsdesc_plt;
7754 break;
7755
7756 case DT_TLSDESC_GOT:
7757 s = htab->root.sgot;
7758 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
7759 + htab->dt_tlsdesc_got;
7760 break;
7761 }
7762
7763 bfd_elfNN_swap_dyn_out (output_bfd, &dyn, dyncon);
7764 }
7765
7766 }
7767
7768 /* Fill in the special first entry in the procedure linkage table. */
7769 if (htab->root.splt && htab->root.splt->size > 0)
7770 {
7771 elfNN_aarch64_init_small_plt0_entry (output_bfd, htab);
7772
7773 elf_section_data (htab->root.splt->output_section)->
7774 this_hdr.sh_entsize = htab->plt_entry_size;
7775
7776
7777 if (htab->tlsdesc_plt)
7778 {
7779 bfd_put_NN (output_bfd, (bfd_vma) 0,
7780 htab->root.sgot->contents + htab->dt_tlsdesc_got);
7781
7782 memcpy (htab->root.splt->contents + htab->tlsdesc_plt,
7783 elfNN_aarch64_tlsdesc_small_plt_entry,
7784 sizeof (elfNN_aarch64_tlsdesc_small_plt_entry));
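/* The patching below loads the value stored at DT_TLSDESC_GOT into x2
   and forms the address of .got.plt in x3 for the template copied in
   above; the template's first instruction is left untouched, which is
   why the fix-ups start at offset 4.  */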
7785
7786 {
7787 bfd_vma adrp1_addr =
7788 htab->root.splt->output_section->vma
7789 + htab->root.splt->output_offset + htab->tlsdesc_plt + 4;
7790
7791 bfd_vma adrp2_addr = adrp1_addr + 4;
7792
7793 bfd_vma got_addr =
7794 htab->root.sgot->output_section->vma
7795 + htab->root.sgot->output_offset;
7796
7797 bfd_vma pltgot_addr =
7798 htab->root.sgotplt->output_section->vma
7799 + htab->root.sgotplt->output_offset;
7800
7801 bfd_vma dt_tlsdesc_got = got_addr + htab->dt_tlsdesc_got;
7802
7803 bfd_byte *plt_entry =
7804 htab->root.splt->contents + htab->tlsdesc_plt;
7805
7806 /* adrp x2, DT_TLSDESC_GOT */
7807 elf_aarch64_update_plt_entry (output_bfd,
7808 BFD_RELOC_AARCH64_ADR_HI21_PCREL,
7809 plt_entry + 4,
7810 (PG (dt_tlsdesc_got)
7811 - PG (adrp1_addr)));
7812
7813 /* adrp x3, 0 */
7814 elf_aarch64_update_plt_entry (output_bfd,
7815 BFD_RELOC_AARCH64_ADR_HI21_PCREL,
7816 plt_entry + 8,
7817 (PG (pltgot_addr)
7818 - PG (adrp2_addr)));
7819
7820 /* ldr x2, [x2, #0] */
7821 elf_aarch64_update_plt_entry (output_bfd,
7822 BFD_RELOC_AARCH64_LDSTNN_LO12,
7823 plt_entry + 12,
7824 PG_OFFSET (dt_tlsdesc_got));
7825
7826 /* add x3, x3, 0 */
7827 elf_aarch64_update_plt_entry (output_bfd,
7828 BFD_RELOC_AARCH64_ADD_LO12,
7829 plt_entry + 16,
7830 PG_OFFSET (pltgot_addr));
7831 }
7832 }
7833 }
7834
7835 if (htab->root.sgotplt)
7836 {
7837 if (bfd_is_abs_section (htab->root.sgotplt->output_section))
7838 {
7839 (*_bfd_error_handler)
7840 (_("discarded output section: `%A'"), htab->root.sgotplt);
7841 return FALSE;
7842 }
7843
7844 /* Fill in the first three entries in the global offset table. */
7845 if (htab->root.sgotplt->size > 0)
7846 {
7847 bfd_put_NN (output_bfd, (bfd_vma) 0, htab->root.sgotplt->contents);
7848
7849 /* Write GOT[1] and GOT[2], needed for the dynamic linker. */
7850 bfd_put_NN (output_bfd,
7851 (bfd_vma) 0,
7852 htab->root.sgotplt->contents + GOT_ENTRY_SIZE);
7853 bfd_put_NN (output_bfd,
7854 (bfd_vma) 0,
7855 htab->root.sgotplt->contents + GOT_ENTRY_SIZE * 2);
7856 }
7857
7858 if (htab->root.sgot)
7859 {
7860 if (htab->root.sgot->size > 0)
7861 {
7862 bfd_vma addr =
7863 sdyn ? sdyn->output_section->vma + sdyn->output_offset : 0;
7864 bfd_put_NN (output_bfd, addr, htab->root.sgot->contents);
7865 }
7866 }
7867
7868 elf_section_data (htab->root.sgotplt->output_section)->
7869 this_hdr.sh_entsize = GOT_ENTRY_SIZE;
7870 }
7871
7872 if (htab->root.sgot && htab->root.sgot->size > 0)
7873 elf_section_data (htab->root.sgot->output_section)->this_hdr.sh_entsize
7874 = GOT_ENTRY_SIZE;
7875
7876 /* Fill PLT and GOT entries for local STT_GNU_IFUNC symbols. */
7877 htab_traverse (htab->loc_hash_table,
7878 elfNN_aarch64_finish_local_dynamic_symbol,
7879 info);
7880
7881 return TRUE;
7882 }
7883
7884 /* Return address for Ith PLT stub in section PLT, for relocation REL
7885 or (bfd_vma) -1 if it should not be included. */
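/* For example, assuming a 32-byte PLT0 and 16-byte small entries (the
   ELF64 values), the stub for PLT index 0 is reported at
   plt->vma + 32 and index 1 at plt->vma + 48.  */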
7886
7887 static bfd_vma
7888 elfNN_aarch64_plt_sym_val (bfd_vma i, const asection *plt,
7889 const arelent *rel ATTRIBUTE_UNUSED)
7890 {
7891 return plt->vma + PLT_ENTRY_SIZE + i * PLT_SMALL_ENTRY_SIZE;
7892 }
7893
7894
7895 /* We use this so we can override certain functions
7896 (though currently we don't). */
7897
7898 const struct elf_size_info elfNN_aarch64_size_info =
7899 {
7900 sizeof (ElfNN_External_Ehdr),
7901 sizeof (ElfNN_External_Phdr),
7902 sizeof (ElfNN_External_Shdr),
7903 sizeof (ElfNN_External_Rel),
7904 sizeof (ElfNN_External_Rela),
7905 sizeof (ElfNN_External_Sym),
7906 sizeof (ElfNN_External_Dyn),
7907 sizeof (Elf_External_Note),
7908 4, /* Hash table entry size. */
7909 1, /* Internal relocs per external relocs. */
7910 ARCH_SIZE, /* Arch size. */
7911 LOG_FILE_ALIGN, /* Log_file_align. */
7912 ELFCLASSNN, EV_CURRENT,
7913 bfd_elfNN_write_out_phdrs,
7914 bfd_elfNN_write_shdrs_and_ehdr,
7915 bfd_elfNN_checksum_contents,
7916 bfd_elfNN_write_relocs,
7917 bfd_elfNN_swap_symbol_in,
7918 bfd_elfNN_swap_symbol_out,
7919 bfd_elfNN_slurp_reloc_table,
7920 bfd_elfNN_slurp_symbol_table,
7921 bfd_elfNN_swap_dyn_in,
7922 bfd_elfNN_swap_dyn_out,
7923 bfd_elfNN_swap_reloc_in,
7924 bfd_elfNN_swap_reloc_out,
7925 bfd_elfNN_swap_reloca_in,
7926 bfd_elfNN_swap_reloca_out
7927 };
7928
7929 #define ELF_ARCH bfd_arch_aarch64
7930 #define ELF_MACHINE_CODE EM_AARCH64
7931 #define ELF_MAXPAGESIZE 0x10000
7932 #define ELF_MINPAGESIZE 0x1000
7933 #define ELF_COMMONPAGESIZE 0x1000
7934
7935 #define bfd_elfNN_close_and_cleanup \
7936 elfNN_aarch64_close_and_cleanup
7937
7938 #define bfd_elfNN_bfd_free_cached_info \
7939 elfNN_aarch64_bfd_free_cached_info
7940
7941 #define bfd_elfNN_bfd_is_target_special_symbol \
7942 elfNN_aarch64_is_target_special_symbol
7943
7944 #define bfd_elfNN_bfd_link_hash_table_create \
7945 elfNN_aarch64_link_hash_table_create
7946
7947 #define bfd_elfNN_bfd_merge_private_bfd_data \
7948 elfNN_aarch64_merge_private_bfd_data
7949
7950 #define bfd_elfNN_bfd_print_private_bfd_data \
7951 elfNN_aarch64_print_private_bfd_data
7952
7953 #define bfd_elfNN_bfd_reloc_type_lookup \
7954 elfNN_aarch64_reloc_type_lookup
7955
7956 #define bfd_elfNN_bfd_reloc_name_lookup \
7957 elfNN_aarch64_reloc_name_lookup
7958
7959 #define bfd_elfNN_bfd_set_private_flags \
7960 elfNN_aarch64_set_private_flags
7961
7962 #define bfd_elfNN_find_inliner_info \
7963 elfNN_aarch64_find_inliner_info
7964
7965 #define bfd_elfNN_find_nearest_line \
7966 elfNN_aarch64_find_nearest_line
7967
7968 #define bfd_elfNN_mkobject \
7969 elfNN_aarch64_mkobject
7970
7971 #define bfd_elfNN_new_section_hook \
7972 elfNN_aarch64_new_section_hook
7973
7974 #define elf_backend_adjust_dynamic_symbol \
7975 elfNN_aarch64_adjust_dynamic_symbol
7976
7977 #define elf_backend_always_size_sections \
7978 elfNN_aarch64_always_size_sections
7979
7980 #define elf_backend_check_relocs \
7981 elfNN_aarch64_check_relocs
7982
7983 #define elf_backend_copy_indirect_symbol \
7984 elfNN_aarch64_copy_indirect_symbol
7985
7986 /* Create .dynbss, and .rela.bss sections in DYNOBJ, and set up shortcuts
7987 to them in our hash. */
7988 #define elf_backend_create_dynamic_sections \
7989 elfNN_aarch64_create_dynamic_sections
7990
7991 #define elf_backend_init_index_section \
7992 _bfd_elf_init_2_index_sections
7993
7994 #define elf_backend_finish_dynamic_sections \
7995 elfNN_aarch64_finish_dynamic_sections
7996
7997 #define elf_backend_finish_dynamic_symbol \
7998 elfNN_aarch64_finish_dynamic_symbol
7999
8000 #define elf_backend_gc_sweep_hook \
8001 elfNN_aarch64_gc_sweep_hook
8002
8003 #define elf_backend_object_p \
8004 elfNN_aarch64_object_p
8005
8006 #define elf_backend_output_arch_local_syms \
8007 elfNN_aarch64_output_arch_local_syms
8008
8009 #define elf_backend_plt_sym_val \
8010 elfNN_aarch64_plt_sym_val
8011
8012 #define elf_backend_post_process_headers \
8013 elfNN_aarch64_post_process_headers
8014
8015 #define elf_backend_relocate_section \
8016 elfNN_aarch64_relocate_section
8017
8018 #define elf_backend_reloc_type_class \
8019 elfNN_aarch64_reloc_type_class
8020
8021 #define elf_backend_section_from_shdr \
8022 elfNN_aarch64_section_from_shdr
8023
8024 #define elf_backend_size_dynamic_sections \
8025 elfNN_aarch64_size_dynamic_sections
8026
8027 #define elf_backend_size_info \
8028 elfNN_aarch64_size_info
8029
8030 #define elf_backend_write_section \
8031 elfNN_aarch64_write_section
8032
8033 #define elf_backend_can_refcount 1
8034 #define elf_backend_can_gc_sections 1
8035 #define elf_backend_plt_readonly 1
8036 #define elf_backend_want_got_plt 1
8037 #define elf_backend_want_plt_sym 0
8038 #define elf_backend_may_use_rel_p 0
8039 #define elf_backend_may_use_rela_p 1
8040 #define elf_backend_default_use_rela_p 1
8041 #define elf_backend_rela_normal 1
8042 #define elf_backend_got_header_size (GOT_ENTRY_SIZE * 3)
8043 #define elf_backend_default_execstack 0
8044
8045 #undef elf_backend_obj_attrs_section
8046 #define elf_backend_obj_attrs_section ".ARM.attributes"
8047
8048 #include "elfNN-target.h"