1 /* AArch64-specific support for NN-bit ELF.
2 Copyright (C) 2009-2022 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of BFD, the Binary File Descriptor library.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21 /* Notes on implementation:
22
23 Thread Local Storage (TLS)
24
25 Overview:
26
27 The implementation currently supports both traditional TLS and TLS
28 descriptors, but only general dynamic (GD).
29
30 For traditional TLS the assembler will present us with code
31 fragments of the form:
32
33 adrp x0, :tlsgd:foo
34 R_AARCH64_TLSGD_ADR_PAGE21(foo)
35 add x0, x0, #:tlsgd_lo12:foo
36 R_AARCH64_TLSGD_ADD_LO12_NC(foo)
37 bl __tls_get_addr
38 nop
39
40 For TLS descriptors the assembler will present us with code
41 fragments of the form:
42
43 adrp x0, :tlsdesc:foo R_AARCH64_TLSDESC_ADR_PAGE21(foo)
44 ldr x1, [x0, #:tlsdesc_lo12:foo] R_AARCH64_TLSDESC_LD64_LO12(foo)
45 add x0, x0, #:tlsdesc_lo12:foo R_AARCH64_TLSDESC_ADD_LO12(foo)
46 .tlsdesccall foo
47 blr x1 R_AARCH64_TLSDESC_CALL(foo)
48
49 The relocations R_AARCH64_TLSGD_{ADR_PREL21,ADD_LO12_NC} against foo
50 indicate that foo is thread local and should be accessed via the
51 traditional TLS mechanism.
52
53 The relocations R_AARCH64_TLSDESC_{ADR_PAGE21,LD64_LO12_NC,ADD_LO12_NC}
54 against foo indicate that 'foo' is thread local and should be accessed
55 via a TLS descriptor mechanism.
56
57 The precise instruction sequence is only relevant from the
58 perspective of linker relaxation which is currently not implemented.
59
60 The static linker must detect that 'foo' is a TLS object and
61 allocate a double GOT entry. The GOT entry must be created for both
62 global and local TLS symbols. Note that this is different from
63 non-TLS local objects, which do not need a GOT entry.
64
65 In the traditional TLS mechanism, the double GOT entry is used to
66 provide the tls_index structure, containing module and offset
67 entries. The static linker places the relocation R_AARCH64_TLS_DTPMOD
68 on the module entry. The loader will subsequently fixup this
69 relocation with the module identity.
70
71 For global traditional TLS symbols the static linker places an
72 R_AARCH64_TLS_DTPREL relocation on the offset entry. The loader
73 will subsequently fixup the offset. For local TLS symbols the static
74 linker fixes up the offset itself.
75
76 In the TLS descriptor mechanism the double GOT entry is used to
77 provide the descriptor. The static linker places the relocation
78 R_AARCH64_TLSDESC on the first GOT slot. The loader will
79 subsequently fix this up.
80
81 Implementation:
82
83 The handling of TLS symbols is implemented across a number of
84 different backend functions. The following is a top level view of
85 what processing is performed where.
86
87 The TLS implementation maintains state information for each TLS
88 symbol. The state information for local and global symbols is kept
89 in different places. Global symbols use generic BFD structures while
90 local symbols use backend specific structures that are allocated and
91 maintained entirely by the backend.
92
93 The flow:
94
95 elfNN_aarch64_check_relocs()
96
97 This function is invoked for each relocation.
98
99 The TLS relocations R_AARCH64_TLSGD_{ADR_PREL21,ADD_LO12_NC} and
100 R_AARCH64_TLSDESC_{ADR_PAGE21,LD64_LO12_NC,ADD_LO12_NC} are
101 spotted. The local symbol data structures are created once, when
102 the first local symbol is seen.
103
104 The reference count for a symbol is incremented. The GOT type for
105 each symbol is marked as general dynamic.
106
107 elfNN_aarch64_allocate_dynrelocs ()
108
109 For each global symbol with a positive reference count we allocate a
110 double GOT slot. For a traditional TLS symbol we allocate space for
111 two relocation entries in the GOT; for a TLS descriptor symbol we
112 allocate space for one relocation on the slot. The GOT offset for
113 the symbol is recorded.
114
115 elfNN_aarch64_size_dynamic_sections ()
116
117 Iterate over all input BFDs, looking in the local symbol data
118 structures constructed earlier for local TLS symbols, and allocate
119 them double GOT slots along with space for a single GOT relocation.
120 Update the local symbol structure to record the allocated GOT offset.
121
122 elfNN_aarch64_relocate_section ()
123
124 Calls elfNN_aarch64_final_link_relocate ()
125
126 Emit the relevant TLS relocations against the GOT for each TLS
127 symbol. For local TLS symbols emit the GOT offset directly. The GOT
128 relocations are emitted once the first time a TLS symbol is
129 encountered. The implementation uses the LSB of the GOT offset to
130 flag that the relevant GOT relocations for a symbol have been
131 emitted. All of the TLS code that uses the GOT offset needs to take
132 care to mask out this flag bit before using the offset.
133
134 elfNN_aarch64_final_link_relocate ()
135
136 Fixup the R_AARCH64_TLSGD_{ADR_PREL21, ADD_LO12_NC} relocations. */
137
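/* The notes above mention two details that are easy to get wrong: the
   tls_index structure that the traditional double GOT slot provides, and
   the use of the LSB of a recorded GOT offset as an "already emitted"
   marker.  The following is a minimal illustrative sketch of both; it is
   deliberately kept out of the build, and the names are for exposition
   only rather than part of this backend.  */
#if 0
typedef struct
{
  bfd_vma ti_module;	/* Fixed up via R_AARCH64_TLS_DTPMOD.  */
  bfd_vma ti_offset;	/* Fixed up via R_AARCH64_TLS_DTPREL.  */
} example_tls_index;

/* Mask out the LSB used to flag that the GOT relocations for a symbol
   have already been emitted, before using the recorded offset.  */
static inline bfd_vma
example_got_offset (bfd_vma recorded_offset)
{
  return recorded_offset & ~(bfd_vma) 1;
}
#endif
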
138 #include "sysdep.h"
139 #include "bfd.h"
140 #include "libiberty.h"
141 #include "libbfd.h"
142 #include "elf-bfd.h"
143 #include "bfdlink.h"
144 #include "objalloc.h"
145 #include "elf/aarch64.h"
146 #include "elfxx-aarch64.h"
147 #include "cpu-aarch64.h"
148
149 #define ARCH_SIZE NN
150
151 #if ARCH_SIZE == 64
152 #define AARCH64_R(NAME) R_AARCH64_ ## NAME
153 #define AARCH64_R_STR(NAME) "R_AARCH64_" #NAME
154 #define HOWTO64(...) HOWTO (__VA_ARGS__)
155 #define HOWTO32(...) EMPTY_HOWTO (0)
156 #define LOG_FILE_ALIGN 3
157 #define BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC BFD_RELOC_AARCH64_TLSDESC_LD64_LO12
158 #endif
159
160 #if ARCH_SIZE == 32
161 #define AARCH64_R(NAME) R_AARCH64_P32_ ## NAME
162 #define AARCH64_R_STR(NAME) "R_AARCH64_P32_" #NAME
163 #define HOWTO64(...) EMPTY_HOWTO (0)
164 #define HOWTO32(...) HOWTO (__VA_ARGS__)
165 #define LOG_FILE_ALIGN 2
166 #define BFD_RELOC_AARCH64_TLSDESC_LD32_LO12 BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC
167 #define R_AARCH64_P32_TLSDESC_ADD_LO12 R_AARCH64_P32_TLSDESC_ADD_LO12_NC
168 #endif
169
170 #define IS_AARCH64_TLS_RELOC(R_TYPE) \
171 ((R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC \
172 || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21 \
173 || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADR_PREL21 \
174 || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC \
175 || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_MOVW_G1 \
176 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21 \
177 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC \
178 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC \
179 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19 \
180 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC \
181 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1 \
182 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12 \
183 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12 \
184 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC \
185 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC \
186 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21 \
187 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_ADR_PREL21 \
188 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12 \
189 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC \
190 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12 \
191 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC \
192 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12 \
193 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC \
194 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12 \
195 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC \
196 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0 \
197 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC \
198 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1 \
199 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC \
200 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2 \
201 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12 \
202 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12 \
203 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC \
204 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12 \
205 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC \
206 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12 \
207 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC \
208 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12 \
209 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC \
210 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12 \
211 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC \
212 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0 \
213 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC \
214 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1 \
215 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC \
216 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2 \
217 || (R_TYPE) == BFD_RELOC_AARCH64_TLS_DTPMOD \
218 || (R_TYPE) == BFD_RELOC_AARCH64_TLS_DTPREL \
219 || (R_TYPE) == BFD_RELOC_AARCH64_TLS_TPREL \
220 || IS_AARCH64_TLSDESC_RELOC ((R_TYPE)))
221
222 #define IS_AARCH64_TLS_RELAX_RELOC(R_TYPE) \
223 ((R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADD \
224 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADD_LO12 \
225 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21 \
226 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21 \
227 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_CALL \
228 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LD_PREL19 \
229 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LDNN_LO12_NC \
230 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LDR \
231 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC \
232 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_OFF_G1 \
234 || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21 \
235 || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADR_PREL21 \
236 || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC \
237 || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC \
238 || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_MOVW_G1 \
239 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21 \
240 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19 \
241 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC \
242 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC \
243 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21 \
244 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_ADR_PREL21)
245
246 #define IS_AARCH64_TLSDESC_RELOC(R_TYPE) \
247 ((R_TYPE) == BFD_RELOC_AARCH64_TLSDESC \
248 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADD \
249 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADD_LO12 \
250 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21 \
251 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21 \
252 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_CALL \
253 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC \
254 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LD64_LO12 \
255 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LDR \
256 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LD_PREL19 \
257 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC \
258 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_OFF_G1)
259
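/* Purely illustrative and not compiled: the predicates above classify a
   bfd internal reloc code, e.g. to separate TLS descriptor accesses from
   traditional TLS accesses while scanning relocations.  The variable
   name below is only for exposition.  */
#if 0
  if (IS_AARCH64_TLS_RELOC (bfd_r_type))
    {
      if (IS_AARCH64_TLSDESC_RELOC (bfd_r_type))
	{
	  /* TLS descriptor access: one dynamic relocation on the
	     double GOT slot.  */
	}
      else
	{
	  /* Traditional (general dynamic) TLS access: tls_index in
	     the double GOT slot.  */
	}
    }
#endif
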
260 #define ELIMINATE_COPY_RELOCS 1
261
262 /* Return size of a relocation entry. HTAB is the bfd's
263 elf_aarch64_link_hash_table. */
264 #define RELOC_SIZE(HTAB) (sizeof (ElfNN_External_Rela))
265
266 /* GOT Entry size - 8 bytes in ELF64 and 4 bytes in ELF32. */
267 #define GOT_ENTRY_SIZE (ARCH_SIZE / 8)
268 #define PLT_ENTRY_SIZE (32)
269 #define PLT_SMALL_ENTRY_SIZE (16)
270 #define PLT_TLSDESC_ENTRY_SIZE (32)
271 /* PLT sizes with BTI insn. */
272 #define PLT_BTI_SMALL_ENTRY_SIZE (24)
273 /* PLT sizes with PAC insn. */
274 #define PLT_PAC_SMALL_ENTRY_SIZE (24)
275 /* PLT sizes with BTI and PAC insn. */
276 #define PLT_BTI_PAC_SMALL_ENTRY_SIZE (24)
277
278 /* Encoding of the nop instruction. */
279 #define INSN_NOP 0xd503201f
280
281 #define aarch64_compute_jump_table_size(htab) \
282 (((htab)->root.srelplt == NULL) ? 0 \
283 : (htab)->root.srelplt->reloc_count * GOT_ENTRY_SIZE)
284
285 /* The first entry in a procedure linkage table looks like this.
286 These PLT entries are used if the distance between the PLTGOT
287 and the PLT is < 4GB. Note that the dynamic linker gets &PLTGOT[2]
288 in x16 and needs to work out PLTGOT[1] by using an address of
289 [x16,#-GOT_ENTRY_SIZE]; an illustrative sketch follows the entry. */
290 static const bfd_byte elfNN_aarch64_small_plt0_entry[PLT_ENTRY_SIZE] =
291 {
292 0xf0, 0x7b, 0xbf, 0xa9, /* stp x16, x30, [sp, #-16]! */
293 0x10, 0x00, 0x00, 0x90, /* adrp x16, (GOT+16) */
294 #if ARCH_SIZE == 64
295 0x11, 0x0A, 0x40, 0xf9, /* ldr x17, [x16, #PLT_GOT+0x10] */
296 0x10, 0x42, 0x00, 0x91, /* add x16, x16,#PLT_GOT+0x10 */
297 #else
298 0x11, 0x0A, 0x40, 0xb9, /* ldr w17, [x16, #PLT_GOT+0x8] */
299 0x10, 0x22, 0x00, 0x11, /* add w16, w16,#PLT_GOT+0x8 */
300 #endif
301 0x20, 0x02, 0x1f, 0xd6, /* br x17 */
302 0x1f, 0x20, 0x03, 0xd5, /* nop */
303 0x1f, 0x20, 0x03, 0xd5, /* nop */
304 0x1f, 0x20, 0x03, 0xd5, /* nop */
305 };
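
/* Not part of the backend: a minimal sketch of the address arithmetic
   described above.  With x16 holding &PLTGOT[2] on entry to the dynamic
   linker, PLTGOT[1] sits one GOT entry below it, which is what the
   [x16, #-GOT_ENTRY_SIZE] addressing form in the stub encodes.  */
#if 0
static bfd_vma
example_pltgot1_address (bfd_vma x16 /* == address of PLTGOT[2].  */)
{
  return x16 - GOT_ENTRY_SIZE;
}
#endif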
306
307 static const bfd_byte elfNN_aarch64_small_plt0_bti_entry[PLT_ENTRY_SIZE] =
308 {
309 0x5f, 0x24, 0x03, 0xd5, /* bti c. */
310 0xf0, 0x7b, 0xbf, 0xa9, /* stp x16, x30, [sp, #-16]! */
311 0x10, 0x00, 0x00, 0x90, /* adrp x16, (GOT+16) */
312 #if ARCH_SIZE == 64
313 0x11, 0x0A, 0x40, 0xf9, /* ldr x17, [x16, #PLT_GOT+0x10] */
314 0x10, 0x42, 0x00, 0x91, /* add x16, x16,#PLT_GOT+0x10 */
315 #else
316 0x11, 0x0A, 0x40, 0xb9, /* ldr w17, [x16, #PLT_GOT+0x8] */
317 0x10, 0x22, 0x00, 0x11, /* add w16, w16,#PLT_GOT+0x8 */
318 #endif
319 0x20, 0x02, 0x1f, 0xd6, /* br x17 */
320 0x1f, 0x20, 0x03, 0xd5, /* nop */
321 0x1f, 0x20, 0x03, 0xd5, /* nop */
322 };
323
324 /* A per-function entry in a procedure linkage table looks like this.
325 These PLT entries are used if the distance between the PLTGOT and
326 the PLT is < 4GB. Use the BTI versions of the PLTs when BTI is enabled. */
327 static const bfd_byte elfNN_aarch64_small_plt_entry[PLT_SMALL_ENTRY_SIZE] =
328 {
329 0x10, 0x00, 0x00, 0x90, /* adrp x16, PLTGOT + n * 8 */
330 #if ARCH_SIZE == 64
331 0x11, 0x02, 0x40, 0xf9, /* ldr x17, [x16, PLTGOT + n * 8] */
332 0x10, 0x02, 0x00, 0x91, /* add x16, x16, :lo12:PLTGOT + n * 8 */
333 #else
334 0x11, 0x02, 0x40, 0xb9, /* ldr w17, [x16, PLTGOT + n * 4] */
335 0x10, 0x02, 0x00, 0x11, /* add w16, w16, :lo12:PLTGOT + n * 4 */
336 #endif
337 0x20, 0x02, 0x1f, 0xd6, /* br x17. */
338 };
339
340 static const bfd_byte
341 elfNN_aarch64_small_plt_bti_entry[PLT_BTI_SMALL_ENTRY_SIZE] =
342 {
343 0x5f, 0x24, 0x03, 0xd5, /* bti c. */
344 0x10, 0x00, 0x00, 0x90, /* adrp x16, PLTGOT + n * 8 */
345 #if ARCH_SIZE == 64
346 0x11, 0x02, 0x40, 0xf9, /* ldr x17, [x16, PLTGOT + n * 8] */
347 0x10, 0x02, 0x00, 0x91, /* add x16, x16, :lo12:PLTGOT + n * 8 */
348 #else
349 0x11, 0x02, 0x40, 0xb9, /* ldr w17, [x16, PLTGOT + n * 4] */
350 0x10, 0x02, 0x00, 0x11, /* add w16, w16, :lo12:PLTGOT + n * 4 */
351 #endif
352 0x20, 0x02, 0x1f, 0xd6, /* br x17. */
353 0x1f, 0x20, 0x03, 0xd5, /* nop */
354 };
355
356 static const bfd_byte
357 elfNN_aarch64_small_plt_pac_entry[PLT_PAC_SMALL_ENTRY_SIZE] =
358 {
359 0x10, 0x00, 0x00, 0x90, /* adrp x16, PLTGOT + n * 8 */
360 #if ARCH_SIZE == 64
361 0x11, 0x02, 0x40, 0xf9, /* ldr x17, [x16, PLTGOT + n * 8] */
362 0x10, 0x02, 0x00, 0x91, /* add x16, x16, :lo12:PLTGOT + n * 8 */
363 #else
364 0x11, 0x02, 0x40, 0xb9, /* ldr w17, [x16, PLTGOT + n * 4] */
365 0x10, 0x02, 0x00, 0x11, /* add w16, w16, :lo12:PLTGOT + n * 4 */
366 #endif
367 0x9f, 0x21, 0x03, 0xd5, /* autia1716 */
368 0x20, 0x02, 0x1f, 0xd6, /* br x17. */
369 0x1f, 0x20, 0x03, 0xd5, /* nop */
370 };
371
372 static const bfd_byte
373 elfNN_aarch64_small_plt_bti_pac_entry[PLT_BTI_PAC_SMALL_ENTRY_SIZE] =
374 {
375 0x5f, 0x24, 0x03, 0xd5, /* bti c. */
376 0x10, 0x00, 0x00, 0x90, /* adrp x16, PLTGOT + n * 8 */
377 #if ARCH_SIZE == 64
378 0x11, 0x02, 0x40, 0xf9, /* ldr x17, [x16, PLTGOT + n * 8] */
379 0x10, 0x02, 0x00, 0x91, /* add x16, x16, :lo12:PLTGOT + n * 8 */
380 #else
381 0x11, 0x02, 0x40, 0xb9, /* ldr w17, [x16, PLTGOT + n * 4] */
382 0x10, 0x02, 0x00, 0x11, /* add w16, w16, :lo12:PLTGOT + n * 4 */
383 #endif
384 0x9f, 0x21, 0x03, 0xd5, /* autia1716 */
385 0x20, 0x02, 0x1f, 0xd6, /* br x17. */
386 };
387
388 static const bfd_byte
389 elfNN_aarch64_tlsdesc_small_plt_entry[PLT_TLSDESC_ENTRY_SIZE] =
390 {
391 0xe2, 0x0f, 0xbf, 0xa9, /* stp x2, x3, [sp, #-16]! */
392 0x02, 0x00, 0x00, 0x90, /* adrp x2, 0 */
393 0x03, 0x00, 0x00, 0x90, /* adrp x3, 0 */
394 #if ARCH_SIZE == 64
395 0x42, 0x00, 0x40, 0xf9, /* ldr x2, [x2, #0] */
396 0x63, 0x00, 0x00, 0x91, /* add x3, x3, 0 */
397 #else
398 0x42, 0x00, 0x40, 0xb9, /* ldr w2, [x2, #0] */
399 0x63, 0x00, 0x00, 0x11, /* add w3, w3, 0 */
400 #endif
401 0x40, 0x00, 0x1f, 0xd6, /* br x2 */
402 0x1f, 0x20, 0x03, 0xd5, /* nop */
403 0x1f, 0x20, 0x03, 0xd5, /* nop */
404 };
405
406 static const bfd_byte
407 elfNN_aarch64_tlsdesc_small_plt_bti_entry[PLT_TLSDESC_ENTRY_SIZE] =
408 {
409 0x5f, 0x24, 0x03, 0xd5, /* bti c. */
410 0xe2, 0x0f, 0xbf, 0xa9, /* stp x2, x3, [sp, #-16]! */
411 0x02, 0x00, 0x00, 0x90, /* adrp x2, 0 */
412 0x03, 0x00, 0x00, 0x90, /* adrp x3, 0 */
413 #if ARCH_SIZE == 64
414 0x42, 0x00, 0x40, 0xf9, /* ldr x2, [x2, #0] */
415 0x63, 0x00, 0x00, 0x91, /* add x3, x3, 0 */
416 #else
417 0x42, 0x00, 0x40, 0xb9, /* ldr w2, [x2, #0] */
418 0x63, 0x00, 0x00, 0x11, /* add w3, w3, 0 */
419 #endif
420 0x40, 0x00, 0x1f, 0xd6, /* br x2 */
421 0x1f, 0x20, 0x03, 0xd5, /* nop */
422 };
423
424 #define elf_info_to_howto elfNN_aarch64_info_to_howto
425 #define elf_info_to_howto_rel elfNN_aarch64_info_to_howto
426
427 #define AARCH64_ELF_ABI_VERSION 0
428
429 /* In case we're on a 32-bit machine, construct a 64-bit "-1" value. */
430 #define ALL_ONES (~ (bfd_vma) 0)
431
432 /* Indexed by the bfd internal reloc enumerators.
433 Therefore, the table needs to be synced with BFD_RELOC_AARCH64_*
434 in reloc.c. */
435
436 static reloc_howto_type elfNN_aarch64_howto_table[] =
437 {
438 EMPTY_HOWTO (0),
439
440 /* Basic data relocations. */
441
442 /* Deprecated, but retained for backwards compatibility. */
443 HOWTO64 (R_AARCH64_NULL, /* type */
444 0, /* rightshift */
445 0, /* size */
446 0, /* bitsize */
447 false, /* pc_relative */
448 0, /* bitpos */
449 complain_overflow_dont, /* complain_on_overflow */
450 bfd_elf_generic_reloc, /* special_function */
451 "R_AARCH64_NULL", /* name */
452 false, /* partial_inplace */
453 0, /* src_mask */
454 0, /* dst_mask */
455 false), /* pcrel_offset */
456 HOWTO (R_AARCH64_NONE, /* type */
457 0, /* rightshift */
458 0, /* size */
459 0, /* bitsize */
460 false, /* pc_relative */
461 0, /* bitpos */
462 complain_overflow_dont, /* complain_on_overflow */
463 bfd_elf_generic_reloc, /* special_function */
464 "R_AARCH64_NONE", /* name */
465 false, /* partial_inplace */
466 0, /* src_mask */
467 0, /* dst_mask */
468 false), /* pcrel_offset */
469
470 /* .xword: (S+A) */
471 HOWTO64 (AARCH64_R (ABS64), /* type */
472 0, /* rightshift */
473 8, /* size */
474 64, /* bitsize */
475 false, /* pc_relative */
476 0, /* bitpos */
477 complain_overflow_unsigned, /* complain_on_overflow */
478 bfd_elf_generic_reloc, /* special_function */
479 AARCH64_R_STR (ABS64), /* name */
480 false, /* partial_inplace */
481 ALL_ONES, /* src_mask */
482 ALL_ONES, /* dst_mask */
483 false), /* pcrel_offset */
484
485 /* .word: (S+A) */
486 HOWTO (AARCH64_R (ABS32), /* type */
487 0, /* rightshift */
488 4, /* size */
489 32, /* bitsize */
490 false, /* pc_relative */
491 0, /* bitpos */
492 complain_overflow_unsigned, /* complain_on_overflow */
493 bfd_elf_generic_reloc, /* special_function */
494 AARCH64_R_STR (ABS32), /* name */
495 false, /* partial_inplace */
496 0xffffffff, /* src_mask */
497 0xffffffff, /* dst_mask */
498 false), /* pcrel_offset */
499
500 /* .half: (S+A) */
501 HOWTO (AARCH64_R (ABS16), /* type */
502 0, /* rightshift */
503 2, /* size */
504 16, /* bitsize */
505 false, /* pc_relative */
506 0, /* bitpos */
507 complain_overflow_unsigned, /* complain_on_overflow */
508 bfd_elf_generic_reloc, /* special_function */
509 AARCH64_R_STR (ABS16), /* name */
510 false, /* partial_inplace */
511 0xffff, /* src_mask */
512 0xffff, /* dst_mask */
513 false), /* pcrel_offset */
514
515 /* .xword: (S+A-P) */
516 HOWTO64 (AARCH64_R (PREL64), /* type */
517 0, /* rightshift */
518 8, /* size */
519 64, /* bitsize */
520 true, /* pc_relative */
521 0, /* bitpos */
522 complain_overflow_signed, /* complain_on_overflow */
523 bfd_elf_generic_reloc, /* special_function */
524 AARCH64_R_STR (PREL64), /* name */
525 false, /* partial_inplace */
526 ALL_ONES, /* src_mask */
527 ALL_ONES, /* dst_mask */
528 true), /* pcrel_offset */
529
530 /* .word: (S+A-P) */
531 HOWTO (AARCH64_R (PREL32), /* type */
532 0, /* rightshift */
533 4, /* size */
534 32, /* bitsize */
535 true, /* pc_relative */
536 0, /* bitpos */
537 complain_overflow_signed, /* complain_on_overflow */
538 bfd_elf_generic_reloc, /* special_function */
539 AARCH64_R_STR (PREL32), /* name */
540 false, /* partial_inplace */
541 0xffffffff, /* src_mask */
542 0xffffffff, /* dst_mask */
543 true), /* pcrel_offset */
544
545 /* .half: (S+A-P) */
546 HOWTO (AARCH64_R (PREL16), /* type */
547 0, /* rightshift */
548 2, /* size */
549 16, /* bitsize */
550 true, /* pc_relative */
551 0, /* bitpos */
552 complain_overflow_signed, /* complain_on_overflow */
553 bfd_elf_generic_reloc, /* special_function */
554 AARCH64_R_STR (PREL16), /* name */
555 false, /* partial_inplace */
556 0xffff, /* src_mask */
557 0xffff, /* dst_mask */
558 true), /* pcrel_offset */
559
560 /* Group relocations to create a 16, 32, 48 or 64 bit
561 unsigned data or abs address inline. */
562
563 /* MOVZ: ((S+A) >> 0) & 0xffff */
564 HOWTO (AARCH64_R (MOVW_UABS_G0), /* type */
565 0, /* rightshift */
566 4, /* size */
567 16, /* bitsize */
568 false, /* pc_relative */
569 0, /* bitpos */
570 complain_overflow_unsigned, /* complain_on_overflow */
571 bfd_elf_generic_reloc, /* special_function */
572 AARCH64_R_STR (MOVW_UABS_G0), /* name */
573 false, /* partial_inplace */
574 0xffff, /* src_mask */
575 0xffff, /* dst_mask */
576 false), /* pcrel_offset */
577
578 /* MOVK: ((S+A) >> 0) & 0xffff [no overflow check] */
579 HOWTO (AARCH64_R (MOVW_UABS_G0_NC), /* type */
580 0, /* rightshift */
581 4, /* size */
582 16, /* bitsize */
583 false, /* pc_relative */
584 0, /* bitpos */
585 complain_overflow_dont, /* complain_on_overflow */
586 bfd_elf_generic_reloc, /* special_function */
587 AARCH64_R_STR (MOVW_UABS_G0_NC), /* name */
588 false, /* partial_inplace */
589 0xffff, /* src_mask */
590 0xffff, /* dst_mask */
591 false), /* pcrel_offset */
592
593 /* MOVZ: ((S+A) >> 16) & 0xffff */
594 HOWTO (AARCH64_R (MOVW_UABS_G1), /* type */
595 16, /* rightshift */
596 4, /* size */
597 16, /* bitsize */
598 false, /* pc_relative */
599 0, /* bitpos */
600 complain_overflow_unsigned, /* complain_on_overflow */
601 bfd_elf_generic_reloc, /* special_function */
602 AARCH64_R_STR (MOVW_UABS_G1), /* name */
603 false, /* partial_inplace */
604 0xffff, /* src_mask */
605 0xffff, /* dst_mask */
606 false), /* pcrel_offset */
607
608 /* MOVK: ((S+A) >> 16) & 0xffff [no overflow check] */
609 HOWTO64 (AARCH64_R (MOVW_UABS_G1_NC), /* type */
610 16, /* rightshift */
611 4, /* size */
612 16, /* bitsize */
613 false, /* pc_relative */
614 0, /* bitpos */
615 complain_overflow_dont, /* complain_on_overflow */
616 bfd_elf_generic_reloc, /* special_function */
617 AARCH64_R_STR (MOVW_UABS_G1_NC), /* name */
618 false, /* partial_inplace */
619 0xffff, /* src_mask */
620 0xffff, /* dst_mask */
621 false), /* pcrel_offset */
622
623 /* MOVZ: ((S+A) >> 32) & 0xffff */
624 HOWTO64 (AARCH64_R (MOVW_UABS_G2), /* type */
625 32, /* rightshift */
626 4, /* size */
627 16, /* bitsize */
628 false, /* pc_relative */
629 0, /* bitpos */
630 complain_overflow_unsigned, /* complain_on_overflow */
631 bfd_elf_generic_reloc, /* special_function */
632 AARCH64_R_STR (MOVW_UABS_G2), /* name */
633 false, /* partial_inplace */
634 0xffff, /* src_mask */
635 0xffff, /* dst_mask */
636 false), /* pcrel_offset */
637
638 /* MOVK: ((S+A) >> 32) & 0xffff [no overflow check] */
639 HOWTO64 (AARCH64_R (MOVW_UABS_G2_NC), /* type */
640 32, /* rightshift */
641 4, /* size */
642 16, /* bitsize */
643 false, /* pc_relative */
644 0, /* bitpos */
645 complain_overflow_dont, /* complain_on_overflow */
646 bfd_elf_generic_reloc, /* special_function */
647 AARCH64_R_STR (MOVW_UABS_G2_NC), /* name */
648 false, /* partial_inplace */
649 0xffff, /* src_mask */
650 0xffff, /* dst_mask */
651 false), /* pcrel_offset */
652
653 /* MOVZ: ((S+A) >> 48) & 0xffff */
654 HOWTO64 (AARCH64_R (MOVW_UABS_G3), /* type */
655 48, /* rightshift */
656 4, /* size */
657 16, /* bitsize */
658 false, /* pc_relative */
659 0, /* bitpos */
660 complain_overflow_unsigned, /* complain_on_overflow */
661 bfd_elf_generic_reloc, /* special_function */
662 AARCH64_R_STR (MOVW_UABS_G3), /* name */
663 false, /* partial_inplace */
664 0xffff, /* src_mask */
665 0xffff, /* dst_mask */
666 false), /* pcrel_offset */
667
668 /* Group relocations to create high part of a 16, 32, 48 or 64 bit
669 signed data or abs address inline. Will change instruction
670 to MOVN or MOVZ depending on sign of calculated value. */
671
672 /* MOV[ZN]: ((S+A) >> 0) & 0xffff */
673 HOWTO (AARCH64_R (MOVW_SABS_G0), /* type */
674 0, /* rightshift */
675 4, /* size */
676 17, /* bitsize */
677 false, /* pc_relative */
678 0, /* bitpos */
679 complain_overflow_signed, /* complain_on_overflow */
680 bfd_elf_generic_reloc, /* special_function */
681 AARCH64_R_STR (MOVW_SABS_G0), /* name */
682 false, /* partial_inplace */
683 0xffff, /* src_mask */
684 0xffff, /* dst_mask */
685 false), /* pcrel_offset */
686
687 /* MOV[ZN]: ((S+A) >> 16) & 0xffff */
688 HOWTO64 (AARCH64_R (MOVW_SABS_G1), /* type */
689 16, /* rightshift */
690 4, /* size */
691 17, /* bitsize */
692 false, /* pc_relative */
693 0, /* bitpos */
694 complain_overflow_signed, /* complain_on_overflow */
695 bfd_elf_generic_reloc, /* special_function */
696 AARCH64_R_STR (MOVW_SABS_G1), /* name */
697 false, /* partial_inplace */
698 0xffff, /* src_mask */
699 0xffff, /* dst_mask */
700 false), /* pcrel_offset */
701
702 /* MOV[ZN]: ((S+A) >> 32) & 0xffff */
703 HOWTO64 (AARCH64_R (MOVW_SABS_G2), /* type */
704 32, /* rightshift */
705 4, /* size */
706 17, /* bitsize */
707 false, /* pc_relative */
708 0, /* bitpos */
709 complain_overflow_signed, /* complain_on_overflow */
710 bfd_elf_generic_reloc, /* special_function */
711 AARCH64_R_STR (MOVW_SABS_G2), /* name */
712 false, /* partial_inplace */
713 0xffff, /* src_mask */
714 0xffff, /* dst_mask */
715 false), /* pcrel_offset */
716
717 /* Group relocations to create a 16, 32, 48 or 64 bit
718 PC relative address inline. */
719
720 /* MOV[NZ]: ((S+A-P) >> 0) & 0xffff */
721 HOWTO (AARCH64_R (MOVW_PREL_G0), /* type */
722 0, /* rightshift */
723 4, /* size */
724 17, /* bitsize */
725 true, /* pc_relative */
726 0, /* bitpos */
727 complain_overflow_signed, /* complain_on_overflow */
728 bfd_elf_generic_reloc, /* special_function */
729 AARCH64_R_STR (MOVW_PREL_G0), /* name */
730 false, /* partial_inplace */
731 0xffff, /* src_mask */
732 0xffff, /* dst_mask */
733 true), /* pcrel_offset */
734
735 /* MOVK: ((S+A-P) >> 0) & 0xffff [no overflow check] */
736 HOWTO (AARCH64_R (MOVW_PREL_G0_NC), /* type */
737 0, /* rightshift */
738 4, /* size */
739 16, /* bitsize */
740 true, /* pc_relative */
741 0, /* bitpos */
742 complain_overflow_dont, /* complain_on_overflow */
743 bfd_elf_generic_reloc, /* special_function */
744 AARCH64_R_STR (MOVW_PREL_G0_NC), /* name */
745 false, /* partial_inplace */
746 0xffff, /* src_mask */
747 0xffff, /* dst_mask */
748 true), /* pcrel_offset */
749
750 /* MOV[NZ]: ((S+A-P) >> 16) & 0xffff */
751 HOWTO (AARCH64_R (MOVW_PREL_G1), /* type */
752 16, /* rightshift */
753 4, /* size */
754 17, /* bitsize */
755 true, /* pc_relative */
756 0, /* bitpos */
757 complain_overflow_signed, /* complain_on_overflow */
758 bfd_elf_generic_reloc, /* special_function */
759 AARCH64_R_STR (MOVW_PREL_G1), /* name */
760 false, /* partial_inplace */
761 0xffff, /* src_mask */
762 0xffff, /* dst_mask */
763 true), /* pcrel_offset */
764
765 /* MOVK: ((S+A-P) >> 16) & 0xffff [no overflow check] */
766 HOWTO64 (AARCH64_R (MOVW_PREL_G1_NC), /* type */
767 16, /* rightshift */
768 4, /* size */
769 16, /* bitsize */
770 true, /* pc_relative */
771 0, /* bitpos */
772 complain_overflow_dont, /* complain_on_overflow */
773 bfd_elf_generic_reloc, /* special_function */
774 AARCH64_R_STR (MOVW_PREL_G1_NC), /* name */
775 false, /* partial_inplace */
776 0xffff, /* src_mask */
777 0xffff, /* dst_mask */
778 true), /* pcrel_offset */
779
780 /* MOV[NZ]: ((S+A-P) >> 32) & 0xffff */
781 HOWTO64 (AARCH64_R (MOVW_PREL_G2), /* type */
782 32, /* rightshift */
783 4, /* size */
784 17, /* bitsize */
785 true, /* pc_relative */
786 0, /* bitpos */
787 complain_overflow_signed, /* complain_on_overflow */
788 bfd_elf_generic_reloc, /* special_function */
789 AARCH64_R_STR (MOVW_PREL_G2), /* name */
790 false, /* partial_inplace */
791 0xffff, /* src_mask */
792 0xffff, /* dst_mask */
793 true), /* pcrel_offset */
794
795 /* MOVK: ((S+A-P) >> 32) & 0xffff [no overflow check] */
796 HOWTO64 (AARCH64_R (MOVW_PREL_G2_NC), /* type */
797 32, /* rightshift */
798 4, /* size */
799 16, /* bitsize */
800 true, /* pc_relative */
801 0, /* bitpos */
802 complain_overflow_dont, /* complain_on_overflow */
803 bfd_elf_generic_reloc, /* special_function */
804 AARCH64_R_STR (MOVW_PREL_G2_NC), /* name */
805 false, /* partial_inplace */
806 0xffff, /* src_mask */
807 0xffff, /* dst_mask */
808 true), /* pcrel_offset */
809
810 /* MOV[NZ]: ((S+A-P) >> 48) & 0xffff */
811 HOWTO64 (AARCH64_R (MOVW_PREL_G3), /* type */
812 48, /* rightshift */
813 4, /* size */
814 16, /* bitsize */
815 true, /* pc_relative */
816 0, /* bitpos */
817 complain_overflow_dont, /* complain_on_overflow */
818 bfd_elf_generic_reloc, /* special_function */
819 AARCH64_R_STR (MOVW_PREL_G3), /* name */
820 false, /* partial_inplace */
821 0xffff, /* src_mask */
822 0xffff, /* dst_mask */
823 true), /* pcrel_offset */
824
825 /* Relocations to generate 19, 21 and 33 bit PC-relative load/store
826 addresses: PG(x) is (x & ~0xfff). */
827
828 /* LD-lit: ((S+A-P) >> 2) & 0x7ffff */
829 HOWTO (AARCH64_R (LD_PREL_LO19), /* type */
830 2, /* rightshift */
831 4, /* size */
832 19, /* bitsize */
833 true, /* pc_relative */
834 0, /* bitpos */
835 complain_overflow_signed, /* complain_on_overflow */
836 bfd_elf_generic_reloc, /* special_function */
837 AARCH64_R_STR (LD_PREL_LO19), /* name */
838 false, /* partial_inplace */
839 0x7ffff, /* src_mask */
840 0x7ffff, /* dst_mask */
841 true), /* pcrel_offset */
842
843 /* ADR: (S+A-P) & 0x1fffff */
844 HOWTO (AARCH64_R (ADR_PREL_LO21), /* type */
845 0, /* rightshift */
846 4, /* size */
847 21, /* bitsize */
848 true, /* pc_relative */
849 0, /* bitpos */
850 complain_overflow_signed, /* complain_on_overflow */
851 bfd_elf_generic_reloc, /* special_function */
852 AARCH64_R_STR (ADR_PREL_LO21), /* name */
853 false, /* partial_inplace */
854 0x1fffff, /* src_mask */
855 0x1fffff, /* dst_mask */
856 true), /* pcrel_offset */
857
858 /* ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
859 HOWTO (AARCH64_R (ADR_PREL_PG_HI21), /* type */
860 12, /* rightshift */
861 4, /* size */
862 21, /* bitsize */
863 true, /* pc_relative */
864 0, /* bitpos */
865 complain_overflow_signed, /* complain_on_overflow */
866 bfd_elf_generic_reloc, /* special_function */
867 AARCH64_R_STR (ADR_PREL_PG_HI21), /* name */
868 false, /* partial_inplace */
869 0x1fffff, /* src_mask */
870 0x1fffff, /* dst_mask */
871 true), /* pcrel_offset */
872
873 /* ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff [no overflow check] */
874 HOWTO64 (AARCH64_R (ADR_PREL_PG_HI21_NC), /* type */
875 12, /* rightshift */
876 4, /* size */
877 21, /* bitsize */
878 true, /* pc_relative */
879 0, /* bitpos */
880 complain_overflow_dont, /* complain_on_overflow */
881 bfd_elf_generic_reloc, /* special_function */
882 AARCH64_R_STR (ADR_PREL_PG_HI21_NC), /* name */
883 false, /* partial_inplace */
884 0x1fffff, /* src_mask */
885 0x1fffff, /* dst_mask */
886 true), /* pcrel_offset */
887
888 /* ADD: (S+A) & 0xfff [no overflow check] */
889 HOWTO (AARCH64_R (ADD_ABS_LO12_NC), /* type */
890 0, /* rightshift */
891 4, /* size */
892 12, /* bitsize */
893 false, /* pc_relative */
894 10, /* bitpos */
895 complain_overflow_dont, /* complain_on_overflow */
896 bfd_elf_generic_reloc, /* special_function */
897 AARCH64_R_STR (ADD_ABS_LO12_NC), /* name */
898 false, /* partial_inplace */
899 0x3ffc00, /* src_mask */
900 0x3ffc00, /* dst_mask */
901 false), /* pcrel_offset */
902
903 /* LD/ST8: (S+A) & 0xfff */
904 HOWTO (AARCH64_R (LDST8_ABS_LO12_NC), /* type */
905 0, /* rightshift */
906 4, /* size */
907 12, /* bitsize */
908 false, /* pc_relative */
909 0, /* bitpos */
910 complain_overflow_dont, /* complain_on_overflow */
911 bfd_elf_generic_reloc, /* special_function */
912 AARCH64_R_STR (LDST8_ABS_LO12_NC), /* name */
913 false, /* partial_inplace */
914 0xfff, /* src_mask */
915 0xfff, /* dst_mask */
916 false), /* pcrel_offset */
917
918 /* Relocations for control-flow instructions. */
919
920 /* TBZ/NZ: ((S+A-P) >> 2) & 0x3fff */
921 HOWTO (AARCH64_R (TSTBR14), /* type */
922 2, /* rightshift */
923 4, /* size */
924 14, /* bitsize */
925 true, /* pc_relative */
926 0, /* bitpos */
927 complain_overflow_signed, /* complain_on_overflow */
928 bfd_elf_generic_reloc, /* special_function */
929 AARCH64_R_STR (TSTBR14), /* name */
930 false, /* partial_inplace */
931 0x3fff, /* src_mask */
932 0x3fff, /* dst_mask */
933 true), /* pcrel_offset */
934
935 /* B.cond: ((S+A-P) >> 2) & 0x7ffff */
936 HOWTO (AARCH64_R (CONDBR19), /* type */
937 2, /* rightshift */
938 4, /* size */
939 19, /* bitsize */
940 true, /* pc_relative */
941 0, /* bitpos */
942 complain_overflow_signed, /* complain_on_overflow */
943 bfd_elf_generic_reloc, /* special_function */
944 AARCH64_R_STR (CONDBR19), /* name */
945 false, /* partial_inplace */
946 0x7ffff, /* src_mask */
947 0x7ffff, /* dst_mask */
948 true), /* pcrel_offset */
949
950 /* B: ((S+A-P) >> 2) & 0x3ffffff */
951 HOWTO (AARCH64_R (JUMP26), /* type */
952 2, /* rightshift */
953 4, /* size */
954 26, /* bitsize */
955 true, /* pc_relative */
956 0, /* bitpos */
957 complain_overflow_signed, /* complain_on_overflow */
958 bfd_elf_generic_reloc, /* special_function */
959 AARCH64_R_STR (JUMP26), /* name */
960 false, /* partial_inplace */
961 0x3ffffff, /* src_mask */
962 0x3ffffff, /* dst_mask */
963 true), /* pcrel_offset */
964
965 /* BL: ((S+A-P) >> 2) & 0x3ffffff */
966 HOWTO (AARCH64_R (CALL26), /* type */
967 2, /* rightshift */
968 4, /* size */
969 26, /* bitsize */
970 true, /* pc_relative */
971 0, /* bitpos */
972 complain_overflow_signed, /* complain_on_overflow */
973 bfd_elf_generic_reloc, /* special_function */
974 AARCH64_R_STR (CALL26), /* name */
975 false, /* partial_inplace */
976 0x3ffffff, /* src_mask */
977 0x3ffffff, /* dst_mask */
978 true), /* pcrel_offset */
979
980 /* LD/ST16: (S+A) & 0xffe */
981 HOWTO (AARCH64_R (LDST16_ABS_LO12_NC), /* type */
982 1, /* rightshift */
983 4, /* size */
984 12, /* bitsize */
985 false, /* pc_relative */
986 0, /* bitpos */
987 complain_overflow_dont, /* complain_on_overflow */
988 bfd_elf_generic_reloc, /* special_function */
989 AARCH64_R_STR (LDST16_ABS_LO12_NC), /* name */
990 false, /* partial_inplace */
991 0xffe, /* src_mask */
992 0xffe, /* dst_mask */
993 false), /* pcrel_offset */
994
995 /* LD/ST32: (S+A) & 0xffc */
996 HOWTO (AARCH64_R (LDST32_ABS_LO12_NC), /* type */
997 2, /* rightshift */
998 4, /* size */
999 12, /* bitsize */
1000 false, /* pc_relative */
1001 0, /* bitpos */
1002 complain_overflow_dont, /* complain_on_overflow */
1003 bfd_elf_generic_reloc, /* special_function */
1004 AARCH64_R_STR (LDST32_ABS_LO12_NC), /* name */
1005 false, /* partial_inplace */
1006 0xffc, /* src_mask */
1007 0xffc, /* dst_mask */
1008 false), /* pcrel_offset */
1009
1010 /* LD/ST64: (S+A) & 0xff8 */
1011 HOWTO (AARCH64_R (LDST64_ABS_LO12_NC), /* type */
1012 3, /* rightshift */
1013 4, /* size */
1014 12, /* bitsize */
1015 false, /* pc_relative */
1016 0, /* bitpos */
1017 complain_overflow_dont, /* complain_on_overflow */
1018 bfd_elf_generic_reloc, /* special_function */
1019 AARCH64_R_STR (LDST64_ABS_LO12_NC), /* name */
1020 false, /* partial_inplace */
1021 0xff8, /* src_mask */
1022 0xff8, /* dst_mask */
1023 false), /* pcrel_offset */
1024
1025 /* LD/ST128: (S+A) & 0xff0 */
1026 HOWTO (AARCH64_R (LDST128_ABS_LO12_NC), /* type */
1027 4, /* rightshift */
1028 4, /* size */
1029 12, /* bitsize */
1030 false, /* pc_relative */
1031 0, /* bitpos */
1032 complain_overflow_dont, /* complain_on_overflow */
1033 bfd_elf_generic_reloc, /* special_function */
1034 AARCH64_R_STR (LDST128_ABS_LO12_NC), /* name */
1035 false, /* partial_inplace */
1036 0xff0, /* src_mask */
1037 0xff0, /* dst_mask */
1038 false), /* pcrel_offset */
1039
1040 /* Set a load-literal immediate field to bits
1041 0x1FFFFC of G(S)-P */
1042 HOWTO (AARCH64_R (GOT_LD_PREL19), /* type */
1043 2, /* rightshift */
1044 4, /* size */
1045 19, /* bitsize */
1046 true, /* pc_relative */
1047 0, /* bitpos */
1048 complain_overflow_signed, /* complain_on_overflow */
1049 bfd_elf_generic_reloc, /* special_function */
1050 AARCH64_R_STR (GOT_LD_PREL19), /* name */
1051 false, /* partial_inplace */
1052 0xffffe0, /* src_mask */
1053 0xffffe0, /* dst_mask */
1054 true), /* pcrel_offset */
1055
1056 /* Get to the page for the GOT entry for the symbol
1057 (G(S) - P) using an ADRP instruction. */
1058 HOWTO (AARCH64_R (ADR_GOT_PAGE), /* type */
1059 12, /* rightshift */
1060 4, /* size */
1061 21, /* bitsize */
1062 true, /* pc_relative */
1063 0, /* bitpos */
1064 complain_overflow_dont, /* complain_on_overflow */
1065 bfd_elf_generic_reloc, /* special_function */
1066 AARCH64_R_STR (ADR_GOT_PAGE), /* name */
1067 false, /* partial_inplace */
1068 0x1fffff, /* src_mask */
1069 0x1fffff, /* dst_mask */
1070 true), /* pcrel_offset */
1071
1072 /* LD64: GOT offset G(S) & 0xff8 */
1073 HOWTO64 (AARCH64_R (LD64_GOT_LO12_NC), /* type */
1074 3, /* rightshift */
1075 4, /* size */
1076 12, /* bitsize */
1077 false, /* pc_relative */
1078 0, /* bitpos */
1079 complain_overflow_dont, /* complain_on_overflow */
1080 bfd_elf_generic_reloc, /* special_function */
1081 AARCH64_R_STR (LD64_GOT_LO12_NC), /* name */
1082 false, /* partial_inplace */
1083 0xff8, /* src_mask */
1084 0xff8, /* dst_mask */
1085 false), /* pcrel_offset */
1086
1087 /* LD32: GOT offset G(S) & 0xffc */
1088 HOWTO32 (AARCH64_R (LD32_GOT_LO12_NC), /* type */
1089 2, /* rightshift */
1090 4, /* size */
1091 12, /* bitsize */
1092 false, /* pc_relative */
1093 0, /* bitpos */
1094 complain_overflow_dont, /* complain_on_overflow */
1095 bfd_elf_generic_reloc, /* special_function */
1096 AARCH64_R_STR (LD32_GOT_LO12_NC), /* name */
1097 false, /* partial_inplace */
1098 0xffc, /* src_mask */
1099 0xffc, /* dst_mask */
1100 false), /* pcrel_offset */
1101
1102 /* Lower 16 bits of GOT offset for the symbol. */
1103 HOWTO64 (AARCH64_R (MOVW_GOTOFF_G0_NC), /* type */
1104 0, /* rightshift */
1105 4, /* size */
1106 16, /* bitsize */
1107 false, /* pc_relative */
1108 0, /* bitpos */
1109 complain_overflow_dont, /* complain_on_overflow */
1110 bfd_elf_generic_reloc, /* special_function */
1111 AARCH64_R_STR (MOVW_GOTOFF_G0_NC), /* name */
1112 false, /* partial_inplace */
1113 0xffff, /* src_mask */
1114 0xffff, /* dst_mask */
1115 false), /* pcrel_offset */
1116
1117 /* Higher 16 bits of GOT offset for the symbol. */
1118 HOWTO64 (AARCH64_R (MOVW_GOTOFF_G1), /* type */
1119 16, /* rightshift */
1120 4, /* size */
1121 16, /* bitsize */
1122 false, /* pc_relative */
1123 0, /* bitpos */
1124 complain_overflow_unsigned, /* complain_on_overflow */
1125 bfd_elf_generic_reloc, /* special_function */
1126 AARCH64_R_STR (MOVW_GOTOFF_G1), /* name */
1127 false, /* partial_inplace */
1128 0xffff, /* src_mask */
1129 0xffff, /* dst_mask */
1130 false), /* pcrel_offset */
1131
1132 /* LD64: GOT offset for the symbol. */
1133 HOWTO64 (AARCH64_R (LD64_GOTOFF_LO15), /* type */
1134 3, /* rightshift */
1135 4, /* size */
1136 12, /* bitsize */
1137 false, /* pc_relative */
1138 0, /* bitpos */
1139 complain_overflow_unsigned, /* complain_on_overflow */
1140 bfd_elf_generic_reloc, /* special_function */
1141 AARCH64_R_STR (LD64_GOTOFF_LO15), /* name */
1142 false, /* partial_inplace */
1143 0x7ff8, /* src_mask */
1144 0x7ff8, /* dst_mask */
1145 false), /* pcrel_offset */
1146
1147 /* LD32: GOT offset to the page address of GOT table.
1148 (G(S) - PAGE (_GLOBAL_OFFSET_TABLE_)) & 0x5ffc. */
1149 HOWTO32 (AARCH64_R (LD32_GOTPAGE_LO14), /* type */
1150 2, /* rightshift */
1151 4, /* size */
1152 12, /* bitsize */
1153 false, /* pc_relative */
1154 0, /* bitpos */
1155 complain_overflow_unsigned, /* complain_on_overflow */
1156 bfd_elf_generic_reloc, /* special_function */
1157 AARCH64_R_STR (LD32_GOTPAGE_LO14), /* name */
1158 false, /* partial_inplace */
1159 0x5ffc, /* src_mask */
1160 0x5ffc, /* dst_mask */
1161 false), /* pcrel_offset */
1162
1163 /* LD64: GOT offset to the page address of GOT table.
1164 (G(S) - PAGE (_GLOBAL_OFFSET_TABLE_)) & 0x7ff8. */
1165 HOWTO64 (AARCH64_R (LD64_GOTPAGE_LO15), /* type */
1166 3, /* rightshift */
1167 4, /* size */
1168 12, /* bitsize */
1169 false, /* pc_relative */
1170 0, /* bitpos */
1171 complain_overflow_unsigned, /* complain_on_overflow */
1172 bfd_elf_generic_reloc, /* special_function */
1173 AARCH64_R_STR (LD64_GOTPAGE_LO15), /* name */
1174 false, /* partial_inplace */
1175 0x7ff8, /* src_mask */
1176 0x7ff8, /* dst_mask */
1177 false), /* pcrel_offset */
1178
1179 /* Get to the page for the GOT entry for the symbol
1180 (G(S) - P) using an ADRP instruction. */
1181 HOWTO (AARCH64_R (TLSGD_ADR_PAGE21), /* type */
1182 12, /* rightshift */
1183 4, /* size */
1184 21, /* bitsize */
1185 true, /* pc_relative */
1186 0, /* bitpos */
1187 complain_overflow_dont, /* complain_on_overflow */
1188 bfd_elf_generic_reloc, /* special_function */
1189 AARCH64_R_STR (TLSGD_ADR_PAGE21), /* name */
1190 false, /* partial_inplace */
1191 0x1fffff, /* src_mask */
1192 0x1fffff, /* dst_mask */
1193 true), /* pcrel_offset */
1194
1195 HOWTO (AARCH64_R (TLSGD_ADR_PREL21), /* type */
1196 0, /* rightshift */
1197 4, /* size */
1198 21, /* bitsize */
1199 true, /* pc_relative */
1200 0, /* bitpos */
1201 complain_overflow_dont, /* complain_on_overflow */
1202 bfd_elf_generic_reloc, /* special_function */
1203 AARCH64_R_STR (TLSGD_ADR_PREL21), /* name */
1204 false, /* partial_inplace */
1205 0x1fffff, /* src_mask */
1206 0x1fffff, /* dst_mask */
1207 true), /* pcrel_offset */
1208
1209 /* ADD: GOT offset G(S) & 0xff8 [no overflow check] */
1210 HOWTO (AARCH64_R (TLSGD_ADD_LO12_NC), /* type */
1211 0, /* rightshift */
1212 4, /* size */
1213 12, /* bitsize */
1214 false, /* pc_relative */
1215 0, /* bitpos */
1216 complain_overflow_dont, /* complain_on_overflow */
1217 bfd_elf_generic_reloc, /* special_function */
1218 AARCH64_R_STR (TLSGD_ADD_LO12_NC), /* name */
1219 false, /* partial_inplace */
1220 0xfff, /* src_mask */
1221 0xfff, /* dst_mask */
1222 false), /* pcrel_offset */
1223
1224 /* Lower 16 bits of GOT offset to tls_index. */
1225 HOWTO64 (AARCH64_R (TLSGD_MOVW_G0_NC), /* type */
1226 0, /* rightshift */
1227 4, /* size */
1228 16, /* bitsize */
1229 false, /* pc_relative */
1230 0, /* bitpos */
1231 complain_overflow_dont, /* complain_on_overflow */
1232 bfd_elf_generic_reloc, /* special_function */
1233 AARCH64_R_STR (TLSGD_MOVW_G0_NC), /* name */
1234 false, /* partial_inplace */
1235 0xffff, /* src_mask */
1236 0xffff, /* dst_mask */
1237 false), /* pcrel_offset */
1238
1239 /* Higher 16 bits of GOT offset to tls_index. */
1240 HOWTO64 (AARCH64_R (TLSGD_MOVW_G1), /* type */
1241 16, /* rightshift */
1242 4, /* size */
1243 16, /* bitsize */
1244 false, /* pc_relative */
1245 0, /* bitpos */
1246 complain_overflow_unsigned, /* complain_on_overflow */
1247 bfd_elf_generic_reloc, /* special_function */
1248 AARCH64_R_STR (TLSGD_MOVW_G1), /* name */
1249 false, /* partial_inplace */
1250 0xffff, /* src_mask */
1251 0xffff, /* dst_mask */
1252 false), /* pcrel_offset */
1253
1254 HOWTO (AARCH64_R (TLSIE_ADR_GOTTPREL_PAGE21), /* type */
1255 12, /* rightshift */
1256 4, /* size */
1257 21, /* bitsize */
1258 false, /* pc_relative */
1259 0, /* bitpos */
1260 complain_overflow_dont, /* complain_on_overflow */
1261 bfd_elf_generic_reloc, /* special_function */
1262 AARCH64_R_STR (TLSIE_ADR_GOTTPREL_PAGE21), /* name */
1263 false, /* partial_inplace */
1264 0x1fffff, /* src_mask */
1265 0x1fffff, /* dst_mask */
1266 false), /* pcrel_offset */
1267
1268 HOWTO64 (AARCH64_R (TLSIE_LD64_GOTTPREL_LO12_NC), /* type */
1269 3, /* rightshift */
1270 4, /* size */
1271 12, /* bitsize */
1272 false, /* pc_relative */
1273 0, /* bitpos */
1274 complain_overflow_dont, /* complain_on_overflow */
1275 bfd_elf_generic_reloc, /* special_function */
1276 AARCH64_R_STR (TLSIE_LD64_GOTTPREL_LO12_NC), /* name */
1277 false, /* partial_inplace */
1278 0xff8, /* src_mask */
1279 0xff8, /* dst_mask */
1280 false), /* pcrel_offset */
1281
1282 HOWTO32 (AARCH64_R (TLSIE_LD32_GOTTPREL_LO12_NC), /* type */
1283 2, /* rightshift */
1284 4, /* size */
1285 12, /* bitsize */
1286 false, /* pc_relative */
1287 0, /* bitpos */
1288 complain_overflow_dont, /* complain_on_overflow */
1289 bfd_elf_generic_reloc, /* special_function */
1290 AARCH64_R_STR (TLSIE_LD32_GOTTPREL_LO12_NC), /* name */
1291 false, /* partial_inplace */
1292 0xffc, /* src_mask */
1293 0xffc, /* dst_mask */
1294 false), /* pcrel_offset */
1295
1296 HOWTO (AARCH64_R (TLSIE_LD_GOTTPREL_PREL19), /* type */
1297 2, /* rightshift */
1298 4, /* size */
1299 19, /* bitsize */
1300 false, /* pc_relative */
1301 0, /* bitpos */
1302 complain_overflow_dont, /* complain_on_overflow */
1303 bfd_elf_generic_reloc, /* special_function */
1304 AARCH64_R_STR (TLSIE_LD_GOTTPREL_PREL19), /* name */
1305 false, /* partial_inplace */
1306 0x1ffffc, /* src_mask */
1307 0x1ffffc, /* dst_mask */
1308 false), /* pcrel_offset */
1309
1310 HOWTO64 (AARCH64_R (TLSIE_MOVW_GOTTPREL_G0_NC), /* type */
1311 0, /* rightshift */
1312 4, /* size */
1313 16, /* bitsize */
1314 false, /* pc_relative */
1315 0, /* bitpos */
1316 complain_overflow_dont, /* complain_on_overflow */
1317 bfd_elf_generic_reloc, /* special_function */
1318 AARCH64_R_STR (TLSIE_MOVW_GOTTPREL_G0_NC), /* name */
1319 false, /* partial_inplace */
1320 0xffff, /* src_mask */
1321 0xffff, /* dst_mask */
1322 false), /* pcrel_offset */
1323
1324 HOWTO64 (AARCH64_R (TLSIE_MOVW_GOTTPREL_G1), /* type */
1325 16, /* rightshift */
1326 4, /* size */
1327 16, /* bitsize */
1328 false, /* pc_relative */
1329 0, /* bitpos */
1330 complain_overflow_unsigned, /* complain_on_overflow */
1331 bfd_elf_generic_reloc, /* special_function */
1332 AARCH64_R_STR (TLSIE_MOVW_GOTTPREL_G1), /* name */
1333 false, /* partial_inplace */
1334 0xffff, /* src_mask */
1335 0xffff, /* dst_mask */
1336 false), /* pcrel_offset */
1337
1338 /* ADD: bit[23:12] of byte offset to module TLS base address. */
1339 HOWTO (AARCH64_R (TLSLD_ADD_DTPREL_HI12), /* type */
1340 12, /* rightshift */
1341 4, /* size */
1342 12, /* bitsize */
1343 false, /* pc_relative */
1344 0, /* bitpos */
1345 complain_overflow_unsigned, /* complain_on_overflow */
1346 bfd_elf_generic_reloc, /* special_function */
1347 AARCH64_R_STR (TLSLD_ADD_DTPREL_HI12), /* name */
1348 false, /* partial_inplace */
1349 0xfff, /* src_mask */
1350 0xfff, /* dst_mask */
1351 false), /* pcrel_offset */
1352
1353 /* Unsigned 12 bit byte offset to module TLS base address. */
1354 HOWTO (AARCH64_R (TLSLD_ADD_DTPREL_LO12), /* type */
1355 0, /* rightshift */
1356 4, /* size */
1357 12, /* bitsize */
1358 false, /* pc_relative */
1359 0, /* bitpos */
1360 complain_overflow_unsigned, /* complain_on_overflow */
1361 bfd_elf_generic_reloc, /* special_function */
1362 AARCH64_R_STR (TLSLD_ADD_DTPREL_LO12), /* name */
1363 false, /* partial_inplace */
1364 0xfff, /* src_mask */
1365 0xfff, /* dst_mask */
1366 false), /* pcrel_offset */
1367
1368 /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12. */
1369 HOWTO (AARCH64_R (TLSLD_ADD_DTPREL_LO12_NC), /* type */
1370 0, /* rightshift */
1371 4, /* size */
1372 12, /* bitsize */
1373 false, /* pc_relative */
1374 0, /* bitpos */
1375 complain_overflow_dont, /* complain_on_overflow */
1376 bfd_elf_generic_reloc, /* special_function */
1377 AARCH64_R_STR (TLSLD_ADD_DTPREL_LO12_NC), /* name */
1378 false, /* partial_inplace */
1379 0xfff, /* src_mask */
1380 0xfff, /* dst_mask */
1381 false), /* pcrel_offset */
1382
1383 /* ADD: GOT offset G(S) & 0xff8 [no overflow check] */
1384 HOWTO (AARCH64_R (TLSLD_ADD_LO12_NC), /* type */
1385 0, /* rightshift */
1386 4, /* size */
1387 12, /* bitsize */
1388 false, /* pc_relative */
1389 0, /* bitpos */
1390 complain_overflow_dont, /* complain_on_overflow */
1391 bfd_elf_generic_reloc, /* special_function */
1392 AARCH64_R_STR (TLSLD_ADD_LO12_NC), /* name */
1393 false, /* partial_inplace */
1394 0xfff, /* src_mask */
1395 0xfff, /* dst_mask */
1396 false), /* pcrel_offset */
1397
1398 /* Get to the page for the GOT entry for the symbol
1399 (G(S) - P) using an ADRP instruction. */
1400 HOWTO (AARCH64_R (TLSLD_ADR_PAGE21), /* type */
1401 12, /* rightshift */
1402 4, /* size */
1403 21, /* bitsize */
1404 true, /* pc_relative */
1405 0, /* bitpos */
1406 complain_overflow_signed, /* complain_on_overflow */
1407 bfd_elf_generic_reloc, /* special_function */
1408 AARCH64_R_STR (TLSLD_ADR_PAGE21), /* name */
1409 false, /* partial_inplace */
1410 0x1fffff, /* src_mask */
1411 0x1fffff, /* dst_mask */
1412 true), /* pcrel_offset */
1413
1414 HOWTO (AARCH64_R (TLSLD_ADR_PREL21), /* type */
1415 0, /* rightshift */
1416 4, /* size */
1417 21, /* bitsize */
1418 true, /* pc_relative */
1419 0, /* bitpos */
1420 complain_overflow_signed, /* complain_on_overflow */
1421 bfd_elf_generic_reloc, /* special_function */
1422 AARCH64_R_STR (TLSLD_ADR_PREL21), /* name */
1423 false, /* partial_inplace */
1424 0x1fffff, /* src_mask */
1425 0x1fffff, /* dst_mask */
1426 true), /* pcrel_offset */
1427
1428 /* LD/ST16: bit[11:1] of byte offset to module TLS base address. */
1429 HOWTO64 (AARCH64_R (TLSLD_LDST16_DTPREL_LO12), /* type */
1430 1, /* rightshift */
1431 4, /* size */
1432 11, /* bitsize */
1433 false, /* pc_relative */
1434 10, /* bitpos */
1435 complain_overflow_unsigned, /* complain_on_overflow */
1436 bfd_elf_generic_reloc, /* special_function */
1437 AARCH64_R_STR (TLSLD_LDST16_DTPREL_LO12), /* name */
1438 false, /* partial_inplace */
1439 0x1ffc00, /* src_mask */
1440 0x1ffc00, /* dst_mask */
1441 false), /* pcrel_offset */
1442
1443 /* Same as BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12, but no overflow check. */
1444 HOWTO64 (AARCH64_R (TLSLD_LDST16_DTPREL_LO12_NC), /* type */
1445 1, /* rightshift */
1446 4, /* size */
1447 11, /* bitsize */
1448 false, /* pc_relative */
1449 10, /* bitpos */
1450 complain_overflow_dont, /* complain_on_overflow */
1451 bfd_elf_generic_reloc, /* special_function */
1452 AARCH64_R_STR (TLSLD_LDST16_DTPREL_LO12_NC), /* name */
1453 false, /* partial_inplace */
1454 0x1ffc00, /* src_mask */
1455 0x1ffc00, /* dst_mask */
1456 false), /* pcrel_offset */
1457
1458 /* LD/ST32: bit[11:2] of byte offset to module TLS base address. */
1459 HOWTO64 (AARCH64_R (TLSLD_LDST32_DTPREL_LO12), /* type */
1460 2, /* rightshift */
1461 4, /* size */
1462 10, /* bitsize */
1463 false, /* pc_relative */
1464 10, /* bitpos */
1465 complain_overflow_unsigned, /* complain_on_overflow */
1466 bfd_elf_generic_reloc, /* special_function */
1467 AARCH64_R_STR (TLSLD_LDST32_DTPREL_LO12), /* name */
1468 false, /* partial_inplace */
1469 0x3ffc00, /* src_mask */
1470 0x3ffc00, /* dst_mask */
1471 false), /* pcrel_offset */
1472
1473 /* Same as BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12, but no overflow check. */
1474 HOWTO64 (AARCH64_R (TLSLD_LDST32_DTPREL_LO12_NC), /* type */
1475 2, /* rightshift */
1476 4, /* size */
1477 10, /* bitsize */
1478 false, /* pc_relative */
1479 10, /* bitpos */
1480 complain_overflow_dont, /* complain_on_overflow */
1481 bfd_elf_generic_reloc, /* special_function */
1482 AARCH64_R_STR (TLSLD_LDST32_DTPREL_LO12_NC), /* name */
1483 false, /* partial_inplace */
1484 0xffc00, /* src_mask */
1485 0xffc00, /* dst_mask */
1486 false), /* pcrel_offset */
1487
1488 /* LD/ST64: bit[11:3] of byte offset to module TLS base address. */
1489 HOWTO64 (AARCH64_R (TLSLD_LDST64_DTPREL_LO12), /* type */
1490 3, /* rightshift */
1491 4, /* size */
1492 9, /* bitsize */
1493 false, /* pc_relative */
1494 10, /* bitpos */
1495 complain_overflow_unsigned, /* complain_on_overflow */
1496 bfd_elf_generic_reloc, /* special_function */
1497 AARCH64_R_STR (TLSLD_LDST64_DTPREL_LO12), /* name */
1498 false, /* partial_inplace */
1499 0x3ffc00, /* src_mask */
1500 0x3ffc00, /* dst_mask */
1501 false), /* pcrel_offset */
1502
1503 /* Same as BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12, but no overflow check. */
1504 HOWTO64 (AARCH64_R (TLSLD_LDST64_DTPREL_LO12_NC), /* type */
1505 3, /* rightshift */
1506 4, /* size */
1507 9, /* bitsize */
1508 false, /* pc_relative */
1509 10, /* bitpos */
1510 complain_overflow_dont, /* complain_on_overflow */
1511 bfd_elf_generic_reloc, /* special_function */
1512 AARCH64_R_STR (TLSLD_LDST64_DTPREL_LO12_NC), /* name */
1513 false, /* partial_inplace */
1514 0x7fc00, /* src_mask */
1515 0x7fc00, /* dst_mask */
1516 false), /* pcrel_offset */
1517
1518 /* LD/ST8: bit[11:0] of byte offset to module TLS base address. */
1519 HOWTO64 (AARCH64_R (TLSLD_LDST8_DTPREL_LO12), /* type */
1520 0, /* rightshift */
1521 4, /* size */
1522 12, /* bitsize */
1523 false, /* pc_relative */
1524 10, /* bitpos */
1525 complain_overflow_unsigned, /* complain_on_overflow */
1526 bfd_elf_generic_reloc, /* special_function */
1527 AARCH64_R_STR (TLSLD_LDST8_DTPREL_LO12), /* name */
1528 false, /* partial_inplace */
1529 0x3ffc00, /* src_mask */
1530 0x3ffc00, /* dst_mask */
1531 false), /* pcrel_offset */
1532
1533 /* Same as BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12, but no overflow check. */
1534 HOWTO64 (AARCH64_R (TLSLD_LDST8_DTPREL_LO12_NC), /* type */
1535 0, /* rightshift */
1536 4, /* size */
1537 12, /* bitsize */
1538 false, /* pc_relative */
1539 10, /* bitpos */
1540 complain_overflow_dont, /* complain_on_overflow */
1541 bfd_elf_generic_reloc, /* special_function */
1542 AARCH64_R_STR (TLSLD_LDST8_DTPREL_LO12_NC), /* name */
1543 false, /* partial_inplace */
1544 0x3ffc00, /* src_mask */
1545 0x3ffc00, /* dst_mask */
1546 false), /* pcrel_offset */
1547
1548 /* MOVZ: bit[15:0] of byte offset to module TLS base address. */
1549 HOWTO (AARCH64_R (TLSLD_MOVW_DTPREL_G0), /* type */
1550 0, /* rightshift */
1551 4, /* size */
1552 16, /* bitsize */
1553 false, /* pc_relative */
1554 0, /* bitpos */
1555 complain_overflow_unsigned, /* complain_on_overflow */
1556 bfd_elf_generic_reloc, /* special_function */
1557 AARCH64_R_STR (TLSLD_MOVW_DTPREL_G0), /* name */
1558 false, /* partial_inplace */
1559 0xffff, /* src_mask */
1560 0xffff, /* dst_mask */
1561 false), /* pcrel_offset */
1562
1563 /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0. */
1564 HOWTO (AARCH64_R (TLSLD_MOVW_DTPREL_G0_NC), /* type */
1565 0, /* rightshift */
1566 4, /* size */
1567 16, /* bitsize */
1568 false, /* pc_relative */
1569 0, /* bitpos */
1570 complain_overflow_dont, /* complain_on_overflow */
1571 bfd_elf_generic_reloc, /* special_function */
1572 AARCH64_R_STR (TLSLD_MOVW_DTPREL_G0_NC), /* name */
1573 false, /* partial_inplace */
1574 0xffff, /* src_mask */
1575 0xffff, /* dst_mask */
1576 false), /* pcrel_offset */
1577
1578 /* MOVZ: bit[31:16] of byte offset to module TLS base address. */
1579 HOWTO (AARCH64_R (TLSLD_MOVW_DTPREL_G1), /* type */
1580 16, /* rightshift */
1581 4, /* size */
1582 16, /* bitsize */
1583 false, /* pc_relative */
1584 0, /* bitpos */
1585 complain_overflow_unsigned, /* complain_on_overflow */
1586 bfd_elf_generic_reloc, /* special_function */
1587 AARCH64_R_STR (TLSLD_MOVW_DTPREL_G1), /* name */
1588 false, /* partial_inplace */
1589 0xffff, /* src_mask */
1590 0xffff, /* dst_mask */
1591 false), /* pcrel_offset */
1592
1593 /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1. */
1594 HOWTO64 (AARCH64_R (TLSLD_MOVW_DTPREL_G1_NC), /* type */
1595 16, /* rightshift */
1596 4, /* size */
1597 16, /* bitsize */
1598 false, /* pc_relative */
1599 0, /* bitpos */
1600 complain_overflow_dont, /* complain_on_overflow */
1601 bfd_elf_generic_reloc, /* special_function */
1602 AARCH64_R_STR (TLSLD_MOVW_DTPREL_G1_NC), /* name */
1603 false, /* partial_inplace */
1604 0xffff, /* src_mask */
1605 0xffff, /* dst_mask */
1606 false), /* pcrel_offset */
1607
1608 /* MOVZ: bit[47:32] of byte offset to module TLS base address. */
1609 HOWTO64 (AARCH64_R (TLSLD_MOVW_DTPREL_G2), /* type */
1610 32, /* rightshift */
1611 4, /* size */
1612 16, /* bitsize */
1613 false, /* pc_relative */
1614 0, /* bitpos */
1615 complain_overflow_unsigned, /* complain_on_overflow */
1616 bfd_elf_generic_reloc, /* special_function */
1617 AARCH64_R_STR (TLSLD_MOVW_DTPREL_G2), /* name */
1618 false, /* partial_inplace */
1619 0xffff, /* src_mask */
1620 0xffff, /* dst_mask */
1621 false), /* pcrel_offset */
1622
1623 HOWTO64 (AARCH64_R (TLSLE_MOVW_TPREL_G2), /* type */
1624 32, /* rightshift */
1625 4, /* size */
1626 16, /* bitsize */
1627 false, /* pc_relative */
1628 0, /* bitpos */
1629 complain_overflow_unsigned, /* complain_on_overflow */
1630 bfd_elf_generic_reloc, /* special_function */
1631 AARCH64_R_STR (TLSLE_MOVW_TPREL_G2), /* name */
1632 false, /* partial_inplace */
1633 0xffff, /* src_mask */
1634 0xffff, /* dst_mask */
1635 false), /* pcrel_offset */
1636
1637 HOWTO (AARCH64_R (TLSLE_MOVW_TPREL_G1), /* type */
1638 16, /* rightshift */
1639 4, /* size */
1640 16, /* bitsize */
1641 false, /* pc_relative */
1642 0, /* bitpos */
1643 complain_overflow_dont, /* complain_on_overflow */
1644 bfd_elf_generic_reloc, /* special_function */
1645 AARCH64_R_STR (TLSLE_MOVW_TPREL_G1), /* name */
1646 false, /* partial_inplace */
1647 0xffff, /* src_mask */
1648 0xffff, /* dst_mask */
1649 false), /* pcrel_offset */
1650
1651 HOWTO64 (AARCH64_R (TLSLE_MOVW_TPREL_G1_NC), /* type */
1652 16, /* rightshift */
1653 4, /* size */
1654 16, /* bitsize */
1655 false, /* pc_relative */
1656 0, /* bitpos */
1657 complain_overflow_dont, /* complain_on_overflow */
1658 bfd_elf_generic_reloc, /* special_function */
1659 AARCH64_R_STR (TLSLE_MOVW_TPREL_G1_NC), /* name */
1660 false, /* partial_inplace */
1661 0xffff, /* src_mask */
1662 0xffff, /* dst_mask */
1663 false), /* pcrel_offset */
1664
1665 HOWTO (AARCH64_R (TLSLE_MOVW_TPREL_G0), /* type */
1666 0, /* rightshift */
1667 4, /* size */
1668 16, /* bitsize */
1669 false, /* pc_relative */
1670 0, /* bitpos */
1671 complain_overflow_dont, /* complain_on_overflow */
1672 bfd_elf_generic_reloc, /* special_function */
1673 AARCH64_R_STR (TLSLE_MOVW_TPREL_G0), /* name */
1674 false, /* partial_inplace */
1675 0xffff, /* src_mask */
1676 0xffff, /* dst_mask */
1677 false), /* pcrel_offset */
1678
1679 HOWTO (AARCH64_R (TLSLE_MOVW_TPREL_G0_NC), /* type */
1680 0, /* rightshift */
1681 4, /* size */
1682 16, /* bitsize */
1683 false, /* pc_relative */
1684 0, /* bitpos */
1685 complain_overflow_dont, /* complain_on_overflow */
1686 bfd_elf_generic_reloc, /* special_function */
1687 AARCH64_R_STR (TLSLE_MOVW_TPREL_G0_NC), /* name */
1688 false, /* partial_inplace */
1689 0xffff, /* src_mask */
1690 0xffff, /* dst_mask */
1691 false), /* pcrel_offset */
1692
1693 HOWTO (AARCH64_R (TLSLE_ADD_TPREL_HI12), /* type */
1694 12, /* rightshift */
1695 4, /* size */
1696 12, /* bitsize */
1697 false, /* pc_relative */
1698 0, /* bitpos */
1699 complain_overflow_unsigned, /* complain_on_overflow */
1700 bfd_elf_generic_reloc, /* special_function */
1701 AARCH64_R_STR (TLSLE_ADD_TPREL_HI12), /* name */
1702 false, /* partial_inplace */
1703 0xfff, /* src_mask */
1704 0xfff, /* dst_mask */
1705 false), /* pcrel_offset */
1706
1707 HOWTO (AARCH64_R (TLSLE_ADD_TPREL_LO12), /* type */
1708 0, /* rightshift */
1709 4, /* size */
1710 12, /* bitsize */
1711 false, /* pc_relative */
1712 0, /* bitpos */
1713 complain_overflow_unsigned, /* complain_on_overflow */
1714 bfd_elf_generic_reloc, /* special_function */
1715 AARCH64_R_STR (TLSLE_ADD_TPREL_LO12), /* name */
1716 false, /* partial_inplace */
1717 0xfff, /* src_mask */
1718 0xfff, /* dst_mask */
1719 false), /* pcrel_offset */
1720
1721 HOWTO (AARCH64_R (TLSLE_ADD_TPREL_LO12_NC), /* type */
1722 0, /* rightshift */
1723 4, /* size */
1724 12, /* bitsize */
1725 false, /* pc_relative */
1726 0, /* bitpos */
1727 complain_overflow_dont, /* complain_on_overflow */
1728 bfd_elf_generic_reloc, /* special_function */
1729 AARCH64_R_STR (TLSLE_ADD_TPREL_LO12_NC), /* name */
1730 false, /* partial_inplace */
1731 0xfff, /* src_mask */
1732 0xfff, /* dst_mask */
1733 false), /* pcrel_offset */
1734
1735 /* LD/ST16: bit[11:1] of byte offset from the thread pointer (TP). */
1736 HOWTO (AARCH64_R (TLSLE_LDST16_TPREL_LO12), /* type */
1737 1, /* rightshift */
1738 4, /* size */
1739 11, /* bitsize */
1740 false, /* pc_relative */
1741 10, /* bitpos */
1742 complain_overflow_unsigned, /* complain_on_overflow */
1743 bfd_elf_generic_reloc, /* special_function */
1744 AARCH64_R_STR (TLSLE_LDST16_TPREL_LO12), /* name */
1745 false, /* partial_inplace */
1746 0x1ffc00, /* src_mask */
1747 0x1ffc00, /* dst_mask */
1748 false), /* pcrel_offset */
1749
1750 /* Same as BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12, but no overflow check. */
1751 HOWTO (AARCH64_R (TLSLE_LDST16_TPREL_LO12_NC), /* type */
1752 1, /* rightshift */
1753 4, /* size */
1754 11, /* bitsize */
1755 false, /* pc_relative */
1756 10, /* bitpos */
1757 complain_overflow_dont, /* complain_on_overflow */
1758 bfd_elf_generic_reloc, /* special_function */
1759 AARCH64_R_STR (TLSLE_LDST16_TPREL_LO12_NC), /* name */
1760 false, /* partial_inplace */
1761 0x1ffc00, /* src_mask */
1762 0x1ffc00, /* dst_mask */
1763 false), /* pcrel_offset */
1764
1765 /* LD/ST32: bit[11:2] of byte offset from the thread pointer (TP). */
1766 HOWTO (AARCH64_R (TLSLE_LDST32_TPREL_LO12), /* type */
1767 2, /* rightshift */
1768 4, /* size */
1769 10, /* bitsize */
1770 false, /* pc_relative */
1771 10, /* bitpos */
1772 complain_overflow_unsigned, /* complain_on_overflow */
1773 bfd_elf_generic_reloc, /* special_function */
1774 AARCH64_R_STR (TLSLE_LDST32_TPREL_LO12), /* name */
1775 false, /* partial_inplace */
1776 0xffc00, /* src_mask */
1777 0xffc00, /* dst_mask */
1778 false), /* pcrel_offset */
1779
1780 /* Same as BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12, but no overflow check. */
1781 HOWTO (AARCH64_R (TLSLE_LDST32_TPREL_LO12_NC), /* type */
1782 2, /* rightshift */
1783 4, /* size */
1784 10, /* bitsize */
1785 false, /* pc_relative */
1786 10, /* bitpos */
1787 complain_overflow_dont, /* complain_on_overflow */
1788 bfd_elf_generic_reloc, /* special_function */
1789 AARCH64_R_STR (TLSLE_LDST32_TPREL_LO12_NC), /* name */
1790 false, /* partial_inplace */
1791 0xffc00, /* src_mask */
1792 0xffc00, /* dst_mask */
1793 false), /* pcrel_offset */
1794
1795 /* LD/ST64: bit[11:3] of byte offset from the thread pointer (TP). */
1796 HOWTO (AARCH64_R (TLSLE_LDST64_TPREL_LO12), /* type */
1797 3, /* rightshift */
1798 4, /* size */
1799 9, /* bitsize */
1800 false, /* pc_relative */
1801 10, /* bitpos */
1802 complain_overflow_unsigned, /* complain_on_overflow */
1803 bfd_elf_generic_reloc, /* special_function */
1804 AARCH64_R_STR (TLSLE_LDST64_TPREL_LO12), /* name */
1805 false, /* partial_inplace */
1806 0x7fc00, /* src_mask */
1807 0x7fc00, /* dst_mask */
1808 false), /* pcrel_offset */
1809
1810 /* Same as BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12, but no overflow check. */
1811 HOWTO (AARCH64_R (TLSLE_LDST64_TPREL_LO12_NC), /* type */
1812 3, /* rightshift */
1813 4, /* size */
1814 9, /* bitsize */
1815 false, /* pc_relative */
1816 10, /* bitpos */
1817 complain_overflow_dont, /* complain_on_overflow */
1818 bfd_elf_generic_reloc, /* special_function */
1819 AARCH64_R_STR (TLSLE_LDST64_TPREL_LO12_NC), /* name */
1820 false, /* partial_inplace */
1821 0x7fc00, /* src_mask */
1822 0x7fc00, /* dst_mask */
1823 false), /* pcrel_offset */
1824
1825 /* LD/ST8: bit[11:0] of byte offset from the thread pointer (TP). */
1826 HOWTO (AARCH64_R (TLSLE_LDST8_TPREL_LO12), /* type */
1827 0, /* rightshift */
1828 4, /* size */
1829 12, /* bitsize */
1830 false, /* pc_relative */
1831 10, /* bitpos */
1832 complain_overflow_unsigned, /* complain_on_overflow */
1833 bfd_elf_generic_reloc, /* special_function */
1834 AARCH64_R_STR (TLSLE_LDST8_TPREL_LO12), /* name */
1835 false, /* partial_inplace */
1836 0x3ffc00, /* src_mask */
1837 0x3ffc00, /* dst_mask */
1838 false), /* pcrel_offset */
1839
1840 /* Same as BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12, but no overflow check. */
1841 HOWTO (AARCH64_R (TLSLE_LDST8_TPREL_LO12_NC), /* type */
1842 0, /* rightshift */
1843 4, /* size */
1844 12, /* bitsize */
1845 false, /* pc_relative */
1846 10, /* bitpos */
1847 complain_overflow_dont, /* complain_on_overflow */
1848 bfd_elf_generic_reloc, /* special_function */
1849 AARCH64_R_STR (TLSLE_LDST8_TPREL_LO12_NC), /* name */
1850 false, /* partial_inplace */
1851 0x3ffc00, /* src_mask */
1852 0x3ffc00, /* dst_mask */
1853 false), /* pcrel_offset */
1854
1855 HOWTO (AARCH64_R (TLSDESC_LD_PREL19), /* type */
1856 2, /* rightshift */
1857 4, /* size */
1858 19, /* bitsize */
1859 true, /* pc_relative */
1860 0, /* bitpos */
1861 complain_overflow_dont, /* complain_on_overflow */
1862 bfd_elf_generic_reloc, /* special_function */
1863 AARCH64_R_STR (TLSDESC_LD_PREL19), /* name */
1864 false, /* partial_inplace */
1865 0x0ffffe0, /* src_mask */
1866 0x0ffffe0, /* dst_mask */
1867 true), /* pcrel_offset */
1868
1869 HOWTO (AARCH64_R (TLSDESC_ADR_PREL21), /* type */
1870 0, /* rightshift */
1871 4, /* size */
1872 21, /* bitsize */
1873 true, /* pc_relative */
1874 0, /* bitpos */
1875 complain_overflow_dont, /* complain_on_overflow */
1876 bfd_elf_generic_reloc, /* special_function */
1877 AARCH64_R_STR (TLSDESC_ADR_PREL21), /* name */
1878 false, /* partial_inplace */
1879 0x1fffff, /* src_mask */
1880 0x1fffff, /* dst_mask */
1881 true), /* pcrel_offset */
1882
1883 /* Get to the page for the GOT entry for the symbol
1884 (G(S) - P) using an ADRP instruction. */
1885 HOWTO (AARCH64_R (TLSDESC_ADR_PAGE21), /* type */
1886 12, /* rightshift */
1887 4, /* size */
1888 21, /* bitsize */
1889 true, /* pc_relative */
1890 0, /* bitpos */
1891 complain_overflow_dont, /* complain_on_overflow */
1892 bfd_elf_generic_reloc, /* special_function */
1893 AARCH64_R_STR (TLSDESC_ADR_PAGE21), /* name */
1894 false, /* partial_inplace */
1895 0x1fffff, /* src_mask */
1896 0x1fffff, /* dst_mask */
1897 true), /* pcrel_offset */
1898
1899 /* LD64: GOT offset G(S) & 0xff8. */
1900 HOWTO64 (AARCH64_R (TLSDESC_LD64_LO12), /* type */
1901 3, /* rightshift */
1902 4, /* size */
1903 12, /* bitsize */
1904 false, /* pc_relative */
1905 0, /* bitpos */
1906 complain_overflow_dont, /* complain_on_overflow */
1907 bfd_elf_generic_reloc, /* special_function */
1908 AARCH64_R_STR (TLSDESC_LD64_LO12), /* name */
1909 false, /* partial_inplace */
1910 0xff8, /* src_mask */
1911 0xff8, /* dst_mask */
1912 false), /* pcrel_offset */
1913
1914 /* LD32: GOT offset G(S) & 0xffc. */
1915 HOWTO32 (AARCH64_R (TLSDESC_LD32_LO12_NC), /* type */
1916 2, /* rightshift */
1917 4, /* size */
1918 12, /* bitsize */
1919 false, /* pc_relative */
1920 0, /* bitpos */
1921 complain_overflow_dont, /* complain_on_overflow */
1922 bfd_elf_generic_reloc, /* special_function */
1923 AARCH64_R_STR (TLSDESC_LD32_LO12_NC), /* name */
1924 false, /* partial_inplace */
1925 0xffc, /* src_mask */
1926 0xffc, /* dst_mask */
1927 false), /* pcrel_offset */
1928
1929 /* ADD: GOT offset G(S) & 0xfff. */
1930 HOWTO (AARCH64_R (TLSDESC_ADD_LO12), /* type */
1931 0, /* rightshift */
1932 4, /* size */
1933 12, /* bitsize */
1934 false, /* pc_relative */
1935 0, /* bitpos */
1936 complain_overflow_dont,/* complain_on_overflow */
1937 bfd_elf_generic_reloc, /* special_function */
1938 AARCH64_R_STR (TLSDESC_ADD_LO12), /* name */
1939 false, /* partial_inplace */
1940 0xfff, /* src_mask */
1941 0xfff, /* dst_mask */
1942 false), /* pcrel_offset */
1943
1944 HOWTO64 (AARCH64_R (TLSDESC_OFF_G1), /* type */
1945 16, /* rightshift */
1946 4, /* size */
1947 12, /* bitsize */
1948 false, /* pc_relative */
1949 0, /* bitpos */
1950 complain_overflow_unsigned, /* complain_on_overflow */
1951 bfd_elf_generic_reloc, /* special_function */
1952 AARCH64_R_STR (TLSDESC_OFF_G1), /* name */
1953 false, /* partial_inplace */
1954 0xffff, /* src_mask */
1955 0xffff, /* dst_mask */
1956 false), /* pcrel_offset */
1957
1958 HOWTO64 (AARCH64_R (TLSDESC_OFF_G0_NC), /* type */
1959 0, /* rightshift */
1960 4, /* size */
1961 12, /* bitsize */
1962 false, /* pc_relative */
1963 0, /* bitpos */
1964 complain_overflow_dont, /* complain_on_overflow */
1965 bfd_elf_generic_reloc, /* special_function */
1966 AARCH64_R_STR (TLSDESC_OFF_G0_NC), /* name */
1967 false, /* partial_inplace */
1968 0xffff, /* src_mask */
1969 0xffff, /* dst_mask */
1970 false), /* pcrel_offset */
1971
1972 HOWTO64 (AARCH64_R (TLSDESC_LDR), /* type */
1973 0, /* rightshift */
1974 4, /* size */
1975 12, /* bitsize */
1976 false, /* pc_relative */
1977 0, /* bitpos */
1978 complain_overflow_dont, /* complain_on_overflow */
1979 bfd_elf_generic_reloc, /* special_function */
1980 AARCH64_R_STR (TLSDESC_LDR), /* name */
1981 false, /* partial_inplace */
1982 0x0, /* src_mask */
1983 0x0, /* dst_mask */
1984 false), /* pcrel_offset */
1985
1986 HOWTO64 (AARCH64_R (TLSDESC_ADD), /* type */
1987 0, /* rightshift */
1988 4, /* size */
1989 12, /* bitsize */
1990 false, /* pc_relative */
1991 0, /* bitpos */
1992 complain_overflow_dont, /* complain_on_overflow */
1993 bfd_elf_generic_reloc, /* special_function */
1994 AARCH64_R_STR (TLSDESC_ADD), /* name */
1995 false, /* partial_inplace */
1996 0x0, /* src_mask */
1997 0x0, /* dst_mask */
1998 false), /* pcrel_offset */
1999
2000 HOWTO (AARCH64_R (TLSDESC_CALL), /* type */
2001 0, /* rightshift */
2002 4, /* size */
2003 0, /* bitsize */
2004 false, /* pc_relative */
2005 0, /* bitpos */
2006 complain_overflow_dont, /* complain_on_overflow */
2007 bfd_elf_generic_reloc, /* special_function */
2008 AARCH64_R_STR (TLSDESC_CALL), /* name */
2009 false, /* partial_inplace */
2010 0x0, /* src_mask */
2011 0x0, /* dst_mask */
2012 false), /* pcrel_offset */
2013
2014 HOWTO (AARCH64_R (COPY), /* type */
2015 0, /* rightshift */
2016 4, /* size */
2017 64, /* bitsize */
2018 false, /* pc_relative */
2019 0, /* bitpos */
2020 complain_overflow_bitfield, /* complain_on_overflow */
2021 bfd_elf_generic_reloc, /* special_function */
2022 AARCH64_R_STR (COPY), /* name */
2023 true, /* partial_inplace */
2024 0xffffffff, /* src_mask */
2025 0xffffffff, /* dst_mask */
2026 false), /* pcrel_offset */
2027
2028 HOWTO (AARCH64_R (GLOB_DAT), /* type */
2029 0, /* rightshift */
2030 4, /* size */
2031 64, /* bitsize */
2032 false, /* pc_relative */
2033 0, /* bitpos */
2034 complain_overflow_bitfield, /* complain_on_overflow */
2035 bfd_elf_generic_reloc, /* special_function */
2036 AARCH64_R_STR (GLOB_DAT), /* name */
2037 true, /* partial_inplace */
2038 0xffffffff, /* src_mask */
2039 0xffffffff, /* dst_mask */
2040 false), /* pcrel_offset */
2041
2042 HOWTO (AARCH64_R (JUMP_SLOT), /* type */
2043 0, /* rightshift */
2044 4, /* size */
2045 64, /* bitsize */
2046 false, /* pc_relative */
2047 0, /* bitpos */
2048 complain_overflow_bitfield, /* complain_on_overflow */
2049 bfd_elf_generic_reloc, /* special_function */
2050 AARCH64_R_STR (JUMP_SLOT), /* name */
2051 true, /* partial_inplace */
2052 0xffffffff, /* src_mask */
2053 0xffffffff, /* dst_mask */
2054 false), /* pcrel_offset */
2055
2056 HOWTO (AARCH64_R (RELATIVE), /* type */
2057 0, /* rightshift */
2058 4, /* size */
2059 64, /* bitsize */
2060 false, /* pc_relative */
2061 0, /* bitpos */
2062 complain_overflow_bitfield, /* complain_on_overflow */
2063 bfd_elf_generic_reloc, /* special_function */
2064 AARCH64_R_STR (RELATIVE), /* name */
2065 true, /* partial_inplace */
2066 ALL_ONES, /* src_mask */
2067 ALL_ONES, /* dst_mask */
2068 false), /* pcrel_offset */
2069
2070 HOWTO (AARCH64_R (TLS_DTPMOD), /* type */
2071 0, /* rightshift */
2072 4, /* size */
2073 64, /* bitsize */
2074 false, /* pc_relative */
2075 0, /* bitpos */
2076 complain_overflow_dont, /* complain_on_overflow */
2077 bfd_elf_generic_reloc, /* special_function */
2078 #if ARCH_SIZE == 64
2079 AARCH64_R_STR (TLS_DTPMOD64), /* name */
2080 #else
2081 AARCH64_R_STR (TLS_DTPMOD), /* name */
2082 #endif
2083 false, /* partial_inplace */
2084 0, /* src_mask */
2085 ALL_ONES, /* dst_mask */
2086 false), /* pcrel_offset */
2087
2088 HOWTO (AARCH64_R (TLS_DTPREL), /* type */
2089 0, /* rightshift */
2090 4, /* size */
2091 64, /* bitsize */
2092 false, /* pc_relative */
2093 0, /* bitpos */
2094 complain_overflow_dont, /* complain_on_overflow */
2095 bfd_elf_generic_reloc, /* special_function */
2096 #if ARCH_SIZE == 64
2097 AARCH64_R_STR (TLS_DTPREL64), /* name */
2098 #else
2099 AARCH64_R_STR (TLS_DTPREL), /* name */
2100 #endif
2101 false, /* partial_inplace */
2102 0, /* src_mask */
2103 ALL_ONES, /* dst_mask */
2104 false), /* pcrel_offset */
2105
2106 HOWTO (AARCH64_R (TLS_TPREL), /* type */
2107 0, /* rightshift */
2108 4, /* size */
2109 64, /* bitsize */
2110 false, /* pc_relative */
2111 0, /* bitpos */
2112 complain_overflow_dont, /* complain_on_overflow */
2113 bfd_elf_generic_reloc, /* special_function */
2114 #if ARCH_SIZE == 64
2115 AARCH64_R_STR (TLS_TPREL64), /* name */
2116 #else
2117 AARCH64_R_STR (TLS_TPREL), /* name */
2118 #endif
2119 false, /* partial_inplace */
2120 0, /* src_mask */
2121 ALL_ONES, /* dst_mask */
2122 false), /* pcrel_offset */
2123
2124 HOWTO (AARCH64_R (TLSDESC), /* type */
2125 0, /* rightshift */
2126 4, /* size */
2127 64, /* bitsize */
2128 false, /* pc_relative */
2129 0, /* bitpos */
2130 complain_overflow_dont, /* complain_on_overflow */
2131 bfd_elf_generic_reloc, /* special_function */
2132 AARCH64_R_STR (TLSDESC), /* name */
2133 false, /* partial_inplace */
2134 0, /* src_mask */
2135 ALL_ONES, /* dst_mask */
2136 false), /* pcrel_offset */
2137
2138 HOWTO (AARCH64_R (IRELATIVE), /* type */
2139 0, /* rightshift */
2140 4, /* size */
2141 64, /* bitsize */
2142 false, /* pc_relative */
2143 0, /* bitpos */
2144 complain_overflow_bitfield, /* complain_on_overflow */
2145 bfd_elf_generic_reloc, /* special_function */
2146 AARCH64_R_STR (IRELATIVE), /* name */
2147 false, /* partial_inplace */
2148 0, /* src_mask */
2149 ALL_ONES, /* dst_mask */
2150 false), /* pcrel_offset */
2151
2152 EMPTY_HOWTO (0),
2153 };
2154
2155 static reloc_howto_type elfNN_aarch64_howto_none =
2156 HOWTO (R_AARCH64_NONE, /* type */
2157 0, /* rightshift */
2158 0, /* size */
2159 0, /* bitsize */
2160 false, /* pc_relative */
2161 0, /* bitpos */
2162 complain_overflow_dont,/* complain_on_overflow */
2163 bfd_elf_generic_reloc, /* special_function */
2164 "R_AARCH64_NONE", /* name */
2165 false, /* partial_inplace */
2166 0, /* src_mask */
2167 0, /* dst_mask */
2168 false); /* pcrel_offset */
2169
2170 /* Given HOWTO, return the bfd internal relocation enumerator. */
2171
2172 static bfd_reloc_code_real_type
2173 elfNN_aarch64_bfd_reloc_from_howto (reloc_howto_type *howto)
2174 {
2175 const int size
2176 = (int) ARRAY_SIZE (elfNN_aarch64_howto_table);
2177 const ptrdiff_t offset
2178 = howto - elfNN_aarch64_howto_table;
2179
2180 if (offset > 0 && offset < size - 1)
2181 return BFD_RELOC_AARCH64_RELOC_START + offset;
2182
2183 if (howto == &elfNN_aarch64_howto_none)
2184 return BFD_RELOC_AARCH64_NONE;
2185
2186 return BFD_RELOC_AARCH64_RELOC_START;
2187 }
2188
2189 /* Given R_TYPE, return the bfd internal relocation enumerator. */
2190
2191 static bfd_reloc_code_real_type
2192 elfNN_aarch64_bfd_reloc_from_type (bfd *abfd, unsigned int r_type)
2193 {
2194 static bool initialized_p = false;
2195 /* Indexed by R_TYPE, values are offsets in the howto_table. */
2196 static unsigned int offsets[R_AARCH64_end];
2197
2198 if (!initialized_p)
2199 {
2200 unsigned int i;
2201
2202 for (i = 1; i < ARRAY_SIZE (elfNN_aarch64_howto_table) - 1; ++i)
2203 if (elfNN_aarch64_howto_table[i].type != 0)
2204 offsets[elfNN_aarch64_howto_table[i].type] = i;
2205
2206 initialized_p = true;
2207 }
2208
2209 if (r_type == R_AARCH64_NONE || r_type == R_AARCH64_NULL)
2210 return BFD_RELOC_AARCH64_NONE;
2211
2212 /* PR 17512: file: b371e70a. */
2213 if (r_type >= R_AARCH64_end)
2214 {
2215 _bfd_error_handler (_("%pB: unsupported relocation type %#x"),
2216 abfd, r_type);
2217 bfd_set_error (bfd_error_bad_value);
2218 return BFD_RELOC_AARCH64_NONE;
2219 }
2220
2221 return BFD_RELOC_AARCH64_RELOC_START + offsets[r_type];
2222 }
2223
2224 struct elf_aarch64_reloc_map
2225 {
2226 bfd_reloc_code_real_type from;
2227 bfd_reloc_code_real_type to;
2228 };
2229
2230 /* Map bfd generic reloc to AArch64-specific reloc. */
2231 static const struct elf_aarch64_reloc_map elf_aarch64_reloc_map[] =
2232 {
2233 {BFD_RELOC_NONE, BFD_RELOC_AARCH64_NONE},
2234
2235 /* Basic data relocations. */
2236 {BFD_RELOC_CTOR, BFD_RELOC_AARCH64_NN},
2237 {BFD_RELOC_64, BFD_RELOC_AARCH64_64},
2238 {BFD_RELOC_32, BFD_RELOC_AARCH64_32},
2239 {BFD_RELOC_16, BFD_RELOC_AARCH64_16},
2240 {BFD_RELOC_64_PCREL, BFD_RELOC_AARCH64_64_PCREL},
2241 {BFD_RELOC_32_PCREL, BFD_RELOC_AARCH64_32_PCREL},
2242 {BFD_RELOC_16_PCREL, BFD_RELOC_AARCH64_16_PCREL},
2243 };
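/* Usage sketch (illustrative, not a separate API): a caller holding the
   generic code BFD_RELOC_32 first has it translated to BFD_RELOC_AARCH64_32
   through the table above, and the result is then used to index the howto
   table as code - BFD_RELOC_AARCH64_RELOC_START in
   elfNN_aarch64_howto_from_bfd_reloc below.  */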
2244
2245 /* Given the bfd internal relocation enumerator in CODE, return the
2246 corresponding howto entry. */
2247
2248 static reloc_howto_type *
2249 elfNN_aarch64_howto_from_bfd_reloc (bfd_reloc_code_real_type code)
2250 {
2251 unsigned int i;
2252
2253 /* Convert bfd generic reloc to AArch64-specific reloc. */
2254 if (code < BFD_RELOC_AARCH64_RELOC_START
2255 || code > BFD_RELOC_AARCH64_RELOC_END)
2256 for (i = 0; i < ARRAY_SIZE (elf_aarch64_reloc_map); i++)
2257 if (elf_aarch64_reloc_map[i].from == code)
2258 {
2259 code = elf_aarch64_reloc_map[i].to;
2260 break;
2261 }
2262
2263 if (code > BFD_RELOC_AARCH64_RELOC_START
2264 && code < BFD_RELOC_AARCH64_RELOC_END)
2265 if (elfNN_aarch64_howto_table[code - BFD_RELOC_AARCH64_RELOC_START].type)
2266 return &elfNN_aarch64_howto_table[code - BFD_RELOC_AARCH64_RELOC_START];
2267
2268 if (code == BFD_RELOC_AARCH64_NONE)
2269 return &elfNN_aarch64_howto_none;
2270
2271 return NULL;
2272 }
2273
2274 static reloc_howto_type *
2275 elfNN_aarch64_howto_from_type (bfd *abfd, unsigned int r_type)
2276 {
2277 bfd_reloc_code_real_type val;
2278 reloc_howto_type *howto;
2279
2280 #if ARCH_SIZE == 32
2281 if (r_type > 256)
2282 {
2283 bfd_set_error (bfd_error_bad_value);
2284 return NULL;
2285 }
2286 #endif
2287
2288 if (r_type == R_AARCH64_NONE)
2289 return &elfNN_aarch64_howto_none;
2290
2291 val = elfNN_aarch64_bfd_reloc_from_type (abfd, r_type);
2292 howto = elfNN_aarch64_howto_from_bfd_reloc (val);
2293
2294 if (howto != NULL)
2295 return howto;
2296
2297 bfd_set_error (bfd_error_bad_value);
2298 return NULL;
2299 }
2300
2301 static bool
2302 elfNN_aarch64_info_to_howto (bfd *abfd, arelent *bfd_reloc,
2303 Elf_Internal_Rela *elf_reloc)
2304 {
2305 unsigned int r_type;
2306
2307 r_type = ELFNN_R_TYPE (elf_reloc->r_info);
2308 bfd_reloc->howto = elfNN_aarch64_howto_from_type (abfd, r_type);
2309
2310 if (bfd_reloc->howto == NULL)
2311 {
2312 /* xgettext:c-format */
2313 _bfd_error_handler (_("%pB: unsupported relocation type %#x"), abfd, r_type);
2314 return false;
2315 }
2316 return true;
2317 }
2318
2319 static reloc_howto_type *
2320 elfNN_aarch64_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
2321 bfd_reloc_code_real_type code)
2322 {
2323 reloc_howto_type *howto = elfNN_aarch64_howto_from_bfd_reloc (code);
2324
2325 if (howto != NULL)
2326 return howto;
2327
2328 bfd_set_error (bfd_error_bad_value);
2329 return NULL;
2330 }
2331
2332 static reloc_howto_type *
2333 elfNN_aarch64_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
2334 const char *r_name)
2335 {
2336 unsigned int i;
2337
2338 for (i = 1; i < ARRAY_SIZE (elfNN_aarch64_howto_table) - 1; ++i)
2339 if (elfNN_aarch64_howto_table[i].name != NULL
2340 && strcasecmp (elfNN_aarch64_howto_table[i].name, r_name) == 0)
2341 return &elfNN_aarch64_howto_table[i];
2342
2343 return NULL;
2344 }
2345
2346 #define TARGET_LITTLE_SYM aarch64_elfNN_le_vec
2347 #define TARGET_LITTLE_NAME "elfNN-littleaarch64"
2348 #define TARGET_BIG_SYM aarch64_elfNN_be_vec
2349 #define TARGET_BIG_NAME "elfNN-bigaarch64"
2350
2351 /* The linker script knows the section names for placement.
2352 The entry_names are used to do simple name mangling on the stubs.
2353 Given a function name and its type, the stub can be found. The
2354 name can be changed. The only requirement is that the %s be present. */
2355 #define STUB_ENTRY_NAME "__%s_veneer"
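/* For illustration: a veneer generated for a call to a function named
   `memcpy' would be given the entry name `__memcpy_veneer', i.e. the
   function name substituted for the %s above.  */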
2356
2357 /* The name of the dynamic interpreter. This is put in the .interp
2358 section. */
2359 #define ELF_DYNAMIC_INTERPRETER "/lib/ld.so.1"
2360
2361 #define AARCH64_MAX_FWD_BRANCH_OFFSET \
2362 (((1 << 25) - 1) << 2)
2363 #define AARCH64_MAX_BWD_BRANCH_OFFSET \
2364 (-((1 << 25) << 2))
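/* Worked numbers for the limits above: the B/BL immediate is a signed
   26-bit word offset, so the forward limit evaluates to
   ((1 << 25) - 1) << 2 = 0x7fffffc bytes (just under +128 MiB) and the
   backward limit to -(1 << 25) << 2 = -0x8000000 bytes (-128 MiB).  */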
2365
2366 #define AARCH64_MAX_ADRP_IMM ((1 << 20) - 1)
2367 #define AARCH64_MIN_ADRP_IMM (-(1 << 20))
2368
2369 static int
2370 aarch64_valid_for_adrp_p (bfd_vma value, bfd_vma place)
2371 {
2372 bfd_signed_vma offset = (bfd_signed_vma) (PG (value) - PG (place)) >> 12;
2373 return offset <= AARCH64_MAX_ADRP_IMM && offset >= AARCH64_MIN_ADRP_IMM;
2374 }
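/* A small worked example of the check above, assuming PG () masks off the
   low 12 bits (i.e. yields the 4 KiB page base): for value = 0x11000123
   and place = 0x10000456 the page difference is
   (0x11000000 - 0x10000000) >> 12 = 0x1000 pages, well within the ADRP
   immediate range of [-(1 << 20), (1 << 20) - 1] pages, which corresponds
   to roughly +/-4 GiB from the ADRP instruction.  */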
2375
2376 static int
2377 aarch64_valid_branch_p (bfd_vma value, bfd_vma place)
2378 {
2379 bfd_signed_vma offset = (bfd_signed_vma) (value - place);
2380 return (offset <= AARCH64_MAX_FWD_BRANCH_OFFSET
2381 && offset >= AARCH64_MAX_BWD_BRANCH_OFFSET);
2382 }
2383
2384 static const uint32_t aarch64_adrp_branch_stub [] =
2385 {
2386 0x90000010, /* adrp ip0, X */
2387 /* R_AARCH64_ADR_HI21_PCREL(X) */
2388 0x91000210, /* add ip0, ip0, :lo12:X */
2389 /* R_AARCH64_ADD_ABS_LO12_NC(X) */
2390 0xd61f0200, /* br ip0 */
2391 };
2392
2393 static const uint32_t aarch64_long_branch_stub[] =
2394 {
2395 #if ARCH_SIZE == 64
2396 0x58000090, /* ldr ip0, 1f */
2397 #else
2398 0x18000090, /* ldr wip0, 1f */
2399 #endif
2400 0x10000011, /* adr ip1, #0 */
2401 0x8b110210, /* add ip0, ip0, ip1 */
2402 0xd61f0200, /* br ip0 */
2403 0x00000000, /* 1: .xword or .word
2404 R_AARCH64_PRELNN(X) + 12
2405 */
2406 0x00000000,
2407 };
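/* How the long-branch template reaches its destination X (a sketch derived
   from the relocations applied in aarch64_build_one_stub below): the
   literal at offset 16 is resolved as R_AARCH64_PRELNN(X) + 12 and so ends
   up holding X - (stub + 16) + 12 = X - (stub + 4); the `adr ip1, #0' at
   offset 4 produces stub + 4; their sum, formed by the `add', is exactly X,
   which the final `br ip0' jumps to.  */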
2408
2409 static const uint32_t aarch64_erratum_835769_stub[] =
2410 {
2411 0x00000000, /* Placeholder for multiply accumulate. */
2412 0x14000000, /* b <label> */
2413 };
2414
2415 static const uint32_t aarch64_erratum_843419_stub[] =
2416 {
2417 0x00000000, /* Placeholder for LDR instruction. */
2418 0x14000000, /* b <label> */
2419 };
2420
2421 /* Section name for stubs is the associated section name plus this
2422 string. */
2423 #define STUB_SUFFIX ".stub"
2424
2425 enum elf_aarch64_stub_type
2426 {
2427 aarch64_stub_none,
2428 aarch64_stub_adrp_branch,
2429 aarch64_stub_long_branch,
2430 aarch64_stub_erratum_835769_veneer,
2431 aarch64_stub_erratum_843419_veneer,
2432 };
2433
2434 struct elf_aarch64_stub_hash_entry
2435 {
2436 /* Base hash table entry structure. */
2437 struct bfd_hash_entry root;
2438
2439 /* The stub section. */
2440 asection *stub_sec;
2441
2442 /* Offset within stub_sec of the beginning of this stub. */
2443 bfd_vma stub_offset;
2444
2445 /* Given the symbol's value and its section we can determine its final
2446 value when building the stubs (so the stub knows where to jump). */
2447 bfd_vma target_value;
2448 asection *target_section;
2449
2450 enum elf_aarch64_stub_type stub_type;
2451
2452 /* The symbol table entry, if any, that this was derived from. */
2453 struct elf_aarch64_link_hash_entry *h;
2454
2455 /* Destination symbol type. */
2456 unsigned char st_type;
2457
2458 /* Where this stub is being called from, or, in the case of combined
2459 stub sections, the first input section in the group. */
2460 asection *id_sec;
2461
2462 /* The name for the local symbol at the start of this stub. The
2463 stub name in the hash table has to be unique; this does not, so
2464 it can be friendlier. */
2465 char *output_name;
2466
2467 /* The instruction which caused this stub to be generated (only valid for
2468 erratum 835769 workaround stubs at present). */
2469 uint32_t veneered_insn;
2470
2471 /* In an erratum 843419 workaround stub, the ADRP instruction offset. */
2472 bfd_vma adrp_offset;
2473 };
2474
2475 /* Used to build a map of a section. This is required for mixed-endian
2476 code/data. */
2477
2478 typedef struct elf_elf_section_map
2479 {
2480 bfd_vma vma;
2481 char type;
2482 }
2483 elf_aarch64_section_map;
2484
2485
2486 typedef struct _aarch64_elf_section_data
2487 {
2488 struct bfd_elf_section_data elf;
2489 unsigned int mapcount;
2490 unsigned int mapsize;
2491 elf_aarch64_section_map *map;
2492 }
2493 _aarch64_elf_section_data;
2494
2495 #define elf_aarch64_section_data(sec) \
2496 ((_aarch64_elf_section_data *) elf_section_data (sec))
2497
2498 /* The size of the thread control block, which is defined to be two pointers. */
2499 #define TCB_SIZE (ARCH_SIZE/8)*2
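/* For example, TCB_SIZE evaluates to (64 / 8) * 2 = 16 bytes when
   ARCH_SIZE is 64, and to (32 / 8) * 2 = 8 bytes when ARCH_SIZE is 32:
   two pointers in either case.  */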
2500
2501 struct elf_aarch64_local_symbol
2502 {
2503 unsigned int got_type;
2504 bfd_signed_vma got_refcount;
2505 bfd_vma got_offset;
2506
2507 /* Offset of the GOTPLT entry reserved for the TLS descriptor. The
2508 offset is from the end of the jump table and reserved entries
2509 within the PLTGOT.
2510
2511 The magic value (bfd_vma) -1 indicates that an offset has not been
2512 allocated. */
2513 bfd_vma tlsdesc_got_jump_table_offset;
2514 };
2515
2516 struct elf_aarch64_obj_tdata
2517 {
2518 struct elf_obj_tdata root;
2519
2520 /* local symbol descriptors */
2521 struct elf_aarch64_local_symbol *locals;
2522
2523 /* Zero to warn when linking objects with incompatible enum sizes. */
2524 int no_enum_size_warning;
2525
2526 /* Zero to warn when linking objects with incompatible wchar_t sizes. */
2527 int no_wchar_size_warning;
2528
2529 /* All GNU_PROPERTY_AARCH64_FEATURE_1_AND properties. */
2530 uint32_t gnu_and_prop;
2531
2532 /* Zero to warn when linking objects with incompatible
2533 GNU_PROPERTY_AARCH64_FEATURE_1_BTI. */
2534 int no_bti_warn;
2535
2536 /* PLT type based on security. */
2537 aarch64_plt_type plt_type;
2538 };
2539
2540 #define elf_aarch64_tdata(bfd) \
2541 ((struct elf_aarch64_obj_tdata *) (bfd)->tdata.any)
2542
2543 #define elf_aarch64_locals(bfd) (elf_aarch64_tdata (bfd)->locals)
2544
2545 #define is_aarch64_elf(bfd) \
2546 (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
2547 && elf_tdata (bfd) != NULL \
2548 && elf_object_id (bfd) == AARCH64_ELF_DATA)
2549
2550 static bool
2551 elfNN_aarch64_mkobject (bfd *abfd)
2552 {
2553 return bfd_elf_allocate_object (abfd, sizeof (struct elf_aarch64_obj_tdata),
2554 AARCH64_ELF_DATA);
2555 }
2556
2557 #define elf_aarch64_hash_entry(ent) \
2558 ((struct elf_aarch64_link_hash_entry *)(ent))
2559
2560 #define GOT_UNKNOWN 0
2561 #define GOT_NORMAL 1
2562 #define GOT_TLS_GD 2
2563 #define GOT_TLS_IE 4
2564 #define GOT_TLSDESC_GD 8
2565
2566 #define GOT_TLS_GD_ANY_P(type) ((type & GOT_TLS_GD) || (type & GOT_TLSDESC_GD))
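/* The GOT_* values above act as a bit mask, so a single symbol may
   accumulate more than one of them.  Purely as an illustration, a symbol
   referenced through both a TLS descriptor sequence and an initial-exec
   sequence could end up with got_type == (GOT_TLSDESC_GD | GOT_TLS_IE),
   which GOT_TLS_GD_ANY_P () still reports as true.  */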
2567
2568 /* AArch64 ELF linker hash entry. */
2569 struct elf_aarch64_link_hash_entry
2570 {
2571 struct elf_link_hash_entry root;
2572
2573 /* Since PLT entries have variable size, we need to record the
2574 index into .got.plt instead of recomputing it from the PLT
2575 offset. */
2576 bfd_signed_vma plt_got_offset;
2577
2578 /* Bit mask representing the type of GOT entry(s), if any, required by
2579 this symbol. */
2580 unsigned int got_type;
2581
2582 /* TRUE if symbol is defined as a protected symbol. */
2583 unsigned int def_protected : 1;
2584
2585 /* A pointer to the most recently used stub hash entry against this
2586 symbol. */
2587 struct elf_aarch64_stub_hash_entry *stub_cache;
2588
2589 /* Offset of the GOTPLT entry reserved for the TLS descriptor. The offset
2590 is from the end of the jump table and reserved entries within the PLTGOT.
2591
2592 The magic value (bfd_vma) -1 indicates that an offset has not
2593 been allocated. */
2594 bfd_vma tlsdesc_got_jump_table_offset;
2595 };
2596
2597 static unsigned int
2598 elfNN_aarch64_symbol_got_type (struct elf_link_hash_entry *h,
2599 bfd *abfd,
2600 unsigned long r_symndx)
2601 {
2602 if (h)
2603 return elf_aarch64_hash_entry (h)->got_type;
2604
2605 if (! elf_aarch64_locals (abfd))
2606 return GOT_UNKNOWN;
2607
2608 return elf_aarch64_locals (abfd)[r_symndx].got_type;
2609 }
2610
2611 /* Get the AArch64 elf linker hash table from a link_info structure. */
2612 #define elf_aarch64_hash_table(info) \
2613 ((struct elf_aarch64_link_hash_table *) ((info)->hash))
2614
2615 #define aarch64_stub_hash_lookup(table, string, create, copy) \
2616 ((struct elf_aarch64_stub_hash_entry *) \
2617 bfd_hash_lookup ((table), (string), (create), (copy)))
2618
2619 /* AArch64 ELF linker hash table. */
2620 struct elf_aarch64_link_hash_table
2621 {
2622 /* The main hash table. */
2623 struct elf_link_hash_table root;
2624
2625 /* Nonzero to force PIC branch veneers. */
2626 int pic_veneer;
2627
2628 /* Fix erratum 835769. */
2629 int fix_erratum_835769;
2630
2631 /* Fix erratum 843419. */
2632 erratum_84319_opts fix_erratum_843419;
2633
2634 /* Don't apply link-time values for dynamic relocations. */
2635 int no_apply_dynamic_relocs;
2636
2637 /* The number of bytes in the initial entry in the PLT. */
2638 bfd_size_type plt_header_size;
2639
2640 /* The bytes of the initial PLT entry. */
2641 const bfd_byte *plt0_entry;
2642
2643 /* The number of bytes in the subsequent PLT entries. */
2644 bfd_size_type plt_entry_size;
2645
2646 /* The bytes of the subsequent PLT entry. */
2647 const bfd_byte *plt_entry;
2648
2649 /* For convenience in allocate_dynrelocs. */
2650 bfd *obfd;
2651
2652 /* The amount of space used by the reserved portion of the sgotplt
2653 section, plus whatever space is used by the jump slots. */
2654 bfd_vma sgotplt_jump_table_size;
2655
2656 /* The stub hash table. */
2657 struct bfd_hash_table stub_hash_table;
2658
2659 /* Linker stub bfd. */
2660 bfd *stub_bfd;
2661
2662 /* Linker call-backs. */
2663 asection *(*add_stub_section) (const char *, asection *);
2664 void (*layout_sections_again) (void);
2665
2666 /* Array to keep track of which stub sections have been created, and
2667 information on stub grouping. */
2668 struct map_stub
2669 {
2670 /* This is the section to which stubs in the group will be
2671 attached. */
2672 asection *link_sec;
2673 /* The stub section. */
2674 asection *stub_sec;
2675 } *stub_group;
2676
2677 /* Assorted information used by elfNN_aarch64_size_stubs. */
2678 unsigned int bfd_count;
2679 unsigned int top_index;
2680 asection **input_list;
2681
2682 /* JUMP_SLOT relocs for variant PCS symbols may be present. */
2683 int variant_pcs;
2684
2685 /* The number of bytes in the PLT entry for the TLS descriptor. */
2686 bfd_size_type tlsdesc_plt_entry_size;
2687
2688 /* Used by local STT_GNU_IFUNC symbols. */
2689 htab_t loc_hash_table;
2690 void * loc_hash_memory;
2691 };
2692
2693 /* Create an entry in an AArch64 ELF linker hash table. */
2694
2695 static struct bfd_hash_entry *
2696 elfNN_aarch64_link_hash_newfunc (struct bfd_hash_entry *entry,
2697 struct bfd_hash_table *table,
2698 const char *string)
2699 {
2700 struct elf_aarch64_link_hash_entry *ret =
2701 (struct elf_aarch64_link_hash_entry *) entry;
2702
2703 /* Allocate the structure if it has not already been allocated by a
2704 subclass. */
2705 if (ret == NULL)
2706 ret = bfd_hash_allocate (table,
2707 sizeof (struct elf_aarch64_link_hash_entry));
2708 if (ret == NULL)
2709 return (struct bfd_hash_entry *) ret;
2710
2711 /* Call the allocation method of the superclass. */
2712 ret = ((struct elf_aarch64_link_hash_entry *)
2713 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret,
2714 table, string));
2715 if (ret != NULL)
2716 {
2717 ret->got_type = GOT_UNKNOWN;
2718 ret->def_protected = 0;
2719 ret->plt_got_offset = (bfd_vma) - 1;
2720 ret->stub_cache = NULL;
2721 ret->tlsdesc_got_jump_table_offset = (bfd_vma) - 1;
2722 }
2723
2724 return (struct bfd_hash_entry *) ret;
2725 }
2726
2727 /* Initialize an entry in the stub hash table. */
2728
2729 static struct bfd_hash_entry *
2730 stub_hash_newfunc (struct bfd_hash_entry *entry,
2731 struct bfd_hash_table *table, const char *string)
2732 {
2733 /* Allocate the structure if it has not already been allocated by a
2734 subclass. */
2735 if (entry == NULL)
2736 {
2737 entry = bfd_hash_allocate (table,
2738 sizeof (struct
2739 elf_aarch64_stub_hash_entry));
2740 if (entry == NULL)
2741 return entry;
2742 }
2743
2744 /* Call the allocation method of the superclass. */
2745 entry = bfd_hash_newfunc (entry, table, string);
2746 if (entry != NULL)
2747 {
2748 struct elf_aarch64_stub_hash_entry *eh;
2749
2750 /* Initialize the local fields. */
2751 eh = (struct elf_aarch64_stub_hash_entry *) entry;
2752 eh->adrp_offset = 0;
2753 eh->stub_sec = NULL;
2754 eh->stub_offset = 0;
2755 eh->target_value = 0;
2756 eh->target_section = NULL;
2757 eh->stub_type = aarch64_stub_none;
2758 eh->h = NULL;
2759 eh->id_sec = NULL;
2760 }
2761
2762 return entry;
2763 }
2764
2765 /* Compute a hash of a local hash entry. We use elf_link_hash_entry
2766 for local symbols so that we can handle local STT_GNU_IFUNC symbols
2767 as global symbols. We reuse indx and dynstr_index for the local symbol
2768 hash since they aren't used by global symbols in this backend. */
2769
2770 static hashval_t
2771 elfNN_aarch64_local_htab_hash (const void *ptr)
2772 {
2773 struct elf_link_hash_entry *h
2774 = (struct elf_link_hash_entry *) ptr;
2775 return ELF_LOCAL_SYMBOL_HASH (h->indx, h->dynstr_index);
2776 }
2777
2778 /* Compare local hash entries. */
2779
2780 static int
2781 elfNN_aarch64_local_htab_eq (const void *ptr1, const void *ptr2)
2782 {
2783 struct elf_link_hash_entry *h1
2784 = (struct elf_link_hash_entry *) ptr1;
2785 struct elf_link_hash_entry *h2
2786 = (struct elf_link_hash_entry *) ptr2;
2787
2788 return h1->indx == h2->indx && h1->dynstr_index == h2->dynstr_index;
2789 }
2790
2791 /* Find and/or create a hash entry for a local symbol. */
2792
2793 static struct elf_link_hash_entry *
2794 elfNN_aarch64_get_local_sym_hash (struct elf_aarch64_link_hash_table *htab,
2795 bfd *abfd, const Elf_Internal_Rela *rel,
2796 bool create)
2797 {
2798 struct elf_aarch64_link_hash_entry e, *ret;
2799 asection *sec = abfd->sections;
2800 hashval_t h = ELF_LOCAL_SYMBOL_HASH (sec->id,
2801 ELFNN_R_SYM (rel->r_info));
2802 void **slot;
2803
2804 e.root.indx = sec->id;
2805 e.root.dynstr_index = ELFNN_R_SYM (rel->r_info);
2806 slot = htab_find_slot_with_hash (htab->loc_hash_table, &e, h,
2807 create ? INSERT : NO_INSERT);
2808
2809 if (!slot)
2810 return NULL;
2811
2812 if (*slot)
2813 {
2814 ret = (struct elf_aarch64_link_hash_entry *) *slot;
2815 return &ret->root;
2816 }
2817
2818 ret = (struct elf_aarch64_link_hash_entry *)
2819 objalloc_alloc ((struct objalloc *) htab->loc_hash_memory,
2820 sizeof (struct elf_aarch64_link_hash_entry));
2821 if (ret)
2822 {
2823 memset (ret, 0, sizeof (*ret));
2824 ret->root.indx = sec->id;
2825 ret->root.dynstr_index = ELFNN_R_SYM (rel->r_info);
2826 ret->root.dynindx = -1;
2827 *slot = ret;
2828 }
2829 return &ret->root;
2830 }
2831
2832 /* Copy the extra info we tack onto an elf_link_hash_entry. */
2833
2834 static void
2835 elfNN_aarch64_copy_indirect_symbol (struct bfd_link_info *info,
2836 struct elf_link_hash_entry *dir,
2837 struct elf_link_hash_entry *ind)
2838 {
2839 struct elf_aarch64_link_hash_entry *edir, *eind;
2840
2841 edir = (struct elf_aarch64_link_hash_entry *) dir;
2842 eind = (struct elf_aarch64_link_hash_entry *) ind;
2843
2844 if (ind->root.type == bfd_link_hash_indirect)
2845 {
2846 /* Copy over PLT info. */
2847 if (dir->got.refcount <= 0)
2848 {
2849 edir->got_type = eind->got_type;
2850 eind->got_type = GOT_UNKNOWN;
2851 }
2852 }
2853
2854 _bfd_elf_link_hash_copy_indirect (info, dir, ind);
2855 }
2856
2857 /* Merge non-visibility st_other attributes. */
2858
2859 static void
2860 elfNN_aarch64_merge_symbol_attribute (struct elf_link_hash_entry *h,
2861 unsigned int st_other,
2862 bool definition,
2863 bool dynamic ATTRIBUTE_UNUSED)
2864 {
2865 if (definition)
2866 {
2867 struct elf_aarch64_link_hash_entry *eh
2868 = (struct elf_aarch64_link_hash_entry *)h;
2869 eh->def_protected = ELF_ST_VISIBILITY (st_other) == STV_PROTECTED;
2870 }
2871
2872 unsigned int isym_sto = st_other & ~ELF_ST_VISIBILITY (-1);
2873 unsigned int h_sto = h->other & ~ELF_ST_VISIBILITY (-1);
2874
2875 if (isym_sto == h_sto)
2876 return;
2877
2878 if (isym_sto & ~STO_AARCH64_VARIANT_PCS)
2879 /* Not fatal, this callback cannot fail. */
2880 _bfd_error_handler (_("unknown attribute for symbol `%s': 0x%02x"),
2881 h->root.root.string, isym_sto);
2882
2883 /* Note: Ideally we would warn about any attribute mismatch, but
2884 this API does not allow that without substantial changes. */
2885 if (isym_sto & STO_AARCH64_VARIANT_PCS)
2886 h->other |= STO_AARCH64_VARIANT_PCS;
2887 }
2888
2889 /* Destroy an AArch64 elf linker hash table. */
2890
2891 static void
2892 elfNN_aarch64_link_hash_table_free (bfd *obfd)
2893 {
2894 struct elf_aarch64_link_hash_table *ret
2895 = (struct elf_aarch64_link_hash_table *) obfd->link.hash;
2896
2897 if (ret->loc_hash_table)
2898 htab_delete (ret->loc_hash_table);
2899 if (ret->loc_hash_memory)
2900 objalloc_free ((struct objalloc *) ret->loc_hash_memory);
2901
2902 bfd_hash_table_free (&ret->stub_hash_table);
2903 _bfd_elf_link_hash_table_free (obfd);
2904 }
2905
2906 /* Create an AArch64 elf linker hash table. */
2907
2908 static struct bfd_link_hash_table *
2909 elfNN_aarch64_link_hash_table_create (bfd *abfd)
2910 {
2911 struct elf_aarch64_link_hash_table *ret;
2912 size_t amt = sizeof (struct elf_aarch64_link_hash_table);
2913
2914 ret = bfd_zmalloc (amt);
2915 if (ret == NULL)
2916 return NULL;
2917
2918 if (!_bfd_elf_link_hash_table_init
2919 (&ret->root, abfd, elfNN_aarch64_link_hash_newfunc,
2920 sizeof (struct elf_aarch64_link_hash_entry), AARCH64_ELF_DATA))
2921 {
2922 free (ret);
2923 return NULL;
2924 }
2925
2926 ret->plt_header_size = PLT_ENTRY_SIZE;
2927 ret->plt0_entry = elfNN_aarch64_small_plt0_entry;
2928 ret->plt_entry_size = PLT_SMALL_ENTRY_SIZE;
2929 ret->plt_entry = elfNN_aarch64_small_plt_entry;
2930 ret->tlsdesc_plt_entry_size = PLT_TLSDESC_ENTRY_SIZE;
2931 ret->obfd = abfd;
2932 ret->root.tlsdesc_got = (bfd_vma) - 1;
2933
2934 if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc,
2935 sizeof (struct elf_aarch64_stub_hash_entry)))
2936 {
2937 _bfd_elf_link_hash_table_free (abfd);
2938 return NULL;
2939 }
2940
2941 ret->loc_hash_table = htab_try_create (1024,
2942 elfNN_aarch64_local_htab_hash,
2943 elfNN_aarch64_local_htab_eq,
2944 NULL);
2945 ret->loc_hash_memory = objalloc_create ();
2946 if (!ret->loc_hash_table || !ret->loc_hash_memory)
2947 {
2948 elfNN_aarch64_link_hash_table_free (abfd);
2949 return NULL;
2950 }
2951 ret->root.root.hash_table_free = elfNN_aarch64_link_hash_table_free;
2952
2953 return &ret->root.root;
2954 }
2955
2956 /* Perform relocation R_TYPE. Returns TRUE upon success, FALSE otherwise. */
2957
2958 static bool
2959 aarch64_relocate (unsigned int r_type, bfd *input_bfd, asection *input_section,
2960 bfd_vma offset, bfd_vma value)
2961 {
2962 reloc_howto_type *howto;
2963 bfd_vma place;
2964
2965 howto = elfNN_aarch64_howto_from_type (input_bfd, r_type);
2966 place = (input_section->output_section->vma + input_section->output_offset
2967 + offset);
2968
2969 r_type = elfNN_aarch64_bfd_reloc_from_type (input_bfd, r_type);
2970 value = _bfd_aarch64_elf_resolve_relocation (input_bfd, r_type, place,
2971 value, 0, false);
2972 return _bfd_aarch64_elf_put_addend (input_bfd,
2973 input_section->contents + offset, r_type,
2974 howto, value) == bfd_reloc_ok;
2975 }
2976
2977 static enum elf_aarch64_stub_type
2978 aarch64_select_branch_stub (bfd_vma value, bfd_vma place)
2979 {
2980 if (aarch64_valid_for_adrp_p (value, place))
2981 return aarch64_stub_adrp_branch;
2982 return aarch64_stub_long_branch;
2983 }
2984
2985 /* Determine the type of stub needed, if any, for a call. */
2986
2987 static enum elf_aarch64_stub_type
2988 aarch64_type_of_stub (asection *input_sec,
2989 const Elf_Internal_Rela *rel,
2990 asection *sym_sec,
2991 unsigned char st_type,
2992 bfd_vma destination)
2993 {
2994 bfd_vma location;
2995 bfd_signed_vma branch_offset;
2996 unsigned int r_type;
2997 enum elf_aarch64_stub_type stub_type = aarch64_stub_none;
2998
2999 if (st_type != STT_FUNC
3000 && (sym_sec == input_sec))
3001 return stub_type;
3002
3003 /* Determine where the call point is. */
3004 location = (input_sec->output_offset
3005 + input_sec->output_section->vma + rel->r_offset);
3006
3007 branch_offset = (bfd_signed_vma) (destination - location);
3008
3009 r_type = ELFNN_R_TYPE (rel->r_info);
3010
3011 /* We don't want to redirect any old unconditional jump in this way,
3012 only one which is being used for a sibcall, where it is
3013 acceptable for the IP0 and IP1 registers to be clobbered. */
3014 if ((r_type == AARCH64_R (CALL26) || r_type == AARCH64_R (JUMP26))
3015 && (branch_offset > AARCH64_MAX_FWD_BRANCH_OFFSET
3016 || branch_offset < AARCH64_MAX_BWD_BRANCH_OFFSET))
3017 {
3018 stub_type = aarch64_stub_long_branch;
3019 }
3020
3021 return stub_type;
3022 }
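/* Worked example with hypothetical addresses: a BL at 0x400000 targeting
   0x10400000 has a branch offset of +0x10000000 (256 MiB), which exceeds
   AARCH64_MAX_FWD_BRANCH_OFFSET (just under 128 MiB), so the call is given
   an aarch64_stub_long_branch stub.  */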
3023
3024 /* Build a name for an entry in the stub hash table. */
3025
3026 static char *
3027 elfNN_aarch64_stub_name (const asection *input_section,
3028 const asection *sym_sec,
3029 const struct elf_aarch64_link_hash_entry *hash,
3030 const Elf_Internal_Rela *rel)
3031 {
3032 char *stub_name;
3033 bfd_size_type len;
3034
3035 if (hash)
3036 {
3037 len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 16 + 1;
3038 stub_name = bfd_malloc (len);
3039 if (stub_name != NULL)
3040 snprintf (stub_name, len, "%08x_%s+%" PRIx64,
3041 (unsigned int) input_section->id,
3042 hash->root.root.root.string,
3043 (uint64_t) rel->r_addend);
3044 }
3045 else
3046 {
3047 len = 8 + 1 + 8 + 1 + 8 + 1 + 16 + 1;
3048 stub_name = bfd_malloc (len);
3049 if (stub_name != NULL)
3050 snprintf (stub_name, len, "%08x_%x:%x+%" PRIx64,
3051 (unsigned int) input_section->id,
3052 (unsigned int) sym_sec->id,
3053 (unsigned int) ELFNN_R_SYM (rel->r_info),
3054 (uint64_t) rel->r_addend);
3055 }
3056
3057 return stub_name;
3058 }
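/* For illustration, with made-up ids: a stub reaching the global symbol
   `printf' from input section id 0x2a with addend 0 would be named
   "0000002a_printf+0", while one reaching local symbol index 0x11 of the
   section with id 0x7 would be named "0000002a_7:11+0".  */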
3059
3060 /* Return TRUE if symbol H should be hashed in the `.gnu.hash' section. A
3061 function symbol that only has an executable PLT slot and whose address is
3062 never taken by the executable is not added to the hash table. */
3063
3064 static bool
3065 elf_aarch64_hash_symbol (struct elf_link_hash_entry *h)
3066 {
3067 if (h->plt.offset != (bfd_vma) -1
3068 && !h->def_regular
3069 && !h->pointer_equality_needed)
3070 return false;
3071
3072 return _bfd_elf_hash_symbol (h);
3073 }
3074
3075
3076 /* Look up an entry in the stub hash. Stub entries are cached because
3077 creating the stub name takes a bit of time. */
3078
3079 static struct elf_aarch64_stub_hash_entry *
3080 elfNN_aarch64_get_stub_entry (const asection *input_section,
3081 const asection *sym_sec,
3082 struct elf_link_hash_entry *hash,
3083 const Elf_Internal_Rela *rel,
3084 struct elf_aarch64_link_hash_table *htab)
3085 {
3086 struct elf_aarch64_stub_hash_entry *stub_entry;
3087 struct elf_aarch64_link_hash_entry *h =
3088 (struct elf_aarch64_link_hash_entry *) hash;
3089 const asection *id_sec;
3090
3091 if ((input_section->flags & SEC_CODE) == 0)
3092 return NULL;
3093
3094 /* If this input section is part of a group of sections sharing one
3095 stub section, then use the id of the first section in the group.
3096 Stub names need to include a section id, as there may well be
3097 more than one stub used to reach, say, printf, and we need to
3098 distinguish between them. */
3099 id_sec = htab->stub_group[input_section->id].link_sec;
3100
3101 if (h != NULL && h->stub_cache != NULL
3102 && h->stub_cache->h == h && h->stub_cache->id_sec == id_sec)
3103 {
3104 stub_entry = h->stub_cache;
3105 }
3106 else
3107 {
3108 char *stub_name;
3109
3110 stub_name = elfNN_aarch64_stub_name (id_sec, sym_sec, h, rel);
3111 if (stub_name == NULL)
3112 return NULL;
3113
3114 stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table,
3115 stub_name, false, false);
3116 if (h != NULL)
3117 h->stub_cache = stub_entry;
3118
3119 free (stub_name);
3120 }
3121
3122 return stub_entry;
3123 }
3124
3125
3126 /* Create a stub section. */
3127
3128 static asection *
3129 _bfd_aarch64_create_stub_section (asection *section,
3130 struct elf_aarch64_link_hash_table *htab)
3131 {
3132 size_t namelen;
3133 bfd_size_type len;
3134 char *s_name;
3135
3136 namelen = strlen (section->name);
3137 len = namelen + sizeof (STUB_SUFFIX);
3138 s_name = bfd_alloc (htab->stub_bfd, len);
3139 if (s_name == NULL)
3140 return NULL;
3141
3142 memcpy (s_name, section->name, namelen);
3143 memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
3144 return (*htab->add_stub_section) (s_name, section);
3145 }
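/* For example, stubs attached to an input section named ".text" are
   collected in a stub section named ".text.stub", i.e. the STUB_SUFFIX
   defined above appended to the link section's name.  */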
3146
3147
3148 /* Find or create a stub section for a link section.
3149
3150 Return the stub section used to collect stubs attached to
3151 the specified link section, creating it if necessary. */
3152
3153 static asection *
3154 _bfd_aarch64_get_stub_for_link_section (asection *link_section,
3155 struct elf_aarch64_link_hash_table *htab)
3156 {
3157 if (htab->stub_group[link_section->id].stub_sec == NULL)
3158 htab->stub_group[link_section->id].stub_sec
3159 = _bfd_aarch64_create_stub_section (link_section, htab);
3160 return htab->stub_group[link_section->id].stub_sec;
3161 }
3162
3163
3164 /* Find or create a stub section in the stub group for an input
3165 section. */
3166
3167 static asection *
3168 _bfd_aarch64_create_or_find_stub_sec (asection *section,
3169 struct elf_aarch64_link_hash_table *htab)
3170 {
3171 asection *link_sec = htab->stub_group[section->id].link_sec;
3172 return _bfd_aarch64_get_stub_for_link_section (link_sec, htab);
3173 }
3174
3175
3176 /* Add a new stub entry in the stub group associated with an input
3177 section to the stub hash. Not all fields of the new stub entry are
3178 initialised. */
3179
3180 static struct elf_aarch64_stub_hash_entry *
3181 _bfd_aarch64_add_stub_entry_in_group (const char *stub_name,
3182 asection *section,
3183 struct elf_aarch64_link_hash_table *htab)
3184 {
3185 asection *link_sec;
3186 asection *stub_sec;
3187 struct elf_aarch64_stub_hash_entry *stub_entry;
3188
3189 link_sec = htab->stub_group[section->id].link_sec;
3190 stub_sec = _bfd_aarch64_create_or_find_stub_sec (section, htab);
3191
3192 /* Enter this entry into the linker stub hash table. */
3193 stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table, stub_name,
3194 true, false);
3195 if (stub_entry == NULL)
3196 {
3197 /* xgettext:c-format */
3198 _bfd_error_handler (_("%pB: cannot create stub entry %s"),
3199 section->owner, stub_name);
3200 return NULL;
3201 }
3202
3203 stub_entry->stub_sec = stub_sec;
3204 stub_entry->stub_offset = 0;
3205 stub_entry->id_sec = link_sec;
3206
3207 return stub_entry;
3208 }
3209
3210 /* Add a new stub entry in the final stub section to the stub hash.
3211 Not all fields of the new stub entry are initialised. */
3212
3213 static struct elf_aarch64_stub_hash_entry *
3214 _bfd_aarch64_add_stub_entry_after (const char *stub_name,
3215 asection *link_section,
3216 struct elf_aarch64_link_hash_table *htab)
3217 {
3218 asection *stub_sec;
3219 struct elf_aarch64_stub_hash_entry *stub_entry;
3220
3221 stub_sec = NULL;
3222 /* Only create the actual stub if we will end up needing it. */
3223 if (htab->fix_erratum_843419 & ERRAT_ADRP)
3224 stub_sec = _bfd_aarch64_get_stub_for_link_section (link_section, htab);
3225 stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table, stub_name,
3226 true, false);
3227 if (stub_entry == NULL)
3228 {
3229 _bfd_error_handler (_("cannot create stub entry %s"), stub_name);
3230 return NULL;
3231 }
3232
3233 stub_entry->stub_sec = stub_sec;
3234 stub_entry->stub_offset = 0;
3235 stub_entry->id_sec = link_section;
3236
3237 return stub_entry;
3238 }
3239
3240
3241 static bool
3242 aarch64_build_one_stub (struct bfd_hash_entry *gen_entry,
3243 void *in_arg)
3244 {
3245 struct elf_aarch64_stub_hash_entry *stub_entry;
3246 asection *stub_sec;
3247 bfd *stub_bfd;
3248 bfd_byte *loc;
3249 bfd_vma sym_value;
3250 bfd_vma veneered_insn_loc;
3251 bfd_vma veneer_entry_loc;
3252 bfd_signed_vma branch_offset = 0;
3253 unsigned int template_size;
3254 const uint32_t *template;
3255 unsigned int i;
3256 struct bfd_link_info *info;
3257
3258 /* Massage our args to the form they really have. */
3259 stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;
3260
3261 info = (struct bfd_link_info *) in_arg;
3262
3263 /* Fail if the target section could not be assigned to an output
3264 section. The user should fix their linker script. */
3265 if (stub_entry->target_section->output_section == NULL
3266 && info->non_contiguous_regions)
3267 info->callbacks->einfo (_("%F%P: Could not assign '%pA' to an output section. "
3268 "Retry without "
3269 "--enable-non-contiguous-regions.\n"),
3270 stub_entry->target_section);
3271
3272 stub_sec = stub_entry->stub_sec;
3273
3274 /* Make a note of the offset within the stubs for this entry. */
3275 stub_entry->stub_offset = stub_sec->size;
3276 loc = stub_sec->contents + stub_entry->stub_offset;
3277
3278 stub_bfd = stub_sec->owner;
3279
3280 /* This is the address of the stub destination. */
3281 sym_value = (stub_entry->target_value
3282 + stub_entry->target_section->output_offset
3283 + stub_entry->target_section->output_section->vma);
3284
3285 if (stub_entry->stub_type == aarch64_stub_long_branch)
3286 {
3287 bfd_vma place = (stub_entry->stub_offset + stub_sec->output_section->vma
3288 + stub_sec->output_offset);
3289
3290 /* See if we can relax the stub. */
3291 if (aarch64_valid_for_adrp_p (sym_value, place))
3292 stub_entry->stub_type = aarch64_select_branch_stub (sym_value, place);
3293 }
3294
3295 switch (stub_entry->stub_type)
3296 {
3297 case aarch64_stub_adrp_branch:
3298 template = aarch64_adrp_branch_stub;
3299 template_size = sizeof (aarch64_adrp_branch_stub);
3300 break;
3301 case aarch64_stub_long_branch:
3302 template = aarch64_long_branch_stub;
3303 template_size = sizeof (aarch64_long_branch_stub);
3304 break;
3305 case aarch64_stub_erratum_835769_veneer:
3306 template = aarch64_erratum_835769_stub;
3307 template_size = sizeof (aarch64_erratum_835769_stub);
3308 break;
3309 case aarch64_stub_erratum_843419_veneer:
3310 template = aarch64_erratum_843419_stub;
3311 template_size = sizeof (aarch64_erratum_843419_stub);
3312 break;
3313 default:
3314 abort ();
3315 }
3316
3317 for (i = 0; i < (template_size / sizeof template[0]); i++)
3318 {
3319 bfd_putl32 (template[i], loc);
3320 loc += 4;
3321 }
3322
3323 template_size = (template_size + 7) & ~7;
3324 stub_sec->size += template_size;
3325
3326 switch (stub_entry->stub_type)
3327 {
3328 case aarch64_stub_adrp_branch:
3329 if (!aarch64_relocate (AARCH64_R (ADR_PREL_PG_HI21), stub_bfd, stub_sec,
3330 stub_entry->stub_offset, sym_value))
3331 /* The stub would not have been relaxed if the offset was out
3332 of range. */
3333 BFD_FAIL ();
3334
3335 if (!aarch64_relocate (AARCH64_R (ADD_ABS_LO12_NC), stub_bfd, stub_sec,
3336 stub_entry->stub_offset + 4, sym_value))
3337 BFD_FAIL ();
3338 break;
3339
3340 case aarch64_stub_long_branch:
3341 /* We want the value relative to the address 12 bytes back from the
3342 value itself. */
3343 if (!aarch64_relocate (AARCH64_R (PRELNN), stub_bfd, stub_sec,
3344 stub_entry->stub_offset + 16, sym_value + 12))
3345 BFD_FAIL ();
3346 break;
3347
3348 case aarch64_stub_erratum_835769_veneer:
3349 veneered_insn_loc = stub_entry->target_section->output_section->vma
3350 + stub_entry->target_section->output_offset
3351 + stub_entry->target_value;
3352 veneer_entry_loc = stub_entry->stub_sec->output_section->vma
3353 + stub_entry->stub_sec->output_offset
3354 + stub_entry->stub_offset;
3355 branch_offset = veneered_insn_loc - veneer_entry_loc;
3356 branch_offset >>= 2;
3357 branch_offset &= 0x3ffffff;
3358 bfd_putl32 (stub_entry->veneered_insn,
3359 stub_sec->contents + stub_entry->stub_offset);
3360 bfd_putl32 (template[1] | branch_offset,
3361 stub_sec->contents + stub_entry->stub_offset + 4);
3362 break;
3363
3364 case aarch64_stub_erratum_843419_veneer:
3365 if (!aarch64_relocate (AARCH64_R (JUMP26), stub_bfd, stub_sec,
3366 stub_entry->stub_offset + 4, sym_value + 4))
3367 BFD_FAIL ();
3368 break;
3369
3370 default:
3371 abort ();
3372 }
3373
3374 return true;
3375 }
3376
3377 /* As above, but don't actually build the stub. Just bump offset so
3378 we know stub section sizes. */
3379
3380 static bool
3381 aarch64_size_one_stub (struct bfd_hash_entry *gen_entry, void *in_arg)
3382 {
3383 struct elf_aarch64_stub_hash_entry *stub_entry;
3384 struct elf_aarch64_link_hash_table *htab;
3385 int size;
3386
3387 /* Massage our args to the form they really have. */
3388 stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;
3389 htab = (struct elf_aarch64_link_hash_table *) in_arg;
3390
3391 switch (stub_entry->stub_type)
3392 {
3393 case aarch64_stub_adrp_branch:
3394 size = sizeof (aarch64_adrp_branch_stub);
3395 break;
3396 case aarch64_stub_long_branch:
3397 size = sizeof (aarch64_long_branch_stub);
3398 break;
3399 case aarch64_stub_erratum_835769_veneer:
3400 size = sizeof (aarch64_erratum_835769_stub);
3401 break;
3402 case aarch64_stub_erratum_843419_veneer:
3403 {
3404 if (htab->fix_erratum_843419 == ERRAT_ADR)
3405 return true;
3406 size = sizeof (aarch64_erratum_843419_stub);
3407 }
3408 break;
3409 default:
3410 abort ();
3411 }
3412
3413 size = (size + 7) & ~7;
3414 stub_entry->stub_sec->size += size;
3415 return true;
3416 }
3417
3418 /* External entry points for sizing and building linker stubs. */
3419
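/* For orientation, a minimal sketch (not taken from the sources) of the
   order in which a linker driver is expected to call these entry points.
   The names first_input_section, next_in_link_order, add_stub_section,
   layout_sections_again and group_size are placeholders supplied by the
   driver, not functions defined here. */
#if 0 /* Illustrative sketch only; not part of the build. */
  if (elfNN_aarch64_setup_section_lists (output_bfd, info) > 0)
    {
      asection *isec;

      /* Feed every input section, in link order, so that stub groups
         can be formed. */
      for (isec = first_input_section; isec != NULL;
           isec = next_in_link_order (isec))
        elfNN_aarch64_next_input_section (info, isec);

      /* Decide which stubs are needed and size the stub sections. */
      elfNN_aarch64_size_stubs (output_bfd, stub_bfd, info, group_size,
                                add_stub_section, layout_sections_again);
    }

  /* Once the final layout is settled, elfNN_aarch64_build_stubs (info)
     emits the stub contents. */
#endif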
3420 /* Set up various things so that we can make a list of input sections
3421 for each output section included in the link. Returns -1 on error,
3422 0 when no stubs will be needed, and 1 on success. */
3423
3424 int
3425 elfNN_aarch64_setup_section_lists (bfd *output_bfd,
3426 struct bfd_link_info *info)
3427 {
3428 bfd *input_bfd;
3429 unsigned int bfd_count;
3430 unsigned int top_id, top_index;
3431 asection *section;
3432 asection **input_list, **list;
3433 size_t amt;
3434 struct elf_aarch64_link_hash_table *htab =
3435 elf_aarch64_hash_table (info);
3436
3437 if (!is_elf_hash_table (&htab->root.root))
3438 return 0;
3439
3440 /* Count the number of input BFDs and find the top input section id. */
3441 for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0;
3442 input_bfd != NULL; input_bfd = input_bfd->link.next)
3443 {
3444 bfd_count += 1;
3445 for (section = input_bfd->sections;
3446 section != NULL; section = section->next)
3447 {
3448 if (top_id < section->id)
3449 top_id = section->id;
3450 }
3451 }
3452 htab->bfd_count = bfd_count;
3453
3454 amt = sizeof (struct map_stub) * (top_id + 1);
3455 htab->stub_group = bfd_zmalloc (amt);
3456 if (htab->stub_group == NULL)
3457 return -1;
3458
3459 /* We can't use output_bfd->section_count here to find the top output
3460 section index as some sections may have been removed, and
3461 _bfd_strip_section_from_output doesn't renumber the indices. */
3462 for (section = output_bfd->sections, top_index = 0;
3463 section != NULL; section = section->next)
3464 {
3465 if (top_index < section->index)
3466 top_index = section->index;
3467 }
3468
3469 htab->top_index = top_index;
3470 amt = sizeof (asection *) * (top_index + 1);
3471 input_list = bfd_malloc (amt);
3472 htab->input_list = input_list;
3473 if (input_list == NULL)
3474 return -1;
3475
3476 /* For sections we aren't interested in, mark their entries with a
3477 value we can check later. */
3478 list = input_list + top_index;
3479 do
3480 *list = bfd_abs_section_ptr;
3481 while (list-- != input_list);
3482
3483 for (section = output_bfd->sections;
3484 section != NULL; section = section->next)
3485 {
3486 if ((section->flags & SEC_CODE) != 0)
3487 input_list[section->index] = NULL;
3488 }
3489
3490 return 1;
3491 }
3492
3493 /* Used by elfNN_aarch64_next_input_section and group_sections. */
3494 #define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
3495
3496 /* The linker repeatedly calls this function for each input section,
3497 in the order that input sections are linked into output sections.
3498 Build lists of input sections to determine groupings between which
3499 we may insert linker stubs. */
3500
3501 void
3502 elfNN_aarch64_next_input_section (struct bfd_link_info *info, asection *isec)
3503 {
3504 struct elf_aarch64_link_hash_table *htab =
3505 elf_aarch64_hash_table (info);
3506
3507 if (isec->output_section->index <= htab->top_index)
3508 {
3509 asection **list = htab->input_list + isec->output_section->index;
3510
3511 if (*list != bfd_abs_section_ptr && (isec->flags & SEC_CODE) != 0)
3512 {
3513 /* Steal the link_sec pointer for our list. */
3514 /* This happens to make the list in reverse order,
3515 which is what we want. */
3516 PREV_SEC (isec) = *list;
3517 *list = isec;
3518 }
3519 }
3520 }
3521
3522 /* See whether we can group stub sections together. Grouping stub
3523 sections may result in fewer stubs. More importantly, we need to
3524 put all .init* and .fini* stubs at the beginning of the .init or
3525 .fini output sections respectively, because glibc splits the
3526 _init and _fini functions into multiple parts. Putting a stub in
3527 the middle of a function is not a good idea. */
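/* A worked example with illustrative figures: with the default
   stub_group_size of 127MB, an input section .text.a at output offset 0
   (size 96MB) followed by .text.b at 96MB (size 64MB) cannot both
   precede one stub section, because the end of .text.b lies 160MB past
   the group start; the group proper is therefore just .text.a, with its
   stub section placed after it at 96MB. The second pass below then
   notes that .text.b ends only 64MB beyond that stub section, so
   .text.b may also branch backwards into it. */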
3528
3529 static void
3530 group_sections (struct elf_aarch64_link_hash_table *htab,
3531 bfd_size_type stub_group_size,
3532 bool stubs_always_after_branch)
3533 {
3534 asection **list = htab->input_list;
3535
3536 do
3537 {
3538 asection *tail = *list;
3539 asection *head;
3540
3541 if (tail == bfd_abs_section_ptr)
3542 continue;
3543
3544 /* Reverse the list: we must avoid placing stubs at the
3545 beginning of the section because the beginning of the text
3546 section may be required for an interrupt vector in bare metal
3547 code. */
3548 #define NEXT_SEC PREV_SEC
3549 head = NULL;
3550 while (tail != NULL)
3551 {
3552 /* Pop from tail. */
3553 asection *item = tail;
3554 tail = PREV_SEC (item);
3555
3556 /* Push on head. */
3557 NEXT_SEC (item) = head;
3558 head = item;
3559 }
3560
3561 while (head != NULL)
3562 {
3563 asection *curr;
3564 asection *next;
3565 bfd_vma stub_group_start = head->output_offset;
3566 bfd_vma end_of_next;
3567
3568 curr = head;
3569 while (NEXT_SEC (curr) != NULL)
3570 {
3571 next = NEXT_SEC (curr);
3572 end_of_next = next->output_offset + next->size;
3573 if (end_of_next - stub_group_start >= stub_group_size)
3574 /* End of NEXT is too far from start, so stop. */
3575 break;
3576 /* Add NEXT to the group. */
3577 curr = next;
3578 }
3579
3580 /* OK, the size from the start to the start of CURR is less
3581 than stub_group_size and thus can be handled by one stub
3582 section. (Or the head section is itself larger than
3583 stub_group_size, in which case we may be toast.)
3584 We should really be keeping track of the total size of
3585 stubs added here, as stubs contribute to the final output
3586 section size. */
3587 do
3588 {
3589 next = NEXT_SEC (head);
3590 /* Set up this stub group. */
3591 htab->stub_group[head->id].link_sec = curr;
3592 }
3593 while (head != curr && (head = next) != NULL);
3594
3595 /* But wait, there's more! Input sections up to stub_group_size
3596 bytes after the stub section can be handled by it too. */
3597 if (!stubs_always_after_branch)
3598 {
3599 stub_group_start = curr->output_offset + curr->size;
3600
3601 while (next != NULL)
3602 {
3603 end_of_next = next->output_offset + next->size;
3604 if (end_of_next - stub_group_start >= stub_group_size)
3605 /* End of NEXT is too far from stubs, so stop. */
3606 break;
3607 /* Add NEXT to the stub group. */
3608 head = next;
3609 next = NEXT_SEC (head);
3610 htab->stub_group[head->id].link_sec = curr;
3611 }
3612 }
3613 head = next;
3614 }
3615 }
3616 while (list++ != htab->input_list + htab->top_index);
3617
3618 free (htab->input_list);
3619 }
3620
3621 #undef PREV_SEC
3622 #undef NEXT_SEC
3623
3624 #define AARCH64_BITS(x, pos, n) (((x) >> (pos)) & ((1 << (n)) - 1))
3625
3626 #define AARCH64_RT(insn) AARCH64_BITS (insn, 0, 5)
3627 #define AARCH64_RT2(insn) AARCH64_BITS (insn, 10, 5)
3628 #define AARCH64_RA(insn) AARCH64_BITS (insn, 10, 5)
3629 #define AARCH64_RD(insn) AARCH64_BITS (insn, 0, 5)
3630 #define AARCH64_RN(insn) AARCH64_BITS (insn, 5, 5)
3631 #define AARCH64_RM(insn) AARCH64_BITS (insn, 16, 5)
3632
3633 #define AARCH64_MAC(insn) (((insn) & 0xff000000) == 0x9b000000)
3634 #define AARCH64_BIT(insn, n) AARCH64_BITS (insn, n, 1)
3635 #define AARCH64_OP31(insn) AARCH64_BITS (insn, 21, 3)
3636 #define AARCH64_ZR 0x1f
3637
3638 /* All ld/st ops. See C4-182 of the ARM ARM. The encoding space for
3639 LD_PCREL, LDST_RO, LDST_UI and LDST_UIMM cover prefetch ops. */
3640
3641 #define AARCH64_LD(insn) (AARCH64_BIT (insn, 22) == 1)
3642 #define AARCH64_LDST(insn) (((insn) & 0x0a000000) == 0x08000000)
3643 #define AARCH64_LDST_EX(insn) (((insn) & 0x3f000000) == 0x08000000)
3644 #define AARCH64_LDST_PCREL(insn) (((insn) & 0x3b000000) == 0x18000000)
3645 #define AARCH64_LDST_NAP(insn) (((insn) & 0x3b800000) == 0x28000000)
3646 #define AARCH64_LDSTP_PI(insn) (((insn) & 0x3b800000) == 0x28800000)
3647 #define AARCH64_LDSTP_O(insn) (((insn) & 0x3b800000) == 0x29000000)
3648 #define AARCH64_LDSTP_PRE(insn) (((insn) & 0x3b800000) == 0x29800000)
3649 #define AARCH64_LDST_UI(insn) (((insn) & 0x3b200c00) == 0x38000000)
3650 #define AARCH64_LDST_PIIMM(insn) (((insn) & 0x3b200c00) == 0x38000400)
3651 #define AARCH64_LDST_U(insn) (((insn) & 0x3b200c00) == 0x38000800)
3652 #define AARCH64_LDST_PREIMM(insn) (((insn) & 0x3b200c00) == 0x38000c00)
3653 #define AARCH64_LDST_RO(insn) (((insn) & 0x3b200c00) == 0x38200800)
3654 #define AARCH64_LDST_UIMM(insn) (((insn) & 0x3b000000) == 0x39000000)
3655 #define AARCH64_LDST_SIMD_M(insn) (((insn) & 0xbfbf0000) == 0x0c000000)
3656 #define AARCH64_LDST_SIMD_M_PI(insn) (((insn) & 0xbfa00000) == 0x0c800000)
3657 #define AARCH64_LDST_SIMD_S(insn) (((insn) & 0xbf9f0000) == 0x0d000000)
3658 #define AARCH64_LDST_SIMD_S_PI(insn) (((insn) & 0xbf800000) == 0x0d800000)
3659
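/* A worked example (figures for illustration only): for the encoding
   0xf9400261, i.e. "ldr x1, [x19]":

     AARCH64_LDST (0xf9400261)      -> true (load/store encoding space)
     AARCH64_LDST_UIMM (0xf9400261) -> true (unsigned-immediate form)
     AARCH64_LD (0xf9400261)        -> true (bit 22 set, so a load)
     AARCH64_RT (0xf9400261)        -> 1    (transfer register x1)
     AARCH64_RN (0xf9400261)        -> 19   (base register x19) */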
3660 /* Classify INSN if it is indeed a load/store.
3661
3662 Return TRUE if INSN is a LD/ST instruction, otherwise return FALSE.
3663
3664 For scalar LD/ST instructions PAIR is FALSE, RT is returned and RT2
3665 is set equal to RT.
3666
3667 For LD/ST pair instructions PAIR is TRUE, and both RT and RT2 are returned. */
3668
3669 static bool
3670 aarch64_mem_op_p (uint32_t insn, unsigned int *rt, unsigned int *rt2,
3671 bool *pair, bool *load)
3672 {
3673 uint32_t opcode;
3674 unsigned int r;
3675 uint32_t opc = 0;
3676 uint32_t v = 0;
3677 uint32_t opc_v = 0;
3678
3679 /* Bail out quickly if INSN doesn't fall into the load-store
3680 encoding space. */
3681 if (!AARCH64_LDST (insn))
3682 return false;
3683
3684 *pair = false;
3685 *load = false;
3686 if (AARCH64_LDST_EX (insn))
3687 {
3688 *rt = AARCH64_RT (insn);
3689 *rt2 = *rt;
3690 if (AARCH64_BIT (insn, 21) == 1)
3691 {
3692 *pair = true;
3693 *rt2 = AARCH64_RT2 (insn);
3694 }
3695 *load = AARCH64_LD (insn);
3696 return true;
3697 }
3698 else if (AARCH64_LDST_NAP (insn)
3699 || AARCH64_LDSTP_PI (insn)
3700 || AARCH64_LDSTP_O (insn)
3701 || AARCH64_LDSTP_PRE (insn))
3702 {
3703 *pair = true;
3704 *rt = AARCH64_RT (insn);
3705 *rt2 = AARCH64_RT2 (insn);
3706 *load = AARCH64_LD (insn);
3707 return true;
3708 }
3709 else if (AARCH64_LDST_PCREL (insn)
3710 || AARCH64_LDST_UI (insn)
3711 || AARCH64_LDST_PIIMM (insn)
3712 || AARCH64_LDST_U (insn)
3713 || AARCH64_LDST_PREIMM (insn)
3714 || AARCH64_LDST_RO (insn)
3715 || AARCH64_LDST_UIMM (insn))
3716 {
3717 *rt = AARCH64_RT (insn);
3718 *rt2 = *rt;
3719 if (AARCH64_LDST_PCREL (insn))
3720 *load = true;
3721 opc = AARCH64_BITS (insn, 22, 2);
3722 v = AARCH64_BIT (insn, 26);
3723 opc_v = opc | (v << 2);
3724 *load = (opc_v == 1 || opc_v == 2 || opc_v == 3
3725 || opc_v == 5 || opc_v == 7);
3726 return true;
3727 }
3728 else if (AARCH64_LDST_SIMD_M (insn)
3729 || AARCH64_LDST_SIMD_M_PI (insn))
3730 {
3731 *rt = AARCH64_RT (insn);
3732 *load = AARCH64_BIT (insn, 22);
3733 opcode = (insn >> 12) & 0xf;
3734 switch (opcode)
3735 {
3736 case 0:
3737 case 2:
3738 *rt2 = *rt + 3;
3739 break;
3740
3741 case 4:
3742 case 6:
3743 *rt2 = *rt + 2;
3744 break;
3745
3746 case 7:
3747 *rt2 = *rt;
3748 break;
3749
3750 case 8:
3751 case 10:
3752 *rt2 = *rt + 1;
3753 break;
3754
3755 default:
3756 return false;
3757 }
3758 return true;
3759 }
3760 else if (AARCH64_LDST_SIMD_S (insn)
3761 || AARCH64_LDST_SIMD_S_PI (insn))
3762 {
3763 *rt = AARCH64_RT (insn);
3764 r = (insn >> 21) & 1;
3765 *load = AARCH64_BIT (insn, 22);
3766 opcode = (insn >> 13) & 0x7;
3767 switch (opcode)
3768 {
3769 case 0:
3770 case 2:
3771 case 4:
3772 *rt2 = *rt + r;
3773 break;
3774
3775 case 1:
3776 case 3:
3777 case 5:
3778 *rt2 = *rt + (r == 0 ? 2 : 3);
3779 break;
3780
3781 case 6:
3782 *rt2 = *rt + r;
3783 break;
3784
3785 case 7:
3786 *rt2 = *rt + (r == 0 ? 2 : 3);
3787 break;
3788
3789 default:
3790 return false;
3791 }
3792 return true;
3793 }
3794
3795 return false;
3796 }
3797
3798 /* Return TRUE if INSN is multiply-accumulate. */
3799
3800 static bool
3801 aarch64_mlxl_p (uint32_t insn)
3802 {
3803 uint32_t op31 = AARCH64_OP31 (insn);
3804
3805 if (AARCH64_MAC (insn)
3806 && (op31 == 0 || op31 == 1 || op31 == 5)
3807 /* Exclude MUL instructions which are encoded as a multiple accumulate
3808 with RA = XZR. */
3809 && AARCH64_RA (insn) != AARCH64_ZR)
3810 return true;
3811
3812 return false;
3813 }
3814
3815 /* Some early revisions of the Cortex-A53 have an erratum (835769) whereby
3816 it is possible for a 64-bit multiply-accumulate instruction to generate an
3817 incorrect result. The details are quite complex and hard to
3818 determine statically, since branches in the code may exist in some
3819 circumstances, but all cases end with a memory (load, store, or
3820 prefetch) instruction followed immediately by the multiply-accumulate
3821 operation. We employ a linker patching technique, by moving the potentially
3822 affected multiply-accumulate instruction into a patch region and replacing
3823 the original instruction with a branch to the patch. This function checks
3824 if INSN_1 is the memory operation followed by a multiply-accumulate
3825 operation (INSN_2). Return TRUE if an erratum sequence is found, FALSE
3826 if INSN_1 and INSN_2 are safe. */
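/* For illustration (registers chosen arbitrarily): the pair

     ldr  x0, [x1]
     madd x2, x3, x4, x5

   is reported as an erratum sequence, whereas

     ldr  x3, [x1]
     madd x2, x3, x4, x5

   is not, because the multiply-accumulate has a true (RAW) dependency
   on the register just loaded. */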
3827
3828 static bool
3829 aarch64_erratum_sequence (uint32_t insn_1, uint32_t insn_2)
3830 {
3831 uint32_t rt;
3832 uint32_t rt2;
3833 uint32_t rn;
3834 uint32_t rm;
3835 uint32_t ra;
3836 bool pair;
3837 bool load;
3838
3839 if (aarch64_mlxl_p (insn_2)
3840 && aarch64_mem_op_p (insn_1, &rt, &rt2, &pair, &load))
3841 {
3842 /* Any SIMD memory op is independent of the subsequent MLA
3843 by definition of the erratum. */
3844 if (AARCH64_BIT (insn_1, 26))
3845 return true;
3846
3847 /* If not SIMD, check for integer memory ops and MLA relationship. */
3848 rn = AARCH64_RN (insn_2);
3849 ra = AARCH64_RA (insn_2);
3850 rm = AARCH64_RM (insn_2);
3851
3852 /* If this is a load and there's a true (RAW) dependency, we are safe
3853 and this is not an erratum sequence. */
3854 if (load &&
3855 (rt == rn || rt == rm || rt == ra
3856 || (pair && (rt2 == rn || rt2 == rm || rt2 == ra))))
3857 return false;
3858
3859 /* We conservatively put out stubs for all other cases (including
3860 writebacks). */
3861 return true;
3862 }
3863
3864 return false;
3865 }
3866
3867 /* Used to order a list of mapping symbols by address. */
3868
3869 static int
3870 elf_aarch64_compare_mapping (const void *a, const void *b)
3871 {
3872 const elf_aarch64_section_map *amap = (const elf_aarch64_section_map *) a;
3873 const elf_aarch64_section_map *bmap = (const elf_aarch64_section_map *) b;
3874
3875 if (amap->vma > bmap->vma)
3876 return 1;
3877 else if (amap->vma < bmap->vma)
3878 return -1;
3879 else if (amap->type > bmap->type)
3880 /* Ensure results do not depend on the host qsort for objects with
3881 multiple mapping symbols at the same address by sorting on type
3882 after vma. */
3883 return 1;
3884 else if (amap->type < bmap->type)
3885 return -1;
3886 else
3887 return 0;
3888 }
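/* For illustration: a section carrying mapping symbols $x at 0x0, $d at
   0x40 and $x at 0x50 yields, once sorted, a code span [0x0,0x40), a
   data span [0x40,0x50) which the erratum scanners below skip (type
   'd'), and a final code span from 0x50 to the end of the section. */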
3889
3890
3891 static char *
3892 _bfd_aarch64_erratum_835769_stub_name (unsigned num_fixes)
3893 {
3894 char *stub_name = (char *) bfd_malloc
3895 (strlen ("__erratum_835769_veneer_") + 16);
3896 if (stub_name != NULL)
3897 sprintf (stub_name,"__erratum_835769_veneer_%d", num_fixes);
3898 return stub_name;
3899 }
3900
3901 /* Scan for Cortex-A53 erratum 835769 sequences.
3902
3903 Return FALSE on abnormal termination, otherwise TRUE. */
3904
3905 static bool
3906 _bfd_aarch64_erratum_835769_scan (bfd *input_bfd,
3907 struct bfd_link_info *info,
3908 unsigned int *num_fixes_p)
3909 {
3910 asection *section;
3911 struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info);
3912 unsigned int num_fixes = *num_fixes_p;
3913
3914 if (htab == NULL)
3915 return true;
3916
3917 for (section = input_bfd->sections;
3918 section != NULL;
3919 section = section->next)
3920 {
3921 bfd_byte *contents = NULL;
3922 struct _aarch64_elf_section_data *sec_data;
3923 unsigned int span;
3924
3925 if (elf_section_type (section) != SHT_PROGBITS
3926 || (elf_section_flags (section) & SHF_EXECINSTR) == 0
3927 || (section->flags & SEC_EXCLUDE) != 0
3928 || (section->sec_info_type == SEC_INFO_TYPE_JUST_SYMS)
3929 || (section->output_section == bfd_abs_section_ptr))
3930 continue;
3931
3932 if (elf_section_data (section)->this_hdr.contents != NULL)
3933 contents = elf_section_data (section)->this_hdr.contents;
3934 else if (! bfd_malloc_and_get_section (input_bfd, section, &contents))
3935 return false;
3936
3937 sec_data = elf_aarch64_section_data (section);
3938
3939 if (sec_data->mapcount)
3940 qsort (sec_data->map, sec_data->mapcount,
3941 sizeof (elf_aarch64_section_map), elf_aarch64_compare_mapping);
3942
3943 for (span = 0; span < sec_data->mapcount; span++)
3944 {
3945 unsigned int span_start = sec_data->map[span].vma;
3946 unsigned int span_end = ((span == sec_data->mapcount - 1)
3947 ? sec_data->map[0].vma + section->size
3948 : sec_data->map[span + 1].vma);
3949 unsigned int i;
3950 char span_type = sec_data->map[span].type;
3951
3952 if (span_type == 'd')
3953 continue;
3954
3955 for (i = span_start; i + 4 < span_end; i += 4)
3956 {
3957 uint32_t insn_1 = bfd_getl32 (contents + i);
3958 uint32_t insn_2 = bfd_getl32 (contents + i + 4);
3959
3960 if (aarch64_erratum_sequence (insn_1, insn_2))
3961 {
3962 struct elf_aarch64_stub_hash_entry *stub_entry;
3963 char *stub_name = _bfd_aarch64_erratum_835769_stub_name (num_fixes);
3964 if (! stub_name)
3965 return false;
3966
3967 stub_entry = _bfd_aarch64_add_stub_entry_in_group (stub_name,
3968 section,
3969 htab);
3970 if (! stub_entry)
3971 return false;
3972
3973 stub_entry->stub_type = aarch64_stub_erratum_835769_veneer;
3974 stub_entry->target_section = section;
3975 stub_entry->target_value = i + 4;
3976 stub_entry->veneered_insn = insn_2;
3977 stub_entry->output_name = stub_name;
3978 num_fixes++;
3979 }
3980 }
3981 }
3982 if (elf_section_data (section)->this_hdr.contents == NULL)
3983 free (contents);
3984 }
3985
3986 *num_fixes_p = num_fixes;
3987
3988 return true;
3989 }
3990
3991
3992 /* Test if instruction INSN is ADRP. */
3993
3994 static bool
3995 _bfd_aarch64_adrp_p (uint32_t insn)
3996 {
3997 return ((insn & AARCH64_ADRP_OP_MASK) == AARCH64_ADRP_OP);
3998 }
3999
4000
4001 /* Helper predicate to look for cortex-a53 erratum 843419 sequence 1. */
4002
4003 static bool
4004 _bfd_aarch64_erratum_843419_sequence_p (uint32_t insn_1, uint32_t insn_2,
4005 uint32_t insn_3)
4006 {
4007 uint32_t rt;
4008 uint32_t rt2;
4009 bool pair;
4010 bool load;
4011
4012 return (aarch64_mem_op_p (insn_2, &rt, &rt2, &pair, &load)
4013 && (!pair
4014 || (pair && !load))
4015 && AARCH64_LDST_UIMM (insn_3)
4016 && AARCH64_RN (insn_3) == AARCH64_RD (insn_1));
4017 }
4018
4019
4020 /* Test for the presence of Cortex-A53 erratum 843419 instruction sequence.
4021
4022 Return TRUE if section CONTENTS at offset I contains one of the
4023 erratum 843419 sequences, otherwise return FALSE. If a sequence is
4024 seen, set P_VENEER_I to the offset of the final LOAD/STORE
4025 instruction in the sequence.
4026 */
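/* One illustrative (not exhaustive) shape of the sequence, with the
   ADRP sitting at an address whose low 12 bits are 0xff8 or 0xffc:

     adrp x0, sym              <- address ends in 0xff8 or 0xffc
     ldr  x1, [x2, #8]         <- any memory op other than a load pair
     ldr  x3, [x0, #:lo12:sym] <- unsigned-offset LD/ST based on the
                                  ADRP destination register

   The final load/store is the instruction recorded for the workaround
   veneer. */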
4027
4028 static bool
4029 _bfd_aarch64_erratum_843419_p (bfd_byte *contents, bfd_vma vma,
4030 bfd_vma i, bfd_vma span_end,
4031 bfd_vma *p_veneer_i)
4032 {
4033 uint32_t insn_1 = bfd_getl32 (contents + i);
4034
4035 if (!_bfd_aarch64_adrp_p (insn_1))
4036 return false;
4037
4038 if (span_end < i + 12)
4039 return false;
4040
4041 uint32_t insn_2 = bfd_getl32 (contents + i + 4);
4042 uint32_t insn_3 = bfd_getl32 (contents + i + 8);
4043
4044 if ((vma & 0xfff) != 0xff8 && (vma & 0xfff) != 0xffc)
4045 return false;
4046
4047 if (_bfd_aarch64_erratum_843419_sequence_p (insn_1, insn_2, insn_3))
4048 {
4049 *p_veneer_i = i + 8;
4050 return true;
4051 }
4052
4053 if (span_end < i + 16)
4054 return false;
4055
4056 uint32_t insn_4 = bfd_getl32 (contents + i + 12);
4057
4058 if (_bfd_aarch64_erratum_843419_sequence_p (insn_1, insn_2, insn_4))
4059 {
4060 *p_veneer_i = i + 12;
4061 return true;
4062 }
4063
4064 return false;
4065 }
4066
4067
4068 /* Resize all stub sections. */
4069
4070 static void
4071 _bfd_aarch64_resize_stubs (struct elf_aarch64_link_hash_table *htab)
4072 {
4073 asection *section;
4074
4075 /* OK, we've added some stubs. Find out the new size of the
4076 stub sections. */
4077 for (section = htab->stub_bfd->sections;
4078 section != NULL; section = section->next)
4079 {
4080 /* Ignore non-stub sections. */
4081 if (!strstr (section->name, STUB_SUFFIX))
4082 continue;
4083 section->size = 0;
4084 }
4085
4086 bfd_hash_traverse (&htab->stub_hash_table, aarch64_size_one_stub, htab);
4087
4088 for (section = htab->stub_bfd->sections;
4089 section != NULL; section = section->next)
4090 {
4091 if (!strstr (section->name, STUB_SUFFIX))
4092 continue;
4093
4094 /* Add space for a branch. Add 8 bytes to keep the section 8-byte aligned,
4095 as long branch stubs contain a 64-bit address. */
4096 if (section->size)
4097 section->size += 8;
4098
4099 /* Ensure all stub sections have a size which is a multiple of
4100 4096. This is important in order to ensure that the insertion
4101 of stub sections does not in itself move existing code around
4102 in such a way that new errata sequences are created. We only do this
4103 when the ADRP workaround is enabled. If only the ADR workaround is
4104 enabled then the stubs workaround won't ever be used. */
4105 if (htab->fix_erratum_843419 & ERRAT_ADRP)
4106 if (section->size)
4107 section->size = BFD_ALIGN (section->size, 0x1000);
4108 }
4109 }
4110
4111 /* Construct an erratum 843419 workaround stub name. */
4112
4113 static char *
4114 _bfd_aarch64_erratum_843419_stub_name (asection *input_section,
4115 bfd_vma offset)
4116 {
4117 const bfd_size_type len = 8 + 4 + 1 + 8 + 1 + 16 + 1;
4118 char *stub_name = bfd_malloc (len);
4119
4120 if (stub_name != NULL)
4121 snprintf (stub_name, len, "e843419@%04x_%08x_%" PRIx64,
4122 input_section->owner->id,
4123 input_section->id,
4124 (uint64_t) offset);
4125 return stub_name;
4126 }
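/* For example (identifiers invented for illustration): owner id 0x2a,
   section id 0x7b and offset 0x1000 produce the name
   "e843419@002a_0000007b_1000". */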
4127
4128 /* Build a stub_entry structure describing an 843419 fixup.
4129
4130 The stub_entry constructed is populated with the bit pattern INSN
4131 of the instruction located at OFFSET within input SECTION.
4132
4133 Returns TRUE on success. */
4134
4135 static bool
4136 _bfd_aarch64_erratum_843419_fixup (uint32_t insn,
4137 bfd_vma adrp_offset,
4138 bfd_vma ldst_offset,
4139 asection *section,
4140 struct bfd_link_info *info)
4141 {
4142 struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info);
4143 char *stub_name;
4144 struct elf_aarch64_stub_hash_entry *stub_entry;
4145
4146 stub_name = _bfd_aarch64_erratum_843419_stub_name (section, ldst_offset);
4147 if (stub_name == NULL)
4148 return false;
4149 stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table, stub_name,
4150 false, false);
4151 if (stub_entry)
4152 {
4153 free (stub_name);
4154 return true;
4155 }
4156
4157 /* We always place an 843419 workaround veneer in the stub section
4158 attached to the input section in which an erratum sequence has
4159 been found. This ensures that later in the link process (in
4160 elfNN_aarch64_write_section) when we copy the veneered
4161 instruction from the input section into the stub section the
4162 copied instruction will have had any relocations applied to it.
4163 If we placed workaround veneers in any other stub section then we
4164 could not assume that all relocations have been processed on the
4165 corresponding input section at the point we output the stub
4166 section. */
4167
4168 stub_entry = _bfd_aarch64_add_stub_entry_after (stub_name, section, htab);
4169 if (stub_entry == NULL)
4170 {
4171 free (stub_name);
4172 return false;
4173 }
4174
4175 stub_entry->adrp_offset = adrp_offset;
4176 stub_entry->target_value = ldst_offset;
4177 stub_entry->target_section = section;
4178 stub_entry->stub_type = aarch64_stub_erratum_843419_veneer;
4179 stub_entry->veneered_insn = insn;
4180 stub_entry->output_name = stub_name;
4181
4182 return true;
4183 }
4184
4185
4186 /* Scan an input section looking for the signature of erratum 843419.
4187
4188 Scans input SECTION in INPUT_BFD looking for erratum 843419
4189 signatures, for each signature found a stub_entry is created
4190 describing the location of the erratum for subsequent fixup.
4191
4192 Return TRUE on successful scan, FALSE on failure to scan.
4193 */
4194
4195 static bool
4196 _bfd_aarch64_erratum_843419_scan (bfd *input_bfd, asection *section,
4197 struct bfd_link_info *info)
4198 {
4199 struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info);
4200
4201 if (htab == NULL)
4202 return true;
4203
4204 if (elf_section_type (section) != SHT_PROGBITS
4205 || (elf_section_flags (section) & SHF_EXECINSTR) == 0
4206 || (section->flags & SEC_EXCLUDE) != 0
4207 || (section->sec_info_type == SEC_INFO_TYPE_JUST_SYMS)
4208 || (section->output_section == bfd_abs_section_ptr))
4209 return true;
4210
4211 do
4212 {
4213 bfd_byte *contents = NULL;
4214 struct _aarch64_elf_section_data *sec_data;
4215 unsigned int span;
4216
4217 if (elf_section_data (section)->this_hdr.contents != NULL)
4218 contents = elf_section_data (section)->this_hdr.contents;
4219 else if (! bfd_malloc_and_get_section (input_bfd, section, &contents))
4220 return false;
4221
4222 sec_data = elf_aarch64_section_data (section);
4223
4224 if (sec_data->mapcount)
4225 qsort (sec_data->map, sec_data->mapcount,
4226 sizeof (elf_aarch64_section_map), elf_aarch64_compare_mapping);
4227
4228 for (span = 0; span < sec_data->mapcount; span++)
4229 {
4230 unsigned int span_start = sec_data->map[span].vma;
4231 unsigned int span_end = ((span == sec_data->mapcount - 1)
4232 ? sec_data->map[0].vma + section->size
4233 : sec_data->map[span + 1].vma);
4234 unsigned int i;
4235 char span_type = sec_data->map[span].type;
4236
4237 if (span_type == 'd')
4238 continue;
4239
4240 for (i = span_start; i + 8 < span_end; i += 4)
4241 {
4242 bfd_vma vma = (section->output_section->vma
4243 + section->output_offset
4244 + i);
4245 bfd_vma veneer_i;
4246
4247 if (_bfd_aarch64_erratum_843419_p
4248 (contents, vma, i, span_end, &veneer_i))
4249 {
4250 uint32_t insn = bfd_getl32 (contents + veneer_i);
4251
4252 if (!_bfd_aarch64_erratum_843419_fixup (insn, i, veneer_i,
4253 section, info))
4254 return false;
4255 }
4256 }
4257 }
4258
4259 if (elf_section_data (section)->this_hdr.contents == NULL)
4260 free (contents);
4261 }
4262 while (0);
4263
4264 return true;
4265 }
4266
4267
4268 /* Determine and set the size of the stub section for a final link.
4269
4270 The basic idea here is to examine all the relocations looking for
4271 PC-relative calls to a target that is unreachable with a "bl"
4272 instruction. */
4273
4274 bool
4275 elfNN_aarch64_size_stubs (bfd *output_bfd,
4276 bfd *stub_bfd,
4277 struct bfd_link_info *info,
4278 bfd_signed_vma group_size,
4279 asection * (*add_stub_section) (const char *,
4280 asection *),
4281 void (*layout_sections_again) (void))
4282 {
4283 bfd_size_type stub_group_size;
4284 bool stubs_always_before_branch;
4285 bool stub_changed = false;
4286 struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info);
4287 unsigned int num_erratum_835769_fixes = 0;
4288
4289 /* Propagate mach to stub bfd, because it may not have been
4290 finalized when we created stub_bfd. */
4291 bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
4292 bfd_get_mach (output_bfd));
4293
4294 /* Stash our params away. */
4295 htab->stub_bfd = stub_bfd;
4296 htab->add_stub_section = add_stub_section;
4297 htab->layout_sections_again = layout_sections_again;
4298 stubs_always_before_branch = group_size < 0;
4299 if (group_size < 0)
4300 stub_group_size = -group_size;
4301 else
4302 stub_group_size = group_size;
4303
4304 if (stub_group_size == 1)
4305 {
4306 /* Default values. */
4307 /* AArch64 branch range is +-128MB. The value used is 1MB less. */
4308 stub_group_size = 127 * 1024 * 1024;
4309 }
4310
4311 group_sections (htab, stub_group_size, stubs_always_before_branch);
4312
4313 (*htab->layout_sections_again) ();
4314
4315 if (htab->fix_erratum_835769)
4316 {
4317 bfd *input_bfd;
4318
4319 for (input_bfd = info->input_bfds;
4320 input_bfd != NULL; input_bfd = input_bfd->link.next)
4321 {
4322 if (!is_aarch64_elf (input_bfd)
4323 || (input_bfd->flags & BFD_LINKER_CREATED) != 0)
4324 continue;
4325
4326 if (!_bfd_aarch64_erratum_835769_scan (input_bfd, info,
4327 &num_erratum_835769_fixes))
4328 return false;
4329 }
4330
4331 _bfd_aarch64_resize_stubs (htab);
4332 (*htab->layout_sections_again) ();
4333 }
4334
4335 if (htab->fix_erratum_843419 != ERRAT_NONE)
4336 {
4337 bfd *input_bfd;
4338
4339 for (input_bfd = info->input_bfds;
4340 input_bfd != NULL;
4341 input_bfd = input_bfd->link.next)
4342 {
4343 asection *section;
4344
4345 if (!is_aarch64_elf (input_bfd)
4346 || (input_bfd->flags & BFD_LINKER_CREATED) != 0)
4347 continue;
4348
4349 for (section = input_bfd->sections;
4350 section != NULL;
4351 section = section->next)
4352 if (!_bfd_aarch64_erratum_843419_scan (input_bfd, section, info))
4353 return false;
4354 }
4355
4356 _bfd_aarch64_resize_stubs (htab);
4357 (*htab->layout_sections_again) ();
4358 }
4359
4360 while (1)
4361 {
4362 bfd *input_bfd;
4363
4364 for (input_bfd = info->input_bfds;
4365 input_bfd != NULL; input_bfd = input_bfd->link.next)
4366 {
4367 Elf_Internal_Shdr *symtab_hdr;
4368 asection *section;
4369 Elf_Internal_Sym *local_syms = NULL;
4370
4371 if (!is_aarch64_elf (input_bfd)
4372 || (input_bfd->flags & BFD_LINKER_CREATED) != 0)
4373 continue;
4374
4375 /* We'll need the symbol table in a second. */
4376 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
4377 if (symtab_hdr->sh_info == 0)
4378 continue;
4379
4380 /* Walk over each section attached to the input bfd. */
4381 for (section = input_bfd->sections;
4382 section != NULL; section = section->next)
4383 {
4384 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
4385
4386 /* If there aren't any relocs, then there's nothing more
4387 to do. */
4388 if ((section->flags & SEC_RELOC) == 0
4389 || section->reloc_count == 0
4390 || (section->flags & SEC_CODE) == 0)
4391 continue;
4392
4393 /* If this section is a link-once section that will be
4394 discarded, then don't create any stubs. */
4395 if (section->output_section == NULL
4396 || section->output_section->owner != output_bfd)
4397 continue;
4398
4399 /* Get the relocs. */
4400 internal_relocs
4401 = _bfd_elf_link_read_relocs (input_bfd, section, NULL,
4402 NULL, info->keep_memory);
4403 if (internal_relocs == NULL)
4404 goto error_ret_free_local;
4405
4406 /* Now examine each relocation. */
4407 irela = internal_relocs;
4408 irelaend = irela + section->reloc_count;
4409 for (; irela < irelaend; irela++)
4410 {
4411 unsigned int r_type, r_indx;
4412 enum elf_aarch64_stub_type stub_type;
4413 struct elf_aarch64_stub_hash_entry *stub_entry;
4414 asection *sym_sec;
4415 bfd_vma sym_value;
4416 bfd_vma destination;
4417 struct elf_aarch64_link_hash_entry *hash;
4418 const char *sym_name;
4419 char *stub_name;
4420 const asection *id_sec;
4421 unsigned char st_type;
4422 bfd_size_type len;
4423
4424 r_type = ELFNN_R_TYPE (irela->r_info);
4425 r_indx = ELFNN_R_SYM (irela->r_info);
4426
4427 if (r_type >= (unsigned int) R_AARCH64_end)
4428 {
4429 bfd_set_error (bfd_error_bad_value);
4430 error_ret_free_internal:
4431 if (elf_section_data (section)->relocs == NULL)
4432 free (internal_relocs);
4433 goto error_ret_free_local;
4434 }
4435
4436 /* Only look for stubs on unconditional branch and
4437 branch-and-link instructions. */
4438 if (r_type != (unsigned int) AARCH64_R (CALL26)
4439 && r_type != (unsigned int) AARCH64_R (JUMP26))
4440 continue;
4441
4442 /* Now determine the call target, its name, value,
4443 section. */
4444 sym_sec = NULL;
4445 sym_value = 0;
4446 destination = 0;
4447 hash = NULL;
4448 sym_name = NULL;
4449 if (r_indx < symtab_hdr->sh_info)
4450 {
4451 /* It's a local symbol. */
4452 Elf_Internal_Sym *sym;
4453 Elf_Internal_Shdr *hdr;
4454
4455 if (local_syms == NULL)
4456 {
4457 local_syms
4458 = (Elf_Internal_Sym *) symtab_hdr->contents;
4459 if (local_syms == NULL)
4460 local_syms
4461 = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
4462 symtab_hdr->sh_info, 0,
4463 NULL, NULL, NULL);
4464 if (local_syms == NULL)
4465 goto error_ret_free_internal;
4466 }
4467
4468 sym = local_syms + r_indx;
4469 hdr = elf_elfsections (input_bfd)[sym->st_shndx];
4470 sym_sec = hdr->bfd_section;
4471 if (!sym_sec)
4472 /* This is an undefined symbol. It can never
4473 be resolved. */
4474 continue;
4475
4476 if (ELF_ST_TYPE (sym->st_info) != STT_SECTION)
4477 sym_value = sym->st_value;
4478 destination = (sym_value + irela->r_addend
4479 + sym_sec->output_offset
4480 + sym_sec->output_section->vma);
4481 st_type = ELF_ST_TYPE (sym->st_info);
4482 sym_name
4483 = bfd_elf_string_from_elf_section (input_bfd,
4484 symtab_hdr->sh_link,
4485 sym->st_name);
4486 }
4487 else
4488 {
4489 int e_indx;
4490
4491 e_indx = r_indx - symtab_hdr->sh_info;
4492 hash = ((struct elf_aarch64_link_hash_entry *)
4493 elf_sym_hashes (input_bfd)[e_indx]);
4494
4495 while (hash->root.root.type == bfd_link_hash_indirect
4496 || hash->root.root.type == bfd_link_hash_warning)
4497 hash = ((struct elf_aarch64_link_hash_entry *)
4498 hash->root.root.u.i.link);
4499
4500 if (hash->root.root.type == bfd_link_hash_defined
4501 || hash->root.root.type == bfd_link_hash_defweak)
4502 {
4503 struct elf_aarch64_link_hash_table *globals =
4504 elf_aarch64_hash_table (info);
4505 sym_sec = hash->root.root.u.def.section;
4506 sym_value = hash->root.root.u.def.value;
4507 /* For a destination in a shared library,
4508 use the PLT stub as target address to
4509 decide whether a branch stub is
4510 needed. */
4511 if (globals->root.splt != NULL && hash != NULL
4512 && hash->root.plt.offset != (bfd_vma) - 1)
4513 {
4514 sym_sec = globals->root.splt;
4515 sym_value = hash->root.plt.offset;
4516 if (sym_sec->output_section != NULL)
4517 destination = (sym_value
4518 + sym_sec->output_offset
4519 +
4520 sym_sec->output_section->vma);
4521 }
4522 else if (sym_sec->output_section != NULL)
4523 destination = (sym_value + irela->r_addend
4524 + sym_sec->output_offset
4525 + sym_sec->output_section->vma);
4526 }
4527 else if (hash->root.root.type == bfd_link_hash_undefined
4528 || (hash->root.root.type
4529 == bfd_link_hash_undefweak))
4530 {
4531 /* For a shared library, use the PLT stub as
4532 target address to decide whether a long
4533 branch stub is needed.
4534 For absolute code, they cannot be handled. */
4535 struct elf_aarch64_link_hash_table *globals =
4536 elf_aarch64_hash_table (info);
4537
4538 if (globals->root.splt != NULL && hash != NULL
4539 && hash->root.plt.offset != (bfd_vma) - 1)
4540 {
4541 sym_sec = globals->root.splt;
4542 sym_value = hash->root.plt.offset;
4543 if (sym_sec->output_section != NULL)
4544 destination = (sym_value
4545 + sym_sec->output_offset
4546 +
4547 sym_sec->output_section->vma);
4548 }
4549 else
4550 continue;
4551 }
4552 else
4553 {
4554 bfd_set_error (bfd_error_bad_value);
4555 goto error_ret_free_internal;
4556 }
4557 st_type = ELF_ST_TYPE (hash->root.type);
4558 sym_name = hash->root.root.root.string;
4559 }
4560
4561 /* Determine what (if any) linker stub is needed. */
4562 stub_type = aarch64_type_of_stub (section, irela, sym_sec,
4563 st_type, destination);
4564 if (stub_type == aarch64_stub_none)
4565 continue;
4566
4567 /* Support for grouping stub sections. */
4568 id_sec = htab->stub_group[section->id].link_sec;
4569
4570 /* Get the name of this stub. */
4571 stub_name = elfNN_aarch64_stub_name (id_sec, sym_sec, hash,
4572 irela);
4573 if (!stub_name)
4574 goto error_ret_free_internal;
4575
4576 stub_entry =
4577 aarch64_stub_hash_lookup (&htab->stub_hash_table,
4578 stub_name, false, false);
4579 if (stub_entry != NULL)
4580 {
4581 /* The proper stub has already been created. */
4582 free (stub_name);
4583 /* Always update this stub's target since it may have
4584 changed after layout. */
4585 stub_entry->target_value = sym_value + irela->r_addend;
4586 continue;
4587 }
4588
4589 stub_entry = _bfd_aarch64_add_stub_entry_in_group
4590 (stub_name, section, htab);
4591 if (stub_entry == NULL)
4592 {
4593 free (stub_name);
4594 goto error_ret_free_internal;
4595 }
4596
4597 stub_entry->target_value = sym_value + irela->r_addend;
4598 stub_entry->target_section = sym_sec;
4599 stub_entry->stub_type = stub_type;
4600 stub_entry->h = hash;
4601 stub_entry->st_type = st_type;
4602
4603 if (sym_name == NULL)
4604 sym_name = "unnamed";
4605 len = sizeof (STUB_ENTRY_NAME) + strlen (sym_name);
4606 stub_entry->output_name = bfd_alloc (htab->stub_bfd, len);
4607 if (stub_entry->output_name == NULL)
4608 {
4609 free (stub_name);
4610 goto error_ret_free_internal;
4611 }
4612
4613 snprintf (stub_entry->output_name, len, STUB_ENTRY_NAME,
4614 sym_name);
4615
4616 stub_changed = true;
4617 }
4618
4619 /* We're done with the internal relocs, free them. */
4620 if (elf_section_data (section)->relocs == NULL)
4621 free (internal_relocs);
4622 }
4623 }
4624
4625 if (!stub_changed)
4626 break;
4627
4628 _bfd_aarch64_resize_stubs (htab);
4629
4630 /* Ask the linker to do its stuff. */
4631 (*htab->layout_sections_again) ();
4632 stub_changed = false;
4633 }
4634
4635 return true;
4636
4637 error_ret_free_local:
4638 return false;
4639 }
4640
4641 /* Build all the stubs associated with the current output file. The
4642 stubs are kept in a hash table attached to the main linker hash
4643 table. We also set up the .plt entries for statically linked PIC
4644 functions here. This function is called via aarch64_elf_finish in the
4645 linker. */
4646
4647 bool
4648 elfNN_aarch64_build_stubs (struct bfd_link_info *info)
4649 {
4650 asection *stub_sec;
4651 struct bfd_hash_table *table;
4652 struct elf_aarch64_link_hash_table *htab;
4653
4654 htab = elf_aarch64_hash_table (info);
4655
4656 for (stub_sec = htab->stub_bfd->sections;
4657 stub_sec != NULL; stub_sec = stub_sec->next)
4658 {
4659 bfd_size_type size;
4660
4661 /* Ignore non-stub sections. */
4662 if (!strstr (stub_sec->name, STUB_SUFFIX))
4663 continue;
4664
4665 /* Allocate memory to hold the linker stubs. */
4666 size = stub_sec->size;
4667 stub_sec->contents = bfd_zalloc (htab->stub_bfd, size);
4668 if (stub_sec->contents == NULL && size != 0)
4669 return false;
4670 stub_sec->size = 0;
4671
4672 /* Add a branch around the stub section, and a nop, to keep it 8-byte
4673 aligned, as long branch stubs contain a 64-bit address. */
4674 bfd_putl32 (0x14000000 | (size >> 2), stub_sec->contents);
4675 bfd_putl32 (INSN_NOP, stub_sec->contents + 4);
4676 stub_sec->size += 8;
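/* Worked example (illustrative size): for a stub section whose
   allocated size is 0x20 bytes, size >> 2 is 8, so the branch stored
   above is 0x14000008, jumping 32 bytes forward, i.e. just past the
   end of this stub section. */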
4677 }
4678
4679 /* Build the stubs as directed by the stub hash table. */
4680 table = &htab->stub_hash_table;
4681 bfd_hash_traverse (table, aarch64_build_one_stub, info);
4682
4683 return true;
4684 }
4685
4686
4687 /* Add an entry to the code/data map for section SEC. */
4688
4689 static void
4690 elfNN_aarch64_section_map_add (asection *sec, char type, bfd_vma vma)
4691 {
4692 struct _aarch64_elf_section_data *sec_data =
4693 elf_aarch64_section_data (sec);
4694 unsigned int newidx;
4695
4696 if (sec_data->map == NULL)
4697 {
4698 sec_data->map = bfd_malloc (sizeof (elf_aarch64_section_map));
4699 sec_data->mapcount = 0;
4700 sec_data->mapsize = 1;
4701 }
4702
4703 newidx = sec_data->mapcount++;
4704
4705 if (sec_data->mapcount > sec_data->mapsize)
4706 {
4707 sec_data->mapsize *= 2;
4708 sec_data->map = bfd_realloc_or_free
4709 (sec_data->map, sec_data->mapsize * sizeof (elf_aarch64_section_map));
4710 }
4711
4712 if (sec_data->map)
4713 {
4714 sec_data->map[newidx].vma = vma;
4715 sec_data->map[newidx].type = type;
4716 }
4717 }
4718
4719
4720 /* Initialise maps of insn/data for input BFDs. */
4721 void
4722 bfd_elfNN_aarch64_init_maps (bfd *abfd)
4723 {
4724 Elf_Internal_Sym *isymbuf;
4725 Elf_Internal_Shdr *hdr;
4726 unsigned int i, localsyms;
4727
4728 /* Make sure that we are dealing with an AArch64 elf binary. */
4729 if (!is_aarch64_elf (abfd))
4730 return;
4731
4732 if ((abfd->flags & DYNAMIC) != 0)
4733 return;
4734
4735 hdr = &elf_symtab_hdr (abfd);
4736 localsyms = hdr->sh_info;
4737
4738 /* Obtain a buffer full of symbols for this BFD. The hdr->sh_info field
4739 should contain the number of local symbols, which should come before any
4740 global symbols. Mapping symbols are always local. */
4741 isymbuf = bfd_elf_get_elf_syms (abfd, hdr, localsyms, 0, NULL, NULL, NULL);
4742
4743 /* No internal symbols read? Skip this BFD. */
4744 if (isymbuf == NULL)
4745 return;
4746
4747 for (i = 0; i < localsyms; i++)
4748 {
4749 Elf_Internal_Sym *isym = &isymbuf[i];
4750 asection *sec = bfd_section_from_elf_index (abfd, isym->st_shndx);
4751 const char *name;
4752
4753 if (sec != NULL && ELF_ST_BIND (isym->st_info) == STB_LOCAL)
4754 {
4755 name = bfd_elf_string_from_elf_section (abfd,
4756 hdr->sh_link,
4757 isym->st_name);
4758
4759 if (bfd_is_aarch64_special_symbol_name
4760 (name, BFD_AARCH64_SPECIAL_SYM_TYPE_MAP))
4761 elfNN_aarch64_section_map_add (sec, name[1], isym->st_value);
4762 }
4763 }
4764 }
4765
4766 static void
4767 setup_plt_values (struct bfd_link_info *link_info,
4768 aarch64_plt_type plt_type)
4769 {
4770 struct elf_aarch64_link_hash_table *globals;
4771 globals = elf_aarch64_hash_table (link_info);
4772
4773 if (plt_type == PLT_BTI_PAC)
4774 {
4775 globals->plt0_entry = elfNN_aarch64_small_plt0_bti_entry;
4776
4777 /* Only in ET_EXEC do we need PLTn with BTI. */
4778 if (bfd_link_pde (link_info))
4779 {
4780 globals->plt_entry_size = PLT_BTI_PAC_SMALL_ENTRY_SIZE;
4781 globals->plt_entry = elfNN_aarch64_small_plt_bti_pac_entry;
4782 }
4783 else
4784 {
4785 globals->plt_entry_size = PLT_PAC_SMALL_ENTRY_SIZE;
4786 globals->plt_entry = elfNN_aarch64_small_plt_pac_entry;
4787 }
4788 }
4789 else if (plt_type == PLT_BTI)
4790 {
4791 globals->plt0_entry = elfNN_aarch64_small_plt0_bti_entry;
4792
4793 /* Only in ET_EXEC do we need PLTn with BTI. */
4794 if (bfd_link_pde (link_info))
4795 {
4796 globals->plt_entry_size = PLT_BTI_SMALL_ENTRY_SIZE;
4797 globals->plt_entry = elfNN_aarch64_small_plt_bti_entry;
4798 }
4799 }
4800 else if (plt_type == PLT_PAC)
4801 {
4802 globals->plt_entry_size = PLT_PAC_SMALL_ENTRY_SIZE;
4803 globals->plt_entry = elfNN_aarch64_small_plt_pac_entry;
4804 }
4805 }
4806
4807 /* Set option values needed during linking. */
4808 void
4809 bfd_elfNN_aarch64_set_options (struct bfd *output_bfd,
4810 struct bfd_link_info *link_info,
4811 int no_enum_warn,
4812 int no_wchar_warn, int pic_veneer,
4813 int fix_erratum_835769,
4814 erratum_84319_opts fix_erratum_843419,
4815 int no_apply_dynamic_relocs,
4816 aarch64_bti_pac_info bp_info)
4817 {
4818 struct elf_aarch64_link_hash_table *globals;
4819
4820 globals = elf_aarch64_hash_table (link_info);
4821 globals->pic_veneer = pic_veneer;
4822 globals->fix_erratum_835769 = fix_erratum_835769;
4823 /* If the default options are used, then ERRAT_ADR will be set by default,
4824 which will enable the ADRP->ADR workaround for
4825 erratum 843419. */
4826 globals->fix_erratum_843419 = fix_erratum_843419;
4827 globals->no_apply_dynamic_relocs = no_apply_dynamic_relocs;
4828
4829 BFD_ASSERT (is_aarch64_elf (output_bfd));
4830 elf_aarch64_tdata (output_bfd)->no_enum_size_warning = no_enum_warn;
4831 elf_aarch64_tdata (output_bfd)->no_wchar_size_warning = no_wchar_warn;
4832
4833 switch (bp_info.bti_type)
4834 {
4835 case BTI_WARN:
4836 elf_aarch64_tdata (output_bfd)->no_bti_warn = 0;
4837 elf_aarch64_tdata (output_bfd)->gnu_and_prop
4838 |= GNU_PROPERTY_AARCH64_FEATURE_1_BTI;
4839 break;
4840
4841 default:
4842 break;
4843 }
4844 elf_aarch64_tdata (output_bfd)->plt_type = bp_info.plt_type;
4845 setup_plt_values (link_info, bp_info.plt_type);
4846 }
4847
4848 static bfd_vma
4849 aarch64_calculate_got_entry_vma (struct elf_link_hash_entry *h,
4850 struct elf_aarch64_link_hash_table
4851 *globals, struct bfd_link_info *info,
4852 bfd_vma value, bfd *output_bfd,
4853 bool *unresolved_reloc_p)
4854 {
4855 bfd_vma off = (bfd_vma) - 1;
4856 asection *basegot = globals->root.sgot;
4857 bool dyn = globals->root.dynamic_sections_created;
4858
4859 if (h != NULL)
4860 {
4861 BFD_ASSERT (basegot != NULL);
4862 off = h->got.offset;
4863 BFD_ASSERT (off != (bfd_vma) - 1);
4864 if (!WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, bfd_link_pic (info), h)
4865 || (bfd_link_pic (info)
4866 && SYMBOL_REFERENCES_LOCAL (info, h))
4867 || (ELF_ST_VISIBILITY (h->other)
4868 && h->root.type == bfd_link_hash_undefweak))
4869 {
4870 /* This is actually a static link, or it is a -Bsymbolic link
4871 and the symbol is defined locally. We must initialize this
4872 entry in the global offset table. Since the offset must
4873 always be a multiple of 8 (4 in the case of ILP32), we use
4874 the least significant bit to record whether we have
4875 initialized it already.
4876 When doing a dynamic link, we create a .rel(a).got relocation
4877 entry to initialize the value. This is done in the
4878 finish_dynamic_symbol routine. */
4879 if ((off & 1) != 0)
4880 off &= ~1;
4881 else
4882 {
4883 bfd_put_NN (output_bfd, value, basegot->contents + off);
4884 h->got.offset |= 1;
4885 }
4886 }
4887 else
4888 *unresolved_reloc_p = false;
4889
4890 off = off + basegot->output_section->vma + basegot->output_offset;
4891 }
4892
4893 return off;
4894 }
4895
4896 /* Change R_TYPE to a more efficient access model where possible,
4897 return the new reloc type. */
4898
4899 static bfd_reloc_code_real_type
4900 aarch64_tls_transition_without_check (bfd_reloc_code_real_type r_type,
4901 struct elf_link_hash_entry *h,
4902 struct bfd_link_info *info)
4903 {
4904 bool local_exec = bfd_link_executable (info)
4905 && SYMBOL_REFERENCES_LOCAL (info, h);
4906
4907 switch (r_type)
4908 {
4909 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
4910 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
4911 return (local_exec
4912 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1
4913 : BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21);
4914
4915 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
4916 return (local_exec
4917 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC
4918 : r_type);
4919
4920 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
4921 return (local_exec
4922 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1
4923 : BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19);
4924
4925 case BFD_RELOC_AARCH64_TLSDESC_LDR:
4926 return (local_exec
4927 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC
4928 : BFD_RELOC_AARCH64_NONE);
4929
4930 case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
4931 return (local_exec
4932 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC
4933 : BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC);
4934
4935 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
4936 return (local_exec
4937 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2
4938 : BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1);
4939
4940 case BFD_RELOC_AARCH64_TLSDESC_LDNN_LO12_NC:
4941 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
4942 return (local_exec
4943 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC
4944 : BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC);
4945
4946 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
4947 return local_exec ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1 : r_type;
4948
4949 case BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC:
4950 return local_exec ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC : r_type;
4951
4952 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
4953 return r_type;
4954
4955 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
4956 return (local_exec
4957 ? BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12
4958 : BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19);
4959
4960 case BFD_RELOC_AARCH64_TLSDESC_ADD:
4961 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
4962 case BFD_RELOC_AARCH64_TLSDESC_CALL:
4963 /* Instructions with these relocations will become NOPs. */
4964 return BFD_RELOC_AARCH64_NONE;
4965
4966 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
4967 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
4968 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
4969 return local_exec ? BFD_RELOC_AARCH64_NONE : r_type;
4970
4971 #if ARCH_SIZE == 64
4972 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
4973 return local_exec
4974 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC
4975 : BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC;
4976
4977 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
4978 return local_exec
4979 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2
4980 : BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1;
4981 #endif
4982
4983 default:
4984 break;
4985 }
4986
4987 return r_type;
4988 }
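/* For example: in an executable where the symbol binds locally
   (local_exec), a BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21 access is
   rewritten above to BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1; for other
   symbols the same access becomes the initial-exec
   BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21 form. */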
4989
4990 static unsigned int
4991 aarch64_reloc_got_type (bfd_reloc_code_real_type r_type)
4992 {
4993 switch (r_type)
4994 {
4995 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
4996 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
4997 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
4998 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
4999 case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
5000 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
5001 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
5002 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
5003 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
5004 return GOT_NORMAL;
5005
5006 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
5007 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
5008 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
5009 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
5010 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
5011 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
5012 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
5013 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
5014 return GOT_TLS_GD;
5015
5016 case BFD_RELOC_AARCH64_TLSDESC_ADD:
5017 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
5018 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
5019 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
5020 case BFD_RELOC_AARCH64_TLSDESC_CALL:
5021 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
5022 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
5023 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
5024 case BFD_RELOC_AARCH64_TLSDESC_LDR:
5025 case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
5026 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
5027 return GOT_TLSDESC_GD;
5028
5029 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
5030 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
5031 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
5032 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
5033 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
5034 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
5035 return GOT_TLS_IE;
5036
5037 default:
5038 break;
5039 }
5040 return GOT_UNKNOWN;
5041 }
5042
5043 static bool
5044 aarch64_can_relax_tls (bfd *input_bfd,
5045 struct bfd_link_info *info,
5046 bfd_reloc_code_real_type r_type,
5047 struct elf_link_hash_entry *h,
5048 unsigned long r_symndx)
5049 {
5050 unsigned int symbol_got_type;
5051 unsigned int reloc_got_type;
5052
5053 if (! IS_AARCH64_TLS_RELAX_RELOC (r_type))
5054 return false;
5055
5056 symbol_got_type = elfNN_aarch64_symbol_got_type (h, input_bfd, r_symndx);
5057 reloc_got_type = aarch64_reloc_got_type (r_type);
5058
5059 if (symbol_got_type == GOT_TLS_IE && GOT_TLS_GD_ANY_P (reloc_got_type))
5060 return true;
5061
5062 if (!bfd_link_executable (info))
5063 return false;
5064
5065 if (h && h->root.type == bfd_link_hash_undefweak)
5066 return false;
5067
5068 return true;
5069 }
5070
5071 /* Given the relocation code R_TYPE, return the relaxed bfd reloc
5072 enumerator. */
5073
5074 static bfd_reloc_code_real_type
5075 aarch64_tls_transition (bfd *input_bfd,
5076 struct bfd_link_info *info,
5077 unsigned int r_type,
5078 struct elf_link_hash_entry *h,
5079 unsigned long r_symndx)
5080 {
5081 bfd_reloc_code_real_type bfd_r_type
5082 = elfNN_aarch64_bfd_reloc_from_type (input_bfd, r_type);
5083
5084 if (! aarch64_can_relax_tls (input_bfd, info, bfd_r_type, h, r_symndx))
5085 return bfd_r_type;
5086
5087 return aarch64_tls_transition_without_check (bfd_r_type, h, info);
5088 }
5089
5090 /* Return the base VMA address which should be subtracted from real addresses
5091 when resolving R_AARCH64_TLS_DTPREL relocation. */
5092
5093 static bfd_vma
5094 dtpoff_base (struct bfd_link_info *info)
5095 {
5096 /* If tls_sec is NULL, we should have signalled an error already. */
5097 BFD_ASSERT (elf_hash_table (info)->tls_sec != NULL);
5098 return elf_hash_table (info)->tls_sec->vma;
5099 }
5100
5101 /* Return the base VMA address which should be subtracted from real addresses
5102 when resolving R_AARCH64_TLS_GOTTPREL64 relocations. */
5103
5104 static bfd_vma
5105 tpoff_base (struct bfd_link_info *info)
5106 {
5107 struct elf_link_hash_table *htab = elf_hash_table (info);
5108
5109 /* If tls_sec is NULL, we should have signalled an error already. */
5110 BFD_ASSERT (htab->tls_sec != NULL);
5111
5112 bfd_vma base = align_power ((bfd_vma) TCB_SIZE,
5113 htab->tls_sec->alignment_power);
5114 return htab->tls_sec->vma - base;
5115 }
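/* A worked example with illustrative figures, assuming the usual
   16-byte AArch64 TCB: if the TLS segment starts at vma 0x20000 with
   16-byte alignment, tpoff_base returns 0x20000 - 16 = 0x1fff0, so a
   thread-local variable at vma 0x20008 resolves to the TP-relative
   offset 0x18. */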
5116
5117 static bfd_vma *
5118 symbol_got_offset_ref (bfd *input_bfd, struct elf_link_hash_entry *h,
5119 unsigned long r_symndx)
5120 {
5121 /* Calculate the address of the GOT entry for symbol
5122 referred to in h. */
5123 if (h != NULL)
5124 return &h->got.offset;
5125 else
5126 {
5127 /* local symbol */
5128 struct elf_aarch64_local_symbol *l;
5129
5130 l = elf_aarch64_locals (input_bfd);
5131 return &l[r_symndx].got_offset;
5132 }
5133 }
5134
5135 static void
5136 symbol_got_offset_mark (bfd *input_bfd, struct elf_link_hash_entry *h,
5137 unsigned long r_symndx)
5138 {
5139 bfd_vma *p;
5140 p = symbol_got_offset_ref (input_bfd, h, r_symndx);
5141 *p |= 1;
5142 }
5143
5144 static int
5145 symbol_got_offset_mark_p (bfd *input_bfd, struct elf_link_hash_entry *h,
5146 unsigned long r_symndx)
5147 {
5148 bfd_vma value;
5149 value = * symbol_got_offset_ref (input_bfd, h, r_symndx);
5150 return value & 1;
5151 }
5152
5153 static bfd_vma
5154 symbol_got_offset (bfd *input_bfd, struct elf_link_hash_entry *h,
5155 unsigned long r_symndx)
5156 {
5157 bfd_vma value;
5158 value = * symbol_got_offset_ref (input_bfd, h, r_symndx);
5159 value &= ~1;
5160 return value;
5161 }
5162
5163 static bfd_vma *
5164 symbol_tlsdesc_got_offset_ref (bfd *input_bfd, struct elf_link_hash_entry *h,
5165 unsigned long r_symndx)
5166 {
5167 /* Calculate the address of the GOT entry for symbol
5168 referred to in h. */
5169 if (h != NULL)
5170 {
5171 struct elf_aarch64_link_hash_entry *eh;
5172 eh = (struct elf_aarch64_link_hash_entry *) h;
5173 return &eh->tlsdesc_got_jump_table_offset;
5174 }
5175 else
5176 {
5177 /* local symbol */
5178 struct elf_aarch64_local_symbol *l;
5179
5180 l = elf_aarch64_locals (input_bfd);
5181 return &l[r_symndx].tlsdesc_got_jump_table_offset;
5182 }
5183 }
5184
5185 static void
5186 symbol_tlsdesc_got_offset_mark (bfd *input_bfd, struct elf_link_hash_entry *h,
5187 unsigned long r_symndx)
5188 {
5189 bfd_vma *p;
5190 p = symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx);
5191 *p |= 1;
5192 }
5193
5194 static int
5195 symbol_tlsdesc_got_offset_mark_p (bfd *input_bfd,
5196 struct elf_link_hash_entry *h,
5197 unsigned long r_symndx)
5198 {
5199 bfd_vma value;
5200 value = * symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx);
5201 return value & 1;
5202 }
5203
5204 static bfd_vma
5205 symbol_tlsdesc_got_offset (bfd *input_bfd, struct elf_link_hash_entry *h,
5206 unsigned long r_symndx)
5207 {
5208 bfd_vma value;
5209 value = * symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx);
5210 value &= ~1;
5211 return value;
5212 }
5213
5214 /* Data for make_branch_to_erratum_835769_stub(). */
5215
5216 struct erratum_835769_branch_to_stub_data
5217 {
5218 struct bfd_link_info *info;
5219 asection *output_section;
5220 bfd_byte *contents;
5221 };
5222
5223 /* Helper to insert branches to erratum 835769 stubs in the right
5224 places for a particular section. */
5225
5226 static bool
5227 make_branch_to_erratum_835769_stub (struct bfd_hash_entry *gen_entry,
5228 void *in_arg)
5229 {
5230 struct elf_aarch64_stub_hash_entry *stub_entry;
5231 struct erratum_835769_branch_to_stub_data *data;
5232 bfd_byte *contents;
5233 unsigned long branch_insn = 0;
5234 bfd_vma veneered_insn_loc, veneer_entry_loc;
5235 bfd_signed_vma branch_offset;
5236 unsigned int target;
5237 bfd *abfd;
5238
5239 stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;
5240 data = (struct erratum_835769_branch_to_stub_data *) in_arg;
5241
5242 if (stub_entry->target_section != data->output_section
5243 || stub_entry->stub_type != aarch64_stub_erratum_835769_veneer)
5244 return true;
5245
5246 contents = data->contents;
5247 veneered_insn_loc = stub_entry->target_section->output_section->vma
5248 + stub_entry->target_section->output_offset
5249 + stub_entry->target_value;
5250 veneer_entry_loc = stub_entry->stub_sec->output_section->vma
5251 + stub_entry->stub_sec->output_offset
5252 + stub_entry->stub_offset;
5253 branch_offset = veneer_entry_loc - veneered_insn_loc;
5254
5255 abfd = stub_entry->target_section->owner;
5256 if (!aarch64_valid_branch_p (veneer_entry_loc, veneered_insn_loc))
5257 _bfd_error_handler
5258 (_("%pB: error: erratum 835769 stub out "
5259 "of range (input file too large)"), abfd);
5260
5261 target = stub_entry->target_value;
5262 branch_insn = 0x14000000;
5263 branch_offset >>= 2;
5264 branch_offset &= 0x3ffffff;
5265 branch_insn |= branch_offset;
5266 bfd_putl32 (branch_insn, &contents[target]);
5267
5268 return true;
5269 }
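/* A sketch of the branch encoding used above: an unconditional B instruction
   is 0x14000000 with a signed 26-bit word offset in its low bits.  For a
   veneer placed 0x1000 bytes after the patched instruction, branch_offset is
   0x1000 and the emitted word is 0x14000000 | (0x1000 >> 2) = 0x14000400.  */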
5270
5271
5272 static bool
5273 _bfd_aarch64_erratum_843419_branch_to_stub (struct bfd_hash_entry *gen_entry,
5274 void *in_arg)
5275 {
5276 struct elf_aarch64_stub_hash_entry *stub_entry
5277 = (struct elf_aarch64_stub_hash_entry *) gen_entry;
5278 struct erratum_835769_branch_to_stub_data *data
5279 = (struct erratum_835769_branch_to_stub_data *) in_arg;
5280 struct bfd_link_info *info;
5281 struct elf_aarch64_link_hash_table *htab;
5282 bfd_byte *contents;
5283 asection *section;
5284 bfd *abfd;
5285 bfd_vma place;
5286 uint32_t insn;
5287
5288 info = data->info;
5289 contents = data->contents;
5290 section = data->output_section;
5291
5292 htab = elf_aarch64_hash_table (info);
5293
5294 if (stub_entry->target_section != section
5295 || stub_entry->stub_type != aarch64_stub_erratum_843419_veneer)
5296 return true;
5297
5298 BFD_ASSERT (((htab->fix_erratum_843419 & ERRAT_ADRP) && stub_entry->stub_sec)
5299 || (htab->fix_erratum_843419 & ERRAT_ADR));
5300
5301 /* Only update the stub section if we have one. We should always have one if
5302 we're allowed to use the ADRP erratum workaround; otherwise it is not
5303 required. */
5304 if (stub_entry->stub_sec)
5305 {
5306 insn = bfd_getl32 (contents + stub_entry->target_value);
5307 bfd_putl32 (insn,
5308 stub_entry->stub_sec->contents + stub_entry->stub_offset);
5309 }
5310
5311 place = (section->output_section->vma + section->output_offset
5312 + stub_entry->adrp_offset);
5313 insn = bfd_getl32 (contents + stub_entry->adrp_offset);
5314
5315 if (!_bfd_aarch64_adrp_p (insn))
5316 abort ();
5317
5318 bfd_signed_vma imm =
5319 (_bfd_aarch64_sign_extend
5320 ((bfd_vma) _bfd_aarch64_decode_adrp_imm (insn) << 12, 33)
5321 - (place & 0xfff));
5322
5323 if ((htab->fix_erratum_843419 & ERRAT_ADR)
5324 && (imm >= AARCH64_MIN_ADRP_IMM && imm <= AARCH64_MAX_ADRP_IMM))
5325 {
5326 insn = (_bfd_aarch64_reencode_adr_imm (AARCH64_ADR_OP, imm)
5327 | AARCH64_RT (insn));
5328 bfd_putl32 (insn, contents + stub_entry->adrp_offset);
5329 /* The stub is not needed, so mark it as none and don't output it. */
5330 stub_entry->stub_type = aarch64_stub_none;
5331 }
5332 else if (htab->fix_erratum_843419 & ERRAT_ADRP)
5333 {
5334 bfd_vma veneered_insn_loc;
5335 bfd_vma veneer_entry_loc;
5336 bfd_signed_vma branch_offset;
5337 uint32_t branch_insn;
5338
5339 veneered_insn_loc = stub_entry->target_section->output_section->vma
5340 + stub_entry->target_section->output_offset
5341 + stub_entry->target_value;
5342 veneer_entry_loc = stub_entry->stub_sec->output_section->vma
5343 + stub_entry->stub_sec->output_offset
5344 + stub_entry->stub_offset;
5345 branch_offset = veneer_entry_loc - veneered_insn_loc;
5346
5347 abfd = stub_entry->target_section->owner;
5348 if (!aarch64_valid_branch_p (veneer_entry_loc, veneered_insn_loc))
5349 _bfd_error_handler
5350 (_("%pB: error: erratum 843419 stub out "
5351 "of range (input file too large)"), abfd);
5352
5353 branch_insn = 0x14000000;
5354 branch_offset >>= 2;
5355 branch_offset &= 0x3ffffff;
5356 branch_insn |= branch_offset;
5357 bfd_putl32 (branch_insn, contents + stub_entry->target_value);
5358 }
5359 else
5360 {
5361 abfd = stub_entry->target_section->owner;
5362 _bfd_error_handler
5363 (_("%pB: error: erratum 843419 immediate 0x%" PRIx64
5364 " out of range for ADR (input file too large) and "
5365 "--fix-cortex-a53-843419=adr used. Run the linker with "
5366 "--fix-cortex-a53-843419=full instead"),
5367 abfd, (uint64_t) (bfd_vma) imm);
5368 bfd_set_error (bfd_error_bad_value);
5369 /* This function is called inside a hashtable traversal and the error
5370 handlers called above turn into non-fatal errors, which means that in
5371 this case ld returns exit code 0 and also produces a broken object
5372 file. To prevent this, issue a hard abort. */
5373 BFD_FAIL ();
5374 }
5375 return true;
5376 }
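/* A sketch of the ADR rewrite above: an ADRP at PLACE produces
   (PLACE & ~0xfff) + (simm21 << 12), so an equivalent ADR needs the
   immediate (simm21 << 12) - (PLACE & 0xfff), which is what IMM computes.
   For instance, with PLACE = 0x400a24 and an ADRP immediate of one page,
   IMM is 0x1000 - 0xa24 = 0x5dc, well inside the +/-1MiB ADR range, so no
   veneer is required.  */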
5377
5378
5379 static bool
5380 elfNN_aarch64_write_section (bfd *output_bfd ATTRIBUTE_UNUSED,
5381 struct bfd_link_info *link_info,
5382 asection *sec,
5383 bfd_byte *contents)
5384
5385 {
5386 struct elf_aarch64_link_hash_table *globals =
5387 elf_aarch64_hash_table (link_info);
5388
5389 if (globals == NULL)
5390 return false;
5391
5392 /* Fix code to point to erratum 835769 stubs. */
5393 if (globals->fix_erratum_835769)
5394 {
5395 struct erratum_835769_branch_to_stub_data data;
5396
5397 data.info = link_info;
5398 data.output_section = sec;
5399 data.contents = contents;
5400 bfd_hash_traverse (&globals->stub_hash_table,
5401 make_branch_to_erratum_835769_stub, &data);
5402 }
5403
5404 if (globals->fix_erratum_843419)
5405 {
5406 struct erratum_835769_branch_to_stub_data data;
5407
5408 data.info = link_info;
5409 data.output_section = sec;
5410 data.contents = contents;
5411 bfd_hash_traverse (&globals->stub_hash_table,
5412 _bfd_aarch64_erratum_843419_branch_to_stub, &data);
5413 }
5414
5415 return false;
5416 }
5417
5418 /* Return TRUE if RELOC is a relocation against the base of the GOT table. */
5419
5420 static bool
5421 aarch64_relocation_aginst_gp_p (bfd_reloc_code_real_type reloc)
5422 {
5423 return (reloc == BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14
5424 || reloc == BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15
5425 || reloc == BFD_RELOC_AARCH64_LD64_GOTOFF_LO15
5426 || reloc == BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC
5427 || reloc == BFD_RELOC_AARCH64_MOVW_GOTOFF_G1);
5428 }
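/* These relocations describe GOT slot addresses relative to the GOT itself
   rather than to the place being relocated, e.g. (sketch of one small-GOT
   access sequence):
       adrp x0, _GLOBAL_OFFSET_TABLE_
       ldr  x0, [x0, #:gotpage_lo15:sym]
   which is why the callers below pass the address of the GOT as the addend
   when resolving them.  */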
5429
5430 /* Perform a relocation as part of a final link. The input relocation type
5431 should be TLS relaxed. */
5432
5433 static bfd_reloc_status_type
5434 elfNN_aarch64_final_link_relocate (reloc_howto_type *howto,
5435 bfd *input_bfd,
5436 bfd *output_bfd,
5437 asection *input_section,
5438 bfd_byte *contents,
5439 Elf_Internal_Rela *rel,
5440 bfd_vma value,
5441 struct bfd_link_info *info,
5442 asection *sym_sec,
5443 struct elf_link_hash_entry *h,
5444 bool *unresolved_reloc_p,
5445 bool save_addend,
5446 bfd_vma *saved_addend,
5447 Elf_Internal_Sym *sym)
5448 {
5449 Elf_Internal_Shdr *symtab_hdr;
5450 unsigned int r_type = howto->type;
5451 bfd_reloc_code_real_type bfd_r_type
5452 = elfNN_aarch64_bfd_reloc_from_howto (howto);
5453 unsigned long r_symndx;
5454 bfd_byte *hit_data = contents + rel->r_offset;
5455 bfd_vma place, off, got_entry_addr = 0;
5456 bfd_signed_vma signed_addend;
5457 struct elf_aarch64_link_hash_table *globals;
5458 bool weak_undef_p;
5459 bool relative_reloc;
5460 asection *base_got;
5461 bfd_vma orig_value = value;
5462 bool resolved_to_zero;
5463 bool abs_symbol_p;
5464
5465 globals = elf_aarch64_hash_table (info);
5466
5467 symtab_hdr = &elf_symtab_hdr (input_bfd);
5468
5469 BFD_ASSERT (is_aarch64_elf (input_bfd));
5470
5471 r_symndx = ELFNN_R_SYM (rel->r_info);
5472
5473 place = input_section->output_section->vma
5474 + input_section->output_offset + rel->r_offset;
5475
5476 /* Get addend, accumulating the addend for consecutive relocs
5477 which refer to the same offset. */
5478 signed_addend = saved_addend ? *saved_addend : 0;
5479 signed_addend += rel->r_addend;
5480
5481 weak_undef_p = (h ? h->root.type == bfd_link_hash_undefweak
5482 : bfd_is_und_section (sym_sec));
5483 abs_symbol_p = h != NULL && bfd_is_abs_symbol (&h->root);
5484
5485
5486 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle
5487 it here if it is defined in a non-shared object. */
5488 if (h != NULL
5489 && h->type == STT_GNU_IFUNC
5490 && h->def_regular)
5491 {
5492 asection *plt;
5493 const char *name;
5494 bfd_vma addend = 0;
5495
5496 if ((input_section->flags & SEC_ALLOC) == 0)
5497 {
5498 /* If this is a SHT_NOTE section without SHF_ALLOC, treat
5499 STT_GNU_IFUNC symbol as STT_FUNC. */
5500 if (elf_section_type (input_section) == SHT_NOTE)
5501 goto skip_ifunc;
5502
5503 /* Dynamic relocs are not propagated for SEC_DEBUGGING
5504 sections because such sections are not SEC_ALLOC and
5505 thus ld.so will not process them. */
5506 if ((input_section->flags & SEC_DEBUGGING) != 0)
5507 return bfd_reloc_ok;
5508
5509 if (h->root.root.string)
5510 name = h->root.root.string;
5511 else
5512 name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym, NULL);
5513 _bfd_error_handler
5514 /* xgettext:c-format */
5515 (_("%pB(%pA+%#" PRIx64 "): "
5516 "unresolvable %s relocation against symbol `%s'"),
5517 input_bfd, input_section, (uint64_t) rel->r_offset,
5518 howto->name, name);
5519 bfd_set_error (bfd_error_bad_value);
5520 return bfd_reloc_notsupported;
5521 }
5522 else if (h->plt.offset == (bfd_vma) -1)
5523 goto bad_ifunc_reloc;
5524
5525 /* STT_GNU_IFUNC symbol must go through PLT. */
5526 plt = globals->root.splt ? globals->root.splt : globals->root.iplt;
5527 value = (plt->output_section->vma + plt->output_offset + h->plt.offset);
5528
5529 switch (bfd_r_type)
5530 {
5531 default:
5532 bad_ifunc_reloc:
5533 if (h->root.root.string)
5534 name = h->root.root.string;
5535 else
5536 name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym,
5537 NULL);
5538 _bfd_error_handler
5539 /* xgettext:c-format */
5540 (_("%pB: relocation %s against STT_GNU_IFUNC "
5541 "symbol `%s' isn't handled by %s"), input_bfd,
5542 howto->name, name, __FUNCTION__);
5543 bfd_set_error (bfd_error_bad_value);
5544 return bfd_reloc_notsupported;
5545
5546 case BFD_RELOC_AARCH64_NN:
5547 if (rel->r_addend != 0)
5548 {
5549 if (h->root.root.string)
5550 name = h->root.root.string;
5551 else
5552 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
5553 sym, NULL);
5554 _bfd_error_handler
5555 /* xgettext:c-format */
5556 (_("%pB: relocation %s against STT_GNU_IFUNC "
5557 "symbol `%s' has non-zero addend: %" PRId64),
5558 input_bfd, howto->name, name, (int64_t) rel->r_addend);
5559 bfd_set_error (bfd_error_bad_value);
5560 return bfd_reloc_notsupported;
5561 }
5562
5563 /* Generate dynamic relocation only when there is a
5564 non-GOT reference in a shared object. */
5565 if (bfd_link_pic (info) && h->non_got_ref)
5566 {
5567 Elf_Internal_Rela outrel;
5568 asection *sreloc;
5569
5570 /* Need a dynamic relocation to get the real function
5571 address. */
5572 outrel.r_offset = _bfd_elf_section_offset (output_bfd,
5573 info,
5574 input_section,
5575 rel->r_offset);
5576 if (outrel.r_offset == (bfd_vma) -1
5577 || outrel.r_offset == (bfd_vma) -2)
5578 abort ();
5579
5580 outrel.r_offset += (input_section->output_section->vma
5581 + input_section->output_offset);
5582
5583 if (h->dynindx == -1
5584 || h->forced_local
5585 || bfd_link_executable (info))
5586 {
5587 /* This symbol is resolved locally. */
5588 outrel.r_info = ELFNN_R_INFO (0, AARCH64_R (IRELATIVE));
5589 outrel.r_addend = (h->root.u.def.value
5590 + h->root.u.def.section->output_section->vma
5591 + h->root.u.def.section->output_offset);
5592 }
5593 else
5594 {
5595 outrel.r_info = ELFNN_R_INFO (h->dynindx, r_type);
5596 outrel.r_addend = 0;
5597 }
5598
5599 sreloc = globals->root.irelifunc;
5600 elf_append_rela (output_bfd, sreloc, &outrel);
5601
5602 /* If this reloc is against an external symbol, we
5603 do not want to fiddle with the addend. Otherwise,
5604 we need to include the symbol value so that it
5605 becomes an addend for the dynamic reloc. For an
5606 internal symbol, the addend has already been updated above. */
5607 return bfd_reloc_ok;
5608 }
5609 /* FALLTHROUGH */
5610 case BFD_RELOC_AARCH64_CALL26:
5611 case BFD_RELOC_AARCH64_JUMP26:
5612 value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type,
5613 place, value,
5614 signed_addend,
5615 weak_undef_p);
5616 return _bfd_aarch64_elf_put_addend (input_bfd, hit_data, bfd_r_type,
5617 howto, value);
5618 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
5619 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
5620 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
5621 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
5622 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
5623 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
5624 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
5625 case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
5626 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
5627 base_got = globals->root.sgot;
5628 off = h->got.offset;
5629
5630 if (base_got == NULL)
5631 abort ();
5632
5633 if (off == (bfd_vma) -1)
5634 {
5635 bfd_vma plt_index;
5636
5637 /* We can't use h->got.offset here to save state, or
5638 even just remember the offset, as finish_dynamic_symbol
5639 would use that as offset into .got. */
5640
5641 if (globals->root.splt != NULL)
5642 {
5643 plt_index = ((h->plt.offset - globals->plt_header_size) /
5644 globals->plt_entry_size);
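/* The first three slots of .got.plt are reserved for the dynamic
   linker, so the GOT slot backing this PLT entry lives at index
   plt_index + 3.  */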
5645 off = (plt_index + 3) * GOT_ENTRY_SIZE;
5646 base_got = globals->root.sgotplt;
5647 }
5648 else
5649 {
5650 plt_index = h->plt.offset / globals->plt_entry_size;
5651 off = plt_index * GOT_ENTRY_SIZE;
5652 base_got = globals->root.igotplt;
5653 }
5654
5655 if (h->dynindx == -1
5656 || h->forced_local
5657 || info->symbolic)
5658 {
5659 /* This references the local definition. We must
5660 initialize this entry in the global offset table.
5661 Since the offset must always be a multiple of 8,
5662 we use the least significant bit to record
5663 whether we have initialized it already.
5664
5665 When doing a dynamic link, we create a .rela.got
5666 relocation entry to initialize the value. This
5667 is done in the finish_dynamic_symbol routine. */
5668 if ((off & 1) != 0)
5669 off &= ~1;
5670 else
5671 {
5672 bfd_put_NN (output_bfd, value,
5673 base_got->contents + off);
5674 /* Note that this is harmless as -1 | 1 still is -1. */
5675 h->got.offset |= 1;
5676 }
5677 }
5678 value = (base_got->output_section->vma
5679 + base_got->output_offset + off);
5680 }
5681 else
5682 value = aarch64_calculate_got_entry_vma (h, globals, info,
5683 value, output_bfd,
5684 unresolved_reloc_p);
5685
5686 if (aarch64_relocation_aginst_gp_p (bfd_r_type))
5687 addend = (globals->root.sgot->output_section->vma
5688 + globals->root.sgot->output_offset);
5689
5690 value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type,
5691 place, value,
5692 addend, weak_undef_p);
5693 return _bfd_aarch64_elf_put_addend (input_bfd, hit_data, bfd_r_type, howto, value);
5694 case BFD_RELOC_AARCH64_ADD_LO12:
5695 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
5696 break;
5697 }
5698 }
5699
5700 skip_ifunc:
5701 resolved_to_zero = (h != NULL
5702 && UNDEFWEAK_NO_DYNAMIC_RELOC (info, h));
5703
5704 switch (bfd_r_type)
5705 {
5706 case BFD_RELOC_AARCH64_NONE:
5707 case BFD_RELOC_AARCH64_TLSDESC_ADD:
5708 case BFD_RELOC_AARCH64_TLSDESC_CALL:
5709 case BFD_RELOC_AARCH64_TLSDESC_LDR:
5710 *unresolved_reloc_p = false;
5711 return bfd_reloc_ok;
5712
5713 case BFD_RELOC_AARCH64_NN:
5714
5715 /* When generating a shared object or relocatable executable, these
5716 relocations are copied into the output file to be resolved at
5717 run time. */
5718 if (((bfd_link_pic (info)
5719 || globals->root.is_relocatable_executable)
5720 && (input_section->flags & SEC_ALLOC)
5721 && (h == NULL
5722 || (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
5723 && !resolved_to_zero)
5724 || h->root.type != bfd_link_hash_undefweak))
5725 /* Or, when creating an executable, we may need to keep relocations
5726 for symbols satisfied by a dynamic library if we manage to avoid
5727 copy relocs for the symbol. */
5728 || (ELIMINATE_COPY_RELOCS
5729 && !bfd_link_pic (info)
5730 && h != NULL
5731 && (input_section->flags & SEC_ALLOC)
5732 && h->dynindx != -1
5733 && !h->non_got_ref
5734 && ((h->def_dynamic
5735 && !h->def_regular)
5736 || h->root.type == bfd_link_hash_undefweak
5737 || h->root.type == bfd_link_hash_undefined)))
5738 {
5739 Elf_Internal_Rela outrel;
5740 bfd_byte *loc;
5741 bool skip, relocate;
5742 asection *sreloc;
5743
5744 *unresolved_reloc_p = false;
5745
5746 skip = false;
5747 relocate = false;
5748
5749 outrel.r_addend = signed_addend;
5750 outrel.r_offset =
5751 _bfd_elf_section_offset (output_bfd, info, input_section,
5752 rel->r_offset);
5753 if (outrel.r_offset == (bfd_vma) - 1)
5754 skip = true;
5755 else if (outrel.r_offset == (bfd_vma) - 2)
5756 {
5757 skip = true;
5758 relocate = true;
5759 }
5760 else if (abs_symbol_p)
5761 {
5762 /* Local absolute symbol. */
5763 skip = (h->forced_local || (h->dynindx == -1));
5764 relocate = skip;
5765 }
5766
5767 outrel.r_offset += (input_section->output_section->vma
5768 + input_section->output_offset);
5769
5770 if (skip)
5771 memset (&outrel, 0, sizeof outrel);
5772 else if (h != NULL
5773 && h->dynindx != -1
5774 && (!bfd_link_pic (info)
5775 || !(bfd_link_pie (info) || SYMBOLIC_BIND (info, h))
5776 || !h->def_regular))
5777 outrel.r_info = ELFNN_R_INFO (h->dynindx, r_type);
5778 else
5779 {
5780 int symbol;
5781
5782 /* On SVR4-ish systems, the dynamic loader cannot
5783 relocate the text and data segments independently,
5784 so the symbol does not matter. */
5785 symbol = 0;
5786 relocate = !globals->no_apply_dynamic_relocs;
5787 outrel.r_info = ELFNN_R_INFO (symbol, AARCH64_R (RELATIVE));
5788 outrel.r_addend += value;
5789 }
5790
5791 sreloc = elf_section_data (input_section)->sreloc;
5792 if (sreloc == NULL || sreloc->contents == NULL)
5793 return bfd_reloc_notsupported;
5794
5795 loc = sreloc->contents + sreloc->reloc_count++ * RELOC_SIZE (globals);
5796 bfd_elfNN_swap_reloca_out (output_bfd, &outrel, loc);
5797
5798 if (sreloc->reloc_count * RELOC_SIZE (globals) > sreloc->size)
5799 {
5800 /* Sanity check that we have previously allocated
5801 sufficient space in the relocation section for the
5802 number of relocations we actually want to emit. */
5803 abort ();
5804 }
5805
5806 /* If this reloc is against an external symbol, we do not want to
5807 fiddle with the addend. Otherwise, we need to include the symbol
5808 value so that it becomes an addend for the dynamic reloc. */
5809 if (!relocate)
5810 return bfd_reloc_ok;
5811
5812 return _bfd_final_link_relocate (howto, input_bfd, input_section,
5813 contents, rel->r_offset, value,
5814 signed_addend);
5815 }
5816 else
5817 value += signed_addend;
5818 break;
5819
5820 case BFD_RELOC_AARCH64_CALL26:
5821 case BFD_RELOC_AARCH64_JUMP26:
5822 {
5823 asection *splt = globals->root.splt;
5824 bool via_plt_p =
5825 splt != NULL && h != NULL && h->plt.offset != (bfd_vma) - 1;
5826
5827 /* A call to an undefined weak symbol is converted to a jump to
5828 the next instruction unless a PLT entry will be created.
5829 The jump to the next instruction is optimized as a NOP.
5830 Do the same for local undefined symbols. */
5831 if (weak_undef_p && ! via_plt_p)
5832 {
5833 bfd_putl32 (INSN_NOP, hit_data);
5834 return bfd_reloc_ok;
5835 }
5836
5837 /* If the call goes through a PLT entry, make sure to
5838 check distance to the right destination address. */
5839 if (via_plt_p)
5840 value = (splt->output_section->vma
5841 + splt->output_offset + h->plt.offset);
5842
5843 /* Check if a stub has to be inserted because the destination
5844 is too far away. */
5845 struct elf_aarch64_stub_hash_entry *stub_entry = NULL;
5846
5847 /* If the branch destination is directed to a PLT stub, "value" is
5848 already the final destination; otherwise we must add signed_addend,
5849 which may be non-zero, for example for a call to a local function
5850 symbol that has been turned into "sec_sym + sec_off", with sec_off
5851 kept in signed_addend. */
5852 if (! aarch64_valid_branch_p (via_plt_p ? value : value + signed_addend,
5853 place))
5854 /* The target is out of reach, so redirect the branch to
5855 the local stub for this function. */
5856 stub_entry = elfNN_aarch64_get_stub_entry (input_section, sym_sec, h,
5857 rel, globals);
5858 if (stub_entry != NULL)
5859 {
5860 value = (stub_entry->stub_offset
5861 + stub_entry->stub_sec->output_offset
5862 + stub_entry->stub_sec->output_section->vma);
5863
5864 /* We have redirected the destination to stub entry address,
5865 so ignore any addend record in the original rela entry. */
5866 signed_addend = 0;
5867 }
5868 }
5869 value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type,
5870 place, value,
5871 signed_addend, weak_undef_p);
5872 *unresolved_reloc_p = false;
5873 break;
5874
5875 case BFD_RELOC_AARCH64_16_PCREL:
5876 case BFD_RELOC_AARCH64_32_PCREL:
5877 case BFD_RELOC_AARCH64_64_PCREL:
5878 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
5879 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
5880 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
5881 case BFD_RELOC_AARCH64_LD_LO19_PCREL:
5882 case BFD_RELOC_AARCH64_MOVW_PREL_G0:
5883 case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
5884 case BFD_RELOC_AARCH64_MOVW_PREL_G1:
5885 case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
5886 case BFD_RELOC_AARCH64_MOVW_PREL_G2:
5887 case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
5888 case BFD_RELOC_AARCH64_MOVW_PREL_G3:
5889 if (bfd_link_pic (info)
5890 && (input_section->flags & SEC_ALLOC) != 0
5891 && (input_section->flags & SEC_READONLY) != 0
5892 && !_bfd_elf_symbol_refs_local_p (h, info, 1))
5893 {
5894 int howto_index = bfd_r_type - BFD_RELOC_AARCH64_RELOC_START;
5895
5896 _bfd_error_handler
5897 /* xgettext:c-format */
5898 (_("%pB: relocation %s against symbol `%s' which may bind "
5899 "externally can not be used when making a shared object; "
5900 "recompile with -fPIC"),
5901 input_bfd, elfNN_aarch64_howto_table[howto_index].name,
5902 h->root.root.string);
5903 bfd_set_error (bfd_error_bad_value);
5904 return bfd_reloc_notsupported;
5905 }
5906 value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type,
5907 place, value,
5908 signed_addend,
5909 weak_undef_p);
5910 break;
5911
5912 case BFD_RELOC_AARCH64_BRANCH19:
5913 case BFD_RELOC_AARCH64_TSTBR14:
5914 if (h && h->root.type == bfd_link_hash_undefined)
5915 {
5916 _bfd_error_handler
5917 /* xgettext:c-format */
5918 (_("%pB: conditional branch to undefined symbol `%s' "
5919 "not allowed"), input_bfd, h->root.root.string);
5920 bfd_set_error (bfd_error_bad_value);
5921 return bfd_reloc_notsupported;
5922 }
5923 /* Fall through. */
5924
5925 case BFD_RELOC_AARCH64_16:
5926 #if ARCH_SIZE == 64
5927 case BFD_RELOC_AARCH64_32:
5928 #endif
5929 case BFD_RELOC_AARCH64_ADD_LO12:
5930 case BFD_RELOC_AARCH64_LDST128_LO12:
5931 case BFD_RELOC_AARCH64_LDST16_LO12:
5932 case BFD_RELOC_AARCH64_LDST32_LO12:
5933 case BFD_RELOC_AARCH64_LDST64_LO12:
5934 case BFD_RELOC_AARCH64_LDST8_LO12:
5935 case BFD_RELOC_AARCH64_MOVW_G0:
5936 case BFD_RELOC_AARCH64_MOVW_G0_NC:
5937 case BFD_RELOC_AARCH64_MOVW_G0_S:
5938 case BFD_RELOC_AARCH64_MOVW_G1:
5939 case BFD_RELOC_AARCH64_MOVW_G1_NC:
5940 case BFD_RELOC_AARCH64_MOVW_G1_S:
5941 case BFD_RELOC_AARCH64_MOVW_G2:
5942 case BFD_RELOC_AARCH64_MOVW_G2_NC:
5943 case BFD_RELOC_AARCH64_MOVW_G2_S:
5944 case BFD_RELOC_AARCH64_MOVW_G3:
5945 value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type,
5946 place, value,
5947 signed_addend, weak_undef_p);
5948 break;
5949
5950 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
5951 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
5952 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
5953 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
5954 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
5955 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
5956 case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
5957 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
5958 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
5959 if (globals->root.sgot == NULL)
5960 BFD_ASSERT (h != NULL);
5961
5962 relative_reloc = false;
5963 if (h != NULL)
5964 {
5965 bfd_vma addend = 0;
5966
5967 /* If a symbol is not dynamic and is not undefined weak, bind it
5968 locally and generate a RELATIVE relocation when producing PIC.
5969 
5970 NOTE: one symbol may be referenced by several relocations; we
5971 should only generate one RELATIVE relocation for that symbol.
5972 Therefore, check the GOT offset mark first. */
5973 if (h->dynindx == -1
5974 && !h->forced_local
5975 && h->root.type != bfd_link_hash_undefweak
5976 && bfd_link_pic (info)
5977 && !symbol_got_offset_mark_p (input_bfd, h, r_symndx))
5978 relative_reloc = true;
5979
5980 value = aarch64_calculate_got_entry_vma (h, globals, info, value,
5981 output_bfd,
5982 unresolved_reloc_p);
5983 /* Record the GOT entry address which will be used when generating
5984 RELATIVE relocation. */
5985 if (relative_reloc)
5986 got_entry_addr = value;
5987
5988 if (aarch64_relocation_aginst_gp_p (bfd_r_type))
5989 addend = (globals->root.sgot->output_section->vma
5990 + globals->root.sgot->output_offset);
5991 value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type,
5992 place, value,
5993 addend, weak_undef_p);
5994 }
5995 else
5996 {
5997 bfd_vma addend = 0;
5998 struct elf_aarch64_local_symbol *locals
5999 = elf_aarch64_locals (input_bfd);
6000
6001 if (locals == NULL)
6002 {
6003 int howto_index = bfd_r_type - BFD_RELOC_AARCH64_RELOC_START;
6004 _bfd_error_handler
6005 /* xgettext:c-format */
6006 (_("%pB: local symbol descriptor table is NULL when applying "
6007 "relocation %s against local symbol"),
6008 input_bfd, elfNN_aarch64_howto_table[howto_index].name);
6009 abort ();
6010 }
6011
6012 off = symbol_got_offset (input_bfd, h, r_symndx);
6013 base_got = globals->root.sgot;
6014 got_entry_addr = (base_got->output_section->vma
6015 + base_got->output_offset + off);
6016
6017 if (!symbol_got_offset_mark_p (input_bfd, h, r_symndx))
6018 {
6019 bfd_put_64 (output_bfd, value, base_got->contents + off);
6020
6021 /* For a local symbol, the absolute relocation has been done at static
6022 link time. For a shared library, however, the content of the GOT
6023 entry must be updated according to the shared object's runtime base
6024 address, so we generate an R_AARCH64_RELATIVE reloc for the dynamic
6025 linker. */
6026 if (bfd_link_pic (info))
6027 relative_reloc = true;
6028
6029 symbol_got_offset_mark (input_bfd, h, r_symndx);
6030 }
6031
6032 /* Update the relocation value to the GOT entry address, as we have
6033 transformed the direct data access into an indirect access through the GOT. */
6034 value = got_entry_addr;
6035
6036 if (aarch64_relocation_aginst_gp_p (bfd_r_type))
6037 addend = base_got->output_section->vma + base_got->output_offset;
6038
6039 value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type,
6040 place, value,
6041 addend, weak_undef_p);
6042 }
6043
6044 if (relative_reloc)
6045 {
6046 asection *s;
6047 Elf_Internal_Rela outrel;
6048
6049 s = globals->root.srelgot;
6050 if (s == NULL)
6051 abort ();
6052
6053 outrel.r_offset = got_entry_addr;
6054 outrel.r_info = ELFNN_R_INFO (0, AARCH64_R (RELATIVE));
6055 outrel.r_addend = orig_value;
6056 elf_append_rela (output_bfd, s, &outrel);
6057 }
6058 break;
6059
6060 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
6061 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
6062 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
6063 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
6064 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
6065 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
6066 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
6067 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
6068 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
6069 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
6070 if (globals->root.sgot == NULL)
6071 return bfd_reloc_notsupported;
6072
6073 value = (symbol_got_offset (input_bfd, h, r_symndx)
6074 + globals->root.sgot->output_section->vma
6075 + globals->root.sgot->output_offset);
6076
6077 value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type,
6078 place, value,
6079 0, weak_undef_p);
6080 *unresolved_reloc_p = false;
6081 break;
6082
6083 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
6084 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
6085 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
6086 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
6087 if (globals->root.sgot == NULL)
6088 return bfd_reloc_notsupported;
6089
6090 value = symbol_got_offset (input_bfd, h, r_symndx);
6091 value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type,
6092 place, value,
6093 0, weak_undef_p);
6094 *unresolved_reloc_p = false;
6095 break;
6096
6097 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
6098 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
6099 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
6100 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
6101 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
6102 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
6103 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
6104 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
6105 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
6106 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
6107 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
6108 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
6109 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
6110 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
6111 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
6112 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
6113 {
6114 if (!(weak_undef_p || elf_hash_table (info)->tls_sec))
6115 {
6116 int howto_index = bfd_r_type - BFD_RELOC_AARCH64_RELOC_START;
6117 _bfd_error_handler
6118 /* xgettext:c-format */
6119 (_("%pB: TLS relocation %s against undefined symbol `%s'"),
6120 input_bfd, elfNN_aarch64_howto_table[howto_index].name,
6121 h->root.root.string);
6122 bfd_set_error (bfd_error_bad_value);
6123 return bfd_reloc_notsupported;
6124 }
6125
6126 bfd_vma def_value
6127 = weak_undef_p ? 0 : signed_addend - dtpoff_base (info);
6128 value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type,
6129 place, value,
6130 def_value, weak_undef_p);
6131 break;
6132 }
6133
6134 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
6135 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
6136 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
6137 case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
6138 case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
6139 case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
6140 case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
6141 case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
6142 case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
6143 case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
6144 case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
6145 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
6146 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
6147 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
6148 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
6149 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
6150 {
6151 if (!(weak_undef_p || elf_hash_table (info)->tls_sec))
6152 {
6153 int howto_index = bfd_r_type - BFD_RELOC_AARCH64_RELOC_START;
6154 _bfd_error_handler
6155 /* xgettext:c-format */
6156 (_("%pB: TLS relocation %s against undefined symbol `%s'"),
6157 input_bfd, elfNN_aarch64_howto_table[howto_index].name,
6158 h->root.root.string);
6159 bfd_set_error (bfd_error_bad_value);
6160 return bfd_reloc_notsupported;
6161 }
6162
6163 bfd_vma def_value
6164 = weak_undef_p ? 0 : signed_addend - tpoff_base (info);
6165 value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type,
6166 place, value,
6167 def_value, weak_undef_p);
6168 *unresolved_reloc_p = false;
6169 break;
6170 }
6171
6172 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
6173 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
6174 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
6175 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
6176 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
6177 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
6178 if (globals->root.sgot == NULL)
6179 return bfd_reloc_notsupported;
6180 value = (symbol_tlsdesc_got_offset (input_bfd, h, r_symndx)
6181 + globals->root.sgotplt->output_section->vma
6182 + globals->root.sgotplt->output_offset
6183 + globals->sgotplt_jump_table_size);
6184
6185 value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type,
6186 place, value,
6187 0, weak_undef_p);
6188 *unresolved_reloc_p = false;
6189 break;
6190
6191 case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
6192 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
6193 if (globals->root.sgot == NULL)
6194 return bfd_reloc_notsupported;
6195
6196 value = (symbol_tlsdesc_got_offset (input_bfd, h, r_symndx)
6197 + globals->root.sgotplt->output_section->vma
6198 + globals->root.sgotplt->output_offset
6199 + globals->sgotplt_jump_table_size);
6200
6201 value -= (globals->root.sgot->output_section->vma
6202 + globals->root.sgot->output_offset);
6203
6204 value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type,
6205 place, value,
6206 0, weak_undef_p);
6207 *unresolved_reloc_p = false;
6208 break;
6209
6210 default:
6211 return bfd_reloc_notsupported;
6212 }
6213
6214 if (saved_addend)
6215 *saved_addend = value;
6216
6217 /* Only apply the final relocation in a sequence. */
6218 if (save_addend)
6219 return bfd_reloc_continue;
6220
6221 return _bfd_aarch64_elf_put_addend (input_bfd, hit_data, bfd_r_type,
6222 howto, value);
6223 }
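/* A note on the addend chaining used above: for a run of relocations that
   share one r_offset, each call is made with SAVE_ADDEND set, stores its
   result through *SAVED_ADDEND and returns bfd_reloc_continue; only the
   final call of the run, made with SAVE_ADDEND clear, actually patches the
   section contents.  */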
6224
6225 /* LP64 and ILP32 operate on x- and w-registers respectively.
6226 The following definitions take into account the difference between
6227 the corresponding machine encodings. R means an x-register if the
6228 target arch is LP64, and a w-register if the target is ILP32. */
6229
6230 #if ARCH_SIZE == 64
6231 # define add_R0_R0 (0x91000000)
6232 # define add_R0_R0_R1 (0x8b000020)
6233 # define add_R0_R1 (0x91400020)
6234 # define ldr_R0 (0x58000000)
6235 # define ldr_R0_mask(i) (i & 0xffffffe0)
6236 # define ldr_R0_x0 (0xf9400000)
6237 # define ldr_hw_R0 (0xf2a00000)
6238 # define movk_R0 (0xf2800000)
6239 # define movz_R0 (0xd2a00000)
6240 # define movz_hw_R0 (0xd2c00000)
6241 #else /* ARCH_SIZE == 32 */
6242 # define add_R0_R0 (0x11000000)
6243 # define add_R0_R0_R1 (0x0b000020)
6244 # define add_R0_R1 (0x11400020)
6245 # define ldr_R0 (0x18000000)
6246 # define ldr_R0_mask(i) (i & 0xbfffffe0)
6247 # define ldr_R0_x0 (0xb9400000)
6248 # define ldr_hw_R0 (0x72a00000)
6249 # define movk_R0 (0x72800000)
6250 # define movz_R0 (0x52a00000)
6251 # define movz_hw_R0 (0x52c00000)
6252 #endif
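/* For reference, a few of the LP64 encodings above decoded (immediate and
   offset fields are zero here and are filled in when the relaxed relocation
   is subsequently applied):
     movz_R0    0xd2a00000   movz x0, #0, lsl #16
     movk_R0    0xf2800000   movk x0, #0
     ldr_R0     0x58000000   ldr  x0, <pc-relative literal>
     ldr_R0_x0  0xf9400000   ldr  x0, [x0]  */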
6253
6254 /* Structure to hold the payload for _bfd_aarch64_erratum_843419_clear_stub;
6255 it is used to identify the stub information to reset. */
6256
6257 struct erratum_843419_branch_to_stub_clear_data
6258 {
6259 bfd_vma adrp_offset;
6260 asection *output_section;
6261 };
6262
6263 /* Clear the erratum information for GEN_ENTRY if the ADRP_OFFSET and
6264 section inside IN_ARG match. The clearing is done by setting the
6265 stub_type to none. */
6266
6267 static bool
6268 _bfd_aarch64_erratum_843419_clear_stub (struct bfd_hash_entry *gen_entry,
6269 void *in_arg)
6270 {
6271 struct elf_aarch64_stub_hash_entry *stub_entry
6272 = (struct elf_aarch64_stub_hash_entry *) gen_entry;
6273 struct erratum_843419_branch_to_stub_clear_data *data
6274 = (struct erratum_843419_branch_to_stub_clear_data *) in_arg;
6275
6276 if (stub_entry->target_section != data->output_section
6277 || stub_entry->stub_type != aarch64_stub_erratum_843419_veneer
6278 || stub_entry->adrp_offset != data->adrp_offset)
6279 return true;
6280
6281 /* Change the stub type instead of removing the entry: removing it from the
6282 hash table would be slower, and we have already reserved the memory for the
6283 entry, so there wouldn't be much gain. Changing the stub type also keeps a
6284 record of what was there before. */
6285 stub_entry->stub_type = aarch64_stub_none;
6286
6287 /* We're done and there could have been only one matching stub at that
6288 particular offset, so abort further traversal. */
6289 return false;
6290 }
6291
6292 /* TLS relaxation may rewrite an adrp instruction that matches the erratum
6293 843419 sequence. In this case the erratum no longer applies and we need to
6294 remove the entry from pending stub generation. This clears the entry for the
6295 adrp insn at ADRP_OFFSET in INPUT_SECTION from the stub table defined in GLOBALS. */
6296
6297 static void
6298 clear_erratum_843419_entry (struct elf_aarch64_link_hash_table *globals,
6299 bfd_vma adrp_offset, asection *input_section)
6300 {
6301 if (globals->fix_erratum_843419 & ERRAT_ADRP)
6302 {
6303 struct erratum_843419_branch_to_stub_clear_data data;
6304 data.adrp_offset = adrp_offset;
6305 data.output_section = input_section;
6306
6307 bfd_hash_traverse (&globals->stub_hash_table,
6308 _bfd_aarch64_erratum_843419_clear_stub, &data);
6309 }
6310 }
6311
6312 /* Handle TLS relaxations. Relaxing is possible for symbols that use
6313 R_AARCH64_TLSDESC_{ADR_PAGE21, LD64_LO12, ADD_LO12} during a static
6314 link.
6315
6316 Return bfd_reloc_ok if we're done, bfd_reloc_continue if the caller
6317 is to then call final_link_relocate. Return other values in the
6318 case of error. */
6319
6320 static bfd_reloc_status_type
6321 elfNN_aarch64_tls_relax (struct elf_aarch64_link_hash_table *globals,
6322 bfd *input_bfd, asection *input_section,
6323 bfd_byte *contents, Elf_Internal_Rela *rel,
6324 struct elf_link_hash_entry *h,
6325 struct bfd_link_info *info)
6326 {
6327 bool local_exec = bfd_link_executable (info)
6328 && SYMBOL_REFERENCES_LOCAL (info, h);
6329 unsigned int r_type = ELFNN_R_TYPE (rel->r_info);
6330 unsigned long insn;
6331
6332 BFD_ASSERT (globals && input_bfd && contents && rel);
6333
6334 switch (elfNN_aarch64_bfd_reloc_from_type (input_bfd, r_type))
6335 {
6336 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
6337 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
6338 if (local_exec)
6339 {
6340 /* GD->LE relaxation:
6341 adrp x0, :tlsgd:var => movz R0, :tprel_g1:var
6342 or
6343 adrp x0, :tlsdesc:var => movz R0, :tprel_g1:var
6344
6345 Where R is x for LP64, and w for ILP32. */
6346 bfd_putl32 (movz_R0, contents + rel->r_offset);
6347 /* We have relaxed the adrp into a mov, we may have to clear any
6348 pending erratum fixes. */
6349 clear_erratum_843419_entry (globals, rel->r_offset, input_section);
6350 return bfd_reloc_continue;
6351 }
6352 else
6353 {
6354 /* GD->IE relaxation:
6355 adrp x0, :tlsgd:var => adrp x0, :gottprel:var
6356 or
6357 adrp x0, :tlsdesc:var => adrp x0, :gottprel:var
6358 */
6359 return bfd_reloc_continue;
6360 }
6361
6362 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
6363 BFD_ASSERT (0);
6364 break;
6365
6366 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
6367 if (local_exec)
6368 {
6369 /* Tiny TLSDESC->LE relaxation:
6370 ldr x1, :tlsdesc:var => movz R0, #:tprel_g1:var
6371 adr x0, :tlsdesc:var => movk R0, #:tprel_g0_nc:var
6372 .tlsdesccall var
6373 blr x1 => nop
6374
6375 Where R is x for LP64, and w for ILP32. */
6376 BFD_ASSERT (ELFNN_R_TYPE (rel[1].r_info) == AARCH64_R (TLSDESC_ADR_PREL21));
6377 BFD_ASSERT (ELFNN_R_TYPE (rel[2].r_info) == AARCH64_R (TLSDESC_CALL));
6378
6379 rel[1].r_info = ELFNN_R_INFO (ELFNN_R_SYM (rel->r_info),
6380 AARCH64_R (TLSLE_MOVW_TPREL_G0_NC));
6381 rel[2].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
6382
6383 bfd_putl32 (movz_R0, contents + rel->r_offset);
6384 bfd_putl32 (movk_R0, contents + rel->r_offset + 4);
6385 bfd_putl32 (INSN_NOP, contents + rel->r_offset + 8);
6386 return bfd_reloc_continue;
6387 }
6388 else
6389 {
6390 /* Tiny TLSDESC->IE relaxation:
6391 ldr x1, :tlsdesc:var => ldr x0, :gottprel:var
6392 adr x0, :tlsdesc:var => nop
6393 .tlsdesccall var
6394 blr x1 => nop
6395 */
6396 BFD_ASSERT (ELFNN_R_TYPE (rel[1].r_info) == AARCH64_R (TLSDESC_ADR_PREL21));
6397 BFD_ASSERT (ELFNN_R_TYPE (rel[2].r_info) == AARCH64_R (TLSDESC_CALL));
6398
6399 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
6400 rel[2].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
6401
6402 bfd_putl32 (ldr_R0, contents + rel->r_offset);
6403 bfd_putl32 (INSN_NOP, contents + rel->r_offset + 4);
6404 bfd_putl32 (INSN_NOP, contents + rel->r_offset + 8);
6405 return bfd_reloc_continue;
6406 }
6407
6408 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
6409 if (local_exec)
6410 {
6411 /* Tiny GD->LE relaxation:
6412 adr x0, :tlsgd:var => mrs x1, tpidr_el0
6413 bl __tls_get_addr => add R0, R1, #:tprel_hi12:x, lsl #12
6414 nop => add R0, R0, #:tprel_lo12_nc:x
6415
6416 Where R is x for LP64, and w for ILP32. */
6417
6418 /* First kill the tls_get_addr reloc on the bl instruction. */
6419 BFD_ASSERT (rel->r_offset + 4 == rel[1].r_offset);
6420
6421 bfd_putl32 (0xd53bd041, contents + rel->r_offset + 0);
6422 bfd_putl32 (add_R0_R1, contents + rel->r_offset + 4);
6423 bfd_putl32 (add_R0_R0, contents + rel->r_offset + 8);
6424
6425 rel[1].r_info = ELFNN_R_INFO (ELFNN_R_SYM (rel->r_info),
6426 AARCH64_R (TLSLE_ADD_TPREL_LO12_NC));
6427 rel[1].r_offset = rel->r_offset + 8;
6428
6429 /* Move the current relocation to the second instruction in
6430 the sequence. */
6431 rel->r_offset += 4;
6432 rel->r_info = ELFNN_R_INFO (ELFNN_R_SYM (rel->r_info),
6433 AARCH64_R (TLSLE_ADD_TPREL_HI12));
6434 return bfd_reloc_continue;
6435 }
6436 else
6437 {
6438 /* Tiny GD->IE relaxation:
6439 adr x0, :tlsgd:var => ldr R0, :gottprel:var
6440 bl __tls_get_addr => mrs x1, tpidr_el0
6441 nop => add R0, R0, R1
6442
6443 Where R is x for LP64, and w for Ilp32. */
6444
6445 /* First kill the tls_get_addr reloc on the bl instruction. */
6446 BFD_ASSERT (rel->r_offset + 4 == rel[1].r_offset);
6447 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
6448
6449 bfd_putl32 (ldr_R0, contents + rel->r_offset);
6450 bfd_putl32 (0xd53bd041, contents + rel->r_offset + 4);
6451 bfd_putl32 (add_R0_R0_R1, contents + rel->r_offset + 8);
6452 return bfd_reloc_continue;
6453 }
6454
6455 #if ARCH_SIZE == 64
6456 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
6457 BFD_ASSERT (ELFNN_R_TYPE (rel[1].r_info) == AARCH64_R (TLSGD_MOVW_G0_NC));
6458 BFD_ASSERT (rel->r_offset + 12 == rel[2].r_offset);
6459 BFD_ASSERT (ELFNN_R_TYPE (rel[2].r_info) == AARCH64_R (CALL26));
6460
6461 if (local_exec)
6462 {
6463 /* Large GD->LE relaxation:
6464 movz x0, #:tlsgd_g1:var => movz x0, #:tprel_g2:var, lsl #32
6465 movk x0, #:tlsgd_g0_nc:var => movk x0, #:tprel_g1_nc:var, lsl #16
6466 add x0, gp, x0 => movk x0, #:tprel_g0_nc:var
6467 bl __tls_get_addr => mrs x1, tpidr_el0
6468 nop => add x0, x0, x1
6469 */
6470 rel[2].r_info = ELFNN_R_INFO (ELFNN_R_SYM (rel->r_info),
6471 AARCH64_R (TLSLE_MOVW_TPREL_G0_NC));
6472 rel[2].r_offset = rel->r_offset + 8;
6473
6474 bfd_putl32 (movz_hw_R0, contents + rel->r_offset + 0);
6475 bfd_putl32 (ldr_hw_R0, contents + rel->r_offset + 4);
6476 bfd_putl32 (movk_R0, contents + rel->r_offset + 8);
6477 bfd_putl32 (0xd53bd041, contents + rel->r_offset + 12);
6478 bfd_putl32 (add_R0_R0_R1, contents + rel->r_offset + 16);
6479 }
6480 else
6481 {
6482 /* Large GD->IE relaxation:
6483 movz x0, #:tlsgd_g1:var => movz x0, #:gottprel_g1:var, lsl #16
6484 movk x0, #:tlsgd_g0_nc:var => movk x0, #:gottprel_g0_nc:var
6485 add x0, gp, x0 => ldr x0, [gp, x0]
6486 bl __tls_get_addr => mrs x1, tpidr_el0
6487 nop => add x0, x0, x1
6488 */
6489 rel[2].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
6490 bfd_putl32 (0xd2a80000, contents + rel->r_offset + 0);
6491 bfd_putl32 (ldr_R0, contents + rel->r_offset + 8);
6492 bfd_putl32 (0xd53bd041, contents + rel->r_offset + 12);
6493 bfd_putl32 (add_R0_R0_R1, contents + rel->r_offset + 16);
6494 }
6495 return bfd_reloc_continue;
6496
6497 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
6498 return bfd_reloc_continue;
6499 #endif
6500
6501 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
6502 return bfd_reloc_continue;
6503
6504 case BFD_RELOC_AARCH64_TLSDESC_LDNN_LO12_NC:
6505 if (local_exec)
6506 {
6507 /* GD->LE relaxation:
6508 ldr xd, [x0, #:tlsdesc_lo12:var] => movk x0, :tprel_g0_nc:var
6509
6510 Where R is x for lp64 mode, and w for ILP32 mode. */
6511 bfd_putl32 (movk_R0, contents + rel->r_offset);
6512 return bfd_reloc_continue;
6513 }
6514 else
6515 {
6516 /* GD->IE relaxation:
6517 ldr xd, [x0, #:tlsdesc_lo12:var] => ldr R0, [x0, #:gottprel_lo12:var]
6518
6519 Where R is x for lp64 mode, and w for ILP32 mode. */
6520 insn = bfd_getl32 (contents + rel->r_offset);
6521 bfd_putl32 (ldr_R0_mask (insn), contents + rel->r_offset);
6522 return bfd_reloc_continue;
6523 }
6524
6525 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
6526 if (local_exec)
6527 {
6528 /* GD->LE relaxation
6529 add x0, #:tlsgd_lo12:var => movk R0, :tprel_g0_nc:var
6530 bl __tls_get_addr => mrs x1, tpidr_el0
6531 nop => add R0, R1, R0
6532
6533 Where R is x for lp64 mode, and w for ILP32 mode. */
6534
6535 /* First kill the tls_get_addr reloc on the bl instruction. */
6536 BFD_ASSERT (rel->r_offset + 4 == rel[1].r_offset);
6537 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
6538
6539 bfd_putl32 (movk_R0, contents + rel->r_offset);
6540 bfd_putl32 (0xd53bd041, contents + rel->r_offset + 4);
6541 bfd_putl32 (add_R0_R0_R1, contents + rel->r_offset + 8);
6542 return bfd_reloc_continue;
6543 }
6544 else
6545 {
6546 /* GD->IE relaxation
6547 ADD x0, #:tlsgd_lo12:var => ldr R0, [x0, #:gottprel_lo12:var]
6548 BL __tls_get_addr => mrs x1, tpidr_el0
6549 R_AARCH64_CALL26
6550 NOP => add R0, R1, R0
6551
6552 Where R is x for lp64 mode, and w for ilp32 mode. */
6553
6554 BFD_ASSERT (ELFNN_R_TYPE (rel[1].r_info) == AARCH64_R (CALL26));
6555
6556 /* Remove the relocation on the BL instruction. */
6557 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
6558
6559 /* We choose to fixup the BL and NOP instructions using the
6560 offset from the second relocation to allow flexibility in
6561 scheduling instructions between the ADD and BL. */
6562 bfd_putl32 (ldr_R0_x0, contents + rel->r_offset);
6563 bfd_putl32 (0xd53bd041, contents + rel[1].r_offset);
6564 bfd_putl32 (add_R0_R0_R1, contents + rel[1].r_offset + 4);
6565 return bfd_reloc_continue;
6566 }
6567
6568 case BFD_RELOC_AARCH64_TLSDESC_ADD:
6569 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
6570 case BFD_RELOC_AARCH64_TLSDESC_CALL:
6571 /* GD->IE/LE relaxation:
6572 add x0, x0, #:tlsdesc_lo12:var => nop
6573 blr xd => nop
6574 */
6575 bfd_putl32 (INSN_NOP, contents + rel->r_offset);
6576 return bfd_reloc_ok;
6577
6578 case BFD_RELOC_AARCH64_TLSDESC_LDR:
6579 if (local_exec)
6580 {
6581 /* GD->LE relaxation:
6582 ldr xd, [gp, xn] => movk R0, #:tprel_g0_nc:var
6583
6584 Where R is x for lp64 mode, and w for ILP32 mode. */
6585 bfd_putl32 (movk_R0, contents + rel->r_offset);
6586 return bfd_reloc_continue;
6587 }
6588 else
6589 {
6590 /* GD->IE relaxation:
6591 ldr xd, [gp, xn] => ldr R0, [gp, xn]
6592
6593 Where R is x for lp64 mode, and w for ILP32 mode. */
6594 insn = bfd_getl32 (contents + rel->r_offset);
6595 bfd_putl32 (ldr_R0_mask (insn), contents + rel->r_offset);
6596 return bfd_reloc_ok;
6597 }
6598
6599 case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
6600 /* GD->LE relaxation:
6601 movk xd, #:tlsdesc_off_g0_nc:var => movk R0, #:tprel_g1_nc:var, lsl #16
6602 GD->IE relaxation:
6603 movk xd, #:tlsdesc_off_g0_nc:var => movk Rd, #:gottprel_g0_nc:var
6604
6605 Where R is x for lp64 mode, and w for ILP32 mode. */
6606 if (local_exec)
6607 bfd_putl32 (ldr_hw_R0, contents + rel->r_offset);
6608 return bfd_reloc_continue;
6609
6610 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
6611 if (local_exec)
6612 {
6613 /* GD->LE relaxation:
6614 movz xd, #:tlsdesc_off_g1:var => movz R0, #:tprel_g2:var, lsl #32
6615
6616 Where R is x for lp64 mode, and w for ILP32 mode. */
6617 bfd_putl32 (movz_hw_R0, contents + rel->r_offset);
6618 return bfd_reloc_continue;
6619 }
6620 else
6621 {
6622 /* GD->IE relaxation:
6623 movz xd, #:tlsdesc_off_g1:var => movz Rd, #:gottprel_g1:var, lsl #16
6624
6625 Where R is x for lp64 mode, and w for ILP32 mode. */
6626 insn = bfd_getl32 (contents + rel->r_offset);
6627 bfd_putl32 (movz_R0 | (insn & 0x1f), contents + rel->r_offset);
6628 return bfd_reloc_continue;
6629 }
6630
6631 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
6632 /* IE->LE relaxation:
6633 adrp xd, :gottprel:var => movz Rd, :tprel_g1:var
6634
6635 Where R is x for lp64 mode, and w for ILP32 mode. */
6636 if (local_exec)
6637 {
6638 insn = bfd_getl32 (contents + rel->r_offset);
6639 bfd_putl32 (movz_R0 | (insn & 0x1f), contents + rel->r_offset);
6640 /* We have relaxed the adrp into a mov, we may have to clear any
6641 pending erratum fixes. */
6642 clear_erratum_843419_entry (globals, rel->r_offset, input_section);
6643 }
6644 return bfd_reloc_continue;
6645
6646 case BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC:
6647 /* IE->LE relaxation:
6648 ldr xd, [xm, #:gottprel_lo12:var] => movk Rd, :tprel_g0_nc:var
6649
6650 Where R is x for lp64 mode, and w for ILP32 mode. */
6651 if (local_exec)
6652 {
6653 insn = bfd_getl32 (contents + rel->r_offset);
6654 bfd_putl32 (movk_R0 | (insn & 0x1f), contents + rel->r_offset);
6655 }
6656 return bfd_reloc_continue;
6657
6658 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
6659 /* LD->LE relaxation (tiny):
6660 adr x0, :tlsldm:x => mrs x0, tpidr_el0
6661 bl __tls_get_addr => add R0, R0, TCB_SIZE
6662
6663 Where R is x for lp64 mode, and w for ilp32 mode. */
6664 if (local_exec)
6665 {
6666 BFD_ASSERT (rel->r_offset + 4 == rel[1].r_offset);
6667 BFD_ASSERT (ELFNN_R_TYPE (rel[1].r_info) == AARCH64_R (CALL26));
6668 /* No need of CALL26 relocation for tls_get_addr. */
6669 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
6670 bfd_putl32 (0xd53bd040, contents + rel->r_offset + 0);
6671 bfd_putl32 (add_R0_R0 | (TCB_SIZE << 10),
6672 contents + rel->r_offset + 4);
6673 return bfd_reloc_ok;
6674 }
6675 return bfd_reloc_continue;
6676
6677 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
6678 /* LD->LE relaxation (small):
6679 adrp x0, :tlsldm:x => mrs x0, tpidr_el0
6680 */
6681 if (local_exec)
6682 {
6683 bfd_putl32 (0xd53bd040, contents + rel->r_offset);
6684 return bfd_reloc_ok;
6685 }
6686 return bfd_reloc_continue;
6687
6688 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
6689 /* LD->LE relaxation (small):
6690 add x0, #:tlsldm_lo12:x => add R0, R0, TCB_SIZE
6691 bl __tls_get_addr => nop
6692
6693 Where R is x for lp64 mode, and w for ilp32 mode. */
6694 if (local_exec)
6695 {
6696 BFD_ASSERT (rel->r_offset + 4 == rel[1].r_offset);
6697 BFD_ASSERT (ELFNN_R_TYPE (rel[1].r_info) == AARCH64_R (CALL26));
6698 /* No need of CALL26 relocation for tls_get_addr. */
6699 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
6700 bfd_putl32 (add_R0_R0 | (TCB_SIZE << 10),
6701 contents + rel->r_offset + 0);
6702 bfd_putl32 (INSN_NOP, contents + rel->r_offset + 4);
6703 return bfd_reloc_ok;
6704 }
6705 return bfd_reloc_continue;
6706
6707 default:
6708 return bfd_reloc_continue;
6709 }
6710
6711 return bfd_reloc_ok;
6712 }
6713
6714 /* Relocate an AArch64 ELF section. */
6715
6716 static int
6717 elfNN_aarch64_relocate_section (bfd *output_bfd,
6718 struct bfd_link_info *info,
6719 bfd *input_bfd,
6720 asection *input_section,
6721 bfd_byte *contents,
6722 Elf_Internal_Rela *relocs,
6723 Elf_Internal_Sym *local_syms,
6724 asection **local_sections)
6725 {
6726 Elf_Internal_Shdr *symtab_hdr;
6727 struct elf_link_hash_entry **sym_hashes;
6728 Elf_Internal_Rela *rel;
6729 Elf_Internal_Rela *relend;
6730 const char *name;
6731 struct elf_aarch64_link_hash_table *globals;
6732 bool save_addend = false;
6733 bfd_vma addend = 0;
6734
6735 globals = elf_aarch64_hash_table (info);
6736
6737 symtab_hdr = &elf_symtab_hdr (input_bfd);
6738 sym_hashes = elf_sym_hashes (input_bfd);
6739
6740 rel = relocs;
6741 relend = relocs + input_section->reloc_count;
6742 for (; rel < relend; rel++)
6743 {
6744 unsigned int r_type;
6745 bfd_reloc_code_real_type bfd_r_type;
6746 bfd_reloc_code_real_type relaxed_bfd_r_type;
6747 reloc_howto_type *howto;
6748 unsigned long r_symndx;
6749 Elf_Internal_Sym *sym;
6750 asection *sec;
6751 struct elf_link_hash_entry *h;
6752 bfd_vma relocation;
6753 bfd_reloc_status_type r;
6754 arelent bfd_reloc;
6755 char sym_type;
6756 bool unresolved_reloc = false;
6757 char *error_message = NULL;
6758
6759 r_symndx = ELFNN_R_SYM (rel->r_info);
6760 r_type = ELFNN_R_TYPE (rel->r_info);
6761
6762 bfd_reloc.howto = elfNN_aarch64_howto_from_type (input_bfd, r_type);
6763 howto = bfd_reloc.howto;
6764
6765 if (howto == NULL)
6766 return _bfd_unrecognized_reloc (input_bfd, input_section, r_type);
6767
6768 bfd_r_type = elfNN_aarch64_bfd_reloc_from_howto (howto);
6769
6770 h = NULL;
6771 sym = NULL;
6772 sec = NULL;
6773
6774 if (r_symndx < symtab_hdr->sh_info)
6775 {
6776 sym = local_syms + r_symndx;
6777 sym_type = ELFNN_ST_TYPE (sym->st_info);
6778 sec = local_sections[r_symndx];
6779
6780 /* An object file might have a reference to a local
6781 undefined symbol. This is a daft object file, but we
6782 should at least do something about it. */
6783 if (r_type != R_AARCH64_NONE && r_type != R_AARCH64_NULL
6784 && bfd_is_und_section (sec)
6785 && ELF_ST_BIND (sym->st_info) != STB_WEAK)
6786 (*info->callbacks->undefined_symbol)
6787 (info, bfd_elf_string_from_elf_section
6788 (input_bfd, symtab_hdr->sh_link, sym->st_name),
6789 input_bfd, input_section, rel->r_offset, true);
6790
6791 relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
6792
6793 /* Relocate against local STT_GNU_IFUNC symbol. */
6794 if (!bfd_link_relocatable (info)
6795 && ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC)
6796 {
6797 h = elfNN_aarch64_get_local_sym_hash (globals, input_bfd,
6798 rel, false);
6799 if (h == NULL)
6800 abort ();
6801
6802 /* Set STT_GNU_IFUNC symbol value. */
6803 h->root.u.def.value = sym->st_value;
6804 h->root.u.def.section = sec;
6805 }
6806 }
6807 else
6808 {
6809 bool warned, ignored;
6810
6811 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
6812 r_symndx, symtab_hdr, sym_hashes,
6813 h, sec, relocation,
6814 unresolved_reloc, warned, ignored);
6815
6816 sym_type = h->type;
6817 }
6818
6819 if (sec != NULL && discarded_section (sec))
6820 RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
6821 rel, 1, relend, howto, 0, contents);
6822
6823 if (bfd_link_relocatable (info))
6824 continue;
6825
6826 if (h != NULL)
6827 name = h->root.root.string;
6828 else
6829 {
6830 name = (bfd_elf_string_from_elf_section
6831 (input_bfd, symtab_hdr->sh_link, sym->st_name));
6832 if (name == NULL || *name == '\0')
6833 name = bfd_section_name (sec);
6834 }
6835
6836 if (r_symndx != 0
6837 && r_type != R_AARCH64_NONE
6838 && r_type != R_AARCH64_NULL
6839 && (h == NULL
6840 || h->root.type == bfd_link_hash_defined
6841 || h->root.type == bfd_link_hash_defweak)
6842 && IS_AARCH64_TLS_RELOC (bfd_r_type) != (sym_type == STT_TLS))
6843 {
6844 _bfd_error_handler
6845 ((sym_type == STT_TLS
6846 /* xgettext:c-format */
6847 ? _("%pB(%pA+%#" PRIx64 "): %s used with TLS symbol %s")
6848 /* xgettext:c-format */
6849 : _("%pB(%pA+%#" PRIx64 "): %s used with non-TLS symbol %s")),
6850 input_bfd,
6851 input_section, (uint64_t) rel->r_offset, howto->name, name);
6852 }
6853
6854 /* We relax only if we can see that there can be a valid transition
6855 from one reloc type to another.
6856 We call elfNN_aarch64_final_link_relocate unless we're completely
6857 done, i.e., the relaxation produced the final output we want. */
6858
6859 relaxed_bfd_r_type = aarch64_tls_transition (input_bfd, info, r_type,
6860 h, r_symndx);
6861 if (relaxed_bfd_r_type != bfd_r_type)
6862 {
6863 bfd_r_type = relaxed_bfd_r_type;
6864 howto = elfNN_aarch64_howto_from_bfd_reloc (bfd_r_type);
6865 BFD_ASSERT (howto != NULL);
6866 r_type = howto->type;
6867 r = elfNN_aarch64_tls_relax (globals, input_bfd, input_section,
6868 contents, rel, h, info);
6869 unresolved_reloc = 0;
6870 }
6871 else
6872 r = bfd_reloc_continue;
6873
6874 /* There may be multiple consecutive relocations for the
6875 same offset. In that case we are supposed to treat the
6876 output of each relocation as the addend for the next. */
6877 if (rel + 1 < relend
6878 && rel->r_offset == rel[1].r_offset
6879 && ELFNN_R_TYPE (rel[1].r_info) != R_AARCH64_NONE
6880 && ELFNN_R_TYPE (rel[1].r_info) != R_AARCH64_NULL)
6881 save_addend = true;
6882 else
6883 save_addend = false;
6884
6885 if (r == bfd_reloc_continue)
6886 r = elfNN_aarch64_final_link_relocate (howto, input_bfd, output_bfd,
6887 input_section, contents, rel,
6888 relocation, info, sec,
6889 h, &unresolved_reloc,
6890 save_addend, &addend, sym);
6891
6892 switch (elfNN_aarch64_bfd_reloc_from_type (input_bfd, r_type))
6893 {
6894 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
6895 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
6896 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
6897 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
6898 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
6899 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
6900 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
6901 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
6902 if (! symbol_got_offset_mark_p (input_bfd, h, r_symndx))
6903 {
6904 bool need_relocs = false;
6905 bfd_byte *loc;
6906 int indx;
6907 bfd_vma off;
6908
6909 off = symbol_got_offset (input_bfd, h, r_symndx);
6910 indx = h && h->dynindx != -1 ? h->dynindx : 0;
6911
6912 need_relocs =
6913 (!bfd_link_executable (info) || indx != 0) &&
6914 (h == NULL
6915 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
6916 || h->root.type != bfd_link_hash_undefweak);
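/* Paraphrasing the condition above (descriptive note): dynamic
TLS_DTPMOD/TLS_DTPREL relocations are only needed when the module or
offset cannot be fixed at static link time; a non-default-visibility
undefined weak symbol resolves to zero and needs none. */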
6917
6918 BFD_ASSERT (globals->root.srelgot != NULL);
6919
6920 if (need_relocs)
6921 {
6922 Elf_Internal_Rela rela;
6923 rela.r_info = ELFNN_R_INFO (indx, AARCH64_R (TLS_DTPMOD));
6924 rela.r_addend = 0;
6925 rela.r_offset = globals->root.sgot->output_section->vma +
6926 globals->root.sgot->output_offset + off;
6927
6928
6929 loc = globals->root.srelgot->contents;
6930 loc += globals->root.srelgot->reloc_count++
6931 * RELOC_SIZE (htab);
6932 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
6933
6934 bfd_reloc_code_real_type real_type =
6935 elfNN_aarch64_bfd_reloc_from_type (input_bfd, r_type);
6936
6937 if (real_type == BFD_RELOC_AARCH64_TLSLD_ADR_PREL21
6938 || real_type == BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21
6939 || real_type == BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC)
6940 {
6941 /* For local dynamic, don't generate DTPREL in any case.
6942 Initialize the DTPREL slot to zero, so that the runtime
6943 TLS resolver returns the module base address. */
6944 bfd_put_NN (output_bfd, 0,
6945 globals->root.sgot->contents + off
6946 + GOT_ENTRY_SIZE);
6947 }
6948 else if (indx == 0)
6949 {
6950 bfd_put_NN (output_bfd,
6951 relocation - dtpoff_base (info),
6952 globals->root.sgot->contents + off
6953 + GOT_ENTRY_SIZE);
6954 }
6955 else
6956 {
6957 /* This TLS symbol is global. We emit a
6958 relocation to fixup the tls offset at load
6959 time. */
6960 rela.r_info =
6961 ELFNN_R_INFO (indx, AARCH64_R (TLS_DTPREL));
6962 rela.r_addend = 0;
6963 rela.r_offset =
6964 (globals->root.sgot->output_section->vma
6965 + globals->root.sgot->output_offset + off
6966 + GOT_ENTRY_SIZE);
6967
6968 loc = globals->root.srelgot->contents;
6969 loc += globals->root.srelgot->reloc_count++
6970 * RELOC_SIZE (globals);
6971 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
6972 bfd_put_NN (output_bfd, (bfd_vma) 0,
6973 globals->root.sgot->contents + off
6974 + GOT_ENTRY_SIZE);
6975 }
6976 }
6977 else
6978 {
6979 bfd_put_NN (output_bfd, (bfd_vma) 1,
6980 globals->root.sgot->contents + off);
6981 bfd_put_NN (output_bfd,
6982 relocation - dtpoff_base (info),
6983 globals->root.sgot->contents + off
6984 + GOT_ENTRY_SIZE);
6985 }
6986
6987 symbol_got_offset_mark (input_bfd, h, r_symndx);
6988 }
6989 break;
6990
6991 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
6992 case BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC:
6993 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
6994 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
6995 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
6996 if (! symbol_got_offset_mark_p (input_bfd, h, r_symndx))
6997 {
6998 bool need_relocs = false;
6999 bfd_byte *loc;
7000 int indx;
7001 bfd_vma off;
7002
7003 off = symbol_got_offset (input_bfd, h, r_symndx);
7004
7005 indx = h && h->dynindx != -1 ? h->dynindx : 0;
7006
7007 need_relocs =
7008 (!bfd_link_executable (info) || indx != 0) &&
7009 (h == NULL
7010 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
7011 || h->root.type != bfd_link_hash_undefweak);
7012
7013 BFD_ASSERT (globals->root.srelgot != NULL);
7014
7015 if (need_relocs)
7016 {
7017 Elf_Internal_Rela rela;
7018
7019 if (indx == 0)
7020 rela.r_addend = relocation - dtpoff_base (info);
7021 else
7022 rela.r_addend = 0;
7023
7024 rela.r_info = ELFNN_R_INFO (indx, AARCH64_R (TLS_TPREL));
7025 rela.r_offset = globals->root.sgot->output_section->vma +
7026 globals->root.sgot->output_offset + off;
7027
7028 loc = globals->root.srelgot->contents;
7029 loc += globals->root.srelgot->reloc_count++
7030 * RELOC_SIZE (htab);
7031
7032 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
7033
7034 bfd_put_NN (output_bfd, rela.r_addend,
7035 globals->root.sgot->contents + off);
7036 }
7037 else
7038 bfd_put_NN (output_bfd, relocation - tpoff_base (info),
7039 globals->root.sgot->contents + off);
7040
7041 symbol_got_offset_mark (input_bfd, h, r_symndx);
7042 }
7043 break;
7044
7045 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
7046 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
7047 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
7048 case BFD_RELOC_AARCH64_TLSDESC_LDNN_LO12_NC:
7049 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
7050 case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
7051 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
7052 if (! symbol_tlsdesc_got_offset_mark_p (input_bfd, h, r_symndx))
7053 {
7054 bool need_relocs = false;
7055 int indx = h && h->dynindx != -1 ? h->dynindx : 0;
7056 bfd_vma off = symbol_tlsdesc_got_offset (input_bfd, h, r_symndx);
7057
7058 need_relocs = (h == NULL
7059 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
7060 || h->root.type != bfd_link_hash_undefweak);
7061
7062 BFD_ASSERT (globals->root.srelgot != NULL);
7063 BFD_ASSERT (globals->root.sgot != NULL);
7064
7065 if (need_relocs)
7066 {
7067 bfd_byte *loc;
7068 Elf_Internal_Rela rela;
7069 rela.r_info = ELFNN_R_INFO (indx, AARCH64_R (TLSDESC));
7070
7071 rela.r_addend = 0;
7072 rela.r_offset = (globals->root.sgotplt->output_section->vma
7073 + globals->root.sgotplt->output_offset
7074 + off + globals->sgotplt_jump_table_size);
7075
7076 if (indx == 0)
7077 rela.r_addend = relocation - dtpoff_base (info);
7078
7079 /* Allocate the next available slot in the PLT reloc
7080 section to hold our R_AARCH64_TLSDESC, the next
7081 available slot is determined from reloc_count,
7082 which we step. But note, reloc_count was
7083 artificially moved down while allocating slots for
7084 real PLT relocs such that all of the PLT relocs
7085 will fit above the initial reloc_count and the
7086 extra stuff will fit below. */
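/* Illustration with hypothetical counts: if three JUMP_SLOT relocs
were reserved, reloc_count enters this pass as 3; the PLT relocs are
written into slots 0..2 (indexed by their PLT index elsewhere), while
each R_AARCH64_TLSDESC emitted here lands at slot reloc_count++,
i.e. 3, 4, ... */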
7087 loc = globals->root.srelplt->contents;
7088 loc += globals->root.srelplt->reloc_count++
7089 * RELOC_SIZE (globals);
7090
7091 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
7092
7093 bfd_put_NN (output_bfd, (bfd_vma) 0,
7094 globals->root.sgotplt->contents + off +
7095 globals->sgotplt_jump_table_size);
7096 bfd_put_NN (output_bfd, (bfd_vma) 0,
7097 globals->root.sgotplt->contents + off +
7098 globals->sgotplt_jump_table_size +
7099 GOT_ENTRY_SIZE);
7100 }
7101
7102 symbol_tlsdesc_got_offset_mark (input_bfd, h, r_symndx);
7103 }
7104 break;
7105 default:
7106 break;
7107 }
7108
7109 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
7110 because such sections are not SEC_ALLOC and thus ld.so will
7111 not process them. */
7112 if (unresolved_reloc
7113 && !((input_section->flags & SEC_DEBUGGING) != 0
7114 && h->def_dynamic)
7115 && _bfd_elf_section_offset (output_bfd, info, input_section,
7116 +rel->r_offset) != (bfd_vma) - 1)
7117 {
7118 _bfd_error_handler
7119 /* xgettext:c-format */
7120 (_("%pB(%pA+%#" PRIx64 "): "
7121 "unresolvable %s relocation against symbol `%s'"),
7122 input_bfd, input_section, (uint64_t) rel->r_offset, howto->name,
7123 h->root.root.string);
7124 return false;
7125 }
7126
7127 if (r != bfd_reloc_ok && r != bfd_reloc_continue)
7128 {
7129 bfd_reloc_code_real_type real_r_type
7130 = elfNN_aarch64_bfd_reloc_from_type (input_bfd, r_type);
7131
7132 switch (r)
7133 {
7134 case bfd_reloc_overflow:
7135 (*info->callbacks->reloc_overflow)
7136 (info, (h ? &h->root : NULL), name, howto->name, (bfd_vma) 0,
7137 input_bfd, input_section, rel->r_offset);
7138 if (real_r_type == BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15
7139 || real_r_type == BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14)
7140 {
7141 (*info->callbacks->warning)
7142 (info,
7143 _("too many GOT entries for -fpic, "
7144 "please recompile with -fPIC"),
7145 name, input_bfd, input_section, rel->r_offset);
7146 return false;
7147 }
7148 /* Overflow can occur when a variable is referenced with a type
7149 that has a larger alignment than the type with which it was
7150 declared.  E.g.:
7151 file1.c: extern int foo; int a (void) { return foo; }
7152 file2.c: char bar, foo, baz;
7153 If the variable is placed into a data section at an offset
7154 that is incompatible with the larger alignment requirement
7155 overflow will occur. (Strictly speaking this is not overflow
7156 but rather an alignment problem, but the bfd_reloc_ error
7157 enum does not have a value to cover that situation).
7158
7159 Try to catch this situation here and provide a more helpful
7160 error message to the user. */
7161 if (addend & (((bfd_vma) 1 << howto->rightshift) - 1)
7162 /* FIXME: Are we testing all of the appropriate reloc
7163 types here ? */
7164 && (real_r_type == BFD_RELOC_AARCH64_LD_LO19_PCREL
7165 || real_r_type == BFD_RELOC_AARCH64_LDST16_LO12
7166 || real_r_type == BFD_RELOC_AARCH64_LDST32_LO12
7167 || real_r_type == BFD_RELOC_AARCH64_LDST64_LO12
7168 || real_r_type == BFD_RELOC_AARCH64_LDST128_LO12))
7169 {
7170 info->callbacks->warning
7171 (info, _("one possible cause of this error is that the \
7172 symbol is being referenced in the indicated code as if it had a larger \
7173 alignment than was declared where it was defined"),
7174 name, input_bfd, input_section, rel->r_offset);
7175 }
7176 break;
7177
7178 case bfd_reloc_undefined:
7179 (*info->callbacks->undefined_symbol)
7180 (info, name, input_bfd, input_section, rel->r_offset, true);
7181 break;
7182
7183 case bfd_reloc_outofrange:
7184 error_message = _("out of range");
7185 goto common_error;
7186
7187 case bfd_reloc_notsupported:
7188 error_message = _("unsupported relocation");
7189 goto common_error;
7190
7191 case bfd_reloc_dangerous:
7192 /* error_message should already be set. */
7193 goto common_error;
7194
7195 default:
7196 error_message = _("unknown error");
7197 /* Fall through. */
7198
7199 common_error:
7200 BFD_ASSERT (error_message != NULL);
7201 (*info->callbacks->reloc_dangerous)
7202 (info, error_message, input_bfd, input_section, rel->r_offset);
7203 break;
7204 }
7205 }
7206
7207 if (!save_addend)
7208 addend = 0;
7209 }
7210
7211 return true;
7212 }
7213
7214 /* Set the right machine number. */
7215
7216 static bool
7217 elfNN_aarch64_object_p (bfd *abfd)
7218 {
7219 #if ARCH_SIZE == 32
7220 bfd_default_set_arch_mach (abfd, bfd_arch_aarch64, bfd_mach_aarch64_ilp32);
7221 #else
7222 bfd_default_set_arch_mach (abfd, bfd_arch_aarch64, bfd_mach_aarch64);
7223 #endif
7224 return true;
7225 }
7226
7227 /* Function to keep AArch64 specific flags in the ELF header. */
7228
7229 static bool
7230 elfNN_aarch64_set_private_flags (bfd *abfd, flagword flags)
7231 {
7232 if (elf_flags_init (abfd) && elf_elfheader (abfd)->e_flags != flags)
7233 {
7234 }
7235 else
7236 {
7237 elf_elfheader (abfd)->e_flags = flags;
7238 elf_flags_init (abfd) = true;
7239 }
7240
7241 return true;
7242 }
7243
7244 /* Merge backend specific data from an object file to the output
7245 object file when linking. */
7246
7247 static bool
7248 elfNN_aarch64_merge_private_bfd_data (bfd *ibfd, struct bfd_link_info *info)
7249 {
7250 bfd *obfd = info->output_bfd;
7251 flagword out_flags;
7252 flagword in_flags;
7253 bool flags_compatible = true;
7254 asection *sec;
7255
7256 /* Check if we have the same endianness. */
7257 if (!_bfd_generic_verify_endian_match (ibfd, info))
7258 return false;
7259
7260 if (!is_aarch64_elf (ibfd) || !is_aarch64_elf (obfd))
7261 return true;
7262
7263 /* The input BFD must have had its flags initialised. */
7264 /* The following seems bogus to me -- The flags are initialized in
7265 the assembler but I don't think an elf_flags_init field is
7266 written into the object. */
7267 /* BFD_ASSERT (elf_flags_init (ibfd)); */
7268
7269 in_flags = elf_elfheader (ibfd)->e_flags;
7270 out_flags = elf_elfheader (obfd)->e_flags;
7271
7272 if (!elf_flags_init (obfd))
7273 {
7274 /* If the input is the default architecture and had the default
7275 flags then do not bother setting the flags for the output
7276 architecture, instead allow future merges to do this. If no
7277 future merges ever set these flags then they will retain their
7278 uninitialised values which, surprise surprise, correspond
7279 to the default values. */
7280 if (bfd_get_arch_info (ibfd)->the_default
7281 && elf_elfheader (ibfd)->e_flags == 0)
7282 return true;
7283
7284 elf_flags_init (obfd) = true;
7285 elf_elfheader (obfd)->e_flags = in_flags;
7286
7287 if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
7288 && bfd_get_arch_info (obfd)->the_default)
7289 return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd),
7290 bfd_get_mach (ibfd));
7291
7292 return true;
7293 }
7294
7295 /* Identical flags must be compatible. */
7296 if (in_flags == out_flags)
7297 return true;
7298
7299 /* Check to see if the input BFD actually contains any sections. If
7300 not, its flags may not have been initialised either, but it
7301 cannot actually cause any incompatibility. Do not short-circuit
7302 dynamic objects; their section list may be emptied by
7303 elf_link_add_object_symbols.
7304
7305 Also check to see if there are no code sections in the input.
7306 In this case there is no need to check for code specific flags.
7307 XXX - do we need to worry about floating-point format compatibility
7308 in data sections ? */
7309 if (!(ibfd->flags & DYNAMIC))
7310 {
7311 bool null_input_bfd = true;
7312 bool only_data_sections = true;
7313
7314 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
7315 {
7316 if ((bfd_section_flags (sec)
7317 & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
7318 == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
7319 only_data_sections = false;
7320
7321 null_input_bfd = false;
7322 break;
7323 }
7324
7325 if (null_input_bfd || only_data_sections)
7326 return true;
7327 }
7328
7329 return flags_compatible;
7330 }
7331
7332 /* Display the flags field. */
7333
7334 static bool
7335 elfNN_aarch64_print_private_bfd_data (bfd *abfd, void *ptr)
7336 {
7337 FILE *file = (FILE *) ptr;
7338 unsigned long flags;
7339
7340 BFD_ASSERT (abfd != NULL && ptr != NULL);
7341
7342 /* Print normal ELF private data. */
7343 _bfd_elf_print_private_bfd_data (abfd, ptr);
7344
7345 flags = elf_elfheader (abfd)->e_flags;
7346 /* Ignore init flag - it may not be set, despite the flags field
7347 containing valid data. */
7348
7349 /* xgettext:c-format */
7350 fprintf (file, _("private flags = 0x%lx:"), elf_elfheader (abfd)->e_flags);
7351
7352 if (flags)
7353 fprintf (file, _(" <Unrecognised flag bits set>"));
7354
7355 fputc ('\n', file);
7356
7357 return true;
7358 }
7359
7360 /* Return true if we need copy relocation against EH. */
7361
7362 static bool
7363 need_copy_relocation_p (struct elf_aarch64_link_hash_entry *eh)
7364 {
7365 struct elf_dyn_relocs *p;
7366 asection *s;
7367
7368 for (p = eh->root.dyn_relocs; p != NULL; p = p->next)
7369 {
7370 /* If there is any pc-relative reference, we need to keep the copy
7371 relocation to avoid propagating the relocation to runtime, which
7372 current glibc does not support. */
7373 if (p->pc_count)
7374 return true;
7375
7376 s = p->sec->output_section;
7377 /* Need copy relocation if it's against read-only section. */
7378 if (s != NULL && (s->flags & SEC_READONLY) != 0)
7379 return true;
7380 }
7381
7382 return false;
7383 }
7384
7385 /* Adjust a symbol defined by a dynamic object and referenced by a
7386 regular object. The current definition is in some section of the
7387 dynamic object, but we're not including those sections. We have to
7388 change the definition to something the rest of the link can
7389 understand. */
7390
7391 static bool
7392 elfNN_aarch64_adjust_dynamic_symbol (struct bfd_link_info *info,
7393 struct elf_link_hash_entry *h)
7394 {
7395 struct elf_aarch64_link_hash_table *htab;
7396 asection *s, *srel;
7397
7398 /* If this is a function, put it in the procedure linkage table. We
7399 will fill in the contents of the procedure linkage table later,
7400 when we know the address of the .got section. */
7401 if (h->type == STT_FUNC || h->type == STT_GNU_IFUNC || h->needs_plt)
7402 {
7403 if (h->plt.refcount <= 0
7404 || (h->type != STT_GNU_IFUNC
7405 && (SYMBOL_CALLS_LOCAL (info, h)
7406 || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
7407 && h->root.type == bfd_link_hash_undefweak))))
7408 {
7409 /* This case can occur if we saw a CALL26 reloc in
7410 an input file, but the symbol wasn't referred to
7411 by a dynamic object, or all references were
7412 garbage collected. In that case we can end up
7413 resolving the calls directly, without a PLT entry. */
7414 h->plt.offset = (bfd_vma) - 1;
7415 h->needs_plt = 0;
7416 }
7417
7418 return true;
7419 }
7420 else
7421 /* Otherwise, reset to -1. */
7422 h->plt.offset = (bfd_vma) - 1;
7423
7424
7425 /* If this is a weak symbol, and there is a real definition, the
7426 processor independent code will have arranged for us to see the
7427 real definition first, and we can just use the same value. */
7428 if (h->is_weakalias)
7429 {
7430 struct elf_link_hash_entry *def = weakdef (h);
7431 BFD_ASSERT (def->root.type == bfd_link_hash_defined);
7432 h->root.u.def.section = def->root.u.def.section;
7433 h->root.u.def.value = def->root.u.def.value;
7434 if (ELIMINATE_COPY_RELOCS || info->nocopyreloc)
7435 h->non_got_ref = def->non_got_ref;
7436 return true;
7437 }
7438
7439 /* If we are creating a shared library, we must presume that the
7440 only references to the symbol are via the global offset table.
7441 For such cases we need not do anything here; the relocations will
7442 be handled correctly by relocate_section. */
7443 if (bfd_link_pic (info))
7444 return true;
7445
7446 /* If there are no references to this symbol that do not use the
7447 GOT, we don't need to generate a copy reloc. */
7448 if (!h->non_got_ref)
7449 return true;
7450
7451 /* If -z nocopyreloc was given, we won't generate them either. */
7452 if (info->nocopyreloc)
7453 {
7454 h->non_got_ref = 0;
7455 return true;
7456 }
7457
7458 if (ELIMINATE_COPY_RELOCS)
7459 {
7460 struct elf_aarch64_link_hash_entry *eh;
7461 /* If we don't find any dynamic relocs in read-only sections, then
7462 we'll be keeping the dynamic relocs and avoiding the copy reloc. */
7463 eh = (struct elf_aarch64_link_hash_entry *) h;
7464 if (!need_copy_relocation_p (eh))
7465 {
7466 h->non_got_ref = 0;
7467 return true;
7468 }
7469 }
7470
7471 /* We must allocate the symbol in our .dynbss section, which will
7472 become part of the .bss section of the executable. There will be
7473 an entry for this symbol in the .dynsym section. The dynamic
7474 object will contain position independent code, so all references
7475 from the dynamic object to this symbol will go through the global
7476 offset table. The dynamic linker will use the .dynsym entry to
7477 determine the address it must put in the global offset table, so
7478 both the dynamic object and the regular object will refer to the
7479 same memory location for the variable. */
7480
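/* Hypothetical example: a non-PIC executable that reads
'extern int counter;' defined only in a shared library gets space
for 'counter' reserved below (in .dynbss, or .data.rel.ro for a
read-only definition) plus an R_AARCH64_COPY reloc, so the loader
copies the initial value into the executable and both modules end up
referencing that single location. */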
7481 htab = elf_aarch64_hash_table (info);
7482
7483 /* We must generate a R_AARCH64_COPY reloc to tell the dynamic linker
7484 to copy the initial value out of the dynamic object and into the
7485 runtime process image. */
7486 if ((h->root.u.def.section->flags & SEC_READONLY) != 0)
7487 {
7488 s = htab->root.sdynrelro;
7489 srel = htab->root.sreldynrelro;
7490 }
7491 else
7492 {
7493 s = htab->root.sdynbss;
7494 srel = htab->root.srelbss;
7495 }
7496 if ((h->root.u.def.section->flags & SEC_ALLOC) != 0 && h->size != 0)
7497 {
7498 srel->size += RELOC_SIZE (htab);
7499 h->needs_copy = 1;
7500 }
7501
7502 return _bfd_elf_adjust_dynamic_copy (info, h, s);
7503
7504 }
7505
7506 static bool
7507 elfNN_aarch64_allocate_local_symbols (bfd *abfd, unsigned number)
7508 {
7509 struct elf_aarch64_local_symbol *locals;
7510 locals = elf_aarch64_locals (abfd);
7511 if (locals == NULL)
7512 {
7513 locals = (struct elf_aarch64_local_symbol *)
7514 bfd_zalloc (abfd, number * sizeof (struct elf_aarch64_local_symbol));
7515 if (locals == NULL)
7516 return false;
7517 elf_aarch64_locals (abfd) = locals;
7518 }
7519 return true;
7520 }
7521
7522 /* Create the .got section to hold the global offset table. */
7523
7524 static bool
7525 aarch64_elf_create_got_section (bfd *abfd, struct bfd_link_info *info)
7526 {
7527 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
7528 flagword flags;
7529 asection *s;
7530 struct elf_link_hash_entry *h;
7531 struct elf_link_hash_table *htab = elf_hash_table (info);
7532
7533 /* This function may be called more than once. */
7534 if (htab->sgot != NULL)
7535 return true;
7536
7537 flags = bed->dynamic_sec_flags;
7538
7539 s = bfd_make_section_anyway_with_flags (abfd,
7540 (bed->rela_plts_and_copies_p
7541 ? ".rela.got" : ".rel.got"),
7542 (bed->dynamic_sec_flags
7543 | SEC_READONLY));
7544 if (s == NULL
7545 || !bfd_set_section_alignment (s, bed->s->log_file_align))
7546 return false;
7547 htab->srelgot = s;
7548
7549 s = bfd_make_section_anyway_with_flags (abfd, ".got", flags);
7550 if (s == NULL
7551 || !bfd_set_section_alignment (s, bed->s->log_file_align))
7552 return false;
7553 htab->sgot = s;
7554 htab->sgot->size += GOT_ENTRY_SIZE;
7555
7556 if (bed->want_got_sym)
7557 {
7558 /* Define the symbol _GLOBAL_OFFSET_TABLE_ at the start of the .got
7559 (or .got.plt) section. We don't do this in the linker script
7560 because we don't want to define the symbol if we are not creating
7561 a global offset table. */
7562 h = _bfd_elf_define_linkage_sym (abfd, info, s,
7563 "_GLOBAL_OFFSET_TABLE_");
7564 elf_hash_table (info)->hgot = h;
7565 if (h == NULL)
7566 return false;
7567 }
7568
7569 if (bed->want_got_plt)
7570 {
7571 s = bfd_make_section_anyway_with_flags (abfd, ".got.plt", flags);
7572 if (s == NULL
7573 || !bfd_set_section_alignment (s, bed->s->log_file_align))
7574 return false;
7575 htab->sgotplt = s;
7576 }
7577
7578 /* The first bit of the global offset table is the header. */
7579 s->size += bed->got_header_size;
7580
7581 return true;
7582 }
7583
7584 /* Look through the relocs for a section during the first phase. */
7585
7586 static bool
7587 elfNN_aarch64_check_relocs (bfd *abfd, struct bfd_link_info *info,
7588 asection *sec, const Elf_Internal_Rela *relocs)
7589 {
7590 Elf_Internal_Shdr *symtab_hdr;
7591 struct elf_link_hash_entry **sym_hashes;
7592 const Elf_Internal_Rela *rel;
7593 const Elf_Internal_Rela *rel_end;
7594 asection *sreloc;
7595
7596 struct elf_aarch64_link_hash_table *htab;
7597
7598 if (bfd_link_relocatable (info))
7599 return true;
7600
7601 BFD_ASSERT (is_aarch64_elf (abfd));
7602
7603 htab = elf_aarch64_hash_table (info);
7604 sreloc = NULL;
7605
7606 symtab_hdr = &elf_symtab_hdr (abfd);
7607 sym_hashes = elf_sym_hashes (abfd);
7608
7609 rel_end = relocs + sec->reloc_count;
7610 for (rel = relocs; rel < rel_end; rel++)
7611 {
7612 struct elf_link_hash_entry *h;
7613 unsigned int r_symndx;
7614 unsigned int r_type;
7615 bfd_reloc_code_real_type bfd_r_type;
7616 Elf_Internal_Sym *isym;
7617
7618 r_symndx = ELFNN_R_SYM (rel->r_info);
7619 r_type = ELFNN_R_TYPE (rel->r_info);
7620
7621 if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr))
7622 {
7623 /* xgettext:c-format */
7624 _bfd_error_handler (_("%pB: bad symbol index: %d"), abfd, r_symndx);
7625 return false;
7626 }
7627
7628 if (r_symndx < symtab_hdr->sh_info)
7629 {
7630 /* A local symbol. */
7631 isym = bfd_sym_from_r_symndx (&htab->root.sym_cache,
7632 abfd, r_symndx);
7633 if (isym == NULL)
7634 return false;
7635
7636 /* Check relocation against local STT_GNU_IFUNC symbol. */
7637 if (ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
7638 {
7639 h = elfNN_aarch64_get_local_sym_hash (htab, abfd, rel,
7640 true);
7641 if (h == NULL)
7642 return false;
7643
7644 /* Fake a STT_GNU_IFUNC symbol. */
7645 h->type = STT_GNU_IFUNC;
7646 h->def_regular = 1;
7647 h->ref_regular = 1;
7648 h->forced_local = 1;
7649 h->root.type = bfd_link_hash_defined;
7650 }
7651 else
7652 h = NULL;
7653 }
7654 else
7655 {
7656 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
7657 while (h->root.type == bfd_link_hash_indirect
7658 || h->root.type == bfd_link_hash_warning)
7659 h = (struct elf_link_hash_entry *) h->root.u.i.link;
7660 }
7661
7662 /* Could be done earlier, if h were already available. */
7663 bfd_r_type = aarch64_tls_transition (abfd, info, r_type, h, r_symndx);
7664
7665 if (h != NULL)
7666 {
7667 /* If a relocation refers to _GLOBAL_OFFSET_TABLE_, create the .got.
7668 This shows up in particular in an R_AARCH64_PREL64 in large model
7669 when calculating the pc-relative address to .got section which is
7670 used to initialize the gp register. */
7671 if (h->root.root.string
7672 && strcmp (h->root.root.string, "_GLOBAL_OFFSET_TABLE_") == 0)
7673 {
7674 if (htab->root.dynobj == NULL)
7675 htab->root.dynobj = abfd;
7676
7677 if (! aarch64_elf_create_got_section (htab->root.dynobj, info))
7678 return false;
7679
7680 BFD_ASSERT (h == htab->root.hgot);
7681 }
7682
7683 /* Create the ifunc sections for static executables. If we
7684 never see an indirect function symbol and are not building
7685 a static executable, those sections will be empty and
7686 won't appear in the output. */
7687 switch (bfd_r_type)
7688 {
7689 default:
7690 break;
7691
7692 case BFD_RELOC_AARCH64_ADD_LO12:
7693 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
7694 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
7695 case BFD_RELOC_AARCH64_CALL26:
7696 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
7697 case BFD_RELOC_AARCH64_JUMP26:
7698 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
7699 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
7700 case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
7701 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
7702 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
7703 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
7704 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
7705 case BFD_RELOC_AARCH64_NN:
7706 if (htab->root.dynobj == NULL)
7707 htab->root.dynobj = abfd;
7708 if (!_bfd_elf_create_ifunc_sections (htab->root.dynobj, info))
7709 return false;
7710 break;
7711 }
7712
7713 /* It is referenced by a non-shared object. */
7714 h->ref_regular = 1;
7715 }
7716
7717 switch (bfd_r_type)
7718 {
7719 case BFD_RELOC_AARCH64_16:
7720 #if ARCH_SIZE == 64
7721 case BFD_RELOC_AARCH64_32:
7722 #endif
7723 if (bfd_link_pic (info) && (sec->flags & SEC_ALLOC) != 0)
7724 {
7725 if (h != NULL
7726 /* This is an absolute symbol. It represents a value instead
7727 of an address. */
7728 && (bfd_is_abs_symbol (&h->root)
7729 /* This is an undefined symbol. */
7730 || h->root.type == bfd_link_hash_undefined))
7731 break;
7732
7733 /* For local symbols and for global symbols defined in a non-ABS
7734 section, it is assumed that the value is an address. */
7735 int howto_index = bfd_r_type - BFD_RELOC_AARCH64_RELOC_START;
7736 _bfd_error_handler
7737 /* xgettext:c-format */
7738 (_("%pB: relocation %s against `%s' can not be used when making "
7739 "a shared object"),
7740 abfd, elfNN_aarch64_howto_table[howto_index].name,
7741 (h) ? h->root.root.string : "a local symbol");
7742 bfd_set_error (bfd_error_bad_value);
7743 return false;
7744 }
7745 else
7746 break;
7747
7748 case BFD_RELOC_AARCH64_MOVW_G0_NC:
7749 case BFD_RELOC_AARCH64_MOVW_G1_NC:
7750 case BFD_RELOC_AARCH64_MOVW_G2_NC:
7751 case BFD_RELOC_AARCH64_MOVW_G3:
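/* Descriptive note: these relocations materialise an absolute address
piecewise in MOVZ/MOVK immediates, and there is no dynamic relocation
that can patch such a sequence at load time, hence the hard error
below when producing position-independent output. */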
7752 if (bfd_link_pic (info))
7753 {
7754 int howto_index = bfd_r_type - BFD_RELOC_AARCH64_RELOC_START;
7755 _bfd_error_handler
7756 /* xgettext:c-format */
7757 (_("%pB: relocation %s against `%s' can not be used when making "
7758 "a shared object; recompile with -fPIC"),
7759 abfd, elfNN_aarch64_howto_table[howto_index].name,
7760 (h) ? h->root.root.string : "a local symbol");
7761 bfd_set_error (bfd_error_bad_value);
7762 return false;
7763 }
7764 /* Fall through. */
7765
7766 case BFD_RELOC_AARCH64_16_PCREL:
7767 case BFD_RELOC_AARCH64_32_PCREL:
7768 case BFD_RELOC_AARCH64_64_PCREL:
7769 case BFD_RELOC_AARCH64_ADD_LO12:
7770 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
7771 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
7772 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
7773 case BFD_RELOC_AARCH64_LDST128_LO12:
7774 case BFD_RELOC_AARCH64_LDST16_LO12:
7775 case BFD_RELOC_AARCH64_LDST32_LO12:
7776 case BFD_RELOC_AARCH64_LDST64_LO12:
7777 case BFD_RELOC_AARCH64_LDST8_LO12:
7778 case BFD_RELOC_AARCH64_LD_LO19_PCREL:
7779 if (h == NULL || bfd_link_pic (info))
7780 break;
7781 /* Fall through. */
7782
7783 case BFD_RELOC_AARCH64_NN:
7784
7785 /* We don't need to handle relocs into sections not going into
7786 the "real" output. */
7787 if ((sec->flags & SEC_ALLOC) == 0)
7788 break;
7789
7790 if (h != NULL)
7791 {
7792 if (!bfd_link_pic (info))
7793 h->non_got_ref = 1;
7794
7795 h->plt.refcount += 1;
7796 h->pointer_equality_needed = 1;
7797 }
7798
7799 /* No need to do anything if we're not creating a shared
7800 object. */
7801 if (!(bfd_link_pic (info)
7802 /* If on the other hand, we are creating an executable, we
7803 may need to keep relocations for symbols satisfied by a
7804 dynamic library if we manage to avoid copy relocs for the
7805 symbol.
7806
7807 NOTE: Currently, there is no support for copy reloc
7808 elimination on pc-relative relocation types, because there is
7809 no dynamic relocation support for them in glibc. We still
7810 record the dynamic symbol reference for them. This is
7811 because one symbol may be referenced by both absolute
7812 relocation (for example, BFD_RELOC_AARCH64_NN) and
7813 pc-relative relocation. We need full symbol reference
7814 information to make correct decision later in
7815 elfNN_aarch64_adjust_dynamic_symbol. */
7816 || (ELIMINATE_COPY_RELOCS
7817 && !bfd_link_pic (info)
7818 && h != NULL
7819 && (h->root.type == bfd_link_hash_defweak
7820 || !h->def_regular))))
7821 break;
7822
7823 {
7824 struct elf_dyn_relocs *p;
7825 struct elf_dyn_relocs **head;
7826 int howto_index = bfd_r_type - BFD_RELOC_AARCH64_RELOC_START;
7827
7828 /* We must copy these reloc types into the output file.
7829 Create a reloc section in dynobj and make room for
7830 this reloc. */
7831 if (sreloc == NULL)
7832 {
7833 if (htab->root.dynobj == NULL)
7834 htab->root.dynobj = abfd;
7835
7836 sreloc = _bfd_elf_make_dynamic_reloc_section
7837 (sec, htab->root.dynobj, LOG_FILE_ALIGN, abfd, /*rela? */ true);
7838
7839 if (sreloc == NULL)
7840 return false;
7841 }
7842
7843 /* If this is a global symbol, we count the number of
7844 relocations we need for this symbol. */
7845 if (h != NULL)
7846 {
7847 head = &h->dyn_relocs;
7848 }
7849 else
7850 {
7851 /* Track dynamic relocs needed for local syms too.
7852 We really need local syms available to do this
7853 easily. Oh well. */
7854
7855 asection *s;
7856 void **vpp;
7857
7858 isym = bfd_sym_from_r_symndx (&htab->root.sym_cache,
7859 abfd, r_symndx);
7860 if (isym == NULL)
7861 return false;
7862
7863 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
7864 if (s == NULL)
7865 s = sec;
7866
7867 /* Beware of type punned pointers vs strict aliasing
7868 rules. */
7869 vpp = &(elf_section_data (s)->local_dynrel);
7870 head = (struct elf_dyn_relocs **) vpp;
7871 }
7872
7873 p = *head;
7874 if (p == NULL || p->sec != sec)
7875 {
7876 size_t amt = sizeof *p;
7877 p = ((struct elf_dyn_relocs *)
7878 bfd_zalloc (htab->root.dynobj, amt));
7879 if (p == NULL)
7880 return false;
7881 p->next = *head;
7882 *head = p;
7883 p->sec = sec;
7884 }
7885
7886 p->count += 1;
7887
7888 if (elfNN_aarch64_howto_table[howto_index].pc_relative)
7889 p->pc_count += 1;
7890 }
7891 break;
7892
7893 /* RR: We probably want to keep a consistency check that
7894 there are no dangling GOT_PAGE relocs. */
7895 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
7896 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
7897 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
7898 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
7899 case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
7900 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
7901 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
7902 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
7903 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
7904 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
7905 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
7906 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
7907 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
7908 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
7909 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
7910 case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
7911 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
7912 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
7913 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
7914 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
7915 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
7916 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
7917 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
7918 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
7919 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
7920 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
7921 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
7922 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
7923 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
7924 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
7925 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
7926 {
7927 unsigned got_type;
7928 unsigned old_got_type;
7929
7930 got_type = aarch64_reloc_got_type (bfd_r_type);
7931
7932 if (h)
7933 {
7934 h->got.refcount += 1;
7935 old_got_type = elf_aarch64_hash_entry (h)->got_type;
7936 }
7937 else
7938 {
7939 struct elf_aarch64_local_symbol *locals;
7940
7941 if (!elfNN_aarch64_allocate_local_symbols
7942 (abfd, symtab_hdr->sh_info))
7943 return false;
7944
7945 locals = elf_aarch64_locals (abfd);
7946 BFD_ASSERT (r_symndx < symtab_hdr->sh_info);
7947 locals[r_symndx].got_refcount += 1;
7948 old_got_type = locals[r_symndx].got_type;
7949 }
7950
7951 /* If a variable is accessed with both general dynamic TLS
7952 methods, two slots may be created. */
7953 if (GOT_TLS_GD_ANY_P (old_got_type) && GOT_TLS_GD_ANY_P (got_type))
7954 got_type |= old_got_type;
7955
7956 /* We will already have issued an error message if there
7957 is a TLS/non-TLS mismatch, based on the symbol type.
7958 So just combine any TLS types needed. */
7959 if (old_got_type != GOT_UNKNOWN && old_got_type != GOT_NORMAL
7960 && got_type != GOT_NORMAL)
7961 got_type |= old_got_type;
7962
7963 /* If the symbol is accessed by both IE and GD methods, we
7964 are able to relax. Turn off the GD flag, without
7965 messing up with any other kind of TLS types that may be
7966 involved. */
7967 if ((got_type & GOT_TLS_IE) && GOT_TLS_GD_ANY_P (got_type))
7968 got_type &= ~ (GOT_TLSDESC_GD | GOT_TLS_GD);
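/* Illustration (hypothetical): a symbol reached through both an IE
access and a GD or TLSDESC access ends up here with only GOT_TLS_IE
(plus any non-TLS bits) set, so a single IE GOT slot is allocated and
the GD sequences are relaxed towards IE later on. */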
7969
7970 if (old_got_type != got_type)
7971 {
7972 if (h != NULL)
7973 elf_aarch64_hash_entry (h)->got_type = got_type;
7974 else
7975 {
7976 struct elf_aarch64_local_symbol *locals;
7977 locals = elf_aarch64_locals (abfd);
7978 BFD_ASSERT (r_symndx < symtab_hdr->sh_info);
7979 locals[r_symndx].got_type = got_type;
7980 }
7981 }
7982
7983 if (htab->root.dynobj == NULL)
7984 htab->root.dynobj = abfd;
7985 if (! aarch64_elf_create_got_section (htab->root.dynobj, info))
7986 return false;
7987 break;
7988 }
7989
7990 case BFD_RELOC_AARCH64_CALL26:
7991 case BFD_RELOC_AARCH64_JUMP26:
7992 /* If this is a local symbol then we resolve it
7993 directly without creating a PLT entry. */
7994 if (h == NULL)
7995 continue;
7996
7997 h->needs_plt = 1;
7998 if (h->plt.refcount <= 0)
7999 h->plt.refcount = 1;
8000 else
8001 h->plt.refcount += 1;
8002 break;
8003
8004 default:
8005 break;
8006 }
8007 }
8008
8009 return true;
8010 }
8011
8012 /* Treat mapping symbols as special target symbols. */
8013
8014 static bool
8015 elfNN_aarch64_is_target_special_symbol (bfd *abfd ATTRIBUTE_UNUSED,
8016 asymbol *sym)
8017 {
8018 return bfd_is_aarch64_special_symbol_name (sym->name,
8019 BFD_AARCH64_SPECIAL_SYM_TYPE_ANY);
8020 }
8021
8022 /* If the ELF symbol SYM might be a function in SEC, return the
8023 function size and set *CODE_OFF to the function's entry point,
8024 otherwise return zero. */
8025
8026 static bfd_size_type
8027 elfNN_aarch64_maybe_function_sym (const asymbol *sym, asection *sec,
8028 bfd_vma *code_off)
8029 {
8030 bfd_size_type size;
8031 elf_symbol_type * elf_sym = (elf_symbol_type *) sym;
8032
8033 if ((sym->flags & (BSF_SECTION_SYM | BSF_FILE | BSF_OBJECT
8034 | BSF_THREAD_LOCAL | BSF_RELC | BSF_SRELC)) != 0
8035 || sym->section != sec)
8036 return 0;
8037
8038 size = (sym->flags & BSF_SYNTHETIC) ? 0 : elf_sym->internal_elf_sym.st_size;
8039
8040 if (!(sym->flags & BSF_SYNTHETIC))
8041 switch (ELF_ST_TYPE (elf_sym->internal_elf_sym.st_info))
8042 {
8043 case STT_NOTYPE:
8044 /* Ignore symbols created by the annobin plugin for gcc and clang.
8045 These symbols are hidden, local, notype and have a size of 0. */
8046 if (size == 0
8047 && sym->flags & BSF_LOCAL
8048 && ELF_ST_VISIBILITY (elf_sym->internal_elf_sym.st_other) == STV_HIDDEN)
8049 return 0;
8050 /* Fall through. */
8051 case STT_FUNC:
8052 /* FIXME: Allow STT_GNU_IFUNC as well ? */
8053 break;
8054 default:
8055 return 0;
8056 }
8057
8058 if ((sym->flags & BSF_LOCAL)
8059 && bfd_is_aarch64_special_symbol_name (sym->name,
8060 BFD_AARCH64_SPECIAL_SYM_TYPE_ANY))
8061 return 0;
8062
8063 *code_off = sym->value;
8064
8065 /* Do not return 0 for the function's size. */
8066 return size ? size : 1;
8067 }
8068
8069 static bool
8070 elfNN_aarch64_find_inliner_info (bfd *abfd,
8071 const char **filename_ptr,
8072 const char **functionname_ptr,
8073 unsigned int *line_ptr)
8074 {
8075 bool found;
8076 found = _bfd_dwarf2_find_inliner_info
8077 (abfd, filename_ptr,
8078 functionname_ptr, line_ptr, &elf_tdata (abfd)->dwarf2_find_line_info);
8079 return found;
8080 }
8081
8082
8083 static bool
8084 elfNN_aarch64_init_file_header (bfd *abfd, struct bfd_link_info *link_info)
8085 {
8086 Elf_Internal_Ehdr *i_ehdrp; /* ELF file header, internal form. */
8087
8088 if (!_bfd_elf_init_file_header (abfd, link_info))
8089 return false;
8090
8091 i_ehdrp = elf_elfheader (abfd);
8092 i_ehdrp->e_ident[EI_ABIVERSION] = AARCH64_ELF_ABI_VERSION;
8093 return true;
8094 }
8095
8096 static enum elf_reloc_type_class
8097 elfNN_aarch64_reloc_type_class (const struct bfd_link_info *info,
8098 const asection *rel_sec ATTRIBUTE_UNUSED,
8099 const Elf_Internal_Rela *rela)
8100 {
8101 struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info);
8102
8103 if (htab->root.dynsym != NULL
8104 && htab->root.dynsym->contents != NULL)
8105 {
8106 /* Check relocation against STT_GNU_IFUNC symbol if there are
8107 dynamic symbols. */
8108 bfd *abfd = info->output_bfd;
8109 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
8110 unsigned long r_symndx = ELFNN_R_SYM (rela->r_info);
8111 if (r_symndx != STN_UNDEF)
8112 {
8113 Elf_Internal_Sym sym;
8114 if (!bed->s->swap_symbol_in (abfd,
8115 (htab->root.dynsym->contents
8116 + r_symndx * bed->s->sizeof_sym),
8117 0, &sym))
8118 {
8119 /* xgettext:c-format */
8120 _bfd_error_handler (_("%pB symbol number %lu references"
8121 " nonexistent SHT_SYMTAB_SHNDX section"),
8122 abfd, r_symndx);
8123 /* Ideally an error class should be returned here. */
8124 }
8125 else if (ELF_ST_TYPE (sym.st_info) == STT_GNU_IFUNC)
8126 return reloc_class_ifunc;
8127 }
8128 }
8129
8130 switch ((int) ELFNN_R_TYPE (rela->r_info))
8131 {
8132 case AARCH64_R (IRELATIVE):
8133 return reloc_class_ifunc;
8134 case AARCH64_R (RELATIVE):
8135 return reloc_class_relative;
8136 case AARCH64_R (JUMP_SLOT):
8137 return reloc_class_plt;
8138 case AARCH64_R (COPY):
8139 return reloc_class_copy;
8140 default:
8141 return reloc_class_normal;
8142 }
8143 }
8144
8145 /* Handle an AArch64 specific section when reading an object file. This is
8146 called when bfd_section_from_shdr finds a section with an unknown
8147 type. */
8148
8149 static bool
8150 elfNN_aarch64_section_from_shdr (bfd *abfd,
8151 Elf_Internal_Shdr *hdr,
8152 const char *name, int shindex)
8153 {
8154 /* There ought to be a place to keep ELF backend specific flags, but
8155 at the moment there isn't one. We just keep track of the
8156 sections by their name, instead. Fortunately, the ABI gives
8157 names for all the AArch64 specific sections, so we will probably get
8158 away with this. */
8159 switch (hdr->sh_type)
8160 {
8161 case SHT_AARCH64_ATTRIBUTES:
8162 break;
8163
8164 default:
8165 return false;
8166 }
8167
8168 if (!_bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
8169 return false;
8170
8171 return true;
8172 }
8173
8174 /* Process any AArch64-specific program segment types. */
8175
8176 static bool
8177 elfNN_aarch64_section_from_phdr (bfd *abfd ATTRIBUTE_UNUSED,
8178 Elf_Internal_Phdr *hdr,
8179 int hdr_index ATTRIBUTE_UNUSED,
8180 const char *name ATTRIBUTE_UNUSED)
8181 {
8182 /* Right now we only handle the PT_AARCH64_MEMTAG_MTE segment type. */
8183 if (hdr == NULL || hdr->p_type != PT_AARCH64_MEMTAG_MTE)
8184 return false;
8185
8186 if (hdr->p_filesz > 0)
8187 {
8188 /* Sections created from memory tag p_type's are always named
8189 "memtag". This makes it easier for tools (for example, GDB)
8190 to find them. */
8191 asection *newsect = bfd_make_section_anyway (abfd, "memtag");
8192
8193 if (newsect == NULL)
8194 return false;
8195
8196 unsigned int opb = bfd_octets_per_byte (abfd, NULL);
8197
8198 /* p_vaddr holds the original start address of the tagged memory
8199 range. */
8200 newsect->vma = hdr->p_vaddr / opb;
8201
8202 /* p_filesz holds the storage size of the packed tags. */
8203 newsect->size = hdr->p_filesz;
8204 newsect->filepos = hdr->p_offset;
8205
8206 /* p_memsz holds the size of the memory range that contains tags. The
8207 section's rawsize field is reused for this purpose. */
8208 newsect->rawsize = hdr->p_memsz;
8209
8210 /* Make sure the section's flags has SEC_HAS_CONTENTS set, otherwise
8211 BFD will return all zeroes when attempting to get contents from this
8212 section. */
8213 newsect->flags |= SEC_HAS_CONTENTS;
8214 }
8215
8216 return true;
8217 }
8218
8219 /* Implements the bfd_elf_modify_headers hook for aarch64. */
8220
8221 static bool
8222 elfNN_aarch64_modify_headers (bfd *abfd,
8223 struct bfd_link_info *info)
8224 {
8225 struct elf_segment_map *m;
8226 unsigned int segment_count = 0;
8227 Elf_Internal_Phdr *p;
8228
8229 for (m = elf_seg_map (abfd); m != NULL; m = m->next, segment_count++)
8230 {
8231 /* We are only interested in the memory tag segment that will be dumped
8232 to a core file. If this segment has no memory tags or this isn't a
8233 core file, just skip it.
8234 if (m->p_type != PT_AARCH64_MEMTAG_MTE
8235 || bfd_get_format (abfd) != bfd_core)
8236 continue;
8237
8238 /* For memory tag segments in core files, the size of the file contents
8239 is smaller than the size of the memory range. Adjust the memory size
8240 accordingly. The real memory size is held in the section's rawsize
8241 field. */
8242 if (m->count > 0)
8243 {
8244 p = elf_tdata (abfd)->phdr;
8245 p += m->idx;
8246 p->p_memsz = m->sections[0]->rawsize;
8247 p->p_flags = 0;
8248 p->p_paddr = 0;
8249 p->p_align = 0;
8250 }
8251 }
8252
8253 /* Give the generic code a chance to handle the headers. */
8254 return _bfd_elf_modify_headers (abfd, info);
8255 }
8256
8257 /* A structure used to record a list of sections, independently
8258 of the next and prev fields in the asection structure. */
8259 typedef struct section_list
8260 {
8261 asection *sec;
8262 struct section_list *next;
8263 struct section_list *prev;
8264 }
8265 section_list;
8266
8267 /* Unfortunately we need to keep a list of sections for which
8268 an _aarch64_elf_section_data structure has been allocated. This
8269 is because it is possible for functions like elfNN_aarch64_write_section
8270 to be called on a section which has had an elf_data_structure
8271 allocated for it (and so the used_by_bfd field is valid) but
8272 for which the AArch64 extended version of this structure - the
8273 _aarch64_elf_section_data structure - has not been allocated. */
8274 static section_list *sections_with_aarch64_elf_section_data = NULL;
8275
8276 static void
8277 record_section_with_aarch64_elf_section_data (asection *sec)
8278 {
8279 struct section_list *entry;
8280
8281 entry = bfd_malloc (sizeof (*entry));
8282 if (entry == NULL)
8283 return;
8284 entry->sec = sec;
8285 entry->next = sections_with_aarch64_elf_section_data;
8286 entry->prev = NULL;
8287 if (entry->next != NULL)
8288 entry->next->prev = entry;
8289 sections_with_aarch64_elf_section_data = entry;
8290 }
8291
8292 static struct section_list *
8293 find_aarch64_elf_section_entry (asection *sec)
8294 {
8295 struct section_list *entry;
8296 static struct section_list *last_entry = NULL;
8297
8298 /* This is a short cut for the typical case where the sections are added
8299 to the sections_with_aarch64_elf_section_data list in forward order and
8300 then looked up here in backwards order. This makes a real difference
8301 to the ld-srec/sec64k.exp linker test. */
8302 entry = sections_with_aarch64_elf_section_data;
8303 if (last_entry != NULL)
8304 {
8305 if (last_entry->sec == sec)
8306 entry = last_entry;
8307 else if (last_entry->next != NULL && last_entry->next->sec == sec)
8308 entry = last_entry->next;
8309 }
8310
8311 for (; entry; entry = entry->next)
8312 if (entry->sec == sec)
8313 break;
8314
8315 if (entry)
8316 /* Record the entry prior to this one - it is the entry we are
8317 most likely to want to locate next time. Also this way if we
8318 have been called from
8319 unrecord_section_with_aarch64_elf_section_data () we will not
8320 be caching a pointer that is about to be freed. */
8321 last_entry = entry->prev;
8322
8323 return entry;
8324 }
8325
8326 static void
8327 unrecord_section_with_aarch64_elf_section_data (asection *sec)
8328 {
8329 struct section_list *entry;
8330
8331 entry = find_aarch64_elf_section_entry (sec);
8332
8333 if (entry)
8334 {
8335 if (entry->prev != NULL)
8336 entry->prev->next = entry->next;
8337 if (entry->next != NULL)
8338 entry->next->prev = entry->prev;
8339 if (entry == sections_with_aarch64_elf_section_data)
8340 sections_with_aarch64_elf_section_data = entry->next;
8341 free (entry);
8342 }
8343 }
8344
8345
8346 typedef struct
8347 {
8348 void *finfo;
8349 struct bfd_link_info *info;
8350 asection *sec;
8351 int sec_shndx;
8352 int (*func) (void *, const char *, Elf_Internal_Sym *,
8353 asection *, struct elf_link_hash_entry *);
8354 } output_arch_syminfo;
8355
8356 enum map_symbol_type
8357 {
8358 AARCH64_MAP_INSN,
8359 AARCH64_MAP_DATA
8360 };
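/* Descriptive note: AARCH64_MAP_INSN is emitted as a "$x" mapping
symbol marking the start of a run of A64 instructions, and
AARCH64_MAP_DATA as "$d" marking literal data, following the AArch64
ELF mapping-symbol convention. */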
8361
8362
8363 /* Output a single mapping symbol. */
8364
8365 static bool
8366 elfNN_aarch64_output_map_sym (output_arch_syminfo *osi,
8367 enum map_symbol_type type, bfd_vma offset)
8368 {
8369 static const char *names[2] = { "$x", "$d" };
8370 Elf_Internal_Sym sym;
8371
8372 sym.st_value = (osi->sec->output_section->vma
8373 + osi->sec->output_offset + offset);
8374 sym.st_size = 0;
8375 sym.st_other = 0;
8376 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
8377 sym.st_shndx = osi->sec_shndx;
8378 return osi->func (osi->finfo, names[type], &sym, osi->sec, NULL) == 1;
8379 }
8380
8381 /* Output a single local symbol for a generated stub. */
8382
8383 static bool
8384 elfNN_aarch64_output_stub_sym (output_arch_syminfo *osi, const char *name,
8385 bfd_vma offset, bfd_vma size)
8386 {
8387 Elf_Internal_Sym sym;
8388
8389 sym.st_value = (osi->sec->output_section->vma
8390 + osi->sec->output_offset + offset);
8391 sym.st_size = size;
8392 sym.st_other = 0;
8393 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
8394 sym.st_shndx = osi->sec_shndx;
8395 return osi->func (osi->finfo, name, &sym, osi->sec, NULL) == 1;
8396 }
8397
8398 static bool
8399 aarch64_map_one_stub (struct bfd_hash_entry *gen_entry, void *in_arg)
8400 {
8401 struct elf_aarch64_stub_hash_entry *stub_entry;
8402 asection *stub_sec;
8403 bfd_vma addr;
8404 char *stub_name;
8405 output_arch_syminfo *osi;
8406
8407 /* Massage our args to the form they really have. */
8408 stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;
8409 osi = (output_arch_syminfo *) in_arg;
8410
8411 stub_sec = stub_entry->stub_sec;
8412
8413 /* Ensure this stub is attached to the current section being
8414 processed. */
8415 if (stub_sec != osi->sec)
8416 return true;
8417
8418 addr = (bfd_vma) stub_entry->stub_offset;
8419
8420 stub_name = stub_entry->output_name;
8421
8422 switch (stub_entry->stub_type)
8423 {
8424 case aarch64_stub_adrp_branch:
8425 if (!elfNN_aarch64_output_stub_sym (osi, stub_name, addr,
8426 sizeof (aarch64_adrp_branch_stub)))
8427 return false;
8428 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
8429 return false;
8430 break;
8431 case aarch64_stub_long_branch:
8432 if (!elfNN_aarch64_output_stub_sym
8433 (osi, stub_name, addr, sizeof (aarch64_long_branch_stub)))
8434 return false;
8435 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
8436 return false;
8437 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_DATA, addr + 16))
8438 return false;
8439 break;
8440 case aarch64_stub_erratum_835769_veneer:
8441 if (!elfNN_aarch64_output_stub_sym (osi, stub_name, addr,
8442 sizeof (aarch64_erratum_835769_stub)))
8443 return false;
8444 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
8445 return false;
8446 break;
8447 case aarch64_stub_erratum_843419_veneer:
8448 if (!elfNN_aarch64_output_stub_sym (osi, stub_name, addr,
8449 sizeof (aarch64_erratum_843419_stub)))
8450 return false;
8451 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
8452 return false;
8453 break;
8454 case aarch64_stub_none:
8455 break;
8456
8457 default:
8458 abort ();
8459 }
8460
8461 return true;
8462 }
8463
8464 /* Output mapping symbols for linker generated sections. */
8465
8466 static bool
8467 elfNN_aarch64_output_arch_local_syms (bfd *output_bfd,
8468 struct bfd_link_info *info,
8469 void *finfo,
8470 int (*func) (void *, const char *,
8471 Elf_Internal_Sym *,
8472 asection *,
8473 struct elf_link_hash_entry
8474 *))
8475 {
8476 output_arch_syminfo osi;
8477 struct elf_aarch64_link_hash_table *htab;
8478
8479 htab = elf_aarch64_hash_table (info);
8480
8481 osi.finfo = finfo;
8482 osi.info = info;
8483 osi.func = func;
8484
8485 /* Long calls stubs. */
8486 if (htab->stub_bfd && htab->stub_bfd->sections)
8487 {
8488 asection *stub_sec;
8489
8490 for (stub_sec = htab->stub_bfd->sections;
8491 stub_sec != NULL; stub_sec = stub_sec->next)
8492 {
8493 /* Ignore non-stub sections. */
8494 if (!strstr (stub_sec->name, STUB_SUFFIX))
8495 continue;
8496
8497 osi.sec = stub_sec;
8498
8499 osi.sec_shndx = _bfd_elf_section_from_bfd_section
8500 (output_bfd, osi.sec->output_section);
8501
8502 /* The first instruction in a stub is always a branch. */
8503 if (!elfNN_aarch64_output_map_sym (&osi, AARCH64_MAP_INSN, 0))
8504 return false;
8505
8506 bfd_hash_traverse (&htab->stub_hash_table, aarch64_map_one_stub,
8507 &osi);
8508 }
8509 }
8510
8511 /* Finally, output mapping symbols for the PLT. */
8512 if (!htab->root.splt || htab->root.splt->size == 0)
8513 return true;
8514
8515 osi.sec_shndx = _bfd_elf_section_from_bfd_section
8516 (output_bfd, htab->root.splt->output_section);
8517 osi.sec = htab->root.splt;
8518
8519 elfNN_aarch64_output_map_sym (&osi, AARCH64_MAP_INSN, 0);
8520
8521 return true;
8522
8523 }
8524
8525 /* Allocate target specific section data. */
8526
8527 static bool
8528 elfNN_aarch64_new_section_hook (bfd *abfd, asection *sec)
8529 {
8530 if (!sec->used_by_bfd)
8531 {
8532 _aarch64_elf_section_data *sdata;
8533 size_t amt = sizeof (*sdata);
8534
8535 sdata = bfd_zalloc (abfd, amt);
8536 if (sdata == NULL)
8537 return false;
8538 sec->used_by_bfd = sdata;
8539 }
8540
8541 record_section_with_aarch64_elf_section_data (sec);
8542
8543 return _bfd_elf_new_section_hook (abfd, sec);
8544 }
8545
8546
8547 static void
8548 unrecord_section_via_map_over_sections (bfd *abfd ATTRIBUTE_UNUSED,
8549 asection *sec,
8550 void *ignore ATTRIBUTE_UNUSED)
8551 {
8552 unrecord_section_with_aarch64_elf_section_data (sec);
8553 }
8554
8555 static bool
8556 elfNN_aarch64_close_and_cleanup (bfd *abfd)
8557 {
8558 if (abfd->sections)
8559 bfd_map_over_sections (abfd,
8560 unrecord_section_via_map_over_sections, NULL);
8561
8562 return _bfd_elf_close_and_cleanup (abfd);
8563 }
8564
8565 static bool
8566 elfNN_aarch64_bfd_free_cached_info (bfd *abfd)
8567 {
8568 if (abfd->sections)
8569 bfd_map_over_sections (abfd,
8570 unrecord_section_via_map_over_sections, NULL);
8571
8572 return _bfd_free_cached_info (abfd);
8573 }
8574
8575 /* Create dynamic sections. This is different from the ARM backend in that
8576 the got, plt, gotplt and their relocation sections are all created in the
8577 standard part of the bfd elf backend. */
8578
8579 static bool
8580 elfNN_aarch64_create_dynamic_sections (bfd *dynobj,
8581 struct bfd_link_info *info)
8582 {
8583 /* We need to create .got section. */
8584 if (!aarch64_elf_create_got_section (dynobj, info))
8585 return false;
8586
8587 return _bfd_elf_create_dynamic_sections (dynobj, info);
8588 }
8589
8590
8591 /* Allocate space in .plt, .got and associated reloc sections for
8592 dynamic relocs. */
8593
8594 static bool
8595 elfNN_aarch64_allocate_dynrelocs (struct elf_link_hash_entry *h, void *inf)
8596 {
8597 struct bfd_link_info *info;
8598 struct elf_aarch64_link_hash_table *htab;
8599 struct elf_aarch64_link_hash_entry *eh;
8600 struct elf_dyn_relocs *p;
8601
8602 /* An example of a bfd_link_hash_indirect symbol is a versioned
8603 symbol. For example: __gxx_personality_v0(bfd_link_hash_indirect)
8604 -> __gxx_personality_v0(bfd_link_hash_defined)
8605
8606 There is no need to process bfd_link_hash_indirect symbols here
8607 because we will also be presented with the concrete instance of
8608 the symbol and elfNN_aarch64_copy_indirect_symbol () will have been
8609 called to copy all relevant data from the generic to the concrete
8610 symbol instance. */
8611 if (h->root.type == bfd_link_hash_indirect)
8612 return true;
8613
8614 if (h->root.type == bfd_link_hash_warning)
8615 h = (struct elf_link_hash_entry *) h->root.u.i.link;
8616
8617 info = (struct bfd_link_info *) inf;
8618 htab = elf_aarch64_hash_table (info);
8619
8620 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle it
8621 here if it is defined and referenced in a non-shared object. */
8622 if (h->type == STT_GNU_IFUNC
8623 && h->def_regular)
8624 return true;
8625 else if (htab->root.dynamic_sections_created && h->plt.refcount > 0)
8626 {
8627 /* Make sure this symbol is output as a dynamic symbol.
8628 Undefined weak syms won't yet be marked as dynamic. */
8629 if (h->dynindx == -1 && !h->forced_local
8630 && h->root.type == bfd_link_hash_undefweak)
8631 {
8632 if (!bfd_elf_link_record_dynamic_symbol (info, h))
8633 return false;
8634 }
8635
8636 if (bfd_link_pic (info) || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
8637 {
8638 asection *s = htab->root.splt;
8639
8640 /* If this is the first .plt entry, make room for the special
8641 first entry. */
8642 if (s->size == 0)
8643 s->size += htab->plt_header_size;
8644
8645 h->plt.offset = s->size;
8646
8647 /* If this symbol is not defined in a regular file, and we are
8648 not generating a shared library, then set the symbol to this
8649 location in the .plt. This is required to make function
8650 pointers compare as equal between the normal executable and
8651 the shared library. */
8652 if (!bfd_link_pic (info) && !h->def_regular)
8653 {
8654 h->root.u.def.section = s;
8655 h->root.u.def.value = h->plt.offset;
8656 }
8657
8658 /* Make room for this entry. For now we only create the
8659 small model PLT entries. We later need to find a way
8660 of relaxing into these from the large model PLT entries. */
8661 s->size += htab->plt_entry_size;
8662
8663 /* We also need to make an entry in the .got.plt section, which
8664 will be placed in the .got section by the linker script. */
8665 htab->root.sgotplt->size += GOT_ENTRY_SIZE;
8666
8667 /* We also need to make an entry in the .rela.plt section. */
8668 htab->root.srelplt->size += RELOC_SIZE (htab);
8669
8670 /* We need to ensure that all GOT entries that serve the PLT
8671 are consecutive with the special GOT slots [0] [1] and
8672 [2]. Any additional relocations, such as
8673 R_AARCH64_TLSDESC, must be placed after the PLT related
8674 entries. We abuse the reloc_count such that during
8675 sizing we adjust reloc_count to indicate the number of
8676 PLT related reserved entries. In subsequent phases when
8677 filling in the contents of the reloc entries, PLT related
8678 entries are placed by computing their PLT index (0
8679 .. reloc_count). While other non-PLT relocs are placed
8680 at the slot indicated by reloc_count and reloc_count is
8681 updated. */
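/* A rough sketch of the intended .got.plt layout under this scheme,
   assuming n PLT entries (slot size GOT_ENTRY_SIZE):
     [0] [1] [2]      slots reserved for the dynamic linker,
     [3] .. [3+n-1]   one jump slot per PLT entry (n == reloc_count),
     [3+n] ..         TLSDESC double slots and other non-PLT entries.  */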
8682
8683 htab->root.srelplt->reloc_count++;
8684
8685 /* Mark the DSO in case R_<CLS>_JUMP_SLOT relocs against
8686 variant PCS symbols are present. */
8687 if (h->other & STO_AARCH64_VARIANT_PCS)
8688 htab->variant_pcs = 1;
8689
8690 }
8691 else
8692 {
8693 h->plt.offset = (bfd_vma) - 1;
8694 h->needs_plt = 0;
8695 }
8696 }
8697 else
8698 {
8699 h->plt.offset = (bfd_vma) - 1;
8700 h->needs_plt = 0;
8701 }
8702
8703 eh = (struct elf_aarch64_link_hash_entry *) h;
8704 eh->tlsdesc_got_jump_table_offset = (bfd_vma) - 1;
8705
8706 if (h->got.refcount > 0)
8707 {
8708 bool dyn;
8709 unsigned got_type = elf_aarch64_hash_entry (h)->got_type;
8710
8711 h->got.offset = (bfd_vma) - 1;
8712
8713 dyn = htab->root.dynamic_sections_created;
8714
8715 /* Make sure this symbol is output as a dynamic symbol.
8716 Undefined weak syms won't yet be marked as dynamic. */
8717 if (dyn && h->dynindx == -1 && !h->forced_local
8718 && h->root.type == bfd_link_hash_undefweak)
8719 {
8720 if (!bfd_elf_link_record_dynamic_symbol (info, h))
8721 return false;
8722 }
8723
8724 if (got_type == GOT_UNKNOWN)
8725 {
8726 }
8727 else if (got_type == GOT_NORMAL)
8728 {
8729 h->got.offset = htab->root.sgot->size;
8730 htab->root.sgot->size += GOT_ENTRY_SIZE;
8731 if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
8732 || h->root.type != bfd_link_hash_undefweak)
8733 && (bfd_link_pic (info)
8734 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h))
8735 /* Undefined weak symbol in static PIE resolves to 0 without
8736 any dynamic relocations. */
8737 && !UNDEFWEAK_NO_DYNAMIC_RELOC (info, h))
8738 {
8739 htab->root.srelgot->size += RELOC_SIZE (htab);
8740 }
8741 }
8742 else
8743 {
8744 int indx;
8745 if (got_type & GOT_TLSDESC_GD)
8746 {
8747 eh->tlsdesc_got_jump_table_offset =
8748 (htab->root.sgotplt->size
8749 - aarch64_compute_jump_table_size (htab));
8750 htab->root.sgotplt->size += GOT_ENTRY_SIZE * 2;
8751 h->got.offset = (bfd_vma) - 2;
8752 }
8753
8754 if (got_type & GOT_TLS_GD)
8755 {
8756 h->got.offset = htab->root.sgot->size;
8757 htab->root.sgot->size += GOT_ENTRY_SIZE * 2;
8758 }
8759
8760 if (got_type & GOT_TLS_IE)
8761 {
8762 h->got.offset = htab->root.sgot->size;
8763 htab->root.sgot->size += GOT_ENTRY_SIZE;
8764 }
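/* To summarise the allocations above: GOT_TLSDESC_GD reserves a double
   slot in .got.plt (recorded in tlsdesc_got_jump_table_offset), while
   GOT_TLS_GD and GOT_TLS_IE reserve two slots and one slot
   respectively in .got.  */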
8765
8766 indx = h && h->dynindx != -1 ? h->dynindx : 0;
8767 if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
8768 || h->root.type != bfd_link_hash_undefweak)
8769 && (!bfd_link_executable (info)
8770 || indx != 0
8771 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
8772 {
8773 if (got_type & GOT_TLSDESC_GD)
8774 {
8775 htab->root.srelplt->size += RELOC_SIZE (htab);
8776 /* Note reloc_count not incremented here! We have
8777 already adjusted reloc_count for this relocation
8778 type. */
8779
8780 /* TLSDESC PLT is now needed, but not yet determined. */
8781 htab->root.tlsdesc_plt = (bfd_vma) - 1;
8782 }
8783
8784 if (got_type & GOT_TLS_GD)
8785 htab->root.srelgot->size += RELOC_SIZE (htab) * 2;
8786
8787 if (got_type & GOT_TLS_IE)
8788 htab->root.srelgot->size += RELOC_SIZE (htab);
8789 }
8790 }
8791 }
8792 else
8793 {
8794 h->got.offset = (bfd_vma) - 1;
8795 }
8796
8797 if (h->dyn_relocs == NULL)
8798 return true;
8799
8800 for (p = h->dyn_relocs; p != NULL; p = p->next)
8801 if (eh->def_protected)
8802 {
8803 /* Disallow copy relocations against a protected symbol. */
8804 asection *s = p->sec->output_section;
8805 if (s != NULL && (s->flags & SEC_READONLY) != 0)
8806 {
8807 info->callbacks->einfo
8808 /* xgettext:c-format */
8809 (_ ("%F%P: %pB: copy relocation against non-copyable "
8810 "protected symbol `%s'\n"),
8811 p->sec->owner, h->root.root.string);
8812 return false;
8813 }
8814 }
8815
8816 /* In the shared -Bsymbolic case, discard space allocated for
8817 dynamic pc-relative relocs against symbols which turn out to be
8818 defined in regular objects. For the normal shared case, discard
8819 space for pc-relative relocs that have become local due to symbol
8820 visibility changes. */
8821
8822 if (bfd_link_pic (info))
8823 {
8824 /* Relocs that use pc_count are those that appear on a call
8825 insn, or certain REL relocs that can be generated via assembly.
8826 We want calls to protected symbols to resolve directly to the
8827 function rather than going via the plt. If people want
8828 function pointer comparisons to work as expected then they
8829 should avoid writing weird assembly. */
8830 if (SYMBOL_CALLS_LOCAL (info, h))
8831 {
8832 struct elf_dyn_relocs **pp;
8833
8834 for (pp = &h->dyn_relocs; (p = *pp) != NULL;)
8835 {
8836 p->count -= p->pc_count;
8837 p->pc_count = 0;
8838 if (p->count == 0)
8839 *pp = p->next;
8840 else
8841 pp = &p->next;
8842 }
8843 }
8844
8845 /* Also discard relocs on undefined weak syms with non-default
8846 visibility. */
8847 if (h->dyn_relocs != NULL && h->root.type == bfd_link_hash_undefweak)
8848 {
8849 if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
8850 || UNDEFWEAK_NO_DYNAMIC_RELOC (info, h))
8851 h->dyn_relocs = NULL;
8852
8853 /* Make sure undefined weak symbols are output as a dynamic
8854 symbol in PIEs. */
8855 else if (h->dynindx == -1
8856 && !h->forced_local
8857 && h->root.type == bfd_link_hash_undefweak
8858 && !bfd_elf_link_record_dynamic_symbol (info, h))
8859 return false;
8860 }
8861
8862 }
8863 else if (ELIMINATE_COPY_RELOCS)
8864 {
8865 /* For the non-shared case, discard space for relocs against
8866 symbols which turn out to need copy relocs or are not
8867 dynamic. */
8868
8869 if (!h->non_got_ref
8870 && ((h->def_dynamic
8871 && !h->def_regular)
8872 || (htab->root.dynamic_sections_created
8873 && (h->root.type == bfd_link_hash_undefweak
8874 || h->root.type == bfd_link_hash_undefined))))
8875 {
8876 /* Make sure this symbol is output as a dynamic symbol.
8877 Undefined weak syms won't yet be marked as dynamic. */
8878 if (h->dynindx == -1
8879 && !h->forced_local
8880 && h->root.type == bfd_link_hash_undefweak
8881 && !bfd_elf_link_record_dynamic_symbol (info, h))
8882 return false;
8883
8884 /* If that succeeded, we know we'll be keeping all the
8885 relocs. */
8886 if (h->dynindx != -1)
8887 goto keep;
8888 }
8889
8890 h->dyn_relocs = NULL;
8891
8892 keep:;
8893 }
8894
8895 /* Finally, allocate space. */
8896 for (p = h->dyn_relocs; p != NULL; p = p->next)
8897 {
8898 asection *sreloc;
8899
8900 sreloc = elf_section_data (p->sec)->sreloc;
8901
8902 BFD_ASSERT (sreloc != NULL);
8903
8904 sreloc->size += p->count * RELOC_SIZE (htab);
8905 }
8906
8907 return true;
8908 }
8909
8910 /* Allocate space in .plt, .got and associated reloc sections for
8911 ifunc dynamic relocs. */
8912
8913 static bool
8914 elfNN_aarch64_allocate_ifunc_dynrelocs (struct elf_link_hash_entry *h,
8915 void *inf)
8916 {
8917 struct bfd_link_info *info;
8918 struct elf_aarch64_link_hash_table *htab;
8919
8920 /* An example of a bfd_link_hash_indirect symbol is a versioned
8921 symbol, for example: __gxx_personality_v0(bfd_link_hash_indirect)
8922 -> __gxx_personality_v0(bfd_link_hash_defined)
8923
8924 There is no need to process bfd_link_hash_indirect symbols here
8925 because we will also be presented with the concrete instance of
8926 the symbol and elfNN_aarch64_copy_indirect_symbol () will have been
8927 called to copy all relevant data from the generic to the concrete
8928 symbol instance. */
8929 if (h->root.type == bfd_link_hash_indirect)
8930 return true;
8931
8932 if (h->root.type == bfd_link_hash_warning)
8933 h = (struct elf_link_hash_entry *) h->root.u.i.link;
8934
8935 info = (struct bfd_link_info *) inf;
8936 htab = elf_aarch64_hash_table (info);
8937
8938 /* Since an STT_GNU_IFUNC symbol must go through the PLT, we handle
8939 it here if it is defined and referenced in a non-shared object. */
8940 if (h->type == STT_GNU_IFUNC
8941 && h->def_regular)
8942 return _bfd_elf_allocate_ifunc_dyn_relocs (info, h,
8943 &h->dyn_relocs,
8944 htab->plt_entry_size,
8945 htab->plt_header_size,
8946 GOT_ENTRY_SIZE,
8947 false);
8948 return true;
8949 }
8950
8951 /* Allocate space in .plt, .got and associated reloc sections for
8952 local ifunc dynamic relocs. */
8953
8954 static int
8955 elfNN_aarch64_allocate_local_ifunc_dynrelocs (void **slot, void *inf)
8956 {
8957 struct elf_link_hash_entry *h
8958 = (struct elf_link_hash_entry *) *slot;
8959
8960 if (h->type != STT_GNU_IFUNC
8961 || !h->def_regular
8962 || !h->ref_regular
8963 || !h->forced_local
8964 || h->root.type != bfd_link_hash_defined)
8965 abort ();
8966
8967 return elfNN_aarch64_allocate_ifunc_dynrelocs (h, inf);
8968 }
8969
8970 /* This is the most important function of all.  Innocuously named
8971 though!  */
8972
8973 static bool
8974 elfNN_aarch64_size_dynamic_sections (bfd *output_bfd ATTRIBUTE_UNUSED,
8975 struct bfd_link_info *info)
8976 {
8977 struct elf_aarch64_link_hash_table *htab;
8978 bfd *dynobj;
8979 asection *s;
8980 bool relocs;
8981 bfd *ibfd;
8982
8983 htab = elf_aarch64_hash_table ((info));
8984 dynobj = htab->root.dynobj;
8985
8986 BFD_ASSERT (dynobj != NULL);
8987
8988 if (htab->root.dynamic_sections_created)
8989 {
8990 if (bfd_link_executable (info) && !info->nointerp)
8991 {
8992 s = bfd_get_linker_section (dynobj, ".interp");
8993 if (s == NULL)
8994 abort ();
8995 s->size = sizeof ELF_DYNAMIC_INTERPRETER;
8996 s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER;
8997 }
8998 }
8999
9000 /* Set up .got offsets for local syms, and space for local dynamic
9001 relocs. */
9002 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
9003 {
9004 struct elf_aarch64_local_symbol *locals = NULL;
9005 Elf_Internal_Shdr *symtab_hdr;
9006 asection *srel;
9007 unsigned int i;
9008
9009 if (!is_aarch64_elf (ibfd))
9010 continue;
9011
9012 for (s = ibfd->sections; s != NULL; s = s->next)
9013 {
9014 struct elf_dyn_relocs *p;
9015
9016 for (p = (struct elf_dyn_relocs *)
9017 (elf_section_data (s)->local_dynrel); p != NULL; p = p->next)
9018 {
9019 if (!bfd_is_abs_section (p->sec)
9020 && bfd_is_abs_section (p->sec->output_section))
9021 {
9022 /* Input section has been discarded, either because
9023 it is a copy of a linkonce section or due to
9024 linker script /DISCARD/, so we'll be discarding
9025 the relocs too. */
9026 }
9027 else if (p->count != 0)
9028 {
9029 srel = elf_section_data (p->sec)->sreloc;
9030 srel->size += p->count * RELOC_SIZE (htab);
9031 if ((p->sec->output_section->flags & SEC_READONLY) != 0)
9032 info->flags |= DF_TEXTREL;
9033 }
9034 }
9035 }
9036
9037 locals = elf_aarch64_locals (ibfd);
9038 if (!locals)
9039 continue;
9040
9041 symtab_hdr = &elf_symtab_hdr (ibfd);
9042 srel = htab->root.srelgot;
9043 for (i = 0; i < symtab_hdr->sh_info; i++)
9044 {
9045 locals[i].got_offset = (bfd_vma) - 1;
9046 locals[i].tlsdesc_got_jump_table_offset = (bfd_vma) - 1;
9047 if (locals[i].got_refcount > 0)
9048 {
9049 unsigned got_type = locals[i].got_type;
9050 if (got_type & GOT_TLSDESC_GD)
9051 {
9052 locals[i].tlsdesc_got_jump_table_offset =
9053 (htab->root.sgotplt->size
9054 - aarch64_compute_jump_table_size (htab));
9055 htab->root.sgotplt->size += GOT_ENTRY_SIZE * 2;
9056 locals[i].got_offset = (bfd_vma) - 2;
9057 }
9058
9059 if (got_type & GOT_TLS_GD)
9060 {
9061 locals[i].got_offset = htab->root.sgot->size;
9062 htab->root.sgot->size += GOT_ENTRY_SIZE * 2;
9063 }
9064
9065 if (got_type & GOT_TLS_IE
9066 || got_type & GOT_NORMAL)
9067 {
9068 locals[i].got_offset = htab->root.sgot->size;
9069 htab->root.sgot->size += GOT_ENTRY_SIZE;
9070 }
9071
9072 if (got_type == GOT_UNKNOWN)
9073 {
9074 }
9075
9076 if (bfd_link_pic (info))
9077 {
9078 if (got_type & GOT_TLSDESC_GD)
9079 {
9080 htab->root.srelplt->size += RELOC_SIZE (htab);
9081 /* Note RELOC_COUNT not incremented here! */
9082 htab->root.tlsdesc_plt = (bfd_vma) - 1;
9083 }
9084
9085 if (got_type & GOT_TLS_GD)
9086 htab->root.srelgot->size += RELOC_SIZE (htab) * 2;
9087
9088 if (got_type & GOT_TLS_IE
9089 || got_type & GOT_NORMAL)
9090 htab->root.srelgot->size += RELOC_SIZE (htab);
9091 }
9092 }
9093 else
9094 {
9095 locals[i].got_refcount = (bfd_vma) - 1;
9096 }
9097 }
9098 }
9099
9100
9101 /* Allocate global sym .plt and .got entries, and space for global
9102 sym dynamic relocs. */
9103 elf_link_hash_traverse (&htab->root, elfNN_aarch64_allocate_dynrelocs,
9104 info);
9105
9106 /* Allocate global ifunc sym .plt and .got entries, and space for global
9107 ifunc sym dynamic relocs. */
9108 elf_link_hash_traverse (&htab->root, elfNN_aarch64_allocate_ifunc_dynrelocs,
9109 info);
9110
9111 /* Allocate .plt and .got entries, and space for local ifunc symbols. */
9112 htab_traverse (htab->loc_hash_table,
9113 elfNN_aarch64_allocate_local_ifunc_dynrelocs,
9114 info);
9115
9116 /* For every jump slot reserved in the sgotplt, reloc_count is
9117 incremented. However, when we reserve space for TLS descriptors,
9118 it's not incremented, so in order to compute the space reserved
9119 for them, it suffices to multiply the reloc count by the jump
9120 slot size. */
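/* In effect (see aarch64_compute_jump_table_size) this records
   srelplt->reloc_count * GOT_ENTRY_SIZE, i.e. the portion of .got.plt
   occupied by the ordinary jump slots; everything beyond it is TLS
   descriptor space.  */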
9121
9122 if (htab->root.srelplt)
9123 htab->sgotplt_jump_table_size = aarch64_compute_jump_table_size (htab);
9124
9125 if (htab->root.tlsdesc_plt)
9126 {
9127 if (htab->root.splt->size == 0)
9128 htab->root.splt->size += htab->plt_header_size;
9129
9130 /* If we're not using lazy TLS relocations, don't generate the
9131 GOT and PLT entry required. */
9132 if ((info->flags & DF_BIND_NOW))
9133 htab->root.tlsdesc_plt = 0;
9134 else
9135 {
9136 htab->root.tlsdesc_plt = htab->root.splt->size;
9137 htab->root.splt->size += htab->tlsdesc_plt_entry_size;
9138
9139 htab->root.tlsdesc_got = htab->root.sgot->size;
9140 htab->root.sgot->size += GOT_ENTRY_SIZE;
9141 }
9142 }
9143
9144 /* Initialise mapping symbol information, to be used later to
9145 distinguish between code and data while scanning for errata. */
9146 if (htab->fix_erratum_835769 || htab->fix_erratum_843419)
9147 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
9148 {
9149 if (!is_aarch64_elf (ibfd))
9150 continue;
9151 bfd_elfNN_aarch64_init_maps (ibfd);
9152 }
9153
9154 /* We now have determined the sizes of the various dynamic sections.
9155 Allocate memory for them. */
9156 relocs = false;
9157 for (s = dynobj->sections; s != NULL; s = s->next)
9158 {
9159 if ((s->flags & SEC_LINKER_CREATED) == 0)
9160 continue;
9161
9162 if (s == htab->root.splt
9163 || s == htab->root.sgot
9164 || s == htab->root.sgotplt
9165 || s == htab->root.iplt
9166 || s == htab->root.igotplt
9167 || s == htab->root.sdynbss
9168 || s == htab->root.sdynrelro)
9169 {
9170 /* Strip this section if we don't need it; see the
9171 comment below. */
9172 }
9173 else if (startswith (bfd_section_name (s), ".rela"))
9174 {
9175 if (s->size != 0 && s != htab->root.srelplt)
9176 relocs = true;
9177
9178 /* We use the reloc_count field as a counter if we need
9179 to copy relocs into the output file. */
9180 if (s != htab->root.srelplt)
9181 s->reloc_count = 0;
9182 }
9183 else
9184 {
9185 /* It's not one of our sections, so don't allocate space. */
9186 continue;
9187 }
9188
9189 if (s->size == 0)
9190 {
9191 /* If we don't need this section, strip it from the
9192 output file. This is mostly to handle .rela.bss and
9193 .rela.plt. We must create both sections in
9194 create_dynamic_sections, because they must be created
9195 before the linker maps input sections to output
9196 sections. The linker does that before
9197 adjust_dynamic_symbol is called, and it is that
9198 function which decides whether anything needs to go
9199 into these sections. */
9200 s->flags |= SEC_EXCLUDE;
9201 continue;
9202 }
9203
9204 if ((s->flags & SEC_HAS_CONTENTS) == 0)
9205 continue;
9206
9207 /* Allocate memory for the section contents. We use bfd_zalloc
9208 here in case unused entries are not reclaimed before the
9209 section's contents are written out. This should not happen,
9210 but this way if it does, we get a R_AARCH64_NONE reloc instead
9211 of garbage. */
9212 s->contents = (bfd_byte *) bfd_zalloc (dynobj, s->size);
9213 if (s->contents == NULL)
9214 return false;
9215 }
9216
9217 if (htab->root.dynamic_sections_created)
9218 {
9219 /* Add some entries to the .dynamic section. We fill in the
9220 values later, in elfNN_aarch64_finish_dynamic_sections, but we
9221 must add the entries now so that we get the correct size for
9222 the .dynamic section. The DT_DEBUG entry is filled in by the
9223 dynamic linker and used by the debugger. */
9224 #define add_dynamic_entry(TAG, VAL) \
9225 _bfd_elf_add_dynamic_entry (info, TAG, VAL)
9226
9227 if (!_bfd_elf_add_dynamic_tags (output_bfd, info, relocs))
9228 return false;
9229
9230 if (htab->root.splt->size != 0)
9231 {
9232 if (htab->variant_pcs
9233 && !add_dynamic_entry (DT_AARCH64_VARIANT_PCS, 0))
9234 return false;
9235
9236 if ((elf_aarch64_tdata (output_bfd)->plt_type == PLT_BTI_PAC)
9237 && (!add_dynamic_entry (DT_AARCH64_BTI_PLT, 0)
9238 || !add_dynamic_entry (DT_AARCH64_PAC_PLT, 0)))
9239 return false;
9240
9241 else if ((elf_aarch64_tdata (output_bfd)->plt_type == PLT_BTI)
9242 && !add_dynamic_entry (DT_AARCH64_BTI_PLT, 0))
9243 return false;
9244
9245 else if ((elf_aarch64_tdata (output_bfd)->plt_type == PLT_PAC)
9246 && !add_dynamic_entry (DT_AARCH64_PAC_PLT, 0))
9247 return false;
9248 }
9249 }
9250 #undef add_dynamic_entry
9251
9252 return true;
9253 }
9254
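/* Patch one field of a PLT entry: apply VALUE to the instruction at
   PLT_ENTRY using the howto for relocation R_TYPE.  A thin wrapper
   around _bfd_aarch64_elf_put_addend.  */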
9255 static inline void
9256 elf_aarch64_update_plt_entry (bfd *output_bfd,
9257 bfd_reloc_code_real_type r_type,
9258 bfd_byte *plt_entry, bfd_vma value)
9259 {
9260 reloc_howto_type *howto = elfNN_aarch64_howto_from_bfd_reloc (r_type);
9261
9262 /* FIXME: We should check the return value from this function call. */
9263 (void) _bfd_aarch64_elf_put_addend (output_bfd, plt_entry, r_type, howto, value);
9264 }
9265
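/* Fill in the PLT entry for the symbol H: copy in the PLTn template,
   patch its ADRP/LDR/ADD fields to address H's .got.plt slot, point
   that slot back at PLT0, and emit the corresponding JUMP_SLOT (or,
   for a locally defined ifunc, IRELATIVE) relocation.  */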
9266 static void
9267 elfNN_aarch64_create_small_pltn_entry (struct elf_link_hash_entry *h,
9268 struct elf_aarch64_link_hash_table
9269 *htab, bfd *output_bfd,
9270 struct bfd_link_info *info)
9271 {
9272 bfd_byte *plt_entry;
9273 bfd_vma plt_index;
9274 bfd_vma got_offset;
9275 bfd_vma gotplt_entry_address;
9276 bfd_vma plt_entry_address;
9277 Elf_Internal_Rela rela;
9278 bfd_byte *loc;
9279 asection *plt, *gotplt, *relplt;
9280
9281 /* When building a static executable, use .iplt, .igot.plt and
9282 .rela.iplt sections for STT_GNU_IFUNC symbols. */
9283 if (htab->root.splt != NULL)
9284 {
9285 plt = htab->root.splt;
9286 gotplt = htab->root.sgotplt;
9287 relplt = htab->root.srelplt;
9288 }
9289 else
9290 {
9291 plt = htab->root.iplt;
9292 gotplt = htab->root.igotplt;
9293 relplt = htab->root.irelplt;
9294 }
9295
9296 /* Get the index in the procedure linkage table which
9297 corresponds to this symbol. This is the index of this symbol
9298 in all the symbols for which we are making plt entries. The
9299 first entry in the procedure linkage table is reserved.
9300
9301 Get the offset into the .got table of the entry that
9302 corresponds to this function. Each .got entry is GOT_ENTRY_SIZE
9303 bytes. The first three are reserved for the dynamic linker.
9304
9305 For static executables, we don't reserve anything. */
9306
9307 if (plt == htab->root.splt)
9308 {
9309 plt_index = (h->plt.offset - htab->plt_header_size) / htab->plt_entry_size;
9310 got_offset = (plt_index + 3) * GOT_ENTRY_SIZE;
9311 }
9312 else
9313 {
9314 plt_index = h->plt.offset / htab->plt_entry_size;
9315 got_offset = plt_index * GOT_ENTRY_SIZE;
9316 }
9317
9318 plt_entry = plt->contents + h->plt.offset;
9319 plt_entry_address = plt->output_section->vma
9320 + plt->output_offset + h->plt.offset;
9321 gotplt_entry_address = gotplt->output_section->vma +
9322 gotplt->output_offset + got_offset;
9323
9324 /* Copy in the boiler-plate for the PLTn entry. */
9325 memcpy (plt_entry, htab->plt_entry, htab->plt_entry_size);
9326
9327 /* The first instruction in a BTI enabled PLT stub is a BTI
9328 instruction, so skip it. */
9329 if (elf_aarch64_tdata (output_bfd)->plt_type & PLT_BTI
9330 && elf_elfheader (output_bfd)->e_type == ET_EXEC)
9331 plt_entry = plt_entry + 4;
9332
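/* A rough sketch of the small-model PLTn entry being patched here
   (BTI variants prefix a "bti c", and PAC variants authenticate the
   target before the branch):
     adrp x16, PLT_GOT + n * GOT_ENTRY_SIZE
     ldr  x17, [x16, #:lo12:PLT_GOT + n * GOT_ENTRY_SIZE]
     add  x16, x16, #:lo12:PLT_GOT + n * GOT_ENTRY_SIZE
     br   x17
   The three fixups below fill in the page-relative and lo12 fields.  */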
9333 /* Fill in the top 21 bits for this: ADRP x16, PLT_GOT + n * 8.
9334 ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
9335 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADR_HI21_PCREL,
9336 plt_entry,
9337 PG (gotplt_entry_address) -
9338 PG (plt_entry_address));
9339
9340 /* Fill in the lo12 bits for the load from the pltgot. */
9341 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_LDSTNN_LO12,
9342 plt_entry + 4,
9343 PG_OFFSET (gotplt_entry_address));
9344
9345 /* Fill in the lo12 bits for the add from the pltgot entry. */
9346 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADD_LO12,
9347 plt_entry + 8,
9348 PG_OFFSET (gotplt_entry_address));
9349
9350 /* All the GOTPLT Entries are essentially initialized to PLT0. */
9351 bfd_put_NN (output_bfd,
9352 plt->output_section->vma + plt->output_offset,
9353 gotplt->contents + got_offset);
9354
9355 rela.r_offset = gotplt_entry_address;
9356
9357 if (h->dynindx == -1
9358 || ((bfd_link_executable (info)
9359 || ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
9360 && h->def_regular
9361 && h->type == STT_GNU_IFUNC))
9362 {
9363 /* If an STT_GNU_IFUNC symbol is locally defined, generate
9364 R_AARCH64_IRELATIVE instead of R_AARCH64_JUMP_SLOT. */
9365 rela.r_info = ELFNN_R_INFO (0, AARCH64_R (IRELATIVE));
9366 rela.r_addend = (h->root.u.def.value
9367 + h->root.u.def.section->output_section->vma
9368 + h->root.u.def.section->output_offset);
9369 }
9370 else
9371 {
9372 /* Fill in the entry in the .rela.plt section. */
9373 rela.r_info = ELFNN_R_INFO (h->dynindx, AARCH64_R (JUMP_SLOT));
9374 rela.r_addend = 0;
9375 }
9376
9377 /* Compute the relocation entry to use based on the PLT index and do
9378 not adjust reloc_count. The reloc_count has already been adjusted
9379 to account for this entry. */
9380 loc = relplt->contents + plt_index * RELOC_SIZE (htab);
9381 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
9382 }
9383
9384 /* Size sections even though they're not dynamic. We use this hook to
9385 set up _TLS_MODULE_BASE_, if needed. */
9386
9387 static bool
9388 elfNN_aarch64_always_size_sections (bfd *output_bfd,
9389 struct bfd_link_info *info)
9390 {
9391 asection *tls_sec;
9392
9393 if (bfd_link_relocatable (info))
9394 return true;
9395
9396 tls_sec = elf_hash_table (info)->tls_sec;
9397
9398 if (tls_sec)
9399 {
9400 struct elf_link_hash_entry *tlsbase;
9401
9402 tlsbase = elf_link_hash_lookup (elf_hash_table (info),
9403 "_TLS_MODULE_BASE_", true, true, false);
9404
9405 if (tlsbase)
9406 {
9407 struct bfd_link_hash_entry *h = NULL;
9408 const struct elf_backend_data *bed =
9409 get_elf_backend_data (output_bfd);
9410
9411 if (!(_bfd_generic_link_add_one_symbol
9412 (info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL,
9413 tls_sec, 0, NULL, false, bed->collect, &h)))
9414 return false;
9415
9416 tlsbase->type = STT_TLS;
9417 tlsbase = (struct elf_link_hash_entry *) h;
9418 tlsbase->def_regular = 1;
9419 tlsbase->other = STV_HIDDEN;
9420 (*bed->elf_backend_hide_symbol) (info, tlsbase, true);
9421 }
9422 }
9423
9424 return true;
9425 }
9426
9427 /* Finish up dynamic symbol handling. We set the contents of various
9428 dynamic sections here. */
9429
9430 static bool
9431 elfNN_aarch64_finish_dynamic_symbol (bfd *output_bfd,
9432 struct bfd_link_info *info,
9433 struct elf_link_hash_entry *h,
9434 Elf_Internal_Sym *sym)
9435 {
9436 struct elf_aarch64_link_hash_table *htab;
9437 htab = elf_aarch64_hash_table (info);
9438
9439 if (h->plt.offset != (bfd_vma) - 1)
9440 {
9441 asection *plt, *gotplt, *relplt;
9442
9443 /* This symbol has an entry in the procedure linkage table. Set
9444 it up. */
9445
9446 /* When building a static executable, use .iplt, .igot.plt and
9447 .rela.iplt sections for STT_GNU_IFUNC symbols. */
9448 if (htab->root.splt != NULL)
9449 {
9450 plt = htab->root.splt;
9451 gotplt = htab->root.sgotplt;
9452 relplt = htab->root.srelplt;
9453 }
9454 else
9455 {
9456 plt = htab->root.iplt;
9457 gotplt = htab->root.igotplt;
9458 relplt = htab->root.irelplt;
9459 }
9460
9461 /* This symbol has an entry in the procedure linkage table. Set
9462 it up. */
9463 if ((h->dynindx == -1
9464 && !((h->forced_local || bfd_link_executable (info))
9465 && h->def_regular
9466 && h->type == STT_GNU_IFUNC))
9467 || plt == NULL
9468 || gotplt == NULL
9469 || relplt == NULL)
9470 return false;
9471
9472 elfNN_aarch64_create_small_pltn_entry (h, htab, output_bfd, info);
9473 if (!h->def_regular)
9474 {
9475 /* Mark the symbol as undefined, rather than as defined in
9476 the .plt section. */
9477 sym->st_shndx = SHN_UNDEF;
9478 /* If the symbol is weak we need to clear the value.
9479 Otherwise, the PLT entry would provide a definition for
9480 the symbol even if the symbol wasn't defined anywhere,
9481 and so the symbol would never be NULL. Leave the value if
9482 there were any relocations where pointer equality matters
9483 (this is a clue for the dynamic linker, to make function
9484 pointer comparisons work between an application and shared
9485 library). */
9486 if (!h->ref_regular_nonweak || !h->pointer_equality_needed)
9487 sym->st_value = 0;
9488 }
9489 }
9490
9491 if (h->got.offset != (bfd_vma) - 1
9492 && elf_aarch64_hash_entry (h)->got_type == GOT_NORMAL
9493 /* Undefined weak symbol in static PIE resolves to 0 without
9494 any dynamic relocations. */
9495 && !UNDEFWEAK_NO_DYNAMIC_RELOC (info, h))
9496 {
9497 Elf_Internal_Rela rela;
9498 bfd_byte *loc;
9499
9500 /* This symbol has an entry in the global offset table. Set it
9501 up. */
9502 if (htab->root.sgot == NULL || htab->root.srelgot == NULL)
9503 abort ();
9504
9505 rela.r_offset = (htab->root.sgot->output_section->vma
9506 + htab->root.sgot->output_offset
9507 + (h->got.offset & ~(bfd_vma) 1));
9508
9509 if (h->def_regular
9510 && h->type == STT_GNU_IFUNC)
9511 {
9512 if (bfd_link_pic (info))
9513 {
9514 /* Generate R_AARCH64_GLOB_DAT. */
9515 goto do_glob_dat;
9516 }
9517 else
9518 {
9519 asection *plt;
9520
9521 if (!h->pointer_equality_needed)
9522 abort ();
9523
9524 /* For non-shared object, we can't use .got.plt, which
9525 contains the real function address if we need pointer
9526 equality. We load the GOT entry with the PLT entry. */
9527 plt = htab->root.splt ? htab->root.splt : htab->root.iplt;
9528 bfd_put_NN (output_bfd, (plt->output_section->vma
9529 + plt->output_offset
9530 + h->plt.offset),
9531 htab->root.sgot->contents
9532 + (h->got.offset & ~(bfd_vma) 1));
9533 return true;
9534 }
9535 }
9536 else if (bfd_link_pic (info) && SYMBOL_REFERENCES_LOCAL (info, h))
9537 {
9538 if (!(h->def_regular || ELF_COMMON_DEF_P (h)))
9539 return false;
9540
9541 BFD_ASSERT ((h->got.offset & 1) != 0);
9542 rela.r_info = ELFNN_R_INFO (0, AARCH64_R (RELATIVE));
9543 rela.r_addend = (h->root.u.def.value
9544 + h->root.u.def.section->output_section->vma
9545 + h->root.u.def.section->output_offset);
9546 }
9547 else
9548 {
9549 do_glob_dat:
9550 BFD_ASSERT ((h->got.offset & 1) == 0);
9551 bfd_put_NN (output_bfd, (bfd_vma) 0,
9552 htab->root.sgot->contents + h->got.offset);
9553 rela.r_info = ELFNN_R_INFO (h->dynindx, AARCH64_R (GLOB_DAT));
9554 rela.r_addend = 0;
9555 }
9556
9557 loc = htab->root.srelgot->contents;
9558 loc += htab->root.srelgot->reloc_count++ * RELOC_SIZE (htab);
9559 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
9560 }
9561
9562 if (h->needs_copy)
9563 {
9564 Elf_Internal_Rela rela;
9565 asection *s;
9566 bfd_byte *loc;
9567
9568 /* This symbol needs a copy reloc. Set it up. */
9569 if (h->dynindx == -1
9570 || (h->root.type != bfd_link_hash_defined
9571 && h->root.type != bfd_link_hash_defweak)
9572 || htab->root.srelbss == NULL)
9573 abort ();
9574
9575 rela.r_offset = (h->root.u.def.value
9576 + h->root.u.def.section->output_section->vma
9577 + h->root.u.def.section->output_offset);
9578 rela.r_info = ELFNN_R_INFO (h->dynindx, AARCH64_R (COPY));
9579 rela.r_addend = 0;
9580 if (h->root.u.def.section == htab->root.sdynrelro)
9581 s = htab->root.sreldynrelro;
9582 else
9583 s = htab->root.srelbss;
9584 loc = s->contents + s->reloc_count++ * RELOC_SIZE (htab);
9585 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
9586 }
9587
9588 /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute. SYM may
9589 be NULL for local symbols. */
9590 if (sym != NULL
9591 && (h == elf_hash_table (info)->hdynamic
9592 || h == elf_hash_table (info)->hgot))
9593 sym->st_shndx = SHN_ABS;
9594
9595 return true;
9596 }
9597
9598 /* Finish up local dynamic symbol handling. We set the contents of
9599 various dynamic sections here. */
9600
9601 static int
9602 elfNN_aarch64_finish_local_dynamic_symbol (void **slot, void *inf)
9603 {
9604 struct elf_link_hash_entry *h
9605 = (struct elf_link_hash_entry *) *slot;
9606 struct bfd_link_info *info
9607 = (struct bfd_link_info *) inf;
9608
9609 return elfNN_aarch64_finish_dynamic_symbol (info->output_bfd,
9610 info, h, NULL);
9611 }
9612
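/* Install the special first entry (PLT0) of the small PLT, patching it
   to address GOT[2], where the dynamic linker stores the entry point of
   its symbol resolver.  */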
9613 static void
9614 elfNN_aarch64_init_small_plt0_entry (bfd *output_bfd ATTRIBUTE_UNUSED,
9615 struct elf_aarch64_link_hash_table
9616 *htab)
9617 {
9618 /* Fill in PLT0. Fixme:RR Note this doesn't distinguish between
9619 small and large PLTs and at the moment just generates
9620 the small PLT. */
9621
9622 /* PLT0 of the small PLT looks like this in ELF64 -
9623 stp x16, x30, [sp, #-16]! // Save the reloc and lr on stack.
9624 adrp x16, PLT_GOT + 16 // Get the page base of the GOTPLT
9625 ldr x17, [x16, #:lo12:PLT_GOT+16] // Load the address of the
9626 // symbol resolver
9627 add x16, x16, #:lo12:PLT_GOT+16 // Load the lo12 bits of the
9628 // GOTPLT entry for this.
9629 br x17
9630 PLT0 will be slightly different in ELF32 due to different got entry
9631 size. */
9632 bfd_vma plt_got_2nd_ent; /* Address of GOT[2]. */
9633 bfd_vma plt_base;
9634
9635
9636 memcpy (htab->root.splt->contents, htab->plt0_entry,
9637 htab->plt_header_size);
9638
9639 /* PR 26312: Explicitly set the sh_entsize to 0 so that
9640 consumers do not think that the section contains fixed
9641 sized objects. */
9642 elf_section_data (htab->root.splt->output_section)->this_hdr.sh_entsize = 0;
9643
9644 plt_got_2nd_ent = (htab->root.sgotplt->output_section->vma
9645 + htab->root.sgotplt->output_offset
9646 + GOT_ENTRY_SIZE * 2);
9647
9648 plt_base = htab->root.splt->output_section->vma +
9649 htab->root.splt->output_offset;
9650
9651 /* The first instruction in a BTI enabled PLT stub is a BTI
9652 instruction, so skip it. */
9653 bfd_byte *plt0_entry = htab->root.splt->contents;
9654 if (elf_aarch64_tdata (output_bfd)->plt_type & PLT_BTI)
9655 plt0_entry = plt0_entry + 4;
9656
9657 /* Fill in the top 21 bits for this: ADRP x16, PLT_GOT + n * 8.
9658 ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
9659 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADR_HI21_PCREL,
9660 plt0_entry + 4,
9661 PG (plt_got_2nd_ent) - PG (plt_base + 4));
9662
9663 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_LDSTNN_LO12,
9664 plt0_entry + 8,
9665 PG_OFFSET (plt_got_2nd_ent));
9666
9667 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADD_LO12,
9668 plt0_entry + 12,
9669 PG_OFFSET (plt_got_2nd_ent));
9670 }
9671
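/* Finish up the dynamic sections: patch the final addresses and sizes
   into the .dynamic entries, lay down PLT0 and (when lazy TLS
   resolution is enabled) the TLSDESC resolver stub, and initialise the
   reserved .got.plt slots and the .got header.  */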
9672 static bool
9673 elfNN_aarch64_finish_dynamic_sections (bfd *output_bfd,
9674 struct bfd_link_info *info)
9675 {
9676 struct elf_aarch64_link_hash_table *htab;
9677 bfd *dynobj;
9678 asection *sdyn;
9679
9680 htab = elf_aarch64_hash_table (info);
9681 dynobj = htab->root.dynobj;
9682 sdyn = bfd_get_linker_section (dynobj, ".dynamic");
9683
9684 if (htab->root.dynamic_sections_created)
9685 {
9686 ElfNN_External_Dyn *dyncon, *dynconend;
9687
9688 if (sdyn == NULL || htab->root.sgot == NULL)
9689 abort ();
9690
9691 dyncon = (ElfNN_External_Dyn *) sdyn->contents;
9692 dynconend = (ElfNN_External_Dyn *) (sdyn->contents + sdyn->size);
9693 for (; dyncon < dynconend; dyncon++)
9694 {
9695 Elf_Internal_Dyn dyn;
9696 asection *s;
9697
9698 bfd_elfNN_swap_dyn_in (dynobj, dyncon, &dyn);
9699
9700 switch (dyn.d_tag)
9701 {
9702 default:
9703 continue;
9704
9705 case DT_PLTGOT:
9706 s = htab->root.sgotplt;
9707 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
9708 break;
9709
9710 case DT_JMPREL:
9711 s = htab->root.srelplt;
9712 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
9713 break;
9714
9715 case DT_PLTRELSZ:
9716 s = htab->root.srelplt;
9717 dyn.d_un.d_val = s->size;
9718 break;
9719
9720 case DT_TLSDESC_PLT:
9721 s = htab->root.splt;
9722 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
9723 + htab->root.tlsdesc_plt;
9724 break;
9725
9726 case DT_TLSDESC_GOT:
9727 s = htab->root.sgot;
9728 BFD_ASSERT (htab->root.tlsdesc_got != (bfd_vma)-1);
9729 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
9730 + htab->root.tlsdesc_got;
9731 break;
9732 }
9733
9734 bfd_elfNN_swap_dyn_out (output_bfd, &dyn, dyncon);
9735 }
9736
9737 }
9738
9739 /* Fill in the special first entry in the procedure linkage table. */
9740 if (htab->root.splt && htab->root.splt->size > 0)
9741 {
9742 elfNN_aarch64_init_small_plt0_entry (output_bfd, htab);
9743
9744 if (htab->root.tlsdesc_plt && !(info->flags & DF_BIND_NOW))
9745 {
9746 BFD_ASSERT (htab->root.tlsdesc_got != (bfd_vma)-1);
9747 bfd_put_NN (output_bfd, (bfd_vma) 0,
9748 htab->root.sgot->contents + htab->root.tlsdesc_got);
9749
9750 const bfd_byte *entry = elfNN_aarch64_tlsdesc_small_plt_entry;
9751 htab->tlsdesc_plt_entry_size = PLT_TLSDESC_ENTRY_SIZE;
9752
9753 aarch64_plt_type type = elf_aarch64_tdata (output_bfd)->plt_type;
9754 if (type == PLT_BTI || type == PLT_BTI_PAC)
9755 {
9756 entry = elfNN_aarch64_tlsdesc_small_plt_bti_entry;
9757 }
9758
9759 memcpy (htab->root.splt->contents + htab->root.tlsdesc_plt,
9760 entry, htab->tlsdesc_plt_entry_size);
9761
9762 {
9763 bfd_vma adrp1_addr =
9764 htab->root.splt->output_section->vma
9765 + htab->root.splt->output_offset
9766 + htab->root.tlsdesc_plt + 4;
9767
9768 bfd_vma adrp2_addr = adrp1_addr + 4;
9769
9770 bfd_vma got_addr =
9771 htab->root.sgot->output_section->vma
9772 + htab->root.sgot->output_offset;
9773
9774 bfd_vma pltgot_addr =
9775 htab->root.sgotplt->output_section->vma
9776 + htab->root.sgotplt->output_offset;
9777
9778 bfd_vma dt_tlsdesc_got = got_addr + htab->root.tlsdesc_got;
9779
9780 bfd_byte *plt_entry =
9781 htab->root.splt->contents + htab->root.tlsdesc_plt;
9782
9783 /* The first instruction in a BTI enabled PLT stub is a BTI
9784 instruction, so skip it. */
9785 if (type & PLT_BTI)
9786 {
9787 plt_entry = plt_entry + 4;
9788 adrp1_addr = adrp1_addr + 4;
9789 adrp2_addr = adrp2_addr + 4;
9790 }
9791
9792 /* adrp x2, DT_TLSDESC_GOT */
9793 elf_aarch64_update_plt_entry (output_bfd,
9794 BFD_RELOC_AARCH64_ADR_HI21_PCREL,
9795 plt_entry + 4,
9796 (PG (dt_tlsdesc_got)
9797 - PG (adrp1_addr)));
9798
9799 /* adrp x3, 0 */
9800 elf_aarch64_update_plt_entry (output_bfd,
9801 BFD_RELOC_AARCH64_ADR_HI21_PCREL,
9802 plt_entry + 8,
9803 (PG (pltgot_addr)
9804 - PG (adrp2_addr)));
9805
9806 /* ldr x2, [x2, #0] */
9807 elf_aarch64_update_plt_entry (output_bfd,
9808 BFD_RELOC_AARCH64_LDSTNN_LO12,
9809 plt_entry + 12,
9810 PG_OFFSET (dt_tlsdesc_got));
9811
9812 /* add x3, x3, 0 */
9813 elf_aarch64_update_plt_entry (output_bfd,
9814 BFD_RELOC_AARCH64_ADD_LO12,
9815 plt_entry + 16,
9816 PG_OFFSET (pltgot_addr));
9817 }
9818 }
9819 }
9820
9821 if (htab->root.sgotplt)
9822 {
9823 if (bfd_is_abs_section (htab->root.sgotplt->output_section))
9824 {
9825 _bfd_error_handler
9826 (_("discarded output section: `%pA'"), htab->root.sgotplt);
9827 return false;
9828 }
9829
9830 /* Fill in the first three entries in the global offset table. */
9831 if (htab->root.sgotplt->size > 0)
9832 {
9833 bfd_put_NN (output_bfd, (bfd_vma) 0, htab->root.sgotplt->contents);
9834
9835 /* Write GOT[1] and GOT[2], needed for the dynamic linker. */
9836 bfd_put_NN (output_bfd,
9837 (bfd_vma) 0,
9838 htab->root.sgotplt->contents + GOT_ENTRY_SIZE);
9839 bfd_put_NN (output_bfd,
9840 (bfd_vma) 0,
9841 htab->root.sgotplt->contents + GOT_ENTRY_SIZE * 2);
9842 }
9843
9844 if (htab->root.sgot)
9845 {
9846 if (htab->root.sgot->size > 0)
9847 {
9848 bfd_vma addr =
9849 sdyn ? sdyn->output_section->vma + sdyn->output_offset : 0;
9850 bfd_put_NN (output_bfd, addr, htab->root.sgot->contents);
9851 }
9852 }
9853
9854 elf_section_data (htab->root.sgotplt->output_section)->
9855 this_hdr.sh_entsize = GOT_ENTRY_SIZE;
9856 }
9857
9858 if (htab->root.sgot && htab->root.sgot->size > 0)
9859 elf_section_data (htab->root.sgot->output_section)->this_hdr.sh_entsize
9860 = GOT_ENTRY_SIZE;
9861
9862 /* Fill PLT and GOT entries for local STT_GNU_IFUNC symbols. */
9863 htab_traverse (htab->loc_hash_table,
9864 elfNN_aarch64_finish_local_dynamic_symbol,
9865 info);
9866
9867 return true;
9868 }
9869
9870 /* Check whether BTI and/or PAC enabled PLTs are needed. Returns the type needed. */
9871 static aarch64_plt_type
9872 get_plt_type (bfd *abfd)
9873 {
9874 aarch64_plt_type ret = PLT_NORMAL;
9875 bfd_byte *contents, *extdyn, *extdynend;
9876 asection *sec = bfd_get_section_by_name (abfd, ".dynamic");
9877 if (!sec
9878 || sec->size < sizeof (ElfNN_External_Dyn)
9879 || !bfd_malloc_and_get_section (abfd, sec, &contents))
9880 return ret;
9881 extdyn = contents;
9882 extdynend = contents + sec->size - sizeof (ElfNN_External_Dyn);
9883 for (; extdyn <= extdynend; extdyn += sizeof (ElfNN_External_Dyn))
9884 {
9885 Elf_Internal_Dyn dyn;
9886 bfd_elfNN_swap_dyn_in (abfd, extdyn, &dyn);
9887
9888 /* Let's check the processor specific dynamic array tags. */
9889 bfd_vma tag = dyn.d_tag;
9890 if (tag < DT_LOPROC || tag > DT_HIPROC)
9891 continue;
9892
9893 switch (tag)
9894 {
9895 case DT_AARCH64_BTI_PLT:
9896 ret |= PLT_BTI;
9897 break;
9898
9899 case DT_AARCH64_PAC_PLT:
9900 ret |= PLT_PAC;
9901 break;
9902
9903 default: break;
9904 }
9905 }
9906 free (contents);
9907 return ret;
9908 }
9909
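/* Hook for producing synthetic "foo@plt" symbols: record the PLT
   flavour of ABFD first so that elfNN_aarch64_plt_sym_val can size the
   stubs correctly, then defer to the generic ELF implementation.  */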
9910 static long
9911 elfNN_aarch64_get_synthetic_symtab (bfd *abfd,
9912 long symcount,
9913 asymbol **syms,
9914 long dynsymcount,
9915 asymbol **dynsyms,
9916 asymbol **ret)
9917 {
9918 elf_aarch64_tdata (abfd)->plt_type = get_plt_type (abfd);
9919 return _bfd_elf_get_synthetic_symtab (abfd, symcount, syms,
9920 dynsymcount, dynsyms, ret);
9921 }
9922
9923 /* Return address for Ith PLT stub in section PLT, for relocation REL
9924 or (bfd_vma) -1 if it should not be included. */
9925
9926 static bfd_vma
9927 elfNN_aarch64_plt_sym_val (bfd_vma i, const asection *plt,
9928 const arelent *rel ATTRIBUTE_UNUSED)
9929 {
9930 size_t plt0_size = PLT_ENTRY_SIZE;
9931 size_t pltn_size = PLT_SMALL_ENTRY_SIZE;
9932
9933 if (elf_aarch64_tdata (plt->owner)->plt_type == PLT_BTI_PAC)
9934 {
9935 if (elf_elfheader (plt->owner)->e_type == ET_EXEC)
9936 pltn_size = PLT_BTI_PAC_SMALL_ENTRY_SIZE;
9937 else
9938 pltn_size = PLT_PAC_SMALL_ENTRY_SIZE;
9939 }
9940 else if (elf_aarch64_tdata (plt->owner)->plt_type == PLT_BTI)
9941 {
9942 if (elf_elfheader (plt->owner)->e_type == ET_EXEC)
9943 pltn_size = PLT_BTI_SMALL_ENTRY_SIZE;
9944 }
9945 else if (elf_aarch64_tdata (plt->owner)->plt_type == PLT_PAC)
9946 {
9947 pltn_size = PLT_PAC_SMALL_ENTRY_SIZE;
9948 }
9949
9950 return plt->vma + plt0_size + i * pltn_size;
9951 }
9952
9953 /* Returns TRUE if NAME is an AArch64 mapping symbol.
9954 The ARM ELF standard defines $x (for A64 code) and $d (for data).
9955 It also allows a period-initiated suffix to be added to the symbol, i.e.:
9956 "$[adtx]\.[:sym_char]+". */
9957
9958 static bool
9959 is_aarch64_mapping_symbol (const char * name)
9960 {
9961 return name != NULL /* Paranoia. */
9962 && name[0] == '$' /* Note: if objcopy --prefix-symbols has been used then
9963 the mapping symbols could have acquired a prefix.
9964 We do not support this here, since such symbols no
9965 longer conform to the ARM ELF ABI. */
9966 && (name[1] == 'd' || name[1] == 'x')
9967 && (name[2] == 0 || name[2] == '.');
9968 /* FIXME: Strictly speaking the symbol is only a valid mapping symbol if
9969 any characters that follow the period are legal characters for the body
9970 of a symbol's name. For now we just assume that this is the case. */
9971 }
9972
9973 /* Make sure that mapping symbols in object files are not removed via the
9974 "strip --strip-unneeded" tool. These symbols might needed in order to
9975 correctly generate linked files. Once an object file has been linked,
9976 it should be safe to remove them. */
9977
9978 static void
9979 elfNN_aarch64_backend_symbol_processing (bfd *abfd, asymbol *sym)
9980 {
9981 if (((abfd->flags & (EXEC_P | DYNAMIC)) == 0)
9982 && sym->section != bfd_abs_section_ptr
9983 && is_aarch64_mapping_symbol (sym->name))
9984 sym->flags |= BSF_KEEP;
9985 }
9986
9987 /* Implement elf_backend_setup_gnu_properties for AArch64. It serves as a
9988 wrapper function for _bfd_aarch64_elf_link_setup_gnu_properties to account
9989 for the effect of GNU properties of the output_bfd. */
9990 static bfd *
9991 elfNN_aarch64_link_setup_gnu_properties (struct bfd_link_info *info)
9992 {
9993 uint32_t prop = elf_aarch64_tdata (info->output_bfd)->gnu_and_prop;
9994 bfd *pbfd = _bfd_aarch64_elf_link_setup_gnu_properties (info, &prop);
9995 elf_aarch64_tdata (info->output_bfd)->gnu_and_prop = prop;
9996 elf_aarch64_tdata (info->output_bfd)->plt_type
9997 |= (prop & GNU_PROPERTY_AARCH64_FEATURE_1_BTI) ? PLT_BTI : 0;
9998 setup_plt_values (info, elf_aarch64_tdata (info->output_bfd)->plt_type);
9999 return pbfd;
10000 }
10001
10002 /* Implement elf_backend_merge_gnu_properties for AArch64. It serves as a
10003 wrapper function for _bfd_aarch64_elf_merge_gnu_properties to account
10004 for the effect of GNU properties of the output_bfd. */
10005 static bool
10006 elfNN_aarch64_merge_gnu_properties (struct bfd_link_info *info,
10007 bfd *abfd, bfd *bbfd,
10008 elf_property *aprop,
10009 elf_property *bprop)
10010 {
10011 uint32_t prop
10012 = elf_aarch64_tdata (info->output_bfd)->gnu_and_prop;
10013
10014 /* If the output has been marked with BTI using a command line argument,
10015 give a warning if necessary. */
10016 /* Properties are merged per type, hence only check for warnings when merging
10017 GNU_PROPERTY_AARCH64_FEATURE_1_AND. */
10018 if (((aprop && aprop->pr_type == GNU_PROPERTY_AARCH64_FEATURE_1_AND)
10019 || (bprop && bprop->pr_type == GNU_PROPERTY_AARCH64_FEATURE_1_AND))
10020 && (prop & GNU_PROPERTY_AARCH64_FEATURE_1_BTI)
10021 && (!elf_aarch64_tdata (info->output_bfd)->no_bti_warn))
10022 {
10023 if ((aprop && !(aprop->u.number & GNU_PROPERTY_AARCH64_FEATURE_1_BTI))
10024 || !aprop)
10025 {
10026 _bfd_error_handler (_("%pB: warning: BTI turned on by -z force-bti when "
10027 "all inputs do not have BTI in NOTE section."),
10028 abfd);
10029 }
10030 if ((bprop && !(bprop->u.number & GNU_PROPERTY_AARCH64_FEATURE_1_BTI))
10031 || !bprop)
10032 {
10033 _bfd_error_handler (_("%pB: warning: BTI turned on by -z force-bti when "
10034 "all inputs do not have BTI in NOTE section."),
10035 bbfd);
10036 }
10037 }
10038
10039 return _bfd_aarch64_elf_merge_gnu_properties (info, abfd, aprop,
10040 bprop, prop);
10041 }
10042
10043 /* We use this so we can override certain functions
10044 (though currently we don't). */
10045
10046 const struct elf_size_info elfNN_aarch64_size_info =
10047 {
10048 sizeof (ElfNN_External_Ehdr),
10049 sizeof (ElfNN_External_Phdr),
10050 sizeof (ElfNN_External_Shdr),
10051 sizeof (ElfNN_External_Rel),
10052 sizeof (ElfNN_External_Rela),
10053 sizeof (ElfNN_External_Sym),
10054 sizeof (ElfNN_External_Dyn),
10055 sizeof (Elf_External_Note),
10056 4, /* Hash table entry size. */
10057 1, /* Internal relocs per external relocs. */
10058 ARCH_SIZE, /* Arch size. */
10059 LOG_FILE_ALIGN, /* Log_file_align. */
10060 ELFCLASSNN, EV_CURRENT,
10061 bfd_elfNN_write_out_phdrs,
10062 bfd_elfNN_write_shdrs_and_ehdr,
10063 bfd_elfNN_checksum_contents,
10064 bfd_elfNN_write_relocs,
10065 bfd_elfNN_swap_symbol_in,
10066 bfd_elfNN_swap_symbol_out,
10067 bfd_elfNN_slurp_reloc_table,
10068 bfd_elfNN_slurp_symbol_table,
10069 bfd_elfNN_swap_dyn_in,
10070 bfd_elfNN_swap_dyn_out,
10071 bfd_elfNN_swap_reloc_in,
10072 bfd_elfNN_swap_reloc_out,
10073 bfd_elfNN_swap_reloca_in,
10074 bfd_elfNN_swap_reloca_out
10075 };
10076
10077 #define ELF_ARCH bfd_arch_aarch64
10078 #define ELF_MACHINE_CODE EM_AARCH64
10079 #define ELF_MAXPAGESIZE 0x10000
10080 #define ELF_COMMONPAGESIZE 0x1000
10081
10082 #define bfd_elfNN_close_and_cleanup \
10083 elfNN_aarch64_close_and_cleanup
10084
10085 #define bfd_elfNN_bfd_free_cached_info \
10086 elfNN_aarch64_bfd_free_cached_info
10087
10088 #define bfd_elfNN_bfd_is_target_special_symbol \
10089 elfNN_aarch64_is_target_special_symbol
10090
10091 #define bfd_elfNN_bfd_link_hash_table_create \
10092 elfNN_aarch64_link_hash_table_create
10093
10094 #define bfd_elfNN_bfd_merge_private_bfd_data \
10095 elfNN_aarch64_merge_private_bfd_data
10096
10097 #define bfd_elfNN_bfd_print_private_bfd_data \
10098 elfNN_aarch64_print_private_bfd_data
10099
10100 #define bfd_elfNN_bfd_reloc_type_lookup \
10101 elfNN_aarch64_reloc_type_lookup
10102
10103 #define bfd_elfNN_bfd_reloc_name_lookup \
10104 elfNN_aarch64_reloc_name_lookup
10105
10106 #define bfd_elfNN_bfd_set_private_flags \
10107 elfNN_aarch64_set_private_flags
10108
10109 #define bfd_elfNN_find_inliner_info \
10110 elfNN_aarch64_find_inliner_info
10111
10112 #define bfd_elfNN_get_synthetic_symtab \
10113 elfNN_aarch64_get_synthetic_symtab
10114
10115 #define bfd_elfNN_mkobject \
10116 elfNN_aarch64_mkobject
10117
10118 #define bfd_elfNN_new_section_hook \
10119 elfNN_aarch64_new_section_hook
10120
10121 #define elf_backend_adjust_dynamic_symbol \
10122 elfNN_aarch64_adjust_dynamic_symbol
10123
10124 #define elf_backend_always_size_sections \
10125 elfNN_aarch64_always_size_sections
10126
10127 #define elf_backend_check_relocs \
10128 elfNN_aarch64_check_relocs
10129
10130 #define elf_backend_copy_indirect_symbol \
10131 elfNN_aarch64_copy_indirect_symbol
10132
10133 #define elf_backend_merge_symbol_attribute \
10134 elfNN_aarch64_merge_symbol_attribute
10135
10136 /* Create .dynbss, and .rela.bss sections in DYNOBJ, and set up shortcuts
10137 to them in our hash. */
10138 #define elf_backend_create_dynamic_sections \
10139 elfNN_aarch64_create_dynamic_sections
10140
10141 #define elf_backend_init_index_section \
10142 _bfd_elf_init_2_index_sections
10143
10144 #define elf_backend_finish_dynamic_sections \
10145 elfNN_aarch64_finish_dynamic_sections
10146
10147 #define elf_backend_finish_dynamic_symbol \
10148 elfNN_aarch64_finish_dynamic_symbol
10149
10150 #define elf_backend_object_p \
10151 elfNN_aarch64_object_p
10152
10153 #define elf_backend_output_arch_local_syms \
10154 elfNN_aarch64_output_arch_local_syms
10155
10156 #define elf_backend_maybe_function_sym \
10157 elfNN_aarch64_maybe_function_sym
10158
10159 #define elf_backend_plt_sym_val \
10160 elfNN_aarch64_plt_sym_val
10161
10162 #define elf_backend_init_file_header \
10163 elfNN_aarch64_init_file_header
10164
10165 #define elf_backend_relocate_section \
10166 elfNN_aarch64_relocate_section
10167
10168 #define elf_backend_reloc_type_class \
10169 elfNN_aarch64_reloc_type_class
10170
10171 #define elf_backend_section_from_shdr \
10172 elfNN_aarch64_section_from_shdr
10173
10174 #define elf_backend_section_from_phdr \
10175 elfNN_aarch64_section_from_phdr
10176
10177 #define elf_backend_modify_headers \
10178 elfNN_aarch64_modify_headers
10179
10180 #define elf_backend_size_dynamic_sections \
10181 elfNN_aarch64_size_dynamic_sections
10182
10183 #define elf_backend_size_info \
10184 elfNN_aarch64_size_info
10185
10186 #define elf_backend_write_section \
10187 elfNN_aarch64_write_section
10188
10189 #define elf_backend_symbol_processing \
10190 elfNN_aarch64_backend_symbol_processing
10191
10192 #define elf_backend_setup_gnu_properties \
10193 elfNN_aarch64_link_setup_gnu_properties
10194
10195 #define elf_backend_merge_gnu_properties \
10196 elfNN_aarch64_merge_gnu_properties
10197
10198 #define elf_backend_can_refcount 1
10199 #define elf_backend_can_gc_sections 1
10200 #define elf_backend_plt_readonly 1
10201 #define elf_backend_want_got_plt 1
10202 #define elf_backend_want_plt_sym 0
10203 #define elf_backend_want_dynrelro 1
10204 #define elf_backend_may_use_rel_p 0
10205 #define elf_backend_may_use_rela_p 1
10206 #define elf_backend_default_use_rela_p 1
10207 #define elf_backend_rela_normal 1
10208 #define elf_backend_dtrel_excludes_plt 1
10209 #define elf_backend_got_header_size (GOT_ENTRY_SIZE * 3)
10210 #define elf_backend_default_execstack 0
10211 #define elf_backend_extern_protected_data 0
10212 #define elf_backend_hash_symbol elf_aarch64_hash_symbol
10213
10214 #undef elf_backend_obj_attrs_section
10215 #define elf_backend_obj_attrs_section ".ARM.attributes"
10216
10217 #include "elfNN-target.h"
10218
10219 /* CloudABI support. */
10220
10221 #undef TARGET_LITTLE_SYM
10222 #define TARGET_LITTLE_SYM aarch64_elfNN_le_cloudabi_vec
10223 #undef TARGET_LITTLE_NAME
10224 #define TARGET_LITTLE_NAME "elfNN-littleaarch64-cloudabi"
10225 #undef TARGET_BIG_SYM
10226 #define TARGET_BIG_SYM aarch64_elfNN_be_cloudabi_vec
10227 #undef TARGET_BIG_NAME
10228 #define TARGET_BIG_NAME "elfNN-bigaarch64-cloudabi"
10229
10230 #undef ELF_OSABI
10231 #define ELF_OSABI ELFOSABI_CLOUDABI
10232
10233 #undef elfNN_bed
10234 #define elfNN_bed elfNN_aarch64_cloudabi_bed
10235
10236 #include "elfNN-target.h"