Make bfd_byte an int8_t, flagword a uint32_t
bfd/elfnn-aarch64.c (binutils-gdb.git)
1 /* AArch64-specific support for NN-bit ELF.
2 Copyright (C) 2009-2023 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of BFD, the Binary File Descriptor library.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21 /* Notes on implementation:
22
23 Thread Local Store (TLS)
24
25 Overview:
26
27 The implementation currently supports both traditional TLS and TLS
28 descriptors, but only general dynamic (GD).
29
30 For traditional TLS the assembler will present us with code
31 fragments of the form:
32
33 adrp x0, :tlsgd:foo
34 R_AARCH64_TLSGD_ADR_PAGE21(foo)
35 add x0, :tlsgd_lo12:foo
36 R_AARCH64_TLSGD_ADD_LO12_NC(foo)
37 bl __tls_get_addr
38 nop
39
40 For TLS descriptors the assembler will present us with code
41 fragments of the form:
42
43 adrp x0, :tlsdesc:foo R_AARCH64_TLSDESC_ADR_PAGE21(foo)
44 ldr x1, [x0, #:tlsdesc_lo12:foo] R_AARCH64_TLSDESC_LD64_LO12(foo)
45 add x0, x0, #:tlsdesc_lo12:foo R_AARCH64_TLSDESC_ADD_LO12(foo)
46 .tlsdesccall foo
47 blr x1 R_AARCH64_TLSDESC_CALL(foo)
48
49 The relocations R_AARCH64_TLSGD_{ADR_PREL21,ADD_LO12_NC} against foo
50 indicate that foo is thread local and should be accessed via the
51    traditional TLS mechanism.
52
53 The relocations R_AARCH64_TLSDESC_{ADR_PAGE21,LD64_LO12_NC,ADD_LO12_NC}
54 against foo indicate that 'foo' is thread local and should be accessed
55 via a TLS descriptor mechanism.
56
57 The precise instruction sequence is only relevant from the
58 perspective of linker relaxation which is currently not implemented.
59
60 The static linker must detect that 'foo' is a TLS object and
61 allocate a double GOT entry. The GOT entry must be created for both
62    global and local TLS symbols.  Note that this is different from
63    non-TLS local objects, which do not need a GOT entry.
64
65 In the traditional TLS mechanism, the double GOT entry is used to
66 provide the tls_index structure, containing module and offset
67 entries. The static linker places the relocation R_AARCH64_TLS_DTPMOD
68    on the module entry.  The loader will subsequently fix up this
69    relocation with the module identity.
70
71 For global traditional TLS symbols the static linker places an
72 R_AARCH64_TLS_DTPREL relocation on the offset entry. The loader
73    will subsequently fix up the offset.  For local TLS symbols the static
74    linker fixes up the offset.
75
76 In the TLS descriptor mechanism the double GOT entry is used to
77 provide the descriptor. The static linker places the relocation
78 R_AARCH64_TLSDESC on the first GOT slot. The loader will
79 subsequently fix this up.
80
81 Implementation:
82
83 The handling of TLS symbols is implemented across a number of
84 different backend functions. The following is a top level view of
85 what processing is performed where.
86
87 The TLS implementation maintains state information for each TLS
88 symbol. The state information for local and global symbols is kept
89 in different places. Global symbols use generic BFD structures while
90 local symbols use backend specific structures that are allocated and
91 maintained entirely by the backend.
92
93 The flow:
94
95 elfNN_aarch64_check_relocs()
96
97 This function is invoked for each relocation.
98
99 The TLS relocations R_AARCH64_TLSGD_{ADR_PREL21,ADD_LO12_NC} and
100 R_AARCH64_TLSDESC_{ADR_PAGE21,LD64_LO12_NC,ADD_LO12_NC} are
101    spotted.  The local symbol data structures are created once, when
102    the first local symbol is seen.
103
104 The reference count for a symbol is incremented. The GOT type for
105 each symbol is marked as general dynamic.
106
107 elfNN_aarch64_allocate_dynrelocs ()
108
109    For each global symbol with a positive reference count we allocate a
110    double GOT slot.  For a traditional TLS symbol we allocate space for
111    two relocation entries on the GOT; for a TLS descriptor symbol we
112    allocate space for one relocation on the slot.  Record the GOT offset
113 for this symbol.
114
115 elfNN_aarch64_size_dynamic_sections ()
116
117    Iterate over all input BFDs, looking in the local symbol data
118    structures constructed earlier for local TLS symbols, and allocate
119    them double GOT slots along with space for a single GOT relocation.
120    Update the local symbol structure to record the allocated GOT offset.
121
122 elfNN_aarch64_relocate_section ()
123
124 Calls elfNN_aarch64_final_link_relocate ()
125
126 Emit the relevant TLS relocations against the GOT for each TLS
127 symbol. For local TLS symbols emit the GOT offset directly. The GOT
128    relocations are emitted only once, the first time a TLS symbol is
129    encountered.  The implementation uses the LSB of the GOT offset to
130    flag that the relevant GOT relocations for a symbol have already
131    been emitted (see the illustrative sketch following this comment).
132    All TLS code that uses the GOT offset must mask out this flag bit first.
133
134 elfNN_aarch64_final_link_relocate ()
135
136 Fixup the R_AARCH64_TLSGD_{ADR_PREL21, ADD_LO12_NC} relocations. */
137
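/* Illustrative sketch (not part of the original source): the LSB of a
   TLS symbol's recorded GOT offset doubles as an "already emitted" flag,
   so a consumer of the offset follows roughly this pattern (names here
   are hypothetical):

     off = symbol_tls_got_offset;
     if ((off & 1) == 0)
       {
         emit the GOT relocation(s) for the symbol;
         symbol_tls_got_offset = off | 1;      mark them as emitted
       }
     off &= ~(bfd_vma) 1;     clear the flag before addressing the GOT  */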
138 #include "sysdep.h"
139 #include "bfd.h"
140 #include "libiberty.h"
141 #include "libbfd.h"
142 #include "elf-bfd.h"
143 #include "bfdlink.h"
144 #include "objalloc.h"
145 #include "elf/aarch64.h"
146 #include "elfxx-aarch64.h"
147 #include "cpu-aarch64.h"
148
149 #define ARCH_SIZE NN
150
151 #if ARCH_SIZE == 64
152 #define AARCH64_R(NAME) R_AARCH64_ ## NAME
153 #define AARCH64_R_STR(NAME) "R_AARCH64_" #NAME
154 #define HOWTO64(...) HOWTO (__VA_ARGS__)
155 #define HOWTO32(...) EMPTY_HOWTO (0)
156 #define LOG_FILE_ALIGN 3
157 #define BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC BFD_RELOC_AARCH64_TLSDESC_LD64_LO12
158 #endif
159
160 #if ARCH_SIZE == 32
161 #define AARCH64_R(NAME) R_AARCH64_P32_ ## NAME
162 #define AARCH64_R_STR(NAME) "R_AARCH64_P32_" #NAME
163 #define HOWTO64(...) EMPTY_HOWTO (0)
164 #define HOWTO32(...) HOWTO (__VA_ARGS__)
165 #define LOG_FILE_ALIGN 2
166 #define BFD_RELOC_AARCH64_TLSDESC_LD32_LO12 BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC
167 #define R_AARCH64_P32_TLSDESC_ADD_LO12 R_AARCH64_P32_TLSDESC_ADD_LO12_NC
168 #endif
169
170 #define IS_AARCH64_TLS_RELOC(R_TYPE) \
171 ((R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC \
172 || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21 \
173 || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADR_PREL21 \
174 || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC \
175 || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_MOVW_G1 \
176 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21 \
177 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC \
178 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC \
179 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19 \
180 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC \
181 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1 \
182 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12 \
183 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12 \
184 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC \
185 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC \
186 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21 \
187 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_ADR_PREL21 \
188 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12 \
189 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC \
190 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12 \
191 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC \
192 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12 \
193 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC \
194 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12 \
195 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC \
196 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0 \
197 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC \
198 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1 \
199 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC \
200 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2 \
201 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12 \
202 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12 \
203 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC \
204 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12 \
205 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC \
206 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12 \
207 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC \
208 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12 \
209 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC \
210 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12 \
211 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC \
212 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0 \
213 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC \
214 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1 \
215 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC \
216 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2 \
217 || (R_TYPE) == BFD_RELOC_AARCH64_TLS_DTPMOD \
218 || (R_TYPE) == BFD_RELOC_AARCH64_TLS_DTPREL \
219 || (R_TYPE) == BFD_RELOC_AARCH64_TLS_TPREL \
220 || IS_AARCH64_TLSDESC_RELOC ((R_TYPE)))
221
222 #define IS_AARCH64_TLS_RELAX_RELOC(R_TYPE) \
223 ((R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADD \
224 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADD_LO12 \
225 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21 \
226 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21 \
227 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_CALL \
228 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LD_PREL19 \
229 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LDNN_LO12_NC \
230 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LDR \
231 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC \
232 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_OFF_G1 \
234 || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21 \
235 || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADR_PREL21 \
236 || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC \
237 || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC \
238 || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_MOVW_G1 \
239 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21 \
240 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19 \
241 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC \
242 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC \
243 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21 \
244 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_ADR_PREL21)
245
246 #define IS_AARCH64_TLSDESC_RELOC(R_TYPE) \
247 ((R_TYPE) == BFD_RELOC_AARCH64_TLSDESC \
248 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADD \
249 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADD_LO12 \
250 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21 \
251 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21 \
252 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_CALL \
253 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC \
254 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LD64_LO12 \
255 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LDR \
256 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LD_PREL19 \
257 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC \
258 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_OFF_G1)
259
260 #define ELIMINATE_COPY_RELOCS 1
261
262 /* Return the size of a relocation entry.  HTAB is the bfd's
263    elf_aarch64_link_hash_table.  */
264 #define RELOC_SIZE(HTAB) (sizeof (ElfNN_External_Rela))
265
266 /* GOT Entry size - 8 bytes in ELF64 and 4 bytes in ELF32. */
267 #define GOT_ENTRY_SIZE (ARCH_SIZE / 8)
268 #define PLT_ENTRY_SIZE (32)
269 #define PLT_SMALL_ENTRY_SIZE (16)
270 #define PLT_TLSDESC_ENTRY_SIZE (32)
271 /* PLT sizes with BTI insn. */
272 #define PLT_BTI_SMALL_ENTRY_SIZE (24)
273 /* PLT sizes with PAC insn. */
274 #define PLT_PAC_SMALL_ENTRY_SIZE (24)
275 /* PLT sizes with BTI and PAC insn. */
276 #define PLT_BTI_PAC_SMALL_ENTRY_SIZE (24)
277
278 /* Encoding of the nop instruction. */
279 #define INSN_NOP 0xd503201f
280
281 #define aarch64_compute_jump_table_size(htab) \
282 (((htab)->root.srelplt == NULL) ? 0 \
283 : (htab)->root.srelplt->reloc_count * GOT_ENTRY_SIZE)
284
285 /* The first entry in a procedure linkage table looks like this.
286    These PLT entries are used if the distance between the PLTGOT and
287    the PLT is < 4GB.  Note that the dynamic linker gets &PLTGOT[2]
288 in x16 and needs to work out PLTGOT[1] by using an address of
289 [x16,#-GOT_ENTRY_SIZE]. */
290 static const bfd_byte elfNN_aarch64_small_plt0_entry[PLT_ENTRY_SIZE] =
291 {
292 0xf0, 0x7b, 0xbf, 0xa9, /* stp x16, x30, [sp, #-16]! */
293 0x10, 0x00, 0x00, 0x90, /* adrp x16, (GOT+16) */
294 #if ARCH_SIZE == 64
295 0x11, 0x0A, 0x40, 0xf9, /* ldr x17, [x16, #PLT_GOT+0x10] */
296 0x10, 0x42, 0x00, 0x91, /* add x16, x16,#PLT_GOT+0x10 */
297 #else
298 0x11, 0x0A, 0x40, 0xb9, /* ldr w17, [x16, #PLT_GOT+0x8] */
299 0x10, 0x22, 0x00, 0x11, /* add w16, w16,#PLT_GOT+0x8 */
300 #endif
301 0x20, 0x02, 0x1f, 0xd6, /* br x17 */
302 0x1f, 0x20, 0x03, 0xd5, /* nop */
303 0x1f, 0x20, 0x03, 0xd5, /* nop */
304 0x1f, 0x20, 0x03, 0xd5, /* nop */
305 };
306
307 static const bfd_byte elfNN_aarch64_small_plt0_bti_entry[PLT_ENTRY_SIZE] =
308 {
309 0x5f, 0x24, 0x03, 0xd5, /* bti c. */
310 0xf0, 0x7b, 0xbf, 0xa9, /* stp x16, x30, [sp, #-16]! */
311 0x10, 0x00, 0x00, 0x90, /* adrp x16, (GOT+16) */
312 #if ARCH_SIZE == 64
313 0x11, 0x0A, 0x40, 0xf9, /* ldr x17, [x16, #PLT_GOT+0x10] */
314 0x10, 0x42, 0x00, 0x91, /* add x16, x16,#PLT_GOT+0x10 */
315 #else
316 0x11, 0x0A, 0x40, 0xb9, /* ldr w17, [x16, #PLT_GOT+0x8] */
317 0x10, 0x22, 0x00, 0x11, /* add w16, w16,#PLT_GOT+0x8 */
318 #endif
319 0x20, 0x02, 0x1f, 0xd6, /* br x17 */
320 0x1f, 0x20, 0x03, 0xd5, /* nop */
321 0x1f, 0x20, 0x03, 0xd5, /* nop */
322 };
323
324 /* A per-function entry in a procedure linkage table looks like this.
325    These PLT entries are used if the distance between the PLTGOT and
326    the PLT is < 4GB.  Use BTI versions of the PLTs when enabled.  */
327 static const bfd_byte elfNN_aarch64_small_plt_entry[PLT_SMALL_ENTRY_SIZE] =
328 {
329 0x10, 0x00, 0x00, 0x90, /* adrp x16, PLTGOT + n * 8 */
330 #if ARCH_SIZE == 64
331 0x11, 0x02, 0x40, 0xf9, /* ldr x17, [x16, PLTGOT + n * 8] */
332 0x10, 0x02, 0x00, 0x91, /* add x16, x16, :lo12:PLTGOT + n * 8 */
333 #else
334 0x11, 0x02, 0x40, 0xb9, /* ldr w17, [x16, PLTGOT + n * 4] */
335 0x10, 0x02, 0x00, 0x11, /* add w16, w16, :lo12:PLTGOT + n * 4 */
336 #endif
337 0x20, 0x02, 0x1f, 0xd6, /* br x17. */
338 };
339
340 static const bfd_byte
341 elfNN_aarch64_small_plt_bti_entry[PLT_BTI_SMALL_ENTRY_SIZE] =
342 {
343 0x5f, 0x24, 0x03, 0xd5, /* bti c. */
344 0x10, 0x00, 0x00, 0x90, /* adrp x16, PLTGOT + n * 8 */
345 #if ARCH_SIZE == 64
346 0x11, 0x02, 0x40, 0xf9, /* ldr x17, [x16, PLTGOT + n * 8] */
347 0x10, 0x02, 0x00, 0x91, /* add x16, x16, :lo12:PLTGOT + n * 8 */
348 #else
349 0x11, 0x02, 0x40, 0xb9, /* ldr w17, [x16, PLTGOT + n * 4] */
350 0x10, 0x02, 0x00, 0x11, /* add w16, w16, :lo12:PLTGOT + n * 4 */
351 #endif
352 0x20, 0x02, 0x1f, 0xd6, /* br x17. */
353 0x1f, 0x20, 0x03, 0xd5, /* nop */
354 };
355
356 static const bfd_byte
357 elfNN_aarch64_small_plt_pac_entry[PLT_PAC_SMALL_ENTRY_SIZE] =
358 {
359 0x10, 0x00, 0x00, 0x90, /* adrp x16, PLTGOT + n * 8 */
360 #if ARCH_SIZE == 64
361 0x11, 0x02, 0x40, 0xf9, /* ldr x17, [x16, PLTGOT + n * 8] */
362 0x10, 0x02, 0x00, 0x91, /* add x16, x16, :lo12:PLTGOT + n * 8 */
363 #else
364 0x11, 0x02, 0x40, 0xb9, /* ldr w17, [x16, PLTGOT + n * 4] */
365 0x10, 0x02, 0x00, 0x11, /* add w16, w16, :lo12:PLTGOT + n * 4 */
366 #endif
367 0x9f, 0x21, 0x03, 0xd5, /* autia1716 */
368 0x20, 0x02, 0x1f, 0xd6, /* br x17. */
369 0x1f, 0x20, 0x03, 0xd5, /* nop */
370 };
371
372 static const bfd_byte
373 elfNN_aarch64_small_plt_bti_pac_entry[PLT_BTI_PAC_SMALL_ENTRY_SIZE] =
374 {
375 0x5f, 0x24, 0x03, 0xd5, /* bti c. */
376 0x10, 0x00, 0x00, 0x90, /* adrp x16, PLTGOT + n * 8 */
377 #if ARCH_SIZE == 64
378 0x11, 0x02, 0x40, 0xf9, /* ldr x17, [x16, PLTGOT + n * 8] */
379 0x10, 0x02, 0x00, 0x91, /* add x16, x16, :lo12:PLTGOT + n * 8 */
380 #else
381 0x11, 0x02, 0x40, 0xb9, /* ldr w17, [x16, PLTGOT + n * 4] */
382 0x10, 0x02, 0x00, 0x11, /* add w16, w16, :lo12:PLTGOT + n * 4 */
383 #endif
384 0x9f, 0x21, 0x03, 0xd5, /* autia1716 */
385 0x20, 0x02, 0x1f, 0xd6, /* br x17. */
386 };
387
388 static const bfd_byte
389 elfNN_aarch64_tlsdesc_small_plt_entry[PLT_TLSDESC_ENTRY_SIZE] =
390 {
391 0xe2, 0x0f, 0xbf, 0xa9, /* stp x2, x3, [sp, #-16]! */
392 0x02, 0x00, 0x00, 0x90, /* adrp x2, 0 */
393 0x03, 0x00, 0x00, 0x90, /* adrp x3, 0 */
394 #if ARCH_SIZE == 64
395 0x42, 0x00, 0x40, 0xf9, /* ldr x2, [x2, #0] */
396 0x63, 0x00, 0x00, 0x91, /* add x3, x3, 0 */
397 #else
398 0x42, 0x00, 0x40, 0xb9, /* ldr w2, [x2, #0] */
399 0x63, 0x00, 0x00, 0x11, /* add w3, w3, 0 */
400 #endif
401 0x40, 0x00, 0x1f, 0xd6, /* br x2 */
402 0x1f, 0x20, 0x03, 0xd5, /* nop */
403 0x1f, 0x20, 0x03, 0xd5, /* nop */
404 };
405
406 static const bfd_byte
407 elfNN_aarch64_tlsdesc_small_plt_bti_entry[PLT_TLSDESC_ENTRY_SIZE] =
408 {
409 0x5f, 0x24, 0x03, 0xd5, /* bti c. */
410 0xe2, 0x0f, 0xbf, 0xa9, /* stp x2, x3, [sp, #-16]! */
411 0x02, 0x00, 0x00, 0x90, /* adrp x2, 0 */
412 0x03, 0x00, 0x00, 0x90, /* adrp x3, 0 */
413 #if ARCH_SIZE == 64
414 0x42, 0x00, 0x40, 0xf9, /* ldr x2, [x2, #0] */
415 0x63, 0x00, 0x00, 0x91, /* add x3, x3, 0 */
416 #else
417 0x42, 0x00, 0x40, 0xb9, /* ldr w2, [x2, #0] */
418 0x63, 0x00, 0x00, 0x11, /* add w3, w3, 0 */
419 #endif
420 0x40, 0x00, 0x1f, 0xd6, /* br x2 */
421 0x1f, 0x20, 0x03, 0xd5, /* nop */
422 };
423
424 #define elf_info_to_howto elfNN_aarch64_info_to_howto
425 #define elf_info_to_howto_rel elfNN_aarch64_info_to_howto
426
427 #define AARCH64_ELF_ABI_VERSION 0
428
429 /* In case we're on a 32-bit machine, construct a 64-bit "-1" value. */
430 #define ALL_ONES (~ (bfd_vma) 0)
431
432 /* Indexed by the bfd internal reloc enumerators.
433 Therefore, the table needs to be synced with BFD_RELOC_AARCH64_*
434 in reloc.c. */
435
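/* Illustrative note (not in the original source): because this table is
   laid out in BFD reloc-code order, looking up a howto amounts to simple
   index arithmetic of roughly the form

     howto = &elfNN_aarch64_howto_table[code - first AArch64 BFD reloc code];

   the exact helper and base constant used by the backend are assumptions
   here, not taken from this file.  */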
436 static reloc_howto_type elfNN_aarch64_howto_table[] =
437 {
438 EMPTY_HOWTO (0),
439
440 /* Basic data relocations. */
441
442 /* Deprecated, but retained for backwards compatibility. */
443 HOWTO64 (R_AARCH64_NULL, /* type */
444 0, /* rightshift */
445 0, /* size */
446 0, /* bitsize */
447 false, /* pc_relative */
448 0, /* bitpos */
449 complain_overflow_dont, /* complain_on_overflow */
450 bfd_elf_generic_reloc, /* special_function */
451 "R_AARCH64_NULL", /* name */
452 false, /* partial_inplace */
453 0, /* src_mask */
454 0, /* dst_mask */
455 false), /* pcrel_offset */
456 HOWTO (R_AARCH64_NONE, /* type */
457 0, /* rightshift */
458 0, /* size */
459 0, /* bitsize */
460 false, /* pc_relative */
461 0, /* bitpos */
462 complain_overflow_dont, /* complain_on_overflow */
463 bfd_elf_generic_reloc, /* special_function */
464 "R_AARCH64_NONE", /* name */
465 false, /* partial_inplace */
466 0, /* src_mask */
467 0, /* dst_mask */
468 false), /* pcrel_offset */
469
470 /* .xword: (S+A) */
471 HOWTO64 (AARCH64_R (ABS64), /* type */
472 0, /* rightshift */
473 8, /* size */
474 64, /* bitsize */
475 false, /* pc_relative */
476 0, /* bitpos */
477 complain_overflow_unsigned, /* complain_on_overflow */
478 bfd_elf_generic_reloc, /* special_function */
479 AARCH64_R_STR (ABS64), /* name */
480 false, /* partial_inplace */
481 ALL_ONES, /* src_mask */
482 ALL_ONES, /* dst_mask */
483 false), /* pcrel_offset */
484
485 /* .word: (S+A) */
486 HOWTO (AARCH64_R (ABS32), /* type */
487 0, /* rightshift */
488 4, /* size */
489 32, /* bitsize */
490 false, /* pc_relative */
491 0, /* bitpos */
492 complain_overflow_unsigned, /* complain_on_overflow */
493 bfd_elf_generic_reloc, /* special_function */
494 AARCH64_R_STR (ABS32), /* name */
495 false, /* partial_inplace */
496 0xffffffff, /* src_mask */
497 0xffffffff, /* dst_mask */
498 false), /* pcrel_offset */
499
500 /* .half: (S+A) */
501 HOWTO (AARCH64_R (ABS16), /* type */
502 0, /* rightshift */
503 2, /* size */
504 16, /* bitsize */
505 false, /* pc_relative */
506 0, /* bitpos */
507 complain_overflow_unsigned, /* complain_on_overflow */
508 bfd_elf_generic_reloc, /* special_function */
509 AARCH64_R_STR (ABS16), /* name */
510 false, /* partial_inplace */
511 0xffff, /* src_mask */
512 0xffff, /* dst_mask */
513 false), /* pcrel_offset */
514
515 /* .xword: (S+A-P) */
516 HOWTO64 (AARCH64_R (PREL64), /* type */
517 0, /* rightshift */
518 8, /* size */
519 64, /* bitsize */
520 true, /* pc_relative */
521 0, /* bitpos */
522 complain_overflow_signed, /* complain_on_overflow */
523 bfd_elf_generic_reloc, /* special_function */
524 AARCH64_R_STR (PREL64), /* name */
525 false, /* partial_inplace */
526 ALL_ONES, /* src_mask */
527 ALL_ONES, /* dst_mask */
528 true), /* pcrel_offset */
529
530 /* .word: (S+A-P) */
531 HOWTO (AARCH64_R (PREL32), /* type */
532 0, /* rightshift */
533 4, /* size */
534 32, /* bitsize */
535 true, /* pc_relative */
536 0, /* bitpos */
537 complain_overflow_signed, /* complain_on_overflow */
538 bfd_elf_generic_reloc, /* special_function */
539 AARCH64_R_STR (PREL32), /* name */
540 false, /* partial_inplace */
541 0xffffffff, /* src_mask */
542 0xffffffff, /* dst_mask */
543 true), /* pcrel_offset */
544
545 /* .half: (S+A-P) */
546 HOWTO (AARCH64_R (PREL16), /* type */
547 0, /* rightshift */
548 2, /* size */
549 16, /* bitsize */
550 true, /* pc_relative */
551 0, /* bitpos */
552 complain_overflow_signed, /* complain_on_overflow */
553 bfd_elf_generic_reloc, /* special_function */
554 AARCH64_R_STR (PREL16), /* name */
555 false, /* partial_inplace */
556 0xffff, /* src_mask */
557 0xffff, /* dst_mask */
558 true), /* pcrel_offset */
559
560 /* Group relocations to create a 16, 32, 48 or 64 bit
561 unsigned data or abs address inline. */
562
563 /* MOVZ: ((S+A) >> 0) & 0xffff */
564 HOWTO (AARCH64_R (MOVW_UABS_G0), /* type */
565 0, /* rightshift */
566 4, /* size */
567 16, /* bitsize */
568 false, /* pc_relative */
569 0, /* bitpos */
570 complain_overflow_unsigned, /* complain_on_overflow */
571 bfd_elf_generic_reloc, /* special_function */
572 AARCH64_R_STR (MOVW_UABS_G0), /* name */
573 false, /* partial_inplace */
574 0xffff, /* src_mask */
575 0xffff, /* dst_mask */
576 false), /* pcrel_offset */
577
578 /* MOVK: ((S+A) >> 0) & 0xffff [no overflow check] */
579 HOWTO (AARCH64_R (MOVW_UABS_G0_NC), /* type */
580 0, /* rightshift */
581 4, /* size */
582 16, /* bitsize */
583 false, /* pc_relative */
584 0, /* bitpos */
585 complain_overflow_dont, /* complain_on_overflow */
586 bfd_elf_generic_reloc, /* special_function */
587 AARCH64_R_STR (MOVW_UABS_G0_NC), /* name */
588 false, /* partial_inplace */
589 0xffff, /* src_mask */
590 0xffff, /* dst_mask */
591 false), /* pcrel_offset */
592
593 /* MOVZ: ((S+A) >> 16) & 0xffff */
594 HOWTO (AARCH64_R (MOVW_UABS_G1), /* type */
595 16, /* rightshift */
596 4, /* size */
597 16, /* bitsize */
598 false, /* pc_relative */
599 0, /* bitpos */
600 complain_overflow_unsigned, /* complain_on_overflow */
601 bfd_elf_generic_reloc, /* special_function */
602 AARCH64_R_STR (MOVW_UABS_G1), /* name */
603 false, /* partial_inplace */
604 0xffff, /* src_mask */
605 0xffff, /* dst_mask */
606 false), /* pcrel_offset */
607
608 /* MOVK: ((S+A) >> 16) & 0xffff [no overflow check] */
609 HOWTO64 (AARCH64_R (MOVW_UABS_G1_NC), /* type */
610 16, /* rightshift */
611 4, /* size */
612 16, /* bitsize */
613 false, /* pc_relative */
614 0, /* bitpos */
615 complain_overflow_dont, /* complain_on_overflow */
616 bfd_elf_generic_reloc, /* special_function */
617 AARCH64_R_STR (MOVW_UABS_G1_NC), /* name */
618 false, /* partial_inplace */
619 0xffff, /* src_mask */
620 0xffff, /* dst_mask */
621 false), /* pcrel_offset */
622
623 /* MOVZ: ((S+A) >> 32) & 0xffff */
624 HOWTO64 (AARCH64_R (MOVW_UABS_G2), /* type */
625 32, /* rightshift */
626 4, /* size */
627 16, /* bitsize */
628 false, /* pc_relative */
629 0, /* bitpos */
630 complain_overflow_unsigned, /* complain_on_overflow */
631 bfd_elf_generic_reloc, /* special_function */
632 AARCH64_R_STR (MOVW_UABS_G2), /* name */
633 false, /* partial_inplace */
634 0xffff, /* src_mask */
635 0xffff, /* dst_mask */
636 false), /* pcrel_offset */
637
638 /* MOVK: ((S+A) >> 32) & 0xffff [no overflow check] */
639 HOWTO64 (AARCH64_R (MOVW_UABS_G2_NC), /* type */
640 32, /* rightshift */
641 4, /* size */
642 16, /* bitsize */
643 false, /* pc_relative */
644 0, /* bitpos */
645 complain_overflow_dont, /* complain_on_overflow */
646 bfd_elf_generic_reloc, /* special_function */
647 AARCH64_R_STR (MOVW_UABS_G2_NC), /* name */
648 false, /* partial_inplace */
649 0xffff, /* src_mask */
650 0xffff, /* dst_mask */
651 false), /* pcrel_offset */
652
653 /* MOVZ: ((S+A) >> 48) & 0xffff */
654 HOWTO64 (AARCH64_R (MOVW_UABS_G3), /* type */
655 48, /* rightshift */
656 4, /* size */
657 16, /* bitsize */
658 false, /* pc_relative */
659 0, /* bitpos */
660 complain_overflow_unsigned, /* complain_on_overflow */
661 bfd_elf_generic_reloc, /* special_function */
662 AARCH64_R_STR (MOVW_UABS_G3), /* name */
663 false, /* partial_inplace */
664 0xffff, /* src_mask */
665 0xffff, /* dst_mask */
666 false), /* pcrel_offset */
667
668 /* Group relocations to create high part of a 16, 32, 48 or 64 bit
669 signed data or abs address inline. Will change instruction
670 to MOVN or MOVZ depending on sign of calculated value. */
671
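/* Worked example (illustrative, not part of the original source): for
   MOVW_SABS_G0 a non-negative result such as S+A = 0x1234 keeps a MOVZ
   with immediate 0x1234, while a negative result such as S+A = -2
   (0xfffffffffffffffe) is rewritten to MOVN with immediate 0x1, since
   MOVN places the bitwise NOT of its immediate in the register.  */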
672 /* MOV[ZN]: ((S+A) >> 0) & 0xffff */
673 HOWTO (AARCH64_R (MOVW_SABS_G0), /* type */
674 0, /* rightshift */
675 4, /* size */
676 17, /* bitsize */
677 false, /* pc_relative */
678 0, /* bitpos */
679 complain_overflow_signed, /* complain_on_overflow */
680 bfd_elf_generic_reloc, /* special_function */
681 AARCH64_R_STR (MOVW_SABS_G0), /* name */
682 false, /* partial_inplace */
683 0xffff, /* src_mask */
684 0xffff, /* dst_mask */
685 false), /* pcrel_offset */
686
687 /* MOV[ZN]: ((S+A) >> 16) & 0xffff */
688 HOWTO64 (AARCH64_R (MOVW_SABS_G1), /* type */
689 16, /* rightshift */
690 4, /* size */
691 17, /* bitsize */
692 false, /* pc_relative */
693 0, /* bitpos */
694 complain_overflow_signed, /* complain_on_overflow */
695 bfd_elf_generic_reloc, /* special_function */
696 AARCH64_R_STR (MOVW_SABS_G1), /* name */
697 false, /* partial_inplace */
698 0xffff, /* src_mask */
699 0xffff, /* dst_mask */
700 false), /* pcrel_offset */
701
702 /* MOV[ZN]: ((S+A) >> 32) & 0xffff */
703 HOWTO64 (AARCH64_R (MOVW_SABS_G2), /* type */
704 32, /* rightshift */
705 4, /* size */
706 17, /* bitsize */
707 false, /* pc_relative */
708 0, /* bitpos */
709 complain_overflow_signed, /* complain_on_overflow */
710 bfd_elf_generic_reloc, /* special_function */
711 AARCH64_R_STR (MOVW_SABS_G2), /* name */
712 false, /* partial_inplace */
713 0xffff, /* src_mask */
714 0xffff, /* dst_mask */
715 false), /* pcrel_offset */
716
717 /* Group relocations to create a 16, 32, 48 or 64 bit
718 PC relative address inline. */
719
720 /* MOV[NZ]: ((S+A-P) >> 0) & 0xffff */
721 HOWTO (AARCH64_R (MOVW_PREL_G0), /* type */
722 0, /* rightshift */
723 4, /* size */
724 17, /* bitsize */
725 true, /* pc_relative */
726 0, /* bitpos */
727 complain_overflow_signed, /* complain_on_overflow */
728 bfd_elf_generic_reloc, /* special_function */
729 AARCH64_R_STR (MOVW_PREL_G0), /* name */
730 false, /* partial_inplace */
731 0xffff, /* src_mask */
732 0xffff, /* dst_mask */
733 true), /* pcrel_offset */
734
735 /* MOVK: ((S+A-P) >> 0) & 0xffff [no overflow check] */
736 HOWTO (AARCH64_R (MOVW_PREL_G0_NC), /* type */
737 0, /* rightshift */
738 4, /* size */
739 16, /* bitsize */
740 true, /* pc_relative */
741 0, /* bitpos */
742 complain_overflow_dont, /* complain_on_overflow */
743 bfd_elf_generic_reloc, /* special_function */
744 AARCH64_R_STR (MOVW_PREL_G0_NC), /* name */
745 false, /* partial_inplace */
746 0xffff, /* src_mask */
747 0xffff, /* dst_mask */
748 true), /* pcrel_offset */
749
750 /* MOV[NZ]: ((S+A-P) >> 16) & 0xffff */
751 HOWTO (AARCH64_R (MOVW_PREL_G1), /* type */
752 16, /* rightshift */
753 4, /* size */
754 17, /* bitsize */
755 true, /* pc_relative */
756 0, /* bitpos */
757 complain_overflow_signed, /* complain_on_overflow */
758 bfd_elf_generic_reloc, /* special_function */
759 AARCH64_R_STR (MOVW_PREL_G1), /* name */
760 false, /* partial_inplace */
761 0xffff, /* src_mask */
762 0xffff, /* dst_mask */
763 true), /* pcrel_offset */
764
765 /* MOVK: ((S+A-P) >> 16) & 0xffff [no overflow check] */
766 HOWTO64 (AARCH64_R (MOVW_PREL_G1_NC), /* type */
767 16, /* rightshift */
768 4, /* size */
769 16, /* bitsize */
770 true, /* pc_relative */
771 0, /* bitpos */
772 complain_overflow_dont, /* complain_on_overflow */
773 bfd_elf_generic_reloc, /* special_function */
774 AARCH64_R_STR (MOVW_PREL_G1_NC), /* name */
775 false, /* partial_inplace */
776 0xffff, /* src_mask */
777 0xffff, /* dst_mask */
778 true), /* pcrel_offset */
779
780 /* MOV[NZ]: ((S+A-P) >> 32) & 0xffff */
781 HOWTO64 (AARCH64_R (MOVW_PREL_G2), /* type */
782 32, /* rightshift */
783 4, /* size */
784 17, /* bitsize */
785 true, /* pc_relative */
786 0, /* bitpos */
787 complain_overflow_signed, /* complain_on_overflow */
788 bfd_elf_generic_reloc, /* special_function */
789 AARCH64_R_STR (MOVW_PREL_G2), /* name */
790 false, /* partial_inplace */
791 0xffff, /* src_mask */
792 0xffff, /* dst_mask */
793 true), /* pcrel_offset */
794
795 /* MOVK: ((S+A-P) >> 32) & 0xffff [no overflow check] */
796 HOWTO64 (AARCH64_R (MOVW_PREL_G2_NC), /* type */
797 32, /* rightshift */
798 4, /* size */
799 16, /* bitsize */
800 true, /* pc_relative */
801 0, /* bitpos */
802 complain_overflow_dont, /* complain_on_overflow */
803 bfd_elf_generic_reloc, /* special_function */
804 AARCH64_R_STR (MOVW_PREL_G2_NC), /* name */
805 false, /* partial_inplace */
806 0xffff, /* src_mask */
807 0xffff, /* dst_mask */
808 true), /* pcrel_offset */
809
810 /* MOV[NZ]: ((S+A-P) >> 48) & 0xffff */
811 HOWTO64 (AARCH64_R (MOVW_PREL_G3), /* type */
812 48, /* rightshift */
813 4, /* size */
814 16, /* bitsize */
815 true, /* pc_relative */
816 0, /* bitpos */
817 complain_overflow_dont, /* complain_on_overflow */
818 bfd_elf_generic_reloc, /* special_function */
819 AARCH64_R_STR (MOVW_PREL_G3), /* name */
820 false, /* partial_inplace */
821 0xffff, /* src_mask */
822 0xffff, /* dst_mask */
823 true), /* pcrel_offset */
824
825 /* Relocations to generate 19, 21 and 33 bit PC-relative load/store
826 addresses: PG(x) is (x & ~0xfff). */
827
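/* Worked example (illustrative, not part of the original source): for an
   ADRP-style field ((PG(S+A) - PG(P)) >> 12), taking S+A = 0x412345 and
   P = 0x400010 gives PG(S+A) = 0x412000 and PG(P) = 0x400000, so the
   encoded immediate is 0x12000 >> 12 = 0x12 pages.  */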
828 /* LD-lit: ((S+A-P) >> 2) & 0x7ffff */
829 HOWTO (AARCH64_R (LD_PREL_LO19), /* type */
830 2, /* rightshift */
831 4, /* size */
832 19, /* bitsize */
833 true, /* pc_relative */
834 0, /* bitpos */
835 complain_overflow_signed, /* complain_on_overflow */
836 bfd_elf_generic_reloc, /* special_function */
837 AARCH64_R_STR (LD_PREL_LO19), /* name */
838 false, /* partial_inplace */
839 0x7ffff, /* src_mask */
840 0x7ffff, /* dst_mask */
841 true), /* pcrel_offset */
842
843 /* ADR: (S+A-P) & 0x1fffff */
844 HOWTO (AARCH64_R (ADR_PREL_LO21), /* type */
845 0, /* rightshift */
846 4, /* size */
847 21, /* bitsize */
848 true, /* pc_relative */
849 0, /* bitpos */
850 complain_overflow_signed, /* complain_on_overflow */
851 bfd_elf_generic_reloc, /* special_function */
852 AARCH64_R_STR (ADR_PREL_LO21), /* name */
853 false, /* partial_inplace */
854 0x1fffff, /* src_mask */
855 0x1fffff, /* dst_mask */
856 true), /* pcrel_offset */
857
858 /* ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
859 HOWTO (AARCH64_R (ADR_PREL_PG_HI21), /* type */
860 12, /* rightshift */
861 4, /* size */
862 21, /* bitsize */
863 true, /* pc_relative */
864 0, /* bitpos */
865 complain_overflow_signed, /* complain_on_overflow */
866 bfd_elf_generic_reloc, /* special_function */
867 AARCH64_R_STR (ADR_PREL_PG_HI21), /* name */
868 false, /* partial_inplace */
869 0x1fffff, /* src_mask */
870 0x1fffff, /* dst_mask */
871 true), /* pcrel_offset */
872
873 /* ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff [no overflow check] */
874 HOWTO64 (AARCH64_R (ADR_PREL_PG_HI21_NC), /* type */
875 12, /* rightshift */
876 4, /* size */
877 21, /* bitsize */
878 true, /* pc_relative */
879 0, /* bitpos */
880 complain_overflow_dont, /* complain_on_overflow */
881 bfd_elf_generic_reloc, /* special_function */
882 AARCH64_R_STR (ADR_PREL_PG_HI21_NC), /* name */
883 false, /* partial_inplace */
884 0x1fffff, /* src_mask */
885 0x1fffff, /* dst_mask */
886 true), /* pcrel_offset */
887
888 /* ADD: (S+A) & 0xfff [no overflow check] */
889 HOWTO (AARCH64_R (ADD_ABS_LO12_NC), /* type */
890 0, /* rightshift */
891 4, /* size */
892 12, /* bitsize */
893 false, /* pc_relative */
894 10, /* bitpos */
895 complain_overflow_dont, /* complain_on_overflow */
896 bfd_elf_generic_reloc, /* special_function */
897 AARCH64_R_STR (ADD_ABS_LO12_NC), /* name */
898 false, /* partial_inplace */
899 0x3ffc00, /* src_mask */
900 0x3ffc00, /* dst_mask */
901 false), /* pcrel_offset */
902
903 /* LD/ST8: (S+A) & 0xfff */
904 HOWTO (AARCH64_R (LDST8_ABS_LO12_NC), /* type */
905 0, /* rightshift */
906 4, /* size */
907 12, /* bitsize */
908 false, /* pc_relative */
909 0, /* bitpos */
910 complain_overflow_dont, /* complain_on_overflow */
911 bfd_elf_generic_reloc, /* special_function */
912 AARCH64_R_STR (LDST8_ABS_LO12_NC), /* name */
913 false, /* partial_inplace */
914 0xfff, /* src_mask */
915 0xfff, /* dst_mask */
916 false), /* pcrel_offset */
917
918 /* Relocations for control-flow instructions. */
919
920 /* TBZ/NZ: ((S+A-P) >> 2) & 0x3fff */
921 HOWTO (AARCH64_R (TSTBR14), /* type */
922 2, /* rightshift */
923 4, /* size */
924 14, /* bitsize */
925 true, /* pc_relative */
926 0, /* bitpos */
927 complain_overflow_signed, /* complain_on_overflow */
928 bfd_elf_generic_reloc, /* special_function */
929 AARCH64_R_STR (TSTBR14), /* name */
930 false, /* partial_inplace */
931 0x3fff, /* src_mask */
932 0x3fff, /* dst_mask */
933 true), /* pcrel_offset */
934
935 /* B.cond: ((S+A-P) >> 2) & 0x7ffff */
936 HOWTO (AARCH64_R (CONDBR19), /* type */
937 2, /* rightshift */
938 4, /* size */
939 19, /* bitsize */
940 true, /* pc_relative */
941 0, /* bitpos */
942 complain_overflow_signed, /* complain_on_overflow */
943 bfd_elf_generic_reloc, /* special_function */
944 AARCH64_R_STR (CONDBR19), /* name */
945 false, /* partial_inplace */
946 0x7ffff, /* src_mask */
947 0x7ffff, /* dst_mask */
948 true), /* pcrel_offset */
949
950 /* B: ((S+A-P) >> 2) & 0x3ffffff */
951 HOWTO (AARCH64_R (JUMP26), /* type */
952 2, /* rightshift */
953 4, /* size */
954 26, /* bitsize */
955 true, /* pc_relative */
956 0, /* bitpos */
957 complain_overflow_signed, /* complain_on_overflow */
958 bfd_elf_generic_reloc, /* special_function */
959 AARCH64_R_STR (JUMP26), /* name */
960 false, /* partial_inplace */
961 0x3ffffff, /* src_mask */
962 0x3ffffff, /* dst_mask */
963 true), /* pcrel_offset */
964
965 /* BL: ((S+A-P) >> 2) & 0x3ffffff */
966 HOWTO (AARCH64_R (CALL26), /* type */
967 2, /* rightshift */
968 4, /* size */
969 26, /* bitsize */
970 true, /* pc_relative */
971 0, /* bitpos */
972 complain_overflow_signed, /* complain_on_overflow */
973 bfd_elf_generic_reloc, /* special_function */
974 AARCH64_R_STR (CALL26), /* name */
975 false, /* partial_inplace */
976 0x3ffffff, /* src_mask */
977 0x3ffffff, /* dst_mask */
978 true), /* pcrel_offset */
979
980 /* LD/ST16: (S+A) & 0xffe */
981 HOWTO (AARCH64_R (LDST16_ABS_LO12_NC), /* type */
982 1, /* rightshift */
983 4, /* size */
984 12, /* bitsize */
985 false, /* pc_relative */
986 0, /* bitpos */
987 complain_overflow_dont, /* complain_on_overflow */
988 bfd_elf_generic_reloc, /* special_function */
989 AARCH64_R_STR (LDST16_ABS_LO12_NC), /* name */
990 false, /* partial_inplace */
991 0xffe, /* src_mask */
992 0xffe, /* dst_mask */
993 false), /* pcrel_offset */
994
995 /* LD/ST32: (S+A) & 0xffc */
996 HOWTO (AARCH64_R (LDST32_ABS_LO12_NC), /* type */
997 2, /* rightshift */
998 4, /* size */
999 12, /* bitsize */
1000 false, /* pc_relative */
1001 0, /* bitpos */
1002 complain_overflow_dont, /* complain_on_overflow */
1003 bfd_elf_generic_reloc, /* special_function */
1004 AARCH64_R_STR (LDST32_ABS_LO12_NC), /* name */
1005 false, /* partial_inplace */
1006 0xffc, /* src_mask */
1007 0xffc, /* dst_mask */
1008 false), /* pcrel_offset */
1009
1010 /* LD/ST64: (S+A) & 0xff8 */
1011 HOWTO (AARCH64_R (LDST64_ABS_LO12_NC), /* type */
1012 3, /* rightshift */
1013 4, /* size */
1014 12, /* bitsize */
1015 false, /* pc_relative */
1016 0, /* bitpos */
1017 complain_overflow_dont, /* complain_on_overflow */
1018 bfd_elf_generic_reloc, /* special_function */
1019 AARCH64_R_STR (LDST64_ABS_LO12_NC), /* name */
1020 false, /* partial_inplace */
1021 0xff8, /* src_mask */
1022 0xff8, /* dst_mask */
1023 false), /* pcrel_offset */
1024
1025 /* LD/ST128: (S+A) & 0xff0 */
1026 HOWTO (AARCH64_R (LDST128_ABS_LO12_NC), /* type */
1027 4, /* rightshift */
1028 4, /* size */
1029 12, /* bitsize */
1030 false, /* pc_relative */
1031 0, /* bitpos */
1032 complain_overflow_dont, /* complain_on_overflow */
1033 bfd_elf_generic_reloc, /* special_function */
1034 AARCH64_R_STR (LDST128_ABS_LO12_NC), /* name */
1035 false, /* partial_inplace */
1036 0xff0, /* src_mask */
1037 0xff0, /* dst_mask */
1038 false), /* pcrel_offset */
1039
1040 /* Set a load-literal immediate field to bits
1041 0x1FFFFC of G(S)-P */
1042 HOWTO (AARCH64_R (GOT_LD_PREL19), /* type */
1043 2, /* rightshift */
1044 4, /* size */
1045 19, /* bitsize */
1046 true, /* pc_relative */
1047 0, /* bitpos */
1048 complain_overflow_signed, /* complain_on_overflow */
1049 bfd_elf_generic_reloc, /* special_function */
1050 AARCH64_R_STR (GOT_LD_PREL19), /* name */
1051 false, /* partial_inplace */
1052 0xffffe0, /* src_mask */
1053 0xffffe0, /* dst_mask */
1054 true), /* pcrel_offset */
1055
1056 /* Get to the page for the GOT entry for the symbol
1057 (G(S) - P) using an ADRP instruction. */
1058 HOWTO (AARCH64_R (ADR_GOT_PAGE), /* type */
1059 12, /* rightshift */
1060 4, /* size */
1061 21, /* bitsize */
1062 true, /* pc_relative */
1063 0, /* bitpos */
1064 complain_overflow_dont, /* complain_on_overflow */
1065 bfd_elf_generic_reloc, /* special_function */
1066 AARCH64_R_STR (ADR_GOT_PAGE), /* name */
1067 false, /* partial_inplace */
1068 0x1fffff, /* src_mask */
1069 0x1fffff, /* dst_mask */
1070 true), /* pcrel_offset */
1071
1072 /* LD64: GOT offset G(S) & 0xff8 */
1073 HOWTO64 (AARCH64_R (LD64_GOT_LO12_NC), /* type */
1074 3, /* rightshift */
1075 4, /* size */
1076 12, /* bitsize */
1077 false, /* pc_relative */
1078 0, /* bitpos */
1079 complain_overflow_dont, /* complain_on_overflow */
1080 bfd_elf_generic_reloc, /* special_function */
1081 AARCH64_R_STR (LD64_GOT_LO12_NC), /* name */
1082 false, /* partial_inplace */
1083 0xff8, /* src_mask */
1084 0xff8, /* dst_mask */
1085 false), /* pcrel_offset */
1086
1087 /* LD32: GOT offset G(S) & 0xffc */
1088 HOWTO32 (AARCH64_R (LD32_GOT_LO12_NC), /* type */
1089 2, /* rightshift */
1090 4, /* size */
1091 12, /* bitsize */
1092 false, /* pc_relative */
1093 0, /* bitpos */
1094 complain_overflow_dont, /* complain_on_overflow */
1095 bfd_elf_generic_reloc, /* special_function */
1096 AARCH64_R_STR (LD32_GOT_LO12_NC), /* name */
1097 false, /* partial_inplace */
1098 0xffc, /* src_mask */
1099 0xffc, /* dst_mask */
1100 false), /* pcrel_offset */
1101
1102 /* Lower 16 bits of GOT offset for the symbol. */
1103 HOWTO64 (AARCH64_R (MOVW_GOTOFF_G0_NC), /* type */
1104 0, /* rightshift */
1105 4, /* size */
1106 16, /* bitsize */
1107 false, /* pc_relative */
1108 0, /* bitpos */
1109 complain_overflow_dont, /* complain_on_overflow */
1110 bfd_elf_generic_reloc, /* special_function */
1111 AARCH64_R_STR (MOVW_GOTOFF_G0_NC), /* name */
1112 false, /* partial_inplace */
1113 0xffff, /* src_mask */
1114 0xffff, /* dst_mask */
1115 false), /* pcrel_offset */
1116
1117 /* Higher 16 bits of GOT offset for the symbol. */
1118 HOWTO64 (AARCH64_R (MOVW_GOTOFF_G1), /* type */
1119 16, /* rightshift */
1120 4, /* size */
1121 16, /* bitsize */
1122 false, /* pc_relative */
1123 0, /* bitpos */
1124 complain_overflow_unsigned, /* complain_on_overflow */
1125 bfd_elf_generic_reloc, /* special_function */
1126 AARCH64_R_STR (MOVW_GOTOFF_G1), /* name */
1127 false, /* partial_inplace */
1128 0xffff, /* src_mask */
1129 0xffff, /* dst_mask */
1130 false), /* pcrel_offset */
1131
1132 /* LD64: GOT offset for the symbol. */
1133 HOWTO64 (AARCH64_R (LD64_GOTOFF_LO15), /* type */
1134 3, /* rightshift */
1135 4, /* size */
1136 12, /* bitsize */
1137 false, /* pc_relative */
1138 0, /* bitpos */
1139 complain_overflow_unsigned, /* complain_on_overflow */
1140 bfd_elf_generic_reloc, /* special_function */
1141 AARCH64_R_STR (LD64_GOTOFF_LO15), /* name */
1142 false, /* partial_inplace */
1143 0x7ff8, /* src_mask */
1144 0x7ff8, /* dst_mask */
1145 false), /* pcrel_offset */
1146
1147 /* LD32: GOT offset to the page address of GOT table.
1148 (G(S) - PAGE (_GLOBAL_OFFSET_TABLE_)) & 0x5ffc. */
1149 HOWTO32 (AARCH64_R (LD32_GOTPAGE_LO14), /* type */
1150 2, /* rightshift */
1151 4, /* size */
1152 12, /* bitsize */
1153 false, /* pc_relative */
1154 0, /* bitpos */
1155 complain_overflow_unsigned, /* complain_on_overflow */
1156 bfd_elf_generic_reloc, /* special_function */
1157 AARCH64_R_STR (LD32_GOTPAGE_LO14), /* name */
1158 false, /* partial_inplace */
1159 0x5ffc, /* src_mask */
1160 0x5ffc, /* dst_mask */
1161 false), /* pcrel_offset */
1162
1163 /* LD64: GOT offset to the page address of GOT table.
1164 (G(S) - PAGE (_GLOBAL_OFFSET_TABLE_)) & 0x7ff8. */
1165 HOWTO64 (AARCH64_R (LD64_GOTPAGE_LO15), /* type */
1166 3, /* rightshift */
1167 4, /* size */
1168 12, /* bitsize */
1169 false, /* pc_relative */
1170 0, /* bitpos */
1171 complain_overflow_unsigned, /* complain_on_overflow */
1172 bfd_elf_generic_reloc, /* special_function */
1173 AARCH64_R_STR (LD64_GOTPAGE_LO15), /* name */
1174 false, /* partial_inplace */
1175 0x7ff8, /* src_mask */
1176 0x7ff8, /* dst_mask */
1177 false), /* pcrel_offset */
1178
1179 /* Get to the page for the GOT entry for the symbol
1180 (G(S) - P) using an ADRP instruction. */
1181 HOWTO (AARCH64_R (TLSGD_ADR_PAGE21), /* type */
1182 12, /* rightshift */
1183 4, /* size */
1184 21, /* bitsize */
1185 true, /* pc_relative */
1186 0, /* bitpos */
1187 complain_overflow_dont, /* complain_on_overflow */
1188 bfd_elf_generic_reloc, /* special_function */
1189 AARCH64_R_STR (TLSGD_ADR_PAGE21), /* name */
1190 false, /* partial_inplace */
1191 0x1fffff, /* src_mask */
1192 0x1fffff, /* dst_mask */
1193 true), /* pcrel_offset */
1194
1195 HOWTO (AARCH64_R (TLSGD_ADR_PREL21), /* type */
1196 0, /* rightshift */
1197 4, /* size */
1198 21, /* bitsize */
1199 true, /* pc_relative */
1200 0, /* bitpos */
1201 complain_overflow_dont, /* complain_on_overflow */
1202 bfd_elf_generic_reloc, /* special_function */
1203 AARCH64_R_STR (TLSGD_ADR_PREL21), /* name */
1204 false, /* partial_inplace */
1205 0x1fffff, /* src_mask */
1206 0x1fffff, /* dst_mask */
1207 true), /* pcrel_offset */
1208
1209 /* ADD: GOT offset G(S) & 0xff8 [no overflow check] */
1210 HOWTO (AARCH64_R (TLSGD_ADD_LO12_NC), /* type */
1211 0, /* rightshift */
1212 4, /* size */
1213 12, /* bitsize */
1214 false, /* pc_relative */
1215 0, /* bitpos */
1216 complain_overflow_dont, /* complain_on_overflow */
1217 bfd_elf_generic_reloc, /* special_function */
1218 AARCH64_R_STR (TLSGD_ADD_LO12_NC), /* name */
1219 false, /* partial_inplace */
1220 0xfff, /* src_mask */
1221 0xfff, /* dst_mask */
1222 false), /* pcrel_offset */
1223
1224 /* Lower 16 bits of GOT offset to tls_index. */
1225 HOWTO64 (AARCH64_R (TLSGD_MOVW_G0_NC), /* type */
1226 0, /* rightshift */
1227 4, /* size */
1228 16, /* bitsize */
1229 false, /* pc_relative */
1230 0, /* bitpos */
1231 complain_overflow_dont, /* complain_on_overflow */
1232 bfd_elf_generic_reloc, /* special_function */
1233 AARCH64_R_STR (TLSGD_MOVW_G0_NC), /* name */
1234 false, /* partial_inplace */
1235 0xffff, /* src_mask */
1236 0xffff, /* dst_mask */
1237 false), /* pcrel_offset */
1238
1239 /* Higher 16 bits of GOT offset to tls_index. */
1240 HOWTO64 (AARCH64_R (TLSGD_MOVW_G1), /* type */
1241 16, /* rightshift */
1242 4, /* size */
1243 16, /* bitsize */
1244 false, /* pc_relative */
1245 0, /* bitpos */
1246 complain_overflow_unsigned, /* complain_on_overflow */
1247 bfd_elf_generic_reloc, /* special_function */
1248 AARCH64_R_STR (TLSGD_MOVW_G1), /* name */
1249 false, /* partial_inplace */
1250 0xffff, /* src_mask */
1251 0xffff, /* dst_mask */
1252 false), /* pcrel_offset */
1253
1254 HOWTO (AARCH64_R (TLSIE_ADR_GOTTPREL_PAGE21), /* type */
1255 12, /* rightshift */
1256 4, /* size */
1257 21, /* bitsize */
1258 false, /* pc_relative */
1259 0, /* bitpos */
1260 complain_overflow_dont, /* complain_on_overflow */
1261 bfd_elf_generic_reloc, /* special_function */
1262 AARCH64_R_STR (TLSIE_ADR_GOTTPREL_PAGE21), /* name */
1263 false, /* partial_inplace */
1264 0x1fffff, /* src_mask */
1265 0x1fffff, /* dst_mask */
1266 false), /* pcrel_offset */
1267
1268 HOWTO64 (AARCH64_R (TLSIE_LD64_GOTTPREL_LO12_NC), /* type */
1269 3, /* rightshift */
1270 4, /* size */
1271 12, /* bitsize */
1272 false, /* pc_relative */
1273 0, /* bitpos */
1274 complain_overflow_dont, /* complain_on_overflow */
1275 bfd_elf_generic_reloc, /* special_function */
1276 AARCH64_R_STR (TLSIE_LD64_GOTTPREL_LO12_NC), /* name */
1277 false, /* partial_inplace */
1278 0xff8, /* src_mask */
1279 0xff8, /* dst_mask */
1280 false), /* pcrel_offset */
1281
1282 HOWTO32 (AARCH64_R (TLSIE_LD32_GOTTPREL_LO12_NC), /* type */
1283 2, /* rightshift */
1284 4, /* size */
1285 12, /* bitsize */
1286 false, /* pc_relative */
1287 0, /* bitpos */
1288 complain_overflow_dont, /* complain_on_overflow */
1289 bfd_elf_generic_reloc, /* special_function */
1290 AARCH64_R_STR (TLSIE_LD32_GOTTPREL_LO12_NC), /* name */
1291 false, /* partial_inplace */
1292 0xffc, /* src_mask */
1293 0xffc, /* dst_mask */
1294 false), /* pcrel_offset */
1295
1296 HOWTO (AARCH64_R (TLSIE_LD_GOTTPREL_PREL19), /* type */
1297 2, /* rightshift */
1298 4, /* size */
1299 19, /* bitsize */
1300 false, /* pc_relative */
1301 0, /* bitpos */
1302 complain_overflow_dont, /* complain_on_overflow */
1303 bfd_elf_generic_reloc, /* special_function */
1304 AARCH64_R_STR (TLSIE_LD_GOTTPREL_PREL19), /* name */
1305 false, /* partial_inplace */
1306 0x1ffffc, /* src_mask */
1307 0x1ffffc, /* dst_mask */
1308 false), /* pcrel_offset */
1309
1310 HOWTO64 (AARCH64_R (TLSIE_MOVW_GOTTPREL_G0_NC), /* type */
1311 0, /* rightshift */
1312 4, /* size */
1313 16, /* bitsize */
1314 false, /* pc_relative */
1315 0, /* bitpos */
1316 complain_overflow_dont, /* complain_on_overflow */
1317 bfd_elf_generic_reloc, /* special_function */
1318 AARCH64_R_STR (TLSIE_MOVW_GOTTPREL_G0_NC), /* name */
1319 false, /* partial_inplace */
1320 0xffff, /* src_mask */
1321 0xffff, /* dst_mask */
1322 false), /* pcrel_offset */
1323
1324 HOWTO64 (AARCH64_R (TLSIE_MOVW_GOTTPREL_G1), /* type */
1325 16, /* rightshift */
1326 4, /* size */
1327 16, /* bitsize */
1328 false, /* pc_relative */
1329 0, /* bitpos */
1330 complain_overflow_unsigned, /* complain_on_overflow */
1331 bfd_elf_generic_reloc, /* special_function */
1332 AARCH64_R_STR (TLSIE_MOVW_GOTTPREL_G1), /* name */
1333 false, /* partial_inplace */
1334 0xffff, /* src_mask */
1335 0xffff, /* dst_mask */
1336 false), /* pcrel_offset */
1337
1338 /* ADD: bit[23:12] of byte offset to module TLS base address. */
1339 HOWTO (AARCH64_R (TLSLD_ADD_DTPREL_HI12), /* type */
1340 12, /* rightshift */
1341 4, /* size */
1342 12, /* bitsize */
1343 false, /* pc_relative */
1344 0, /* bitpos */
1345 complain_overflow_unsigned, /* complain_on_overflow */
1346 bfd_elf_generic_reloc, /* special_function */
1347 AARCH64_R_STR (TLSLD_ADD_DTPREL_HI12), /* name */
1348 false, /* partial_inplace */
1349 0xfff, /* src_mask */
1350 0xfff, /* dst_mask */
1351 false), /* pcrel_offset */
1352
1353 /* Unsigned 12 bit byte offset to module TLS base address. */
1354 HOWTO (AARCH64_R (TLSLD_ADD_DTPREL_LO12), /* type */
1355 0, /* rightshift */
1356 4, /* size */
1357 12, /* bitsize */
1358 false, /* pc_relative */
1359 0, /* bitpos */
1360 complain_overflow_unsigned, /* complain_on_overflow */
1361 bfd_elf_generic_reloc, /* special_function */
1362 AARCH64_R_STR (TLSLD_ADD_DTPREL_LO12), /* name */
1363 false, /* partial_inplace */
1364 0xfff, /* src_mask */
1365 0xfff, /* dst_mask */
1366 false), /* pcrel_offset */
1367
1368 /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12. */
1369 HOWTO (AARCH64_R (TLSLD_ADD_DTPREL_LO12_NC), /* type */
1370 0, /* rightshift */
1371 4, /* size */
1372 12, /* bitsize */
1373 false, /* pc_relative */
1374 0, /* bitpos */
1375 complain_overflow_dont, /* complain_on_overflow */
1376 bfd_elf_generic_reloc, /* special_function */
1377 AARCH64_R_STR (TLSLD_ADD_DTPREL_LO12_NC), /* name */
1378 false, /* partial_inplace */
1379 0xfff, /* src_mask */
1380 0xfff, /* dst_mask */
1381 false), /* pcrel_offset */
1382
1383 /* ADD: GOT offset G(S) & 0xff8 [no overflow check] */
1384 HOWTO (AARCH64_R (TLSLD_ADD_LO12_NC), /* type */
1385 0, /* rightshift */
1386 4, /* size */
1387 12, /* bitsize */
1388 false, /* pc_relative */
1389 0, /* bitpos */
1390 complain_overflow_dont, /* complain_on_overflow */
1391 bfd_elf_generic_reloc, /* special_function */
1392 AARCH64_R_STR (TLSLD_ADD_LO12_NC), /* name */
1393 false, /* partial_inplace */
1394 0xfff, /* src_mask */
1395 0xfff, /* dst_mask */
1396 false), /* pcrel_offset */
1397
1398 /* Get to the page for the GOT entry for the symbol
1399 (G(S) - P) using an ADRP instruction. */
1400 HOWTO (AARCH64_R (TLSLD_ADR_PAGE21), /* type */
1401 12, /* rightshift */
1402 4, /* size */
1403 21, /* bitsize */
1404 true, /* pc_relative */
1405 0, /* bitpos */
1406 complain_overflow_signed, /* complain_on_overflow */
1407 bfd_elf_generic_reloc, /* special_function */
1408 AARCH64_R_STR (TLSLD_ADR_PAGE21), /* name */
1409 false, /* partial_inplace */
1410 0x1fffff, /* src_mask */
1411 0x1fffff, /* dst_mask */
1412 true), /* pcrel_offset */
1413
1414 HOWTO (AARCH64_R (TLSLD_ADR_PREL21), /* type */
1415 0, /* rightshift */
1416 4, /* size */
1417 21, /* bitsize */
1418 true, /* pc_relative */
1419 0, /* bitpos */
1420 complain_overflow_signed, /* complain_on_overflow */
1421 bfd_elf_generic_reloc, /* special_function */
1422 AARCH64_R_STR (TLSLD_ADR_PREL21), /* name */
1423 false, /* partial_inplace */
1424 0x1fffff, /* src_mask */
1425 0x1fffff, /* dst_mask */
1426 true), /* pcrel_offset */
1427
1428 /* LD/ST16: bit[11:1] of byte offset to module TLS base address. */
1429 HOWTO64 (AARCH64_R (TLSLD_LDST16_DTPREL_LO12), /* type */
1430 1, /* rightshift */
1431 4, /* size */
1432 11, /* bitsize */
1433 false, /* pc_relative */
1434 10, /* bitpos */
1435 complain_overflow_unsigned, /* complain_on_overflow */
1436 bfd_elf_generic_reloc, /* special_function */
1437 AARCH64_R_STR (TLSLD_LDST16_DTPREL_LO12), /* name */
1438 false, /* partial_inplace */
1439 0x1ffc00, /* src_mask */
1440 0x1ffc00, /* dst_mask */
1441 false), /* pcrel_offset */
1442
1443 /* Same as BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12, but no overflow check. */
1444 HOWTO64 (AARCH64_R (TLSLD_LDST16_DTPREL_LO12_NC), /* type */
1445 1, /* rightshift */
1446 4, /* size */
1447 11, /* bitsize */
1448 false, /* pc_relative */
1449 10, /* bitpos */
1450 complain_overflow_dont, /* complain_on_overflow */
1451 bfd_elf_generic_reloc, /* special_function */
1452 AARCH64_R_STR (TLSLD_LDST16_DTPREL_LO12_NC), /* name */
1453 false, /* partial_inplace */
1454 0x1ffc00, /* src_mask */
1455 0x1ffc00, /* dst_mask */
1456 false), /* pcrel_offset */
1457
1458 /* LD/ST32: bit[11:2] of byte offset to module TLS base address. */
1459 HOWTO64 (AARCH64_R (TLSLD_LDST32_DTPREL_LO12), /* type */
1460 2, /* rightshift */
1461 4, /* size */
1462 10, /* bitsize */
1463 false, /* pc_relative */
1464 10, /* bitpos */
1465 complain_overflow_unsigned, /* complain_on_overflow */
1466 bfd_elf_generic_reloc, /* special_function */
1467 AARCH64_R_STR (TLSLD_LDST32_DTPREL_LO12), /* name */
1468 false, /* partial_inplace */
1469 0x3ffc00, /* src_mask */
1470 0x3ffc00, /* dst_mask */
1471 false), /* pcrel_offset */
1472
1473 /* Same as BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12, but no overflow check. */
1474 HOWTO64 (AARCH64_R (TLSLD_LDST32_DTPREL_LO12_NC), /* type */
1475 2, /* rightshift */
1476 4, /* size */
1477 10, /* bitsize */
1478 false, /* pc_relative */
1479 10, /* bitpos */
1480 complain_overflow_dont, /* complain_on_overflow */
1481 bfd_elf_generic_reloc, /* special_function */
1482 AARCH64_R_STR (TLSLD_LDST32_DTPREL_LO12_NC), /* name */
1483 false, /* partial_inplace */
1484 0xffc00, /* src_mask */
1485 0xffc00, /* dst_mask */
1486 false), /* pcrel_offset */
1487
1488 /* LD/ST64: bit[11:3] of byte offset to module TLS base address. */
1489 HOWTO64 (AARCH64_R (TLSLD_LDST64_DTPREL_LO12), /* type */
1490 3, /* rightshift */
1491 4, /* size */
1492 9, /* bitsize */
1493 false, /* pc_relative */
1494 10, /* bitpos */
1495 complain_overflow_unsigned, /* complain_on_overflow */
1496 bfd_elf_generic_reloc, /* special_function */
1497 AARCH64_R_STR (TLSLD_LDST64_DTPREL_LO12), /* name */
1498 false, /* partial_inplace */
1499 0x3ffc00, /* src_mask */
1500 0x3ffc00, /* dst_mask */
1501 false), /* pcrel_offset */
1502
1503 /* Same as BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12, but no overflow check. */
1504 HOWTO64 (AARCH64_R (TLSLD_LDST64_DTPREL_LO12_NC), /* type */
1505 3, /* rightshift */
1506 4, /* size */
1507 9, /* bitsize */
1508 false, /* pc_relative */
1509 10, /* bitpos */
1510 complain_overflow_dont, /* complain_on_overflow */
1511 bfd_elf_generic_reloc, /* special_function */
1512 AARCH64_R_STR (TLSLD_LDST64_DTPREL_LO12_NC), /* name */
1513 false, /* partial_inplace */
1514 0x7fc00, /* src_mask */
1515 0x7fc00, /* dst_mask */
1516 false), /* pcrel_offset */
1517
1518 /* LD/ST8: bit[11:0] of byte offset to module TLS base address. */
1519 HOWTO64 (AARCH64_R (TLSLD_LDST8_DTPREL_LO12), /* type */
1520 0, /* rightshift */
1521 4, /* size */
1522 12, /* bitsize */
1523 false, /* pc_relative */
1524 10, /* bitpos */
1525 complain_overflow_unsigned, /* complain_on_overflow */
1526 bfd_elf_generic_reloc, /* special_function */
1527 AARCH64_R_STR (TLSLD_LDST8_DTPREL_LO12), /* name */
1528 false, /* partial_inplace */
1529 0x3ffc00, /* src_mask */
1530 0x3ffc00, /* dst_mask */
1531 false), /* pcrel_offset */
1532
1533 /* Same as BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12, but no overflow check. */
1534 HOWTO64 (AARCH64_R (TLSLD_LDST8_DTPREL_LO12_NC), /* type */
1535 0, /* rightshift */
1536 4, /* size */
1537 12, /* bitsize */
1538 false, /* pc_relative */
1539 10, /* bitpos */
1540 complain_overflow_dont, /* complain_on_overflow */
1541 bfd_elf_generic_reloc, /* special_function */
1542 AARCH64_R_STR (TLSLD_LDST8_DTPREL_LO12_NC), /* name */
1543 false, /* partial_inplace */
1544 0x3ffc00, /* src_mask */
1545 0x3ffc00, /* dst_mask */
1546 false), /* pcrel_offset */
1547
1548 /* MOVZ: bit[15:0] of byte offset to module TLS base address. */
1549 HOWTO (AARCH64_R (TLSLD_MOVW_DTPREL_G0), /* type */
1550 0, /* rightshift */
1551 4, /* size */
1552 16, /* bitsize */
1553 false, /* pc_relative */
1554 0, /* bitpos */
1555 complain_overflow_unsigned, /* complain_on_overflow */
1556 bfd_elf_generic_reloc, /* special_function */
1557 AARCH64_R_STR (TLSLD_MOVW_DTPREL_G0), /* name */
1558 false, /* partial_inplace */
1559 0xffff, /* src_mask */
1560 0xffff, /* dst_mask */
1561 false), /* pcrel_offset */
1562
1563 /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0. */
1564 HOWTO (AARCH64_R (TLSLD_MOVW_DTPREL_G0_NC), /* type */
1565 0, /* rightshift */
1566 4, /* size */
1567 16, /* bitsize */
1568 false, /* pc_relative */
1569 0, /* bitpos */
1570 complain_overflow_dont, /* complain_on_overflow */
1571 bfd_elf_generic_reloc, /* special_function */
1572 AARCH64_R_STR (TLSLD_MOVW_DTPREL_G0_NC), /* name */
1573 false, /* partial_inplace */
1574 0xffff, /* src_mask */
1575 0xffff, /* dst_mask */
1576 false), /* pcrel_offset */
1577
1578 /* MOVZ: bit[31:16] of byte offset to module TLS base address. */
1579 HOWTO (AARCH64_R (TLSLD_MOVW_DTPREL_G1), /* type */
1580 16, /* rightshift */
1581 4, /* size */
1582 16, /* bitsize */
1583 false, /* pc_relative */
1584 0, /* bitpos */
1585 complain_overflow_unsigned, /* complain_on_overflow */
1586 bfd_elf_generic_reloc, /* special_function */
1587 AARCH64_R_STR (TLSLD_MOVW_DTPREL_G1), /* name */
1588 false, /* partial_inplace */
1589 0xffff, /* src_mask */
1590 0xffff, /* dst_mask */
1591 false), /* pcrel_offset */
1592
1593 /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1. */
1594 HOWTO64 (AARCH64_R (TLSLD_MOVW_DTPREL_G1_NC), /* type */
1595 16, /* rightshift */
1596 4, /* size */
1597 16, /* bitsize */
1598 false, /* pc_relative */
1599 0, /* bitpos */
1600 complain_overflow_dont, /* complain_on_overflow */
1601 bfd_elf_generic_reloc, /* special_function */
1602 AARCH64_R_STR (TLSLD_MOVW_DTPREL_G1_NC), /* name */
1603 false, /* partial_inplace */
1604 0xffff, /* src_mask */
1605 0xffff, /* dst_mask */
1606 false), /* pcrel_offset */
1607
1608 /* MOVZ: bit[47:32] of byte offset to module TLS base address. */
1609 HOWTO64 (AARCH64_R (TLSLD_MOVW_DTPREL_G2), /* type */
1610 32, /* rightshift */
1611 4, /* size */
1612 16, /* bitsize */
1613 false, /* pc_relative */
1614 0, /* bitpos */
1615 complain_overflow_unsigned, /* complain_on_overflow */
1616 bfd_elf_generic_reloc, /* special_function */
1617 AARCH64_R_STR (TLSLD_MOVW_DTPREL_G2), /* name */
1618 false, /* partial_inplace */
1619 0xffff, /* src_mask */
1620 0xffff, /* dst_mask */
1621 false), /* pcrel_offset */
1622
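/* The TLSLE_MOVW_TPREL_G* and TLSLE_ADD_TPREL_* entries below operate on
   16-bit (MOVZ/MOVK) or 12-bit (ADD) chunks of the byte offset from the
   thread pointer (TP) to the symbol.  */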
1623 HOWTO64 (AARCH64_R (TLSLE_MOVW_TPREL_G2), /* type */
1624 32, /* rightshift */
1625 4, /* size */
1626 16, /* bitsize */
1627 false, /* pc_relative */
1628 0, /* bitpos */
1629 complain_overflow_unsigned, /* complain_on_overflow */
1630 bfd_elf_generic_reloc, /* special_function */
1631 AARCH64_R_STR (TLSLE_MOVW_TPREL_G2), /* name */
1632 false, /* partial_inplace */
1633 0xffff, /* src_mask */
1634 0xffff, /* dst_mask */
1635 false), /* pcrel_offset */
1636
1637 HOWTO (AARCH64_R (TLSLE_MOVW_TPREL_G1), /* type */
1638 16, /* rightshift */
1639 4, /* size */
1640 16, /* bitsize */
1641 false, /* pc_relative */
1642 0, /* bitpos */
1643 complain_overflow_dont, /* complain_on_overflow */
1644 bfd_elf_generic_reloc, /* special_function */
1645 AARCH64_R_STR (TLSLE_MOVW_TPREL_G1), /* name */
1646 false, /* partial_inplace */
1647 0xffff, /* src_mask */
1648 0xffff, /* dst_mask */
1649 false), /* pcrel_offset */
1650
1651 HOWTO64 (AARCH64_R (TLSLE_MOVW_TPREL_G1_NC), /* type */
1652 16, /* rightshift */
1653 4, /* size */
1654 16, /* bitsize */
1655 false, /* pc_relative */
1656 0, /* bitpos */
1657 complain_overflow_dont, /* complain_on_overflow */
1658 bfd_elf_generic_reloc, /* special_function */
1659 AARCH64_R_STR (TLSLE_MOVW_TPREL_G1_NC), /* name */
1660 false, /* partial_inplace */
1661 0xffff, /* src_mask */
1662 0xffff, /* dst_mask */
1663 false), /* pcrel_offset */
1664
1665 HOWTO (AARCH64_R (TLSLE_MOVW_TPREL_G0), /* type */
1666 0, /* rightshift */
1667 4, /* size */
1668 16, /* bitsize */
1669 false, /* pc_relative */
1670 0, /* bitpos */
1671 complain_overflow_dont, /* complain_on_overflow */
1672 bfd_elf_generic_reloc, /* special_function */
1673 AARCH64_R_STR (TLSLE_MOVW_TPREL_G0), /* name */
1674 false, /* partial_inplace */
1675 0xffff, /* src_mask */
1676 0xffff, /* dst_mask */
1677 false), /* pcrel_offset */
1678
1679 HOWTO (AARCH64_R (TLSLE_MOVW_TPREL_G0_NC), /* type */
1680 0, /* rightshift */
1681 4, /* size */
1682 16, /* bitsize */
1683 false, /* pc_relative */
1684 0, /* bitpos */
1685 complain_overflow_dont, /* complain_on_overflow */
1686 bfd_elf_generic_reloc, /* special_function */
1687 AARCH64_R_STR (TLSLE_MOVW_TPREL_G0_NC), /* name */
1688 false, /* partial_inplace */
1689 0xffff, /* src_mask */
1690 0xffff, /* dst_mask */
1691 false), /* pcrel_offset */
1692
1693 HOWTO (AARCH64_R (TLSLE_ADD_TPREL_HI12), /* type */
1694 12, /* rightshift */
1695 4, /* size */
1696 12, /* bitsize */
1697 false, /* pc_relative */
1698 0, /* bitpos */
1699 complain_overflow_unsigned, /* complain_on_overflow */
1700 bfd_elf_generic_reloc, /* special_function */
1701 AARCH64_R_STR (TLSLE_ADD_TPREL_HI12), /* name */
1702 false, /* partial_inplace */
1703 0xfff, /* src_mask */
1704 0xfff, /* dst_mask */
1705 false), /* pcrel_offset */
1706
1707 HOWTO (AARCH64_R (TLSLE_ADD_TPREL_LO12), /* type */
1708 0, /* rightshift */
1709 4, /* size */
1710 12, /* bitsize */
1711 false, /* pc_relative */
1712 0, /* bitpos */
1713 complain_overflow_unsigned, /* complain_on_overflow */
1714 bfd_elf_generic_reloc, /* special_function */
1715 AARCH64_R_STR (TLSLE_ADD_TPREL_LO12), /* name */
1716 false, /* partial_inplace */
1717 0xfff, /* src_mask */
1718 0xfff, /* dst_mask */
1719 false), /* pcrel_offset */
1720
1721 HOWTO (AARCH64_R (TLSLE_ADD_TPREL_LO12_NC), /* type */
1722 0, /* rightshift */
1723 4, /* size */
1724 12, /* bitsize */
1725 false, /* pc_relative */
1726 0, /* bitpos */
1727 complain_overflow_dont, /* complain_on_overflow */
1728 bfd_elf_generic_reloc, /* special_function */
1729 AARCH64_R_STR (TLSLE_ADD_TPREL_LO12_NC), /* name */
1730 false, /* partial_inplace */
1731 0xfff, /* src_mask */
1732 0xfff, /* dst_mask */
1733 false), /* pcrel_offset */
1734
1735 /* LD/ST16: bit[11:1] of byte offset to module TLS base address. */
1736 HOWTO (AARCH64_R (TLSLE_LDST16_TPREL_LO12), /* type */
1737 1, /* rightshift */
1738 4, /* size */
1739 11, /* bitsize */
1740 false, /* pc_relative */
1741 10, /* bitpos */
1742 complain_overflow_unsigned, /* complain_on_overflow */
1743 bfd_elf_generic_reloc, /* special_function */
1744 AARCH64_R_STR (TLSLE_LDST16_TPREL_LO12), /* name */
1745 false, /* partial_inplace */
1746 0x1ffc00, /* src_mask */
1747 0x1ffc00, /* dst_mask */
1748 false), /* pcrel_offset */
1749
1750 /* Same as BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12, but no overflow check. */
1751 HOWTO (AARCH64_R (TLSLE_LDST16_TPREL_LO12_NC), /* type */
1752 1, /* rightshift */
1753 4, /* size */
1754 11, /* bitsize */
1755 false, /* pc_relative */
1756 10, /* bitpos */
1757 complain_overflow_dont, /* complain_on_overflow */
1758 bfd_elf_generic_reloc, /* special_function */
1759 AARCH64_R_STR (TLSLE_LDST16_TPREL_LO12_NC), /* name */
1760 false, /* partial_inplace */
1761 0x1ffc00, /* src_mask */
1762 0x1ffc00, /* dst_mask */
1763 false), /* pcrel_offset */
1764
1765 /* LD/ST32: bit[11:2] of byte offset to module TLS base address. */
1766 HOWTO (AARCH64_R (TLSLE_LDST32_TPREL_LO12), /* type */
1767 2, /* rightshift */
1768 4, /* size */
1769 10, /* bitsize */
1770 false, /* pc_relative */
1771 10, /* bitpos */
1772 complain_overflow_unsigned, /* complain_on_overflow */
1773 bfd_elf_generic_reloc, /* special_function */
1774 AARCH64_R_STR (TLSLE_LDST32_TPREL_LO12), /* name */
1775 false, /* partial_inplace */
1776 0xffc00, /* src_mask */
1777 0xffc00, /* dst_mask */
1778 false), /* pcrel_offset */
1779
1780 /* Same as BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12, but no overflow check. */
1781 HOWTO (AARCH64_R (TLSLE_LDST32_TPREL_LO12_NC), /* type */
1782 2, /* rightshift */
1783 4, /* size */
1784 10, /* bitsize */
1785 false, /* pc_relative */
1786 10, /* bitpos */
1787 complain_overflow_dont, /* complain_on_overflow */
1788 bfd_elf_generic_reloc, /* special_function */
1789 AARCH64_R_STR (TLSLE_LDST32_TPREL_LO12_NC), /* name */
1790 false, /* partial_inplace */
1791 0xffc00, /* src_mask */
1792 0xffc00, /* dst_mask */
1793 false), /* pcrel_offset */
1794
1795 /* LD/ST64: bit[11:3] of byte offset to module TLS base address. */
1796 HOWTO (AARCH64_R (TLSLE_LDST64_TPREL_LO12), /* type */
1797 3, /* rightshift */
1798 4, /* size */
1799 9, /* bitsize */
1800 false, /* pc_relative */
1801 10, /* bitpos */
1802 complain_overflow_unsigned, /* complain_on_overflow */
1803 bfd_elf_generic_reloc, /* special_function */
1804 AARCH64_R_STR (TLSLE_LDST64_TPREL_LO12), /* name */
1805 false, /* partial_inplace */
1806 0x7fc00, /* src_mask */
1807 0x7fc00, /* dst_mask */
1808 false), /* pcrel_offset */
1809
1810 /* Same as BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12, but no overflow check. */
1811 HOWTO (AARCH64_R (TLSLE_LDST64_TPREL_LO12_NC), /* type */
1812 3, /* rightshift */
1813 4, /* size */
1814 9, /* bitsize */
1815 false, /* pc_relative */
1816 10, /* bitpos */
1817 complain_overflow_dont, /* complain_on_overflow */
1818 bfd_elf_generic_reloc, /* special_function */
1819 AARCH64_R_STR (TLSLE_LDST64_TPREL_LO12_NC), /* name */
1820 false, /* partial_inplace */
1821 0x7fc00, /* src_mask */
1822 0x7fc00, /* dst_mask */
1823 false), /* pcrel_offset */
1824
1825 /* LD/ST8: bit[11:0] of byte offset to module TLS base address. */
1826 HOWTO (AARCH64_R (TLSLE_LDST8_TPREL_LO12), /* type */
1827 0, /* rightshift */
1828 4, /* size */
1829 12, /* bitsize */
1830 false, /* pc_relative */
1831 10, /* bitpos */
1832 complain_overflow_unsigned, /* complain_on_overflow */
1833 bfd_elf_generic_reloc, /* special_function */
1834 AARCH64_R_STR (TLSLE_LDST8_TPREL_LO12), /* name */
1835 false, /* partial_inplace */
1836 0x3ffc00, /* src_mask */
1837 0x3ffc00, /* dst_mask */
1838 false), /* pcrel_offset */
1839
1840 /* Same as BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12, but no overflow check. */
1841 HOWTO (AARCH64_R (TLSLE_LDST8_TPREL_LO12_NC), /* type */
1842 0, /* rightshift */
1843 4, /* size */
1844 12, /* bitsize */
1845 false, /* pc_relative */
1846 10, /* bitpos */
1847 complain_overflow_dont, /* complain_on_overflow */
1848 bfd_elf_generic_reloc, /* special_function */
1849 AARCH64_R_STR (TLSLE_LDST8_TPREL_LO12_NC), /* name */
1850 false, /* partial_inplace */
1851 0x3ffc00, /* src_mask */
1852 0x3ffc00, /* dst_mask */
1853 false), /* pcrel_offset */
1854
1855 HOWTO (AARCH64_R (TLSDESC_LD_PREL19), /* type */
1856 2, /* rightshift */
1857 4, /* size */
1858 19, /* bitsize */
1859 true, /* pc_relative */
1860 0, /* bitpos */
1861 complain_overflow_dont, /* complain_on_overflow */
1862 bfd_elf_generic_reloc, /* special_function */
1863 AARCH64_R_STR (TLSDESC_LD_PREL19), /* name */
1864 false, /* partial_inplace */
1865 0x0ffffe0, /* src_mask */
1866 0x0ffffe0, /* dst_mask */
1867 true), /* pcrel_offset */
1868
1869 HOWTO (AARCH64_R (TLSDESC_ADR_PREL21), /* type */
1870 0, /* rightshift */
1871 4, /* size */
1872 21, /* bitsize */
1873 true, /* pc_relative */
1874 0, /* bitpos */
1875 complain_overflow_dont, /* complain_on_overflow */
1876 bfd_elf_generic_reloc, /* special_function */
1877 AARCH64_R_STR (TLSDESC_ADR_PREL21), /* name */
1878 false, /* partial_inplace */
1879 0x1fffff, /* src_mask */
1880 0x1fffff, /* dst_mask */
1881 true), /* pcrel_offset */
1882
1883 /* Get to the page for the GOT entry for the symbol
1884 (G(S) - P) using an ADRP instruction. */
1885 HOWTO (AARCH64_R (TLSDESC_ADR_PAGE21), /* type */
1886 12, /* rightshift */
1887 4, /* size */
1888 21, /* bitsize */
1889 true, /* pc_relative */
1890 0, /* bitpos */
1891 complain_overflow_dont, /* complain_on_overflow */
1892 bfd_elf_generic_reloc, /* special_function */
1893 AARCH64_R_STR (TLSDESC_ADR_PAGE21), /* name */
1894 false, /* partial_inplace */
1895 0x1fffff, /* src_mask */
1896 0x1fffff, /* dst_mask */
1897 true), /* pcrel_offset */
1898
1899 /* LD64: GOT offset G(S) & 0xff8. */
1900 HOWTO64 (AARCH64_R (TLSDESC_LD64_LO12), /* type */
1901 3, /* rightshift */
1902 4, /* size */
1903 12, /* bitsize */
1904 false, /* pc_relative */
1905 0, /* bitpos */
1906 complain_overflow_dont, /* complain_on_overflow */
1907 bfd_elf_generic_reloc, /* special_function */
1908 AARCH64_R_STR (TLSDESC_LD64_LO12), /* name */
1909 false, /* partial_inplace */
1910 0xff8, /* src_mask */
1911 0xff8, /* dst_mask */
1912 false), /* pcrel_offset */
1913
1914 /* LD32: GOT offset G(S) & 0xffc. */
1915 HOWTO32 (AARCH64_R (TLSDESC_LD32_LO12_NC), /* type */
1916 2, /* rightshift */
1917 4, /* size */
1918 12, /* bitsize */
1919 false, /* pc_relative */
1920 0, /* bitpos */
1921 complain_overflow_dont, /* complain_on_overflow */
1922 bfd_elf_generic_reloc, /* special_function */
1923 AARCH64_R_STR (TLSDESC_LD32_LO12_NC), /* name */
1924 false, /* partial_inplace */
1925 0xffc, /* src_mask */
1926 0xffc, /* dst_mask */
1927 false), /* pcrel_offset */
1928
1929 /* ADD: GOT offset G(S) & 0xfff. */
1930 HOWTO (AARCH64_R (TLSDESC_ADD_LO12), /* type */
1931 0, /* rightshift */
1932 4, /* size */
1933 12, /* bitsize */
1934 false, /* pc_relative */
1935 0, /* bitpos */
1936 complain_overflow_dont, /* complain_on_overflow */
1937 bfd_elf_generic_reloc, /* special_function */
1938 AARCH64_R_STR (TLSDESC_ADD_LO12), /* name */
1939 false, /* partial_inplace */
1940 0xfff, /* src_mask */
1941 0xfff, /* dst_mask */
1942 false), /* pcrel_offset */
1943
1944 HOWTO64 (AARCH64_R (TLSDESC_OFF_G1), /* type */
1945 16, /* rightshift */
1946 4, /* size */
1947 12, /* bitsize */
1948 false, /* pc_relative */
1949 0, /* bitpos */
1950 complain_overflow_unsigned, /* complain_on_overflow */
1951 bfd_elf_generic_reloc, /* special_function */
1952 AARCH64_R_STR (TLSDESC_OFF_G1), /* name */
1953 false, /* partial_inplace */
1954 0xffff, /* src_mask */
1955 0xffff, /* dst_mask */
1956 false), /* pcrel_offset */
1957
1958 HOWTO64 (AARCH64_R (TLSDESC_OFF_G0_NC), /* type */
1959 0, /* rightshift */
1960 4, /* size */
1961 12, /* bitsize */
1962 false, /* pc_relative */
1963 0, /* bitpos */
1964 complain_overflow_dont, /* complain_on_overflow */
1965 bfd_elf_generic_reloc, /* special_function */
1966 AARCH64_R_STR (TLSDESC_OFF_G0_NC), /* name */
1967 false, /* partial_inplace */
1968 0xffff, /* src_mask */
1969 0xffff, /* dst_mask */
1970 false), /* pcrel_offset */
1971
1972 HOWTO64 (AARCH64_R (TLSDESC_LDR), /* type */
1973 0, /* rightshift */
1974 4, /* size */
1975 12, /* bitsize */
1976 false, /* pc_relative */
1977 0, /* bitpos */
1978 complain_overflow_dont, /* complain_on_overflow */
1979 bfd_elf_generic_reloc, /* special_function */
1980 AARCH64_R_STR (TLSDESC_LDR), /* name */
1981 false, /* partial_inplace */
1982 0x0, /* src_mask */
1983 0x0, /* dst_mask */
1984 false), /* pcrel_offset */
1985
1986 HOWTO64 (AARCH64_R (TLSDESC_ADD), /* type */
1987 0, /* rightshift */
1988 4, /* size */
1989 12, /* bitsize */
1990 false, /* pc_relative */
1991 0, /* bitpos */
1992 complain_overflow_dont, /* complain_on_overflow */
1993 bfd_elf_generic_reloc, /* special_function */
1994 AARCH64_R_STR (TLSDESC_ADD), /* name */
1995 false, /* partial_inplace */
1996 0x0, /* src_mask */
1997 0x0, /* dst_mask */
1998 false), /* pcrel_offset */
1999
2000 HOWTO (AARCH64_R (TLSDESC_CALL), /* type */
2001 0, /* rightshift */
2002 4, /* size */
2003 0, /* bitsize */
2004 false, /* pc_relative */
2005 0, /* bitpos */
2006 complain_overflow_dont, /* complain_on_overflow */
2007 bfd_elf_generic_reloc, /* special_function */
2008 AARCH64_R_STR (TLSDESC_CALL), /* name */
2009 false, /* partial_inplace */
2010 0x0, /* src_mask */
2011 0x0, /* dst_mask */
2012 false), /* pcrel_offset */
2013
2014 HOWTO (AARCH64_R (COPY), /* type */
2015 0, /* rightshift */
2016 4, /* size */
2017 64, /* bitsize */
2018 false, /* pc_relative */
2019 0, /* bitpos */
2020 complain_overflow_bitfield, /* complain_on_overflow */
2021 bfd_elf_generic_reloc, /* special_function */
2022 AARCH64_R_STR (COPY), /* name */
2023 true, /* partial_inplace */
2024 0xffffffff, /* src_mask */
2025 0xffffffff, /* dst_mask */
2026 false), /* pcrel_offset */
2027
2028 HOWTO (AARCH64_R (GLOB_DAT), /* type */
2029 0, /* rightshift */
2030 4, /* size */
2031 64, /* bitsize */
2032 false, /* pc_relative */
2033 0, /* bitpos */
2034 complain_overflow_bitfield, /* complain_on_overflow */
2035 bfd_elf_generic_reloc, /* special_function */
2036 AARCH64_R_STR (GLOB_DAT), /* name */
2037 true, /* partial_inplace */
2038 0xffffffff, /* src_mask */
2039 0xffffffff, /* dst_mask */
2040 false), /* pcrel_offset */
2041
2042 HOWTO (AARCH64_R (JUMP_SLOT), /* type */
2043 0, /* rightshift */
2044 4, /* size */
2045 64, /* bitsize */
2046 false, /* pc_relative */
2047 0, /* bitpos */
2048 complain_overflow_bitfield, /* complain_on_overflow */
2049 bfd_elf_generic_reloc, /* special_function */
2050 AARCH64_R_STR (JUMP_SLOT), /* name */
2051 true, /* partial_inplace */
2052 0xffffffff, /* src_mask */
2053 0xffffffff, /* dst_mask */
2054 false), /* pcrel_offset */
2055
2056 HOWTO (AARCH64_R (RELATIVE), /* type */
2057 0, /* rightshift */
2058 4, /* size */
2059 64, /* bitsize */
2060 false, /* pc_relative */
2061 0, /* bitpos */
2062 complain_overflow_bitfield, /* complain_on_overflow */
2063 bfd_elf_generic_reloc, /* special_function */
2064 AARCH64_R_STR (RELATIVE), /* name */
2065 true, /* partial_inplace */
2066 ALL_ONES, /* src_mask */
2067 ALL_ONES, /* dst_mask */
2068 false), /* pcrel_offset */
2069
2070 HOWTO (AARCH64_R (TLS_DTPMOD), /* type */
2071 0, /* rightshift */
2072 4, /* size */
2073 64, /* bitsize */
2074 false, /* pc_relative */
2075 0, /* bitpos */
2076 complain_overflow_dont, /* complain_on_overflow */
2077 bfd_elf_generic_reloc, /* special_function */
2078 #if ARCH_SIZE == 64
2079 AARCH64_R_STR (TLS_DTPMOD64), /* name */
2080 #else
2081 AARCH64_R_STR (TLS_DTPMOD), /* name */
2082 #endif
2083 false, /* partial_inplace */
2084 0, /* src_mask */
2085 ALL_ONES, /* dst_mask */
2086 false), /* pcrel_offset */
2087
2088 HOWTO (AARCH64_R (TLS_DTPREL), /* type */
2089 0, /* rightshift */
2090 4, /* size */
2091 64, /* bitsize */
2092 false, /* pc_relative */
2093 0, /* bitpos */
2094 complain_overflow_dont, /* complain_on_overflow */
2095 bfd_elf_generic_reloc, /* special_function */
2096 #if ARCH_SIZE == 64
2097 AARCH64_R_STR (TLS_DTPREL64), /* name */
2098 #else
2099 AARCH64_R_STR (TLS_DTPREL), /* name */
2100 #endif
2101 false, /* partial_inplace */
2102 0, /* src_mask */
2103 ALL_ONES, /* dst_mask */
2104 false), /* pcrel_offset */
2105
2106 HOWTO (AARCH64_R (TLS_TPREL), /* type */
2107 0, /* rightshift */
2108 4, /* size */
2109 64, /* bitsize */
2110 false, /* pc_relative */
2111 0, /* bitpos */
2112 complain_overflow_dont, /* complain_on_overflow */
2113 bfd_elf_generic_reloc, /* special_function */
2114 #if ARCH_SIZE == 64
2115 AARCH64_R_STR (TLS_TPREL64), /* name */
2116 #else
2117 AARCH64_R_STR (TLS_TPREL), /* name */
2118 #endif
2119 false, /* partial_inplace */
2120 0, /* src_mask */
2121 ALL_ONES, /* dst_mask */
2122 false), /* pcrel_offset */
2123
2124 HOWTO (AARCH64_R (TLSDESC), /* type */
2125 0, /* rightshift */
2126 4, /* size */
2127 64, /* bitsize */
2128 false, /* pc_relative */
2129 0, /* bitpos */
2130 complain_overflow_dont, /* complain_on_overflow */
2131 bfd_elf_generic_reloc, /* special_function */
2132 AARCH64_R_STR (TLSDESC), /* name */
2133 false, /* partial_inplace */
2134 0, /* src_mask */
2135 ALL_ONES, /* dst_mask */
2136 false), /* pcrel_offset */
2137
2138 HOWTO (AARCH64_R (IRELATIVE), /* type */
2139 0, /* rightshift */
2140 4, /* size */
2141 64, /* bitsize */
2142 false, /* pc_relative */
2143 0, /* bitpos */
2144 complain_overflow_bitfield, /* complain_on_overflow */
2145 bfd_elf_generic_reloc, /* special_function */
2146 AARCH64_R_STR (IRELATIVE), /* name */
2147 false, /* partial_inplace */
2148 0, /* src_mask */
2149 ALL_ONES, /* dst_mask */
2150 false), /* pcrel_offset */
2151
2152 EMPTY_HOWTO (0),
2153 };
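
/* Entry N of the table above corresponds to the bfd reloc code
   BFD_RELOC_AARCH64_RELOC_START + N; the lookup helpers that follow rely
   on that layout.  */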
2154
2155 static reloc_howto_type elfNN_aarch64_howto_none =
2156 HOWTO (R_AARCH64_NONE, /* type */
2157 0, /* rightshift */
2158 0, /* size */
2159 0, /* bitsize */
2160 false, /* pc_relative */
2161 0, /* bitpos */
2162 complain_overflow_dont, /* complain_on_overflow */
2163 bfd_elf_generic_reloc, /* special_function */
2164 "R_AARCH64_NONE", /* name */
2165 false, /* partial_inplace */
2166 0, /* src_mask */
2167 0, /* dst_mask */
2168 false); /* pcrel_offset */
2169
2170 /* Given HOWTO, return the bfd internal relocation enumerator. */
2171
2172 static bfd_reloc_code_real_type
2173 elfNN_aarch64_bfd_reloc_from_howto (reloc_howto_type *howto)
2174 {
2175 const int size
2176 = (int) ARRAY_SIZE (elfNN_aarch64_howto_table);
2177 const ptrdiff_t offset
2178 = howto - elfNN_aarch64_howto_table;
2179
2180 if (offset > 0 && offset < size - 1)
2181 return BFD_RELOC_AARCH64_RELOC_START + offset;
2182
2183 if (howto == &elfNN_aarch64_howto_none)
2184 return BFD_RELOC_AARCH64_NONE;
2185
2186 return BFD_RELOC_AARCH64_RELOC_START;
2187 }
2188
2189 /* Given R_TYPE, return the bfd internal relocation enumerator. */
2190
2191 static bfd_reloc_code_real_type
2192 elfNN_aarch64_bfd_reloc_from_type (bfd *abfd, unsigned int r_type)
2193 {
2194 static bool initialized_p = false;
2195 /* Indexed by R_TYPE, values are offsets in the howto_table. */
2196 static unsigned int offsets[R_AARCH64_end];
2197
2198 if (!initialized_p)
2199 {
2200 unsigned int i;
2201
2202 for (i = 1; i < ARRAY_SIZE (elfNN_aarch64_howto_table) - 1; ++i)
2203 if (elfNN_aarch64_howto_table[i].type != 0)
2204 offsets[elfNN_aarch64_howto_table[i].type] = i;
2205
2206 initialized_p = true;
2207 }
2208
2209 if (r_type == R_AARCH64_NONE || r_type == R_AARCH64_NULL)
2210 return BFD_RELOC_AARCH64_NONE;
2211
2212 /* PR 17512: file: b371e70a. */
2213 if (r_type >= R_AARCH64_end)
2214 {
2215 _bfd_error_handler (_("%pB: unsupported relocation type %#x"),
2216 abfd, r_type);
2217 bfd_set_error (bfd_error_bad_value);
2218 return BFD_RELOC_AARCH64_NONE;
2219 }
2220
2221 return BFD_RELOC_AARCH64_RELOC_START + offsets[r_type];
2222 }
2223
2224 struct elf_aarch64_reloc_map
2225 {
2226 bfd_reloc_code_real_type from;
2227 bfd_reloc_code_real_type to;
2228 };
2229
2230 /* Map bfd generic reloc to AArch64-specific reloc. */
2231 static const struct elf_aarch64_reloc_map elf_aarch64_reloc_map[] =
2232 {
2233 {BFD_RELOC_NONE, BFD_RELOC_AARCH64_NONE},
2234
2235 /* Basic data relocations. */
2236 {BFD_RELOC_CTOR, BFD_RELOC_AARCH64_NN},
2237 {BFD_RELOC_64, BFD_RELOC_AARCH64_64},
2238 {BFD_RELOC_32, BFD_RELOC_AARCH64_32},
2239 {BFD_RELOC_16, BFD_RELOC_AARCH64_16},
2240 {BFD_RELOC_64_PCREL, BFD_RELOC_AARCH64_64_PCREL},
2241 {BFD_RELOC_32_PCREL, BFD_RELOC_AARCH64_32_PCREL},
2242 {BFD_RELOC_16_PCREL, BFD_RELOC_AARCH64_16_PCREL},
2243 };
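
/* Codes outside this map are expected to already lie in the
   BFD_RELOC_AARCH64_RELOC_START..BFD_RELOC_AARCH64_RELOC_END range and are
   looked up directly in elfNN_aarch64_howto_table.  */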
2244
2245 /* Given the bfd internal relocation enumerator in CODE, return the
2246 corresponding howto entry. */
2247
2248 static reloc_howto_type *
2249 elfNN_aarch64_howto_from_bfd_reloc (bfd_reloc_code_real_type code)
2250 {
2251 unsigned int i;
2252
2253 /* Convert bfd generic reloc to AArch64-specific reloc. */
2254 if (code < BFD_RELOC_AARCH64_RELOC_START
2255 || code > BFD_RELOC_AARCH64_RELOC_END)
2256 for (i = 0; i < ARRAY_SIZE (elf_aarch64_reloc_map); i++)
2257 if (elf_aarch64_reloc_map[i].from == code)
2258 {
2259 code = elf_aarch64_reloc_map[i].to;
2260 break;
2261 }
2262
2263 if (code > BFD_RELOC_AARCH64_RELOC_START
2264 && code < BFD_RELOC_AARCH64_RELOC_END)
2265 if (elfNN_aarch64_howto_table[code - BFD_RELOC_AARCH64_RELOC_START].type)
2266 return &elfNN_aarch64_howto_table[code - BFD_RELOC_AARCH64_RELOC_START];
2267
2268 if (code == BFD_RELOC_AARCH64_NONE)
2269 return &elfNN_aarch64_howto_none;
2270
2271 return NULL;
2272 }
2273
2274 static reloc_howto_type *
2275 elfNN_aarch64_howto_from_type (bfd *abfd, unsigned int r_type)
2276 {
2277 bfd_reloc_code_real_type val;
2278 reloc_howto_type *howto;
2279
2280 #if ARCH_SIZE == 32
2281 if (r_type > 256)
2282 {
2283 bfd_set_error (bfd_error_bad_value);
2284 return NULL;
2285 }
2286 #endif
2287
2288 if (r_type == R_AARCH64_NONE)
2289 return &elfNN_aarch64_howto_none;
2290
2291 val = elfNN_aarch64_bfd_reloc_from_type (abfd, r_type);
2292 howto = elfNN_aarch64_howto_from_bfd_reloc (val);
2293
2294 if (howto != NULL)
2295 return howto;
2296
2297 bfd_set_error (bfd_error_bad_value);
2298 return NULL;
2299 }
2300
2301 static bool
2302 elfNN_aarch64_info_to_howto (bfd *abfd, arelent *bfd_reloc,
2303 Elf_Internal_Rela *elf_reloc)
2304 {
2305 unsigned int r_type;
2306
2307 r_type = ELFNN_R_TYPE (elf_reloc->r_info);
2308 bfd_reloc->howto = elfNN_aarch64_howto_from_type (abfd, r_type);
2309
2310 if (bfd_reloc->howto == NULL)
2311 {
2312 /* xgettext:c-format */
2313 _bfd_error_handler (_("%pB: unsupported relocation type %#x"), abfd, r_type);
2314 return false;
2315 }
2316 return true;
2317 }
2318
2319 static reloc_howto_type *
2320 elfNN_aarch64_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
2321 bfd_reloc_code_real_type code)
2322 {
2323 reloc_howto_type *howto = elfNN_aarch64_howto_from_bfd_reloc (code);
2324
2325 if (howto != NULL)
2326 return howto;
2327
2328 bfd_set_error (bfd_error_bad_value);
2329 return NULL;
2330 }
2331
2332 static reloc_howto_type *
2333 elfNN_aarch64_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
2334 const char *r_name)
2335 {
2336 unsigned int i;
2337
2338 for (i = 1; i < ARRAY_SIZE (elfNN_aarch64_howto_table) - 1; ++i)
2339 if (elfNN_aarch64_howto_table[i].name != NULL
2340 && strcasecmp (elfNN_aarch64_howto_table[i].name, r_name) == 0)
2341 return &elfNN_aarch64_howto_table[i];
2342
2343 return NULL;
2344 }
2345
2346 #define TARGET_LITTLE_SYM aarch64_elfNN_le_vec
2347 #define TARGET_LITTLE_NAME "elfNN-littleaarch64"
2348 #define TARGET_BIG_SYM aarch64_elfNN_be_vec
2349 #define TARGET_BIG_NAME "elfNN-bigaarch64"
2350
2351 /* The linker script knows the section names for placement.
2352 The entry_names are used to do simple name mangling on the stubs.
2353 Given a function name, and its type, the stub can be found. The
2354 name can be changed. The only requirement is that the %s be present. */
2355 #define STUB_ENTRY_NAME "__%s_veneer"
2356
2357 /* Stub name for a BTI landing stub. */
2358 #define BTI_STUB_ENTRY_NAME "__%s_bti_veneer"
2359
2360 /* The name of the dynamic interpreter. This is put in the .interp
2361 section. */
2362 #define ELF_DYNAMIC_INTERPRETER "/lib/ld.so.1"
2363
2364 #define AARCH64_MAX_FWD_BRANCH_OFFSET \
2365 (((1 << 25) - 1) << 2)
2366 #define AARCH64_MAX_BWD_BRANCH_OFFSET \
2367 (-((1 << 25) << 2))
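
/* B and BL encode a signed 26-bit word offset, so a direct branch can
   reach roughly +/-128MB from the branch instruction; calls beyond the
   limits above must go through a stub (veneer).  */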
2368
2369 #define AARCH64_MAX_ADRP_IMM ((1 << 20) - 1)
2370 #define AARCH64_MIN_ADRP_IMM (-(1 << 20))
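
/* The ADRP immediate is a signed 21-bit count of 4KB pages, giving a
   reach of roughly +/-4GB from the page containing the instruction.  */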
2371
2372 static int
2373 aarch64_valid_for_adrp_p (bfd_vma value, bfd_vma place)
2374 {
2375 bfd_signed_vma offset = (bfd_signed_vma) (PG (value) - PG (place)) >> 12;
2376 return offset <= AARCH64_MAX_ADRP_IMM && offset >= AARCH64_MIN_ADRP_IMM;
2377 }
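
/* The PG macro masks off the low 12 bits, so the test above compares 4KB
   page numbers against the signed 21-bit ADRP immediate range.  */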
2378
2379 static int
2380 aarch64_valid_branch_p (bfd_vma value, bfd_vma place)
2381 {
2382 bfd_signed_vma offset = (bfd_signed_vma) (value - place);
2383 return (offset <= AARCH64_MAX_FWD_BRANCH_OFFSET
2384 && offset >= AARCH64_MAX_BWD_BRANCH_OFFSET);
2385 }
2386
2387 static const uint32_t aarch64_adrp_branch_stub [] =
2388 {
2389 0x90000010, /* adrp ip0, X */
2390 /* R_AARCH64_ADR_HI21_PCREL(X) */
2391 0x91000210, /* add ip0, ip0, :lo12:X */
2392 /* R_AARCH64_ADD_ABS_LO12_NC(X) */
2393 0xd61f0200, /* br ip0 */
2394 };
2395
2396 static const uint32_t aarch64_long_branch_stub[] =
2397 {
2398 #if ARCH_SIZE == 64
2399 0x58000090, /* ldr ip0, 1f */
2400 #else
2401 0x18000090, /* ldr wip0, 1f */
2402 #endif
2403 0x10000011, /* adr ip1, #0 */
2404 0x8b110210, /* add ip0, ip0, ip1 */
2405 0xd61f0200, /* br ip0 */
2406 0x00000000, /* 1: .xword or .word
2407 R_AARCH64_PRELNN(X) + 12
2408 */
2409 0x00000000,
2410 };
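
/* The literal at offset 16 of this stub ends up holding the target
   address minus the address of the "adr ip1, #0" instruction (hence the
   +12 bias applied when the R_AARCH64_PRELNN relocation is resolved in
   aarch64_build_one_stub): adding ip1, which receives that instruction's
   own address, reconstructs the absolute target for the final br.  */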
2411
2412 static const uint32_t aarch64_bti_direct_branch_stub[] =
2413 {
2414 0xd503245f, /* bti c */
2415 0x14000000, /* b <label> */
2416 };
2417
2418 static const uint32_t aarch64_erratum_835769_stub[] =
2419 {
2420 0x00000000, /* Placeholder for multiply accumulate. */
2421 0x14000000, /* b <label> */
2422 };
2423
2424 static const uint32_t aarch64_erratum_843419_stub[] =
2425 {
2426 0x00000000, /* Placeholder for LDR instruction. */
2427 0x14000000, /* b <label> */
2428 };
2429
2430 /* Section name for stubs is the associated section name plus this
2431 string. */
2432 #define STUB_SUFFIX ".stub"
2433
2434 enum elf_aarch64_stub_type
2435 {
2436 aarch64_stub_none,
2437 aarch64_stub_adrp_branch,
2438 aarch64_stub_long_branch,
2439 aarch64_stub_bti_direct_branch,
2440 aarch64_stub_erratum_835769_veneer,
2441 aarch64_stub_erratum_843419_veneer,
2442 };
2443
2444 struct elf_aarch64_stub_hash_entry
2445 {
2446 /* Base hash table entry structure. */
2447 struct bfd_hash_entry root;
2448
2449 /* The stub section. */
2450 asection *stub_sec;
2451
2452 /* Offset within stub_sec of the beginning of this stub. */
2453 bfd_vma stub_offset;
2454
2455 /* Given the symbol's value and its section we can determine its final
2456 value when building the stubs (so the stub knows where to jump). */
2457 bfd_vma target_value;
2458 asection *target_section;
2459
2460 enum elf_aarch64_stub_type stub_type;
2461
2462 /* The symbol table entry, if any, that this was derived from. */
2463 struct elf_aarch64_link_hash_entry *h;
2464
2465 /* Destination symbol type */
2466 unsigned char st_type;
2467
2468 /* The target is also a stub. */
2469 bool double_stub;
2470
2471 /* Where this stub is being called from, or, in the case of combined
2472 stub sections, the first input section in the group. */
2473 asection *id_sec;
2474
2475 /* The name for the local symbol at the start of this stub. The
2476 stub name in the hash table has to be unique; this does not, so
2477 it can be friendlier. */
2478 char *output_name;
2479
2480 /* The instruction which caused this stub to be generated (only valid for
2481 erratum 835769 workaround stubs at present). */
2482 uint32_t veneered_insn;
2483
2484 /* In an erratum 843419 workaround stub, the ADRP instruction offset. */
2485 bfd_vma adrp_offset;
2486 };
2487
2488 /* Used to build a map of a section. This is required for mixed-endian
2489 code/data. */
2490
2491 typedef struct elf_elf_section_map
2492 {
2493 bfd_vma vma;
2494 char type;
2495 }
2496 elf_aarch64_section_map;
2497
2498
2499 typedef struct _aarch64_elf_section_data
2500 {
2501 struct bfd_elf_section_data elf;
2502 unsigned int mapcount;
2503 unsigned int mapsize;
2504 elf_aarch64_section_map *map;
2505 }
2506 _aarch64_elf_section_data;
2507
2508 #define elf_aarch64_section_data(sec) \
2509 ((_aarch64_elf_section_data *) elf_section_data (sec))
2510
2511 /* The size of the thread control block which is defined to be two pointers. */
2512 #define TCB_SIZE (ARCH_SIZE/8)*2
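/* For ELF64 this evaluates to 16 bytes, for ELF32 to 8 bytes.  */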
2513
2514 struct elf_aarch64_local_symbol
2515 {
2516 unsigned int got_type;
2517 bfd_signed_vma got_refcount;
2518 bfd_vma got_offset;
2519
2520 /* Offset of the GOTPLT entry reserved for the TLS descriptor. The
2521 offset is from the end of the jump table and reserved entries
2522 within the PLTGOT.
2523
2524 The magic value (bfd_vma) -1 indicates that an offset has not been
2525 allocated. */
2526 bfd_vma tlsdesc_got_jump_table_offset;
2527 };
2528
2529 struct elf_aarch64_obj_tdata
2530 {
2531 struct elf_obj_tdata root;
2532
2533 /* local symbol descriptors */
2534 struct elf_aarch64_local_symbol *locals;
2535
2536 /* Zero to warn when linking objects with incompatible enum sizes. */
2537 int no_enum_size_warning;
2538
2539 /* Zero to warn when linking objects with incompatible wchar_t sizes. */
2540 int no_wchar_size_warning;
2541
2542 /* All GNU_PROPERTY_AARCH64_FEATURE_1_AND properties. */
2543 uint32_t gnu_and_prop;
2544
2545 /* Zero to warn when linking objects with incompatible
2546 GNU_PROPERTY_AARCH64_FEATURE_1_BTI. */
2547 int no_bti_warn;
2548
2549 /* PLT type based on security. */
2550 aarch64_plt_type plt_type;
2551 };
2552
2553 #define elf_aarch64_tdata(bfd) \
2554 ((struct elf_aarch64_obj_tdata *) (bfd)->tdata.any)
2555
2556 #define elf_aarch64_locals(bfd) (elf_aarch64_tdata (bfd)->locals)
2557
2558 #define is_aarch64_elf(bfd) \
2559 (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
2560 && elf_tdata (bfd) != NULL \
2561 && elf_object_id (bfd) == AARCH64_ELF_DATA)
2562
2563 static bool
2564 elfNN_aarch64_mkobject (bfd *abfd)
2565 {
2566 return bfd_elf_allocate_object (abfd, sizeof (struct elf_aarch64_obj_tdata),
2567 AARCH64_ELF_DATA);
2568 }
2569
2570 #define elf_aarch64_hash_entry(ent) \
2571 ((struct elf_aarch64_link_hash_entry *)(ent))
2572
2573 #define GOT_UNKNOWN 0
2574 #define GOT_NORMAL 1
2575 #define GOT_TLS_GD 2
2576 #define GOT_TLS_IE 4
2577 #define GOT_TLSDESC_GD 8
2578
2579 #define GOT_TLS_GD_ANY_P(type) ((type & GOT_TLS_GD) || (type & GOT_TLSDESC_GD))
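
/* got_type is a bit mask: a symbol referenced through both a traditional
   TLS GD reloc and a TLS descriptor reloc can carry
   (GOT_TLS_GD | GOT_TLSDESC_GD) at the same time, which is what
   GOT_TLS_GD_ANY_P checks for.  */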
2580
2581 /* AArch64 ELF linker hash entry. */
2582 struct elf_aarch64_link_hash_entry
2583 {
2584 struct elf_link_hash_entry root;
2585
2586 /* Since PLT entries have variable size, we need to record the
2587 index into .got.plt instead of recomputing it from the PLT
2588 offset. */
2589 bfd_signed_vma plt_got_offset;
2590
2591 /* Bit mask representing the type of GOT entry(s) if any required by
2592 this symbol. */
2593 unsigned int got_type;
2594
2595 /* TRUE if symbol is defined as a protected symbol. */
2596 unsigned int def_protected : 1;
2597
2598 /* A pointer to the most recently used stub hash entry against this
2599 symbol. */
2600 struct elf_aarch64_stub_hash_entry *stub_cache;
2601
2602 /* Offset of the GOTPLT entry reserved for the TLS descriptor. The offset
2603 is from the end of the jump table and reserved entries within the PLTGOT.
2604
2605 The magic value (bfd_vma) -1 indicates that an offset has not
2606 been allocated. */
2607 bfd_vma tlsdesc_got_jump_table_offset;
2608 };
2609
2610 static unsigned int
2611 elfNN_aarch64_symbol_got_type (struct elf_link_hash_entry *h,
2612 bfd *abfd,
2613 unsigned long r_symndx)
2614 {
2615 if (h)
2616 return elf_aarch64_hash_entry (h)->got_type;
2617
2618 if (! elf_aarch64_locals (abfd))
2619 return GOT_UNKNOWN;
2620
2621 return elf_aarch64_locals (abfd)[r_symndx].got_type;
2622 }
2623
2624 /* Get the AArch64 elf linker hash table from a link_info structure. */
2625 #define elf_aarch64_hash_table(info) \
2626 ((struct elf_aarch64_link_hash_table *) ((info)->hash))
2627
2628 #define aarch64_stub_hash_lookup(table, string, create, copy) \
2629 ((struct elf_aarch64_stub_hash_entry *) \
2630 bfd_hash_lookup ((table), (string), (create), (copy)))
2631
2632 /* AArch64 ELF linker hash table. */
2633 struct elf_aarch64_link_hash_table
2634 {
2635 /* The main hash table. */
2636 struct elf_link_hash_table root;
2637
2638 /* Nonzero to force PIC branch veneers. */
2639 int pic_veneer;
2640
2641 /* Fix erratum 835769. */
2642 int fix_erratum_835769;
2643
2644 /* Fix erratum 843419. */
2645 erratum_84319_opts fix_erratum_843419;
2646
2647 /* Don't apply link-time values for dynamic relocations. */
2648 int no_apply_dynamic_relocs;
2649
2650 /* The number of bytes in the initial entry in the PLT. */
2651 bfd_size_type plt_header_size;
2652
2653 /* The bytes of the initial PLT entry. */
2654 const bfd_byte *plt0_entry;
2655
2656 /* The number of bytes in the subsequent PLT entries. */
2657 bfd_size_type plt_entry_size;
2658
2659 /* The bytes of the subsequent PLT entry. */
2660 const bfd_byte *plt_entry;
2661
2662 /* For convenience in allocate_dynrelocs. */
2663 bfd *obfd;
2664
2665 /* The amount of space used by the reserved portion of the sgotplt
2666 section, plus whatever space is used by the jump slots. */
2667 bfd_vma sgotplt_jump_table_size;
2668
2669 /* The stub hash table. */
2670 struct bfd_hash_table stub_hash_table;
2671
2672 /* Linker stub bfd. */
2673 bfd *stub_bfd;
2674
2675 /* Linker call-backs. */
2676 asection *(*add_stub_section) (const char *, asection *);
2677 void (*layout_sections_again) (void);
2678
2679 /* Array to keep track of which stub sections have been created, and
2680 information on stub grouping. */
2681 struct map_stub
2682 {
2683 /* This is the section to which stubs in the group will be
2684 attached. */
2685 asection *link_sec;
2686 /* The stub section. */
2687 asection *stub_sec;
2688 } *stub_group;
2689
2690 /* Assorted information used by elfNN_aarch64_size_stubs. */
2691 unsigned int bfd_count;
2692 unsigned int top_index;
2693 asection **input_list;
2694
2695 /* True when two stubs are added where one targets the other; this happens
2696 when BTI stubs are inserted, and then the stub layout must not change
2697 during elfNN_aarch64_build_stubs. */
2698 bool has_double_stub;
2699
2700 /* JUMP_SLOT relocs for variant PCS symbols may be present. */
2701 int variant_pcs;
2702
2703 /* The number of bytes in the PLT entry for the TLS descriptor. */
2704 bfd_size_type tlsdesc_plt_entry_size;
2705
2706 /* Used by local STT_GNU_IFUNC symbols. */
2707 htab_t loc_hash_table;
2708 void * loc_hash_memory;
2709 };
2710
2711 /* Create an entry in an AArch64 ELF linker hash table. */
2712
2713 static struct bfd_hash_entry *
2714 elfNN_aarch64_link_hash_newfunc (struct bfd_hash_entry *entry,
2715 struct bfd_hash_table *table,
2716 const char *string)
2717 {
2718 struct elf_aarch64_link_hash_entry *ret =
2719 (struct elf_aarch64_link_hash_entry *) entry;
2720
2721 /* Allocate the structure if it has not already been allocated by a
2722 subclass. */
2723 if (ret == NULL)
2724 ret = bfd_hash_allocate (table,
2725 sizeof (struct elf_aarch64_link_hash_entry));
2726 if (ret == NULL)
2727 return (struct bfd_hash_entry *) ret;
2728
2729 /* Call the allocation method of the superclass. */
2730 ret = ((struct elf_aarch64_link_hash_entry *)
2731 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret,
2732 table, string));
2733 if (ret != NULL)
2734 {
2735 ret->got_type = GOT_UNKNOWN;
2736 ret->def_protected = 0;
2737 ret->plt_got_offset = (bfd_vma) - 1;
2738 ret->stub_cache = NULL;
2739 ret->tlsdesc_got_jump_table_offset = (bfd_vma) - 1;
2740 }
2741
2742 return (struct bfd_hash_entry *) ret;
2743 }
2744
2745 /* Initialize an entry in the stub hash table. */
2746
2747 static struct bfd_hash_entry *
2748 stub_hash_newfunc (struct bfd_hash_entry *entry,
2749 struct bfd_hash_table *table, const char *string)
2750 {
2751 /* Allocate the structure if it has not already been allocated by a
2752 subclass. */
2753 if (entry == NULL)
2754 {
2755 entry = bfd_hash_allocate (table,
2756 sizeof (struct
2757 elf_aarch64_stub_hash_entry));
2758 if (entry == NULL)
2759 return entry;
2760 }
2761
2762 /* Call the allocation method of the superclass. */
2763 entry = bfd_hash_newfunc (entry, table, string);
2764 if (entry != NULL)
2765 {
2766 struct elf_aarch64_stub_hash_entry *eh;
2767
2768 /* Initialize the local fields. */
2769 eh = (struct elf_aarch64_stub_hash_entry *) entry;
2770 memset (&eh->stub_sec, 0,
2771 (sizeof (struct elf_aarch64_stub_hash_entry)
2772 - offsetof (struct elf_aarch64_stub_hash_entry, stub_sec)));
2773 }
2774
2775 return entry;
2776 }
2777
2778 /* Compute a hash of a local hash entry. We use elf_link_hash_entry
2779 for local symbols so that we can handle local STT_GNU_IFUNC symbols
2780 as global symbols. We reuse indx and dynstr_index for the local symbol
2781 hash since they aren't used by global symbols in this backend. */
2782
2783 static hashval_t
2784 elfNN_aarch64_local_htab_hash (const void *ptr)
2785 {
2786 struct elf_link_hash_entry *h
2787 = (struct elf_link_hash_entry *) ptr;
2788 return ELF_LOCAL_SYMBOL_HASH (h->indx, h->dynstr_index);
2789 }
2790
2791 /* Compare local hash entries. */
2792
2793 static int
2794 elfNN_aarch64_local_htab_eq (const void *ptr1, const void *ptr2)
2795 {
2796 struct elf_link_hash_entry *h1
2797 = (struct elf_link_hash_entry *) ptr1;
2798 struct elf_link_hash_entry *h2
2799 = (struct elf_link_hash_entry *) ptr2;
2800
2801 return h1->indx == h2->indx && h1->dynstr_index == h2->dynstr_index;
2802 }
2803
2804 /* Find and/or create a hash entry for a local symbol. */
2805
2806 static struct elf_link_hash_entry *
2807 elfNN_aarch64_get_local_sym_hash (struct elf_aarch64_link_hash_table *htab,
2808 bfd *abfd, const Elf_Internal_Rela *rel,
2809 bool create)
2810 {
2811 struct elf_aarch64_link_hash_entry e, *ret;
2812 asection *sec = abfd->sections;
2813 hashval_t h = ELF_LOCAL_SYMBOL_HASH (sec->id,
2814 ELFNN_R_SYM (rel->r_info));
2815 void **slot;
2816
2817 e.root.indx = sec->id;
2818 e.root.dynstr_index = ELFNN_R_SYM (rel->r_info);
2819 slot = htab_find_slot_with_hash (htab->loc_hash_table, &e, h,
2820 create ? INSERT : NO_INSERT);
2821
2822 if (!slot)
2823 return NULL;
2824
2825 if (*slot)
2826 {
2827 ret = (struct elf_aarch64_link_hash_entry *) *slot;
2828 return &ret->root;
2829 }
2830
2831 ret = (struct elf_aarch64_link_hash_entry *)
2832 objalloc_alloc ((struct objalloc *) htab->loc_hash_memory,
2833 sizeof (struct elf_aarch64_link_hash_entry));
2834 if (ret)
2835 {
2836 memset (ret, 0, sizeof (*ret));
2837 ret->root.indx = sec->id;
2838 ret->root.dynstr_index = ELFNN_R_SYM (rel->r_info);
2839 ret->root.dynindx = -1;
2840 *slot = ret;
2841 }
2842 return &ret->root;
2843 }
2844
2845 /* Copy the extra info we tack onto an elf_link_hash_entry. */
2846
2847 static void
2848 elfNN_aarch64_copy_indirect_symbol (struct bfd_link_info *info,
2849 struct elf_link_hash_entry *dir,
2850 struct elf_link_hash_entry *ind)
2851 {
2852 struct elf_aarch64_link_hash_entry *edir, *eind;
2853
2854 edir = (struct elf_aarch64_link_hash_entry *) dir;
2855 eind = (struct elf_aarch64_link_hash_entry *) ind;
2856
2857 if (ind->root.type == bfd_link_hash_indirect)
2858 {
2859 /* Copy over PLT info. */
2860 if (dir->got.refcount <= 0)
2861 {
2862 edir->got_type = eind->got_type;
2863 eind->got_type = GOT_UNKNOWN;
2864 }
2865 }
2866
2867 _bfd_elf_link_hash_copy_indirect (info, dir, ind);
2868 }
2869
2870 /* Merge non-visibility st_other attributes. */
2871
2872 static void
2873 elfNN_aarch64_merge_symbol_attribute (struct elf_link_hash_entry *h,
2874 unsigned int st_other,
2875 bool definition,
2876 bool dynamic ATTRIBUTE_UNUSED)
2877 {
2878 if (definition)
2879 {
2880 struct elf_aarch64_link_hash_entry *eh
2881 = (struct elf_aarch64_link_hash_entry *)h;
2882 eh->def_protected = ELF_ST_VISIBILITY (st_other) == STV_PROTECTED;
2883 }
2884
2885 unsigned int isym_sto = st_other & ~ELF_ST_VISIBILITY (-1);
2886 unsigned int h_sto = h->other & ~ELF_ST_VISIBILITY (-1);
2887
2888 if (isym_sto == h_sto)
2889 return;
2890
2891 if (isym_sto & ~STO_AARCH64_VARIANT_PCS)
2892 /* Not fatal, this callback cannot fail. */
2893 _bfd_error_handler (_("unknown attribute for symbol `%s': 0x%02x"),
2894 h->root.root.string, isym_sto);
2895
2896 /* Note: Ideally we would warn about any attribute mismatch, but
2897 this api does not allow that without substantial changes. */
2898 if (isym_sto & STO_AARCH64_VARIANT_PCS)
2899 h->other |= STO_AARCH64_VARIANT_PCS;
2900 }
2901
2902 /* Destroy an AArch64 elf linker hash table. */
2903
2904 static void
2905 elfNN_aarch64_link_hash_table_free (bfd *obfd)
2906 {
2907 struct elf_aarch64_link_hash_table *ret
2908 = (struct elf_aarch64_link_hash_table *) obfd->link.hash;
2909
2910 if (ret->loc_hash_table)
2911 htab_delete (ret->loc_hash_table);
2912 if (ret->loc_hash_memory)
2913 objalloc_free ((struct objalloc *) ret->loc_hash_memory);
2914
2915 bfd_hash_table_free (&ret->stub_hash_table);
2916 _bfd_elf_link_hash_table_free (obfd);
2917 }
2918
2919 /* Create an AArch64 elf linker hash table. */
2920
2921 static struct bfd_link_hash_table *
2922 elfNN_aarch64_link_hash_table_create (bfd *abfd)
2923 {
2924 struct elf_aarch64_link_hash_table *ret;
2925 size_t amt = sizeof (struct elf_aarch64_link_hash_table);
2926
2927 ret = bfd_zmalloc (amt);
2928 if (ret == NULL)
2929 return NULL;
2930
2931 if (!_bfd_elf_link_hash_table_init
2932 (&ret->root, abfd, elfNN_aarch64_link_hash_newfunc,
2933 sizeof (struct elf_aarch64_link_hash_entry), AARCH64_ELF_DATA))
2934 {
2935 free (ret);
2936 return NULL;
2937 }
2938
2939 ret->plt_header_size = PLT_ENTRY_SIZE;
2940 ret->plt0_entry = elfNN_aarch64_small_plt0_entry;
2941 ret->plt_entry_size = PLT_SMALL_ENTRY_SIZE;
2942 ret->plt_entry = elfNN_aarch64_small_plt_entry;
2943 ret->tlsdesc_plt_entry_size = PLT_TLSDESC_ENTRY_SIZE;
2944 ret->obfd = abfd;
2945 ret->root.tlsdesc_got = (bfd_vma) - 1;
2946
2947 if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc,
2948 sizeof (struct elf_aarch64_stub_hash_entry)))
2949 {
2950 _bfd_elf_link_hash_table_free (abfd);
2951 return NULL;
2952 }
2953
2954 ret->loc_hash_table = htab_try_create (1024,
2955 elfNN_aarch64_local_htab_hash,
2956 elfNN_aarch64_local_htab_eq,
2957 NULL);
2958 ret->loc_hash_memory = objalloc_create ();
2959 if (!ret->loc_hash_table || !ret->loc_hash_memory)
2960 {
2961 elfNN_aarch64_link_hash_table_free (abfd);
2962 return NULL;
2963 }
2964 ret->root.root.hash_table_free = elfNN_aarch64_link_hash_table_free;
2965
2966 return &ret->root.root;
2967 }
2968
2969 /* Perform relocation R_TYPE. Returns TRUE upon success, FALSE otherwise. */
2970
2971 static bool
2972 aarch64_relocate (unsigned int r_type, bfd *input_bfd, asection *input_section,
2973 bfd_vma offset, bfd_vma value)
2974 {
2975 reloc_howto_type *howto;
2976 bfd_vma place;
2977
2978 howto = elfNN_aarch64_howto_from_type (input_bfd, r_type);
2979 place = (input_section->output_section->vma + input_section->output_offset
2980 + offset);
2981
2982 r_type = elfNN_aarch64_bfd_reloc_from_type (input_bfd, r_type);
2983 value = _bfd_aarch64_elf_resolve_relocation (input_bfd, r_type, place,
2984 value, 0, false);
2985 return _bfd_aarch64_elf_put_addend (input_bfd,
2986 input_section->contents + offset, r_type,
2987 howto, value) == bfd_reloc_ok;
2988 }
2989
2990 /* Determine the type of stub needed, if any, for a call. */
2991
2992 static enum elf_aarch64_stub_type
2993 aarch64_type_of_stub (asection *input_sec,
2994 const Elf_Internal_Rela *rel,
2995 asection *sym_sec,
2996 unsigned char st_type,
2997 bfd_vma destination)
2998 {
2999 bfd_vma location;
3000 bfd_signed_vma branch_offset;
3001 unsigned int r_type;
3002 enum elf_aarch64_stub_type stub_type = aarch64_stub_none;
3003
3004 if (st_type != STT_FUNC
3005 && (sym_sec == input_sec))
3006 return stub_type;
3007
3008 /* Determine where the call point is. */
3009 location = (input_sec->output_offset
3010 + input_sec->output_section->vma + rel->r_offset);
3011
3012 branch_offset = (bfd_signed_vma) (destination - location);
3013
3014 r_type = ELFNN_R_TYPE (rel->r_info);
3015
3016 /* We don't want to redirect any old unconditional jump in this way,
3017 only one which is being used for a sibcall, where it is
3018 acceptable for the IP0 and IP1 registers to be clobbered. */
3019 if ((r_type == AARCH64_R (CALL26) || r_type == AARCH64_R (JUMP26))
3020 && (branch_offset > AARCH64_MAX_FWD_BRANCH_OFFSET
3021 || branch_offset < AARCH64_MAX_BWD_BRANCH_OFFSET))
3022 {
3023 stub_type = aarch64_stub_long_branch;
3024 }
3025
3026 return stub_type;
3027 }
3028
3029 /* Build a name for an entry in the stub hash table. */
3030
3031 static char *
3032 elfNN_aarch64_stub_name (const asection *input_section,
3033 const asection *sym_sec,
3034 const struct elf_aarch64_link_hash_entry *hash,
3035 const Elf_Internal_Rela *rel)
3036 {
3037 char *stub_name;
3038 bfd_size_type len;
3039
3040 if (hash)
3041 {
3042 len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 16 + 1;
3043 stub_name = bfd_malloc (len);
3044 if (stub_name != NULL)
3045 snprintf (stub_name, len, "%08x_%s+%" PRIx64,
3046 (unsigned int) input_section->id,
3047 hash->root.root.root.string,
3048 (uint64_t) rel->r_addend);
3049 }
3050 else
3051 {
3052 len = 8 + 1 + 8 + 1 + 8 + 1 + 16 + 1;
3053 stub_name = bfd_malloc (len);
3054 if (stub_name != NULL)
3055 snprintf (stub_name, len, "%08x_%x:%x+%" PRIx64,
3056 (unsigned int) input_section->id,
3057 (unsigned int) sym_sec->id,
3058 (unsigned int) ELFNN_R_SYM (rel->r_info),
3059 (uint64_t) rel->r_addend);
3060 }
3061
3062 return stub_name;
3063 }
3064
3065 /* Return TRUE if symbol H should be hashed in the `.gnu.hash' section. For
3066 executable PLT slots where the executable never takes the address of those
3067 functions, the function symbols are not added to the hash table. */
3068
3069 static bool
3070 elf_aarch64_hash_symbol (struct elf_link_hash_entry *h)
3071 {
3072 if (h->plt.offset != (bfd_vma) -1
3073 && !h->def_regular
3074 && !h->pointer_equality_needed)
3075 return false;
3076
3077 return _bfd_elf_hash_symbol (h);
3078 }
3079
3080
3081 /* Look up an entry in the stub hash. Stub entries are cached because
3082 creating the stub name takes a bit of time. */
3083
3084 static struct elf_aarch64_stub_hash_entry *
3085 elfNN_aarch64_get_stub_entry (const asection *input_section,
3086 const asection *sym_sec,
3087 struct elf_link_hash_entry *hash,
3088 const Elf_Internal_Rela *rel,
3089 struct elf_aarch64_link_hash_table *htab)
3090 {
3091 struct elf_aarch64_stub_hash_entry *stub_entry;
3092 struct elf_aarch64_link_hash_entry *h =
3093 (struct elf_aarch64_link_hash_entry *) hash;
3094 const asection *id_sec;
3095
3096 if ((input_section->flags & SEC_CODE) == 0)
3097 return NULL;
3098
3099 /* If this input section is part of a group of sections sharing one
3100 stub section, then use the id of the first section in the group.
3101 Stub names need to include a section id, as there may well be
3102 more than one stub used to reach, say, printf, and we need to
3103 distinguish between them. */
3104 id_sec = htab->stub_group[input_section->id].link_sec;
3105
3106 if (h != NULL && h->stub_cache != NULL
3107 && h->stub_cache->h == h && h->stub_cache->id_sec == id_sec)
3108 {
3109 stub_entry = h->stub_cache;
3110 }
3111 else
3112 {
3113 char *stub_name;
3114
3115 stub_name = elfNN_aarch64_stub_name (id_sec, sym_sec, h, rel);
3116 if (stub_name == NULL)
3117 return NULL;
3118
3119 stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table,
3120 stub_name, false, false);
3121 if (h != NULL)
3122 h->stub_cache = stub_entry;
3123
3124 free (stub_name);
3125 }
3126
3127 return stub_entry;
3128 }
3129
3130
3131 /* Create a stub section. */
3132
3133 static asection *
3134 _bfd_aarch64_create_stub_section (asection *section,
3135 struct elf_aarch64_link_hash_table *htab)
3136 {
3137 size_t namelen;
3138 bfd_size_type len;
3139 char *s_name;
3140
3141 namelen = strlen (section->name);
3142 len = namelen + sizeof (STUB_SUFFIX);
3143 s_name = bfd_alloc (htab->stub_bfd, len);
3144 if (s_name == NULL)
3145 return NULL;
3146
3147 memcpy (s_name, section->name, namelen);
3148 memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
3149 return (*htab->add_stub_section) (s_name, section);
3150 }
3151
3152
3153 /* Find or create a stub section for a link section.
3154
3155 Find or create the stub section used to collect stubs attached to
3156 the specified link section. */
3157
3158 static asection *
3159 _bfd_aarch64_get_stub_for_link_section (asection *link_section,
3160 struct elf_aarch64_link_hash_table *htab)
3161 {
3162 if (htab->stub_group[link_section->id].stub_sec == NULL)
3163 htab->stub_group[link_section->id].stub_sec
3164 = _bfd_aarch64_create_stub_section (link_section, htab);
3165 return htab->stub_group[link_section->id].stub_sec;
3166 }
3167
3168
3169 /* Find or create a stub section in the stub group for an input
3170 section. */
3171
3172 static asection *
3173 _bfd_aarch64_create_or_find_stub_sec (asection *section,
3174 struct elf_aarch64_link_hash_table *htab)
3175 {
3176 asection *link_sec = htab->stub_group[section->id].link_sec;
3177 return _bfd_aarch64_get_stub_for_link_section (link_sec, htab);
3178 }
3179
3180
3181 /* Add a new stub entry in the stub group associated with an input
3182 section to the stub hash. Not all fields of the new stub entry are
3183 initialised. */
3184
3185 static struct elf_aarch64_stub_hash_entry *
3186 _bfd_aarch64_add_stub_entry_in_group (const char *stub_name,
3187 asection *section,
3188 struct elf_aarch64_link_hash_table *htab)
3189 {
3190 asection *link_sec;
3191 asection *stub_sec;
3192 struct elf_aarch64_stub_hash_entry *stub_entry;
3193
3194 link_sec = htab->stub_group[section->id].link_sec;
3195 stub_sec = _bfd_aarch64_create_or_find_stub_sec (section, htab);
3196
3197 /* Enter this entry into the linker stub hash table. */
3198 stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table, stub_name,
3199 true, false);
3200 if (stub_entry == NULL)
3201 {
3202 /* xgettext:c-format */
3203 _bfd_error_handler (_("%pB: cannot create stub entry %s"),
3204 section->owner, stub_name);
3205 return NULL;
3206 }
3207
3208 stub_entry->stub_sec = stub_sec;
3209 stub_entry->stub_offset = 0;
3210 stub_entry->id_sec = link_sec;
3211
3212 return stub_entry;
3213 }
3214
3215 /* Add a new stub entry in the final stub section to the stub hash.
3216 Not all fields of the new stub entry are initialised. */
3217
3218 static struct elf_aarch64_stub_hash_entry *
3219 _bfd_aarch64_add_stub_entry_after (const char *stub_name,
3220 asection *link_section,
3221 struct elf_aarch64_link_hash_table *htab)
3222 {
3223 asection *stub_sec;
3224 struct elf_aarch64_stub_hash_entry *stub_entry;
3225
3226 stub_sec = NULL;
3227 /* Only create the actual stub if we will end up needing it. */
3228 if (htab->fix_erratum_843419 & ERRAT_ADRP)
3229 stub_sec = _bfd_aarch64_get_stub_for_link_section (link_section, htab);
3230 stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table, stub_name,
3231 true, false);
3232 if (stub_entry == NULL)
3233 {
3234 _bfd_error_handler (_("cannot create stub entry %s"), stub_name);
3235 return NULL;
3236 }
3237
3238 stub_entry->stub_sec = stub_sec;
3239 stub_entry->stub_offset = 0;
3240 stub_entry->id_sec = link_section;
3241
3242 return stub_entry;
3243 }
3244
3245
3246 static bool
3247 aarch64_build_one_stub (struct bfd_hash_entry *gen_entry,
3248 void *in_arg)
3249 {
3250 struct elf_aarch64_stub_hash_entry *stub_entry;
3251 asection *stub_sec;
3252 bfd *stub_bfd;
3253 bfd_byte *loc;
3254 bfd_vma sym_value;
3255 bfd_vma veneered_insn_loc;
3256 bfd_vma veneer_entry_loc;
3257 bfd_signed_vma branch_offset = 0;
3258 unsigned int template_size;
3259 unsigned int pad_size = 0;
3260 const uint32_t *template;
3261 unsigned int i;
3262 struct bfd_link_info *info;
3263 struct elf_aarch64_link_hash_table *htab;
3264
3265 /* Massage our args to the form they really have. */
3266 stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;
3267
3268 info = (struct bfd_link_info *) in_arg;
3269 htab = elf_aarch64_hash_table (info);
3270
3271 /* Fail if the target section could not be assigned to an output
3272 section. The user should fix his linker script. */
3273 if (stub_entry->target_section->output_section == NULL
3274 && info->non_contiguous_regions)
3275 info->callbacks->einfo (_("%F%P: Could not assign `%pA' to an output section. "
3276 "Retry without "
3277 "--enable-non-contiguous-regions.\n"),
3278 stub_entry->target_section);
3279
3280 stub_sec = stub_entry->stub_sec;
3281
3282 /* The layout must not change when a stub may be the target of another. */
3283 if (htab->has_double_stub)
3284 BFD_ASSERT (stub_entry->stub_offset == stub_sec->size);
3285
3286 /* Make a note of the offset within the stubs for this entry. */
3287 stub_entry->stub_offset = stub_sec->size;
3288 loc = stub_sec->contents + stub_entry->stub_offset;
3289
3290 stub_bfd = stub_sec->owner;
3291
3292 /* This is the address of the stub destination. */
3293 sym_value = (stub_entry->target_value
3294 + stub_entry->target_section->output_offset
3295 + stub_entry->target_section->output_section->vma);
3296
3297 if (stub_entry->stub_type == aarch64_stub_long_branch)
3298 {
3299 bfd_vma place = (stub_entry->stub_offset + stub_sec->output_section->vma
3300 + stub_sec->output_offset);
3301
3302 /* See if we can relax the stub. */
3303 if (aarch64_valid_for_adrp_p (sym_value, place))
3304 {
3305 stub_entry->stub_type = aarch64_stub_adrp_branch;
3306
3307 /* Avoid the relaxation changing the layout. */
3308 if (htab->has_double_stub)
3309 pad_size = sizeof (aarch64_long_branch_stub)
3310 - sizeof (aarch64_adrp_branch_stub);
3311 }
3312 }
3313
3314 switch (stub_entry->stub_type)
3315 {
3316 case aarch64_stub_adrp_branch:
3317 template = aarch64_adrp_branch_stub;
3318 template_size = sizeof (aarch64_adrp_branch_stub);
3319 break;
3320 case aarch64_stub_long_branch:
3321 template = aarch64_long_branch_stub;
3322 template_size = sizeof (aarch64_long_branch_stub);
3323 break;
3324 case aarch64_stub_bti_direct_branch:
3325 template = aarch64_bti_direct_branch_stub;
3326 template_size = sizeof (aarch64_bti_direct_branch_stub);
3327 break;
3328 case aarch64_stub_erratum_835769_veneer:
3329 template = aarch64_erratum_835769_stub;
3330 template_size = sizeof (aarch64_erratum_835769_stub);
3331 break;
3332 case aarch64_stub_erratum_843419_veneer:
3333 template = aarch64_erratum_843419_stub;
3334 template_size = sizeof (aarch64_erratum_843419_stub);
3335 break;
3336 default:
3337 abort ();
3338 }
3339
3340 for (i = 0; i < (template_size / sizeof template[0]); i++)
3341 {
3342 bfd_putl32 (template[i], loc);
3343 loc += 4;
3344 }
3345
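/* Keep the stub section size a multiple of 8 so that following stubs,
   and literal data such as the long-branch stub's address word, stay
   naturally aligned.  */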
3346 template_size += pad_size;
3347 template_size = (template_size + 7) & ~7;
3348 stub_sec->size += template_size;
3349
3350 switch (stub_entry->stub_type)
3351 {
3352 case aarch64_stub_adrp_branch:
3353 if (!aarch64_relocate (AARCH64_R (ADR_PREL_PG_HI21), stub_bfd, stub_sec,
3354 stub_entry->stub_offset, sym_value))
3355 /* The stub would not have been relaxed if the offset was out
3356 of range. */
3357 BFD_FAIL ();
3358
3359 if (!aarch64_relocate (AARCH64_R (ADD_ABS_LO12_NC), stub_bfd, stub_sec,
3360 stub_entry->stub_offset + 4, sym_value))
3361 BFD_FAIL ();
3362 break;
3363
3364 case aarch64_stub_long_branch:
3365 /* We want the value relative to the address 12 bytes back from the
3366 value itself. */
3367 if (!aarch64_relocate (AARCH64_R (PRELNN), stub_bfd, stub_sec,
3368 stub_entry->stub_offset + 16, sym_value + 12))
3369 BFD_FAIL ();
3370 break;
3371
3372 case aarch64_stub_bti_direct_branch:
3373 if (!aarch64_relocate (AARCH64_R (JUMP26), stub_bfd, stub_sec,
3374 stub_entry->stub_offset + 4, sym_value))
3375 BFD_FAIL ();
3376 break;
3377
3378 case aarch64_stub_erratum_835769_veneer:
3379 veneered_insn_loc = stub_entry->target_section->output_section->vma
3380 + stub_entry->target_section->output_offset
3381 + stub_entry->target_value;
3382 veneer_entry_loc = stub_entry->stub_sec->output_section->vma
3383 + stub_entry->stub_sec->output_offset
3384 + stub_entry->stub_offset;
3385 branch_offset = veneered_insn_loc - veneer_entry_loc;
3386 branch_offset >>= 2;
3387 branch_offset &= 0x3ffffff;
3388 bfd_putl32 (stub_entry->veneered_insn,
3389 stub_sec->contents + stub_entry->stub_offset);
3390 bfd_putl32 (template[1] | branch_offset,
3391 stub_sec->contents + stub_entry->stub_offset + 4);
3392 break;
3393
3394 case aarch64_stub_erratum_843419_veneer:
3395 if (!aarch64_relocate (AARCH64_R (JUMP26), stub_bfd, stub_sec,
3396 stub_entry->stub_offset + 4, sym_value + 4))
3397 BFD_FAIL ();
3398 break;
3399
3400 default:
3401 abort ();
3402 }
3403
3404 return true;
3405 }
3406
3407 /* As above, but don't actually build the stub. Just bump offset so
3408 we know stub section sizes and record the offset for each stub so
3409 a stub can target another stub (needed for BTI direct branch stub). */
3410
3411 static bool
3412 aarch64_size_one_stub (struct bfd_hash_entry *gen_entry, void *in_arg)
3413 {
3414 struct elf_aarch64_stub_hash_entry *stub_entry;
3415 struct elf_aarch64_link_hash_table *htab;
3416 int size;
3417
3418 /* Massage our args to the form they really have. */
3419 stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;
3420 htab = (struct elf_aarch64_link_hash_table *) in_arg;
3421
3422 switch (stub_entry->stub_type)
3423 {
3424 case aarch64_stub_adrp_branch:
3425 size = sizeof (aarch64_adrp_branch_stub);
3426 break;
3427 case aarch64_stub_long_branch:
3428 size = sizeof (aarch64_long_branch_stub);
3429 break;
3430 case aarch64_stub_bti_direct_branch:
3431 size = sizeof (aarch64_bti_direct_branch_stub);
3432 break;
3433 case aarch64_stub_erratum_835769_veneer:
3434 size = sizeof (aarch64_erratum_835769_stub);
3435 break;
3436 case aarch64_stub_erratum_843419_veneer:
3437 {
3438 if (htab->fix_erratum_843419 == ERRAT_ADR)
3439 return true;
3440 size = sizeof (aarch64_erratum_843419_stub);
3441 }
3442 break;
3443 default:
3444 abort ();
3445 }
3446
3447 size = (size + 7) & ~7;
3448 stub_entry->stub_offset = stub_entry->stub_sec->size;
3449 stub_entry->stub_sec->size += size;
3450 return true;
3451 }
3452
3453 /* Output is BTI compatible. */
3454
3455 static bool
3456 elf_aarch64_bti_p (bfd *output_bfd)
3457 {
3458 uint32_t prop = elf_aarch64_tdata (output_bfd)->gnu_and_prop;
3459 return prop & GNU_PROPERTY_AARCH64_FEATURE_1_BTI;
3460 }
3461
3462 /* External entry points for sizing and building linker stubs. */
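/* A rough sketch of how the linker is expected to drive these entry
   points (the exact call sites live in ld's AArch64 emulation code):

     elfNN_aarch64_setup_section_lists (output_bfd, info);
     ... one elfNN_aarch64_next_input_section (info, isec) per input ...
     elfNN_aarch64_size_stubs (output_bfd, stub_bfd, info, group_size,
                               add_stub_section, layout_sections_again);
     elfNN_aarch64_build_stubs (info);    after final layout  */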
3463
3464 /* Set up various things so that we can make a list of input sections
3465 for each output section included in the link. Returns -1 on error,
3466 0 when no stubs will be needed, and 1 on success. */
3467
3468 int
3469 elfNN_aarch64_setup_section_lists (bfd *output_bfd,
3470 struct bfd_link_info *info)
3471 {
3472 bfd *input_bfd;
3473 unsigned int bfd_count;
3474 unsigned int top_id, top_index;
3475 asection *section;
3476 asection **input_list, **list;
3477 size_t amt;
3478 struct elf_aarch64_link_hash_table *htab =
3479 elf_aarch64_hash_table (info);
3480
3481 if (!is_elf_hash_table (&htab->root.root))
3482 return 0;
3483
3484 /* Count the number of input BFDs and find the top input section id. */
3485 for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0;
3486 input_bfd != NULL; input_bfd = input_bfd->link.next)
3487 {
3488 bfd_count += 1;
3489 for (section = input_bfd->sections;
3490 section != NULL; section = section->next)
3491 {
3492 if (top_id < section->id)
3493 top_id = section->id;
3494 }
3495 }
3496 htab->bfd_count = bfd_count;
3497
3498 amt = sizeof (struct map_stub) * (top_id + 1);
3499 htab->stub_group = bfd_zmalloc (amt);
3500 if (htab->stub_group == NULL)
3501 return -1;
3502
3503 /* We can't use output_bfd->section_count here to find the top output
3504 section index as some sections may have been removed, and
3505 _bfd_strip_section_from_output doesn't renumber the indices. */
3506 for (section = output_bfd->sections, top_index = 0;
3507 section != NULL; section = section->next)
3508 {
3509 if (top_index < section->index)
3510 top_index = section->index;
3511 }
3512
3513 htab->top_index = top_index;
3514 amt = sizeof (asection *) * (top_index + 1);
3515 input_list = bfd_malloc (amt);
3516 htab->input_list = input_list;
3517 if (input_list == NULL)
3518 return -1;
3519
3520 /* For sections we aren't interested in, mark their entries with a
3521 value we can check later. */
3522 list = input_list + top_index;
3523 do
3524 *list = bfd_abs_section_ptr;
3525 while (list-- != input_list);
3526
3527 for (section = output_bfd->sections;
3528 section != NULL; section = section->next)
3529 {
3530 if ((section->flags & SEC_CODE) != 0)
3531 input_list[section->index] = NULL;
3532 }
3533
3534 return 1;
3535 }
3536
3537 /* Used by elfNN_aarch64_next_input_section and group_sections. */
3538 #define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
3539
3540 /* The linker repeatedly calls this function for each input section,
3541 in the order that input sections are linked into output sections.
3542 Build lists of input sections to determine groupings between which
3543 we may insert linker stubs. */
3544
3545 void
3546 elfNN_aarch64_next_input_section (struct bfd_link_info *info, asection *isec)
3547 {
3548 struct elf_aarch64_link_hash_table *htab =
3549 elf_aarch64_hash_table (info);
3550
3551 if (isec->output_section->index <= htab->top_index)
3552 {
3553 asection **list = htab->input_list + isec->output_section->index;
3554
3555 if (*list != bfd_abs_section_ptr && (isec->flags & SEC_CODE) != 0)
3556 {
3557 /* Steal the link_sec pointer for our list. */
3558 /* This happens to make the list in reverse order,
3559 which is what we want. */
3560 PREV_SEC (isec) = *list;
3561 *list = isec;
3562 }
3563 }
3564 }
3565
3566 /* See whether we can group stub sections together. Grouping stub
3567 sections may result in fewer stubs. More importantly, we need to
3568 put all .init* and .fini* stubs at the beginning of the .init or
3569 .fini output sections respectively, because glibc splits the
3570 _init and _fini functions into multiple parts. Putting a stub in
3571 the middle of a function is not a good idea. */
3572
3573 static void
3574 group_sections (struct elf_aarch64_link_hash_table *htab,
3575 bfd_size_type stub_group_size,
3576 bool stubs_always_after_branch)
3577 {
3578 asection **list = htab->input_list;
3579
3580 do
3581 {
3582 asection *tail = *list;
3583 asection *head;
3584
3585 if (tail == bfd_abs_section_ptr)
3586 continue;
3587
3588 /* Reverse the list: we must avoid placing stubs at the
3589 beginning of the section because the beginning of the text
3590 section may be required for an interrupt vector in bare metal
3591 code. */
3592 #define NEXT_SEC PREV_SEC
3593 head = NULL;
3594 while (tail != NULL)
3595 {
3596 /* Pop from tail. */
3597 asection *item = tail;
3598 tail = PREV_SEC (item);
3599
3600 /* Push on head. */
3601 NEXT_SEC (item) = head;
3602 head = item;
3603 }
3604
3605 while (head != NULL)
3606 {
3607 asection *curr;
3608 asection *next;
3609 bfd_vma stub_group_start = head->output_offset;
3610 bfd_vma end_of_next;
3611
3612 curr = head;
3613 while (NEXT_SEC (curr) != NULL)
3614 {
3615 next = NEXT_SEC (curr);
3616 end_of_next = next->output_offset + next->size;
3617 if (end_of_next - stub_group_start >= stub_group_size)
3618 /* End of NEXT is too far from start, so stop. */
3619 break;
3620 /* Add NEXT to the group. */
3621 curr = next;
3622 }
3623
3624 /* OK, the size from the start to the start of CURR is less
3625 than stub_group_size and thus can be handled by one stub
3626 section. (Or the head section is itself larger than
3627 stub_group_size, in which case we may be toast.)
3628 We should really be keeping track of the total size of
3629 stubs added here, as stubs contribute to the final output
3630 section size. */
3631 do
3632 {
3633 next = NEXT_SEC (head);
3634 /* Set up this stub group. */
3635 htab->stub_group[head->id].link_sec = curr;
3636 }
3637 while (head != curr && (head = next) != NULL);
3638
3639 /* But wait, there's more! Input sections up to stub_group_size
3640 bytes after the stub section can be handled by it too. */
3641 if (!stubs_always_after_branch)
3642 {
3643 stub_group_start = curr->output_offset + curr->size;
3644
3645 while (next != NULL)
3646 {
3647 end_of_next = next->output_offset + next->size;
3648 if (end_of_next - stub_group_start >= stub_group_size)
3649 /* End of NEXT is too far from stubs, so stop. */
3650 break;
3651 /* Add NEXT to the stub group. */
3652 head = next;
3653 next = NEXT_SEC (head);
3654 htab->stub_group[head->id].link_sec = curr;
3655 }
3656 }
3657 head = next;
3658 }
3659 }
3660 while (list++ != htab->input_list + htab->top_index);
3661
3662 free (htab->input_list);
3663 }
3664
3665 #undef PREV_SEC
3666 #undef NEXT_SEC
3667
3668 #define AARCH64_HINT(insn) (((insn) & 0xfffff01f) == 0xd503201f)
3669 #define AARCH64_PACIASP 0xd503233f
3670 #define AARCH64_PACIBSP 0xd503237f
3671 #define AARCH64_BTI_C 0xd503245f
3672 #define AARCH64_BTI_J 0xd503249f
3673 #define AARCH64_BTI_JC 0xd50324df
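/* These are all encodings in the HINT space matched by AARCH64_HINT above,
   e.g. BTI C is HINT #34 (0xd503201f | 34 << 5 == 0xd503245f) and PACIASP
   is HINT #25, so they execute as NOPs on cores without BTI/PAuth.  */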
3674
3675 /* True if the inserted stub does not break BTI compatibility. */
3676
3677 static bool
3678 aarch64_bti_stub_p (bfd *input_bfd,
3679 struct elf_aarch64_stub_hash_entry *stub_entry)
3680 {
3681 /* Stubs without an indirect branch are BTI compatible. */
3682 if (stub_entry->stub_type != aarch64_stub_adrp_branch
3683 && stub_entry->stub_type != aarch64_stub_long_branch)
3684 return true;
3685
3686 /* Return true if the target instruction is compatible with BR x16. */
3687
3688 asection *section = stub_entry->target_section;
3689 bfd_byte loc[4];
3690 file_ptr off = stub_entry->target_value;
3691 bfd_size_type count = sizeof (loc);
3692
3693 if (!bfd_get_section_contents (input_bfd, section, loc, off, count))
3694 return false;
3695
3696 uint32_t insn = bfd_getl32 (loc);
3697 if (!AARCH64_HINT (insn))
3698 return false;
3699 return insn == AARCH64_BTI_C
3700 || insn == AARCH64_PACIASP
3701 || insn == AARCH64_BTI_JC
3702 || insn == AARCH64_BTI_J
3703 || insn == AARCH64_PACIBSP;
3704 }
3705
3706 #define AARCH64_BITS(x, pos, n) (((x) >> (pos)) & ((1 << (n)) - 1))
3707
3708 #define AARCH64_RT(insn) AARCH64_BITS (insn, 0, 5)
3709 #define AARCH64_RT2(insn) AARCH64_BITS (insn, 10, 5)
3710 #define AARCH64_RA(insn) AARCH64_BITS (insn, 10, 5)
3711 #define AARCH64_RD(insn) AARCH64_BITS (insn, 0, 5)
3712 #define AARCH64_RN(insn) AARCH64_BITS (insn, 5, 5)
3713 #define AARCH64_RM(insn) AARCH64_BITS (insn, 16, 5)
3714
3715 #define AARCH64_MAC(insn) (((insn) & 0xff000000) == 0x9b000000)
3716 #define AARCH64_BIT(insn, n) AARCH64_BITS (insn, n, 1)
3717 #define AARCH64_OP31(insn) AARCH64_BITS (insn, 21, 3)
3718 #define AARCH64_ZR 0x1f
3719
3720 /* All ld/st ops. See C4-182 of the ARM ARM. The encoding spaces for
3721 LD_PCREL, LDST_RO, LDST_UI and LDST_UIMM cover prefetch ops. */
3722
3723 #define AARCH64_LD(insn) (AARCH64_BIT (insn, 22) == 1)
3724 #define AARCH64_LDST(insn) (((insn) & 0x0a000000) == 0x08000000)
3725 #define AARCH64_LDST_EX(insn) (((insn) & 0x3f000000) == 0x08000000)
3726 #define AARCH64_LDST_PCREL(insn) (((insn) & 0x3b000000) == 0x18000000)
3727 #define AARCH64_LDST_NAP(insn) (((insn) & 0x3b800000) == 0x28000000)
3728 #define AARCH64_LDSTP_PI(insn) (((insn) & 0x3b800000) == 0x28800000)
3729 #define AARCH64_LDSTP_O(insn) (((insn) & 0x3b800000) == 0x29000000)
3730 #define AARCH64_LDSTP_PRE(insn) (((insn) & 0x3b800000) == 0x29800000)
3731 #define AARCH64_LDST_UI(insn) (((insn) & 0x3b200c00) == 0x38000000)
3732 #define AARCH64_LDST_PIIMM(insn) (((insn) & 0x3b200c00) == 0x38000400)
3733 #define AARCH64_LDST_U(insn) (((insn) & 0x3b200c00) == 0x38000800)
3734 #define AARCH64_LDST_PREIMM(insn) (((insn) & 0x3b200c00) == 0x38000c00)
3735 #define AARCH64_LDST_RO(insn) (((insn) & 0x3b200c00) == 0x38200800)
3736 #define AARCH64_LDST_UIMM(insn) (((insn) & 0x3b000000) == 0x39000000)
3737 #define AARCH64_LDST_SIMD_M(insn) (((insn) & 0xbfbf0000) == 0x0c000000)
3738 #define AARCH64_LDST_SIMD_M_PI(insn) (((insn) & 0xbfa00000) == 0x0c800000)
3739 #define AARCH64_LDST_SIMD_S(insn) (((insn) & 0xbf9f0000) == 0x0d000000)
3740 #define AARCH64_LDST_SIMD_S_PI(insn) (((insn) & 0xbf800000) == 0x0d800000)
3741
3742 /* Classify an INSN if it is indeed a load/store.
3743
3744 Return TRUE if INSN is an LD/ST instruction, otherwise return FALSE.
3745
3746 For scalar LD/ST instructions PAIR is FALSE, RT is returned and RT2
3747 is set equal to RT.
3748
3749 For LD/ST pair instructions PAIR is TRUE, RT and RT2 are returned. */
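/* For illustration (a sketch, not exhaustive): "ldp x1, x2, [x3]" yields
   TRUE with *pair = TRUE, *load = TRUE, *rt = 1, *rt2 = 2, while
   "str w5, [x6, #4]" yields TRUE with *pair = FALSE, *load = FALSE and
   *rt = *rt2 = 5.  */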
3750
3751 static bool
3752 aarch64_mem_op_p (uint32_t insn, unsigned int *rt, unsigned int *rt2,
3753 bool *pair, bool *load)
3754 {
3755 uint32_t opcode;
3756 unsigned int r;
3757 uint32_t opc = 0;
3758 uint32_t v = 0;
3759 uint32_t opc_v = 0;
3760
3761 /* Bail out quickly if INSN doesn't fall into the load-store
3762 encoding space. */
3763 if (!AARCH64_LDST (insn))
3764 return false;
3765
3766 *pair = false;
3767 *load = false;
3768 if (AARCH64_LDST_EX (insn))
3769 {
3770 *rt = AARCH64_RT (insn);
3771 *rt2 = *rt;
3772 if (AARCH64_BIT (insn, 21) == 1)
3773 {
3774 *pair = true;
3775 *rt2 = AARCH64_RT2 (insn);
3776 }
3777 *load = AARCH64_LD (insn);
3778 return true;
3779 }
3780 else if (AARCH64_LDST_NAP (insn)
3781 || AARCH64_LDSTP_PI (insn)
3782 || AARCH64_LDSTP_O (insn)
3783 || AARCH64_LDSTP_PRE (insn))
3784 {
3785 *pair = true;
3786 *rt = AARCH64_RT (insn);
3787 *rt2 = AARCH64_RT2 (insn);
3788 *load = AARCH64_LD (insn);
3789 return true;
3790 }
3791 else if (AARCH64_LDST_PCREL (insn)
3792 || AARCH64_LDST_UI (insn)
3793 || AARCH64_LDST_PIIMM (insn)
3794 || AARCH64_LDST_U (insn)
3795 || AARCH64_LDST_PREIMM (insn)
3796 || AARCH64_LDST_RO (insn)
3797 || AARCH64_LDST_UIMM (insn))
3798 {
3799 *rt = AARCH64_RT (insn);
3800 *rt2 = *rt;
3801 if (AARCH64_LDST_PCREL (insn))
3802 *load = true;
3803 opc = AARCH64_BITS (insn, 22, 2);
3804 v = AARCH64_BIT (insn, 26);
3805 opc_v = opc | (v << 2);
3806 *load = (opc_v == 1 || opc_v == 2 || opc_v == 3
3807 || opc_v == 5 || opc_v == 7);
3808 return true;
3809 }
3810 else if (AARCH64_LDST_SIMD_M (insn)
3811 || AARCH64_LDST_SIMD_M_PI (insn))
3812 {
3813 *rt = AARCH64_RT (insn);
3814 *load = AARCH64_BIT (insn, 22);
3815 opcode = (insn >> 12) & 0xf;
3816 switch (opcode)
3817 {
3818 case 0:
3819 case 2:
3820 *rt2 = *rt + 3;
3821 break;
3822
3823 case 4:
3824 case 6:
3825 *rt2 = *rt + 2;
3826 break;
3827
3828 case 7:
3829 *rt2 = *rt;
3830 break;
3831
3832 case 8:
3833 case 10:
3834 *rt2 = *rt + 1;
3835 break;
3836
3837 default:
3838 return false;
3839 }
3840 return true;
3841 }
3842 else if (AARCH64_LDST_SIMD_S (insn)
3843 || AARCH64_LDST_SIMD_S_PI (insn))
3844 {
3845 *rt = AARCH64_RT (insn);
3846 r = (insn >> 21) & 1;
3847 *load = AARCH64_BIT (insn, 22);
3848 opcode = (insn >> 13) & 0x7;
3849 switch (opcode)
3850 {
3851 case 0:
3852 case 2:
3853 case 4:
3854 *rt2 = *rt + r;
3855 break;
3856
3857 case 1:
3858 case 3:
3859 case 5:
3860 *rt2 = *rt + (r == 0 ? 2 : 3);
3861 break;
3862
3863 case 6:
3864 *rt2 = *rt + r;
3865 break;
3866
3867 case 7:
3868 *rt2 = *rt + (r == 0 ? 2 : 3);
3869 break;
3870
3871 default:
3872 return false;
3873 }
3874 return true;
3875 }
3876
3877 return false;
3878 }
3879
3880 /* Return TRUE if INSN is multiply-accumulate. */
3881
3882 static bool
3883 aarch64_mlxl_p (uint32_t insn)
3884 {
3885 uint32_t op31 = AARCH64_OP31 (insn);
3886
3887 if (AARCH64_MAC (insn)
3888 && (op31 == 0 || op31 == 1 || op31 == 5)
3889 /* Exclude MUL instructions which are encoded as a multiple accumulate
3890 with RA = XZR. */
3891 && AARCH64_RA (insn) != AARCH64_ZR)
3892 return true;
3893
3894 return false;
3895 }
3896
3897 /* Some early revisions of the Cortex-A53 have an erratum (835769) whereby
3898 it is possible for a 64-bit multiply-accumulate instruction to generate an
3899 incorrect result. The details are quite complex and hard to
3900 determine statically, since branches in the code may exist in some
3901 circumstances, but all cases end with a memory (load, store, or
3902 prefetch) instruction followed immediately by the multiply-accumulate
3903 operation. We employ a linker patching technique, by moving the potentially
3904 affected multiply-accumulate instruction into a patch region and replacing
3905 the original instruction with a branch to the patch. This function checks
3906 if INSN_1 is the memory operation followed by a multiply-accumulate
3907 operation (INSN_2). Return TRUE if an erratum sequence is found, FALSE
3908 if INSN_1 and INSN_2 are safe. */
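/* For illustration, a hypothetical sequence this predicate would flag:

     ldr  x0, [x21, #8]     <- memory operation (INSN_1)
     madd x3, x1, x2, x4    <- 64-bit multiply-accumulate (INSN_2)

   whereas

     ldr  x1, [x21, #8]
     madd x3, x1, x2, x4

   loads a register consumed by the multiply-accumulate (a true RAW
   dependency) and is therefore treated as safe below.  */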
3909
3910 static bool
3911 aarch64_erratum_sequence (uint32_t insn_1, uint32_t insn_2)
3912 {
3913 uint32_t rt;
3914 uint32_t rt2;
3915 uint32_t rn;
3916 uint32_t rm;
3917 uint32_t ra;
3918 bool pair;
3919 bool load;
3920
3921 if (aarch64_mlxl_p (insn_2)
3922 && aarch64_mem_op_p (insn_1, &rt, &rt2, &pair, &load))
3923 {
3924 /* Any SIMD memory op is independent of the subsequent MLA
3925 by definition of the erratum. */
3926 if (AARCH64_BIT (insn_1, 26))
3927 return true;
3928
3929 /* If not SIMD, check for integer memory ops and MLA relationship. */
3930 rn = AARCH64_RN (insn_2);
3931 ra = AARCH64_RA (insn_2);
3932 rm = AARCH64_RM (insn_2);
3933
3934 /* If this is a load and there's a true (RAW) dependency, we are safe
3935 and this is not an erratum sequence. */
3936 if (load &&
3937 (rt == rn || rt == rm || rt == ra
3938 || (pair && (rt2 == rn || rt2 == rm || rt2 == ra))))
3939 return false;
3940
3941 /* We conservatively put out stubs for all other cases (including
3942 writebacks). */
3943 return true;
3944 }
3945
3946 return false;
3947 }
3948
3949 /* Used to order a list of mapping symbols by address. */
3950
3951 static int
3952 elf_aarch64_compare_mapping (const void *a, const void *b)
3953 {
3954 const elf_aarch64_section_map *amap = (const elf_aarch64_section_map *) a;
3955 const elf_aarch64_section_map *bmap = (const elf_aarch64_section_map *) b;
3956
3957 if (amap->vma > bmap->vma)
3958 return 1;
3959 else if (amap->vma < bmap->vma)
3960 return -1;
3961 else if (amap->type > bmap->type)
3962 /* Ensure results do not depend on the host qsort for objects with
3963 multiple mapping symbols at the same address by sorting on type
3964 after vma. */
3965 return 1;
3966 else if (amap->type < bmap->type)
3967 return -1;
3968 else
3969 return 0;
3970 }
3971
3972
3973 static char *
3974 _bfd_aarch64_erratum_835769_stub_name (unsigned num_fixes)
3975 {
3976 char *stub_name = (char *) bfd_malloc
3977 (strlen ("__erratum_835769_veneer_") + 16);
3978 if (stub_name != NULL)
3979 sprintf (stub_name,"__erratum_835769_veneer_%d", num_fixes);
3980 return stub_name;
3981 }
3982
3983 /* Scan for Cortex-A53 erratum 835769 sequence.
3984
3985 Return TRUE on success, FALSE on abnormal termination. */
3986
3987 static bool
3988 _bfd_aarch64_erratum_835769_scan (bfd *input_bfd,
3989 struct bfd_link_info *info,
3990 unsigned int *num_fixes_p)
3991 {
3992 asection *section;
3993 struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info);
3994 unsigned int num_fixes = *num_fixes_p;
3995
3996 if (htab == NULL)
3997 return true;
3998
3999 for (section = input_bfd->sections;
4000 section != NULL;
4001 section = section->next)
4002 {
4003 bfd_byte *contents = NULL;
4004 struct _aarch64_elf_section_data *sec_data;
4005 unsigned int span;
4006
4007 if (elf_section_type (section) != SHT_PROGBITS
4008 || (elf_section_flags (section) & SHF_EXECINSTR) == 0
4009 || (section->flags & SEC_EXCLUDE) != 0
4010 || (section->sec_info_type == SEC_INFO_TYPE_JUST_SYMS)
4011 || (section->output_section == bfd_abs_section_ptr))
4012 continue;
4013
4014 if (elf_section_data (section)->this_hdr.contents != NULL)
4015 contents = elf_section_data (section)->this_hdr.contents;
4016 else if (! bfd_malloc_and_get_section (input_bfd, section, &contents))
4017 return false;
4018
4019 sec_data = elf_aarch64_section_data (section);
4020
4021 if (sec_data->mapcount)
4022 qsort (sec_data->map, sec_data->mapcount,
4023 sizeof (elf_aarch64_section_map), elf_aarch64_compare_mapping);
4024
4025 for (span = 0; span < sec_data->mapcount; span++)
4026 {
4027 unsigned int span_start = sec_data->map[span].vma;
4028 unsigned int span_end = ((span == sec_data->mapcount - 1)
4029 ? sec_data->map[0].vma + section->size
4030 : sec_data->map[span + 1].vma);
4031 unsigned int i;
4032 char span_type = sec_data->map[span].type;
4033
4034 if (span_type == 'd')
4035 continue;
4036
4037 for (i = span_start; i + 4 < span_end; i += 4)
4038 {
4039 uint32_t insn_1 = bfd_getl32 (contents + i);
4040 uint32_t insn_2 = bfd_getl32 (contents + i + 4);
4041
4042 if (aarch64_erratum_sequence (insn_1, insn_2))
4043 {
4044 struct elf_aarch64_stub_hash_entry *stub_entry;
4045 char *stub_name = _bfd_aarch64_erratum_835769_stub_name (num_fixes);
4046 if (! stub_name)
4047 return false;
4048
4049 stub_entry = _bfd_aarch64_add_stub_entry_in_group (stub_name,
4050 section,
4051 htab);
4052 if (! stub_entry)
4053 return false;
4054
4055 stub_entry->stub_type = aarch64_stub_erratum_835769_veneer;
4056 stub_entry->target_section = section;
4057 stub_entry->target_value = i + 4;
4058 stub_entry->veneered_insn = insn_2;
4059 stub_entry->output_name = stub_name;
4060 num_fixes++;
4061 }
4062 }
4063 }
4064 if (elf_section_data (section)->this_hdr.contents == NULL)
4065 free (contents);
4066 }
4067
4068 *num_fixes_p = num_fixes;
4069
4070 return true;
4071 }
4072
4073
4074 /* Test if instruction INSN is ADRP. */
4075
4076 static bool
4077 _bfd_aarch64_adrp_p (uint32_t insn)
4078 {
4079 return ((insn & AARCH64_ADRP_OP_MASK) == AARCH64_ADRP_OP);
4080 }
4081
4082
4083 /* Helper predicate to look for cortex-a53 erratum 843419 sequence 1. */
4084
4085 static bool
4086 _bfd_aarch64_erratum_843419_sequence_p (uint32_t insn_1, uint32_t insn_2,
4087 uint32_t insn_3)
4088 {
4089 uint32_t rt;
4090 uint32_t rt2;
4091 bool pair;
4092 bool load;
4093
4094 return (aarch64_mem_op_p (insn_2, &rt, &rt2, &pair, &load)
4095 && (!pair
4096 || (pair && !load))
4097 && AARCH64_LDST_UIMM (insn_3)
4098 && AARCH64_RN (insn_3) == AARCH64_RD (insn_1));
4099 }
4100
4101
4102 /* Test for the presence of Cortex-A53 erratum 843419 instruction sequence.
4103
4104 Return TRUE if section CONTENTS at offset I contains one of the
4105 erratum 843419 sequences, otherwise return FALSE. If a sequence is
4106 seen, set P_VENEER_I to the offset of the final LOAD/STORE
4107 instruction in the sequence.
4108 */
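/* For illustration, a hypothetical instance of the flagged shape (the
   second sequence allows one unrelated instruction before the final
   load/store):

     0xffc:  adrp x0, sym               ADRP in the last two words of a page
             stp  x1, x2, [x3]          load/store (not a load writing a pair)
             ldr  x4, [x0, #:lo12:sym]  uses the ADRP result as base

   The precise conditions are those tested by
   _bfd_aarch64_erratum_843419_sequence_p above.  */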
4109
4110 static bool
4111 _bfd_aarch64_erratum_843419_p (bfd_byte *contents, bfd_vma vma,
4112 bfd_vma i, bfd_vma span_end,
4113 bfd_vma *p_veneer_i)
4114 {
4115 uint32_t insn_1 = bfd_getl32 (contents + i);
4116
4117 if (!_bfd_aarch64_adrp_p (insn_1))
4118 return false;
4119
4120 if (span_end < i + 12)
4121 return false;
4122
4123 uint32_t insn_2 = bfd_getl32 (contents + i + 4);
4124 uint32_t insn_3 = bfd_getl32 (contents + i + 8);
4125
4126 if ((vma & 0xfff) != 0xff8 && (vma & 0xfff) != 0xffc)
4127 return false;
4128
4129 if (_bfd_aarch64_erratum_843419_sequence_p (insn_1, insn_2, insn_3))
4130 {
4131 *p_veneer_i = i + 8;
4132 return true;
4133 }
4134
4135 if (span_end < i + 16)
4136 return false;
4137
4138 uint32_t insn_4 = bfd_getl32 (contents + i + 12);
4139
4140 if (_bfd_aarch64_erratum_843419_sequence_p (insn_1, insn_2, insn_4))
4141 {
4142 *p_veneer_i = i + 12;
4143 return true;
4144 }
4145
4146 return false;
4147 }
4148
4149
4150 /* Resize all stub sections. */
4151
4152 static void
4153 _bfd_aarch64_resize_stubs (struct elf_aarch64_link_hash_table *htab)
4154 {
4155 asection *section;
4156
4157 /* OK, we've added some stubs. Find out the new size of the
4158 stub sections. */
4159 for (section = htab->stub_bfd->sections;
4160 section != NULL; section = section->next)
4161 {
4162 /* Ignore non-stub sections. */
4163 if (!strstr (section->name, STUB_SUFFIX))
4164 continue;
4165
4166 /* Add space for a branch. Add 8 bytes to keep section 8 byte aligned,
4167 as long branch stubs contain a 64-bit address. */
4168 section->size = 8;
4169 }
4170
4171 bfd_hash_traverse (&htab->stub_hash_table, aarch64_size_one_stub, htab);
4172
4173 for (section = htab->stub_bfd->sections;
4174 section != NULL; section = section->next)
4175 {
4176 if (!strstr (section->name, STUB_SUFFIX))
4177 continue;
4178
4179 /* Empty stub section. */
4180 if (section->size == 8)
4181 section->size = 0;
4182
4183 /* Ensure all stub sections have a size which is a multiple of
4184 4096. This is important in order to ensure that the insertion
4185 of stub sections does not in itself move existing code around
4186 in such a way that new errata sequences are created. We only do this
4187 when the ADRP workaround is enabled. If only the ADR workaround is
4188 enabled then the stubs workaround won't ever be used. */
4189 if (htab->fix_erratum_843419 & ERRAT_ADRP)
4190 if (section->size)
4191 section->size = BFD_ALIGN (section->size, 0x1000);
4192 }
4193 }
4194
4195 /* Construct an erratum 843419 workaround stub name. */
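/* For example (illustrative numbers only), a veneer for an LD/ST at offset
   0x4c of the input section with id 0x11, owned by the bfd with id 2,
   would be named "e843419@0002_00000011_4c".  */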
4196
4197 static char *
4198 _bfd_aarch64_erratum_843419_stub_name (asection *input_section,
4199 bfd_vma offset)
4200 {
4201 const bfd_size_type len = 8 + 4 + 1 + 8 + 1 + 16 + 1;
4202 char *stub_name = bfd_malloc (len);
4203
4204 if (stub_name != NULL)
4205 snprintf (stub_name, len, "e843419@%04x_%08x_%" PRIx64,
4206 input_section->owner->id,
4207 input_section->id,
4208 (uint64_t) offset);
4209 return stub_name;
4210 }
4211
4212 /* Build a stub_entry structure describing an 843419 fixup.
4213
4214 The stub_entry constructed is populated with the bit pattern INSN
4215 of the instruction located at OFFSET within input SECTION.
4216
4217 Returns TRUE on success. */
4218
4219 static bool
4220 _bfd_aarch64_erratum_843419_fixup (uint32_t insn,
4221 bfd_vma adrp_offset,
4222 bfd_vma ldst_offset,
4223 asection *section,
4224 struct bfd_link_info *info)
4225 {
4226 struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info);
4227 char *stub_name;
4228 struct elf_aarch64_stub_hash_entry *stub_entry;
4229
4230 stub_name = _bfd_aarch64_erratum_843419_stub_name (section, ldst_offset);
4231 if (stub_name == NULL)
4232 return false;
4233 stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table, stub_name,
4234 false, false);
4235 if (stub_entry)
4236 {
4237 free (stub_name);
4238 return true;
4239 }
4240
4241 /* We always place an 843419 workaround veneer in the stub section
4242 attached to the input section in which an erratum sequence has
4243 been found. This ensures that later in the link process (in
4244 elfNN_aarch64_write_section) when we copy the veneered
4245 instruction from the input section into the stub section the
4246 copied instruction will have had any relocations applied to it.
4247 If we placed workaround veneers in any other stub section then we
4248 could not assume that all relocations have been processed on the
4249 corresponding input section at the point we output the stub
4250 section. */
4251
4252 stub_entry = _bfd_aarch64_add_stub_entry_after (stub_name, section, htab);
4253 if (stub_entry == NULL)
4254 {
4255 free (stub_name);
4256 return false;
4257 }
4258
4259 stub_entry->adrp_offset = adrp_offset;
4260 stub_entry->target_value = ldst_offset;
4261 stub_entry->target_section = section;
4262 stub_entry->stub_type = aarch64_stub_erratum_843419_veneer;
4263 stub_entry->veneered_insn = insn;
4264 stub_entry->output_name = stub_name;
4265
4266 return true;
4267 }
4268
4269
4270 /* Scan an input section looking for the signature of erratum 843419.
4271
4272 Scans input SECTION in INPUT_BFD looking for erratum 843419
4273 signatures, for each signature found a stub_entry is created
4274 describing the location of the erratum for subsequent fixup.
4275
4276 Return TRUE on successful scan, FALSE on failure to scan.
4277 */
4278
4279 static bool
4280 _bfd_aarch64_erratum_843419_scan (bfd *input_bfd, asection *section,
4281 struct bfd_link_info *info)
4282 {
4283 struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info);
4284
4285 if (htab == NULL)
4286 return true;
4287
4288 if (elf_section_type (section) != SHT_PROGBITS
4289 || (elf_section_flags (section) & SHF_EXECINSTR) == 0
4290 || (section->flags & SEC_EXCLUDE) != 0
4291 || (section->sec_info_type == SEC_INFO_TYPE_JUST_SYMS)
4292 || (section->output_section == bfd_abs_section_ptr))
4293 return true;
4294
4295 do
4296 {
4297 bfd_byte *contents = NULL;
4298 struct _aarch64_elf_section_data *sec_data;
4299 unsigned int span;
4300
4301 if (elf_section_data (section)->this_hdr.contents != NULL)
4302 contents = elf_section_data (section)->this_hdr.contents;
4303 else if (! bfd_malloc_and_get_section (input_bfd, section, &contents))
4304 return false;
4305
4306 sec_data = elf_aarch64_section_data (section);
4307
4308 if (sec_data->mapcount)
4309 qsort (sec_data->map, sec_data->mapcount,
4310 sizeof (elf_aarch64_section_map), elf_aarch64_compare_mapping);
4311
4312 for (span = 0; span < sec_data->mapcount; span++)
4313 {
4314 unsigned int span_start = sec_data->map[span].vma;
4315 unsigned int span_end = ((span == sec_data->mapcount - 1)
4316 ? sec_data->map[0].vma + section->size
4317 : sec_data->map[span + 1].vma);
4318 unsigned int i;
4319 char span_type = sec_data->map[span].type;
4320
4321 if (span_type == 'd')
4322 continue;
4323
4324 for (i = span_start; i + 8 < span_end; i += 4)
4325 {
4326 bfd_vma vma = (section->output_section->vma
4327 + section->output_offset
4328 + i);
4329 bfd_vma veneer_i;
4330
4331 if (_bfd_aarch64_erratum_843419_p
4332 (contents, vma, i, span_end, &veneer_i))
4333 {
4334 uint32_t insn = bfd_getl32 (contents + veneer_i);
4335
4336 if (!_bfd_aarch64_erratum_843419_fixup (insn, i, veneer_i,
4337 section, info))
4338 return false;
4339 }
4340 }
4341 }
4342
4343 if (elf_section_data (section)->this_hdr.contents == NULL)
4344 free (contents);
4345 }
4346 while (0);
4347
4348 return true;
4349 }
4350
4351
4352 /* Add stub entries for calls.
4353
4354 The basic idea here is to examine all the relocations looking for
4355 PC-relative calls to a target that is unreachable with a "bl"
4356 instruction. */
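/* A hedged sketch of the transformation: a call such as

     bl   far_func            R_AARCH64_CALL26, displacement >= 128MB

   is redirected to a veneer placed in the stub section of this section's
   group, roughly

     __far_func_veneer:  adrp/add/br  or  ldr/add/br   (see above)

   (veneer name purely illustrative), and when the output demands BTI a
   second, direct-branch stub is added near the target so that the
   veneer's indirect BR lands on a BTI C.  */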
4357
4358 static bool
4359 _bfd_aarch64_add_call_stub_entries (bool *stub_changed, bfd *output_bfd,
4360 struct bfd_link_info *info)
4361 {
4362 struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info);
4363 bool need_bti = elf_aarch64_bti_p (output_bfd);
4364 bfd *input_bfd;
4365
4366 for (input_bfd = info->input_bfds; input_bfd != NULL;
4367 input_bfd = input_bfd->link.next)
4368 {
4369 Elf_Internal_Shdr *symtab_hdr;
4370 asection *section;
4371 Elf_Internal_Sym *local_syms = NULL;
4372
4373 if (!is_aarch64_elf (input_bfd)
4374 || (input_bfd->flags & BFD_LINKER_CREATED) != 0)
4375 continue;
4376
4377 /* We'll need the symbol table in a second. */
4378 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
4379 if (symtab_hdr->sh_info == 0)
4380 continue;
4381
4382 /* Walk over each section attached to the input bfd. */
4383 for (section = input_bfd->sections;
4384 section != NULL; section = section->next)
4385 {
4386 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
4387
4388 /* If there aren't any relocs, then there's nothing more to do. */
4389 if ((section->flags & SEC_RELOC) == 0
4390 || section->reloc_count == 0
4391 || (section->flags & SEC_CODE) == 0)
4392 continue;
4393
4394 /* If this section is a link-once section that will be
4395 discarded, then don't create any stubs. */
4396 if (section->output_section == NULL
4397 || section->output_section->owner != output_bfd)
4398 continue;
4399
4400 /* Get the relocs. */
4401 internal_relocs
4402 = _bfd_elf_link_read_relocs (input_bfd, section, NULL,
4403 NULL, info->keep_memory);
4404 if (internal_relocs == NULL)
4405 goto error_ret_free_local;
4406
4407 /* Now examine each relocation. */
4408 irela = internal_relocs;
4409 irelaend = irela + section->reloc_count;
4410 for (; irela < irelaend; irela++)
4411 {
4412 unsigned int r_type, r_indx;
4413 enum elf_aarch64_stub_type stub_type;
4414 struct elf_aarch64_stub_hash_entry *stub_entry;
4415 struct elf_aarch64_stub_hash_entry *stub_entry_bti;
4416 asection *sym_sec;
4417 bfd_vma sym_value;
4418 bfd_vma destination;
4419 struct elf_aarch64_link_hash_entry *hash;
4420 const char *sym_name;
4421 char *stub_name;
4422 char *stub_name_bti;
4423 const asection *id_sec;
4424 const asection *id_sec_bti;
4425 unsigned char st_type;
4426 bfd_size_type len;
4427
4428 r_type = ELFNN_R_TYPE (irela->r_info);
4429 r_indx = ELFNN_R_SYM (irela->r_info);
4430
4431 if (r_type >= (unsigned int) R_AARCH64_end)
4432 {
4433 bfd_set_error (bfd_error_bad_value);
4434 error_ret_free_internal:
4435 if (elf_section_data (section)->relocs == NULL)
4436 free (internal_relocs);
4437 goto error_ret_free_local;
4438 }
4439
4440 /* Only look for stubs on unconditional branch and
4441 branch and link instructions. */
4442 if (r_type != (unsigned int) AARCH64_R (CALL26)
4443 && r_type != (unsigned int) AARCH64_R (JUMP26))
4444 continue;
4445
4446 /* Now determine the call target, its name, value,
4447 section. */
4448 sym_sec = NULL;
4449 sym_value = 0;
4450 destination = 0;
4451 hash = NULL;
4452 sym_name = NULL;
4453 if (r_indx < symtab_hdr->sh_info)
4454 {
4455 /* It's a local symbol. */
4456 Elf_Internal_Sym *sym;
4457 Elf_Internal_Shdr *hdr;
4458
4459 if (local_syms == NULL)
4460 {
4461 local_syms
4462 = (Elf_Internal_Sym *) symtab_hdr->contents;
4463 if (local_syms == NULL)
4464 local_syms
4465 = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
4466 symtab_hdr->sh_info, 0,
4467 NULL, NULL, NULL);
4468 if (local_syms == NULL)
4469 goto error_ret_free_internal;
4470 }
4471
4472 sym = local_syms + r_indx;
4473 hdr = elf_elfsections (input_bfd)[sym->st_shndx];
4474 sym_sec = hdr->bfd_section;
4475 if (!sym_sec)
4476 /* This is an undefined symbol. It can never
4477 be resolved. */
4478 continue;
4479
4480 if (ELF_ST_TYPE (sym->st_info) != STT_SECTION)
4481 sym_value = sym->st_value;
4482 destination = (sym_value + irela->r_addend
4483 + sym_sec->output_offset
4484 + sym_sec->output_section->vma);
4485 st_type = ELF_ST_TYPE (sym->st_info);
4486 sym_name
4487 = bfd_elf_string_from_elf_section (input_bfd,
4488 symtab_hdr->sh_link,
4489 sym->st_name);
4490 }
4491 else
4492 {
4493 int e_indx;
4494
4495 e_indx = r_indx - symtab_hdr->sh_info;
4496 hash = ((struct elf_aarch64_link_hash_entry *)
4497 elf_sym_hashes (input_bfd)[e_indx]);
4498
4499 while (hash->root.root.type == bfd_link_hash_indirect
4500 || hash->root.root.type == bfd_link_hash_warning)
4501 hash = ((struct elf_aarch64_link_hash_entry *)
4502 hash->root.root.u.i.link);
4503
4504 if (hash->root.root.type == bfd_link_hash_defined
4505 || hash->root.root.type == bfd_link_hash_defweak)
4506 {
4507 struct elf_aarch64_link_hash_table *globals =
4508 elf_aarch64_hash_table (info);
4509 sym_sec = hash->root.root.u.def.section;
4510 sym_value = hash->root.root.u.def.value;
4511 /* For a destination in a shared library,
4512 use the PLT stub as target address to
4513 decide whether a branch stub is
4514 needed. */
4515 if (globals->root.splt != NULL && hash != NULL
4516 && hash->root.plt.offset != (bfd_vma) - 1)
4517 {
4518 sym_sec = globals->root.splt;
4519 sym_value = hash->root.plt.offset;
4520 if (sym_sec->output_section != NULL)
4521 destination = (sym_value
4522 + sym_sec->output_offset
4523 + sym_sec->output_section->vma);
4524 }
4525 else if (sym_sec->output_section != NULL)
4526 destination = (sym_value + irela->r_addend
4527 + sym_sec->output_offset
4528 + sym_sec->output_section->vma);
4529 }
4530 else if (hash->root.root.type == bfd_link_hash_undefined
4531 || (hash->root.root.type
4532 == bfd_link_hash_undefweak))
4533 {
4534 /* For a shared library, use the PLT stub as
4535 target address to decide whether a long
4536 branch stub is needed.
4537 For absolute code, they cannot be handled. */
4538 struct elf_aarch64_link_hash_table *globals =
4539 elf_aarch64_hash_table (info);
4540
4541 if (globals->root.splt != NULL && hash != NULL
4542 && hash->root.plt.offset != (bfd_vma) - 1)
4543 {
4544 sym_sec = globals->root.splt;
4545 sym_value = hash->root.plt.offset;
4546 if (sym_sec->output_section != NULL)
4547 destination = (sym_value
4548 + sym_sec->output_offset
4549 + sym_sec->output_section->vma);
4550 }
4551 else
4552 continue;
4553 }
4554 else
4555 {
4556 bfd_set_error (bfd_error_bad_value);
4557 goto error_ret_free_internal;
4558 }
4559 st_type = ELF_ST_TYPE (hash->root.type);
4560 sym_name = hash->root.root.root.string;
4561 }
4562
4563 /* Determine what (if any) linker stub is needed. */
4564 stub_type = aarch64_type_of_stub (section, irela, sym_sec,
4565 st_type, destination);
4566 if (stub_type == aarch64_stub_none)
4567 continue;
4568
4569 /* Support for grouping stub sections. */
4570 id_sec = htab->stub_group[section->id].link_sec;
4571
4572 /* Get the name of this stub. */
4573 stub_name = elfNN_aarch64_stub_name (id_sec, sym_sec, hash,
4574 irela);
4575 if (!stub_name)
4576 goto error_ret_free_internal;
4577
4578 stub_entry =
4579 aarch64_stub_hash_lookup (&htab->stub_hash_table,
4580 stub_name, false, false);
4581 if (stub_entry != NULL)
4582 {
4583 /* The proper stub has already been created. */
4584 free (stub_name);
4585
4586 /* Always update this stub's target since it may have
4587 changed after layout. */
4588 stub_entry->target_value = sym_value + irela->r_addend;
4589
4590 if (stub_entry->double_stub)
4591 {
4592 /* Update the target of both stubs. */
4593
4594 id_sec_bti = htab->stub_group[sym_sec->id].link_sec;
4595 stub_name_bti =
4596 elfNN_aarch64_stub_name (id_sec_bti, sym_sec, hash,
4597 irela);
4598 if (!stub_name_bti)
4599 goto error_ret_free_internal;
4600 stub_entry_bti =
4601 aarch64_stub_hash_lookup (&htab->stub_hash_table,
4602 stub_name_bti, false, false);
4603 BFD_ASSERT (stub_entry_bti != NULL);
4604 free (stub_name_bti);
4605 stub_entry_bti->target_value = stub_entry->target_value;
4606 stub_entry->target_value = stub_entry_bti->stub_offset;
4607 }
4608 continue;
4609 }
4610
4611 stub_entry = _bfd_aarch64_add_stub_entry_in_group
4612 (stub_name, section, htab);
4613 if (stub_entry == NULL)
4614 {
4615 free (stub_name);
4616 goto error_ret_free_internal;
4617 }
4618
4619 stub_entry->target_value = sym_value + irela->r_addend;
4620 stub_entry->target_section = sym_sec;
4621 stub_entry->stub_type = stub_type;
4622 stub_entry->h = hash;
4623 stub_entry->st_type = st_type;
4624
4625 if (sym_name == NULL)
4626 sym_name = "unnamed";
4627 len = sizeof (STUB_ENTRY_NAME) + strlen (sym_name);
4628 stub_entry->output_name = bfd_alloc (htab->stub_bfd, len);
4629 if (stub_entry->output_name == NULL)
4630 {
4631 free (stub_name);
4632 goto error_ret_free_internal;
4633 }
4634
4635 snprintf (stub_entry->output_name, len, STUB_ENTRY_NAME,
4636 sym_name);
4637
4638 /* A stub with an indirect jump may break BTI compatibility, so
4639 insert another stub with a direct jump near the target. */
4640 if (need_bti && !aarch64_bti_stub_p (input_bfd, stub_entry))
4641 {
4642 stub_entry->double_stub = true;
4643 htab->has_double_stub = true;
4644 id_sec_bti = htab->stub_group[sym_sec->id].link_sec;
4645 stub_name_bti =
4646 elfNN_aarch64_stub_name (id_sec_bti, sym_sec, hash, irela);
4647 if (!stub_name_bti)
4648 {
4649 free (stub_name);
4650 goto error_ret_free_internal;
4651 }
4652
4653 stub_entry_bti =
4654 aarch64_stub_hash_lookup (&htab->stub_hash_table,
4655 stub_name_bti, false, false);
4656 if (stub_entry_bti == NULL)
4657 stub_entry_bti =
4658 _bfd_aarch64_add_stub_entry_in_group (stub_name_bti,
4659 sym_sec, htab);
4660 if (stub_entry_bti == NULL)
4661 {
4662 free (stub_name);
4663 free (stub_name_bti);
4664 goto error_ret_free_internal;
4665 }
4666
4667 stub_entry_bti->target_value = sym_value + irela->r_addend;
4668 stub_entry_bti->target_section = sym_sec;
4669 stub_entry_bti->stub_type = aarch64_stub_bti_direct_branch;
4670 stub_entry_bti->h = hash;
4671 stub_entry_bti->st_type = st_type;
4672
4673 len = sizeof (BTI_STUB_ENTRY_NAME) + strlen (sym_name);
4674 stub_entry_bti->output_name = bfd_alloc (htab->stub_bfd, len);
4675 if (stub_entry_bti->output_name == NULL)
4676 {
4677 free (stub_name);
4678 free (stub_name_bti);
4679 goto error_ret_free_internal;
4680 }
4681 snprintf (stub_entry_bti->output_name, len,
4682 BTI_STUB_ENTRY_NAME, sym_name);
4683
4684 /* Update the indirect call stub to target the BTI stub. */
4685 stub_entry->target_value = 0;
4686 stub_entry->target_section = stub_entry_bti->stub_sec;
4687 stub_entry->stub_type = stub_type;
4688 stub_entry->h = NULL;
4689 stub_entry->st_type = STT_FUNC;
4690 }
4691
4692 *stub_changed = true;
4693 }
4694
4695 /* We're done with the internal relocs, free them. */
4696 if (elf_section_data (section)->relocs == NULL)
4697 free (internal_relocs);
4698 }
4699 }
4700 return true;
4701 error_ret_free_local:
4702 return false;
4703 }
4704
4705
4706 /* Determine and set the size of the stub section for a final link. */
4707
4708 bool
4709 elfNN_aarch64_size_stubs (bfd *output_bfd,
4710 bfd *stub_bfd,
4711 struct bfd_link_info *info,
4712 bfd_signed_vma group_size,
4713 asection * (*add_stub_section) (const char *,
4714 asection *),
4715 void (*layout_sections_again) (void))
4716 {
4717 bfd_size_type stub_group_size;
4718 bool stubs_always_before_branch;
4719 struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info);
4720 unsigned int num_erratum_835769_fixes = 0;
4721
4722 /* Propagate mach to stub bfd, because it may not have been
4723 finalized when we created stub_bfd. */
4724 bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
4725 bfd_get_mach (output_bfd));
4726
4727 /* Stash our params away. */
4728 htab->stub_bfd = stub_bfd;
4729 htab->add_stub_section = add_stub_section;
4730 htab->layout_sections_again = layout_sections_again;
4731 stubs_always_before_branch = group_size < 0;
4732 if (group_size < 0)
4733 stub_group_size = -group_size;
4734 else
4735 stub_group_size = group_size;
4736
4737 if (stub_group_size == 1)
4738 {
4739 /* Default values. */
4740 /* AArch64 branch range is +-128MB. The value used is 1MB less. */
4741 stub_group_size = 127 * 1024 * 1024;
4742 }
4743
4744 group_sections (htab, stub_group_size, stubs_always_before_branch);
4745
4746 (*htab->layout_sections_again) ();
4747
4748 if (htab->fix_erratum_835769)
4749 {
4750 bfd *input_bfd;
4751
4752 for (input_bfd = info->input_bfds;
4753 input_bfd != NULL; input_bfd = input_bfd->link.next)
4754 {
4755 if (!is_aarch64_elf (input_bfd)
4756 || (input_bfd->flags & BFD_LINKER_CREATED) != 0)
4757 continue;
4758
4759 if (!_bfd_aarch64_erratum_835769_scan (input_bfd, info,
4760 &num_erratum_835769_fixes))
4761 return false;
4762 }
4763
4764 _bfd_aarch64_resize_stubs (htab);
4765 (*htab->layout_sections_again) ();
4766 }
4767
4768 if (htab->fix_erratum_843419 != ERRAT_NONE)
4769 {
4770 bfd *input_bfd;
4771
4772 for (input_bfd = info->input_bfds;
4773 input_bfd != NULL;
4774 input_bfd = input_bfd->link.next)
4775 {
4776 asection *section;
4777
4778 if (!is_aarch64_elf (input_bfd)
4779 || (input_bfd->flags & BFD_LINKER_CREATED) != 0)
4780 continue;
4781
4782 for (section = input_bfd->sections;
4783 section != NULL;
4784 section = section->next)
4785 if (!_bfd_aarch64_erratum_843419_scan (input_bfd, section, info))
4786 return false;
4787 }
4788
4789 _bfd_aarch64_resize_stubs (htab);
4790 (*htab->layout_sections_again) ();
4791 }
4792
4793 for (;;)
4794 {
4795 bool stub_changed = false;
4796
4797 if (!_bfd_aarch64_add_call_stub_entries (&stub_changed, output_bfd, info))
4798 return false;
4799
4800 if (!stub_changed)
4801 return true;
4802
4803 _bfd_aarch64_resize_stubs (htab);
4804 (*htab->layout_sections_again) ();
4805 }
4806 }
4807
4808 /* Build all the stubs associated with the current output file. The
4809 stubs are kept in a hash table attached to the main linker hash
4810 table. We also set up the .plt entries for statically linked PIC
4811 functions here. This function is called via aarch64_elf_finish in the
4812 linker. */
4813
4814 bool
4815 elfNN_aarch64_build_stubs (struct bfd_link_info *info)
4816 {
4817 asection *stub_sec;
4818 struct bfd_hash_table *table;
4819 struct elf_aarch64_link_hash_table *htab;
4820
4821 htab = elf_aarch64_hash_table (info);
4822
4823 for (stub_sec = htab->stub_bfd->sections;
4824 stub_sec != NULL; stub_sec = stub_sec->next)
4825 {
4826 bfd_size_type size;
4827
4828 /* Ignore non-stub sections. */
4829 if (!strstr (stub_sec->name, STUB_SUFFIX))
4830 continue;
4831
4832 /* Allocate memory to hold the linker stubs. */
4833 size = stub_sec->size;
4834 stub_sec->contents = bfd_zalloc (htab->stub_bfd, size);
4835 if (stub_sec->contents == NULL && size != 0)
4836 return false;
4837 stub_sec->size = 0;
4838
4839 /* Add a branch around the stub section, and a nop, to keep it 8 byte
4840 aligned, as long branch stubs contain a 64-bit address. */
4841 bfd_putl32 (0x14000000 | (size >> 2), stub_sec->contents);
4842 bfd_putl32 (INSN_NOP, stub_sec->contents + 4);
4843 stub_sec->size += 8;
4844 }
4845
4846 /* Build the stubs as directed by the stub hash table. */
4847 table = &htab->stub_hash_table;
4848 bfd_hash_traverse (table, aarch64_build_one_stub, info);
4849
4850 return true;
4851 }
4852
4853
4854 /* Add an entry to the code/data map for section SEC. */
4855
4856 static void
4857 elfNN_aarch64_section_map_add (asection *sec, char type, bfd_vma vma)
4858 {
4859 struct _aarch64_elf_section_data *sec_data =
4860 elf_aarch64_section_data (sec);
4861 unsigned int newidx;
4862
4863 if (sec_data->map == NULL)
4864 {
4865 sec_data->map = bfd_malloc (sizeof (elf_aarch64_section_map));
4866 sec_data->mapcount = 0;
4867 sec_data->mapsize = 1;
4868 }
4869
4870 newidx = sec_data->mapcount++;
4871
4872 if (sec_data->mapcount > sec_data->mapsize)
4873 {
4874 sec_data->mapsize *= 2;
4875 sec_data->map = bfd_realloc_or_free
4876 (sec_data->map, sec_data->mapsize * sizeof (elf_aarch64_section_map));
4877 }
4878
4879 if (sec_data->map)
4880 {
4881 sec_data->map[newidx].vma = vma;
4882 sec_data->map[newidx].type = type;
4883 }
4884 }
4885
4886
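/* Mapping symbols are the AArch64 "$x" (code) and "$d" (data) local
   symbols.  For example, a section carrying $x at offset 0 and $d at
   offset 0x40 is recorded as an instruction span [0, 0x40) followed by a
   data span, which lets the erratum scanners above skip literal pools.  */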
4887 /* Initialise maps of insn/data for input BFDs. */
4888 void
4889 bfd_elfNN_aarch64_init_maps (bfd *abfd)
4890 {
4891 Elf_Internal_Sym *isymbuf;
4892 Elf_Internal_Shdr *hdr;
4893 unsigned int i, localsyms;
4894
4895 /* Make sure that we are dealing with an AArch64 elf binary. */
4896 if (!is_aarch64_elf (abfd))
4897 return;
4898
4899 if ((abfd->flags & DYNAMIC) != 0)
4900 return;
4901
4902 hdr = &elf_symtab_hdr (abfd);
4903 localsyms = hdr->sh_info;
4904
4905 /* Obtain a buffer full of symbols for this BFD. The hdr->sh_info field
4906 should contain the number of local symbols, which should come before any
4907 global symbols. Mapping symbols are always local. */
4908 isymbuf = bfd_elf_get_elf_syms (abfd, hdr, localsyms, 0, NULL, NULL, NULL);
4909
4910 /* No internal symbols read? Skip this BFD. */
4911 if (isymbuf == NULL)
4912 return;
4913
4914 for (i = 0; i < localsyms; i++)
4915 {
4916 Elf_Internal_Sym *isym = &isymbuf[i];
4917 asection *sec = bfd_section_from_elf_index (abfd, isym->st_shndx);
4918 const char *name;
4919
4920 if (sec != NULL && ELF_ST_BIND (isym->st_info) == STB_LOCAL)
4921 {
4922 name = bfd_elf_string_from_elf_section (abfd,
4923 hdr->sh_link,
4924 isym->st_name);
4925
4926 if (bfd_is_aarch64_special_symbol_name
4927 (name, BFD_AARCH64_SPECIAL_SYM_TYPE_MAP))
4928 elfNN_aarch64_section_map_add (sec, name[1], isym->st_value);
4929 }
4930 }
4931 }
4932
4933 static void
4934 setup_plt_values (struct bfd_link_info *link_info,
4935 aarch64_plt_type plt_type)
4936 {
4937 struct elf_aarch64_link_hash_table *globals;
4938 globals = elf_aarch64_hash_table (link_info);
4939
4940 if (plt_type == PLT_BTI_PAC)
4941 {
4942 globals->plt0_entry = elfNN_aarch64_small_plt0_bti_entry;
4943
4944 /* Only in ET_EXEC do we need PLTn with BTI. */
4945 if (bfd_link_pde (link_info))
4946 {
4947 globals->plt_entry_size = PLT_BTI_PAC_SMALL_ENTRY_SIZE;
4948 globals->plt_entry = elfNN_aarch64_small_plt_bti_pac_entry;
4949 }
4950 else
4951 {
4952 globals->plt_entry_size = PLT_PAC_SMALL_ENTRY_SIZE;
4953 globals->plt_entry = elfNN_aarch64_small_plt_pac_entry;
4954 }
4955 }
4956 else if (plt_type == PLT_BTI)
4957 {
4958 globals->plt0_entry = elfNN_aarch64_small_plt0_bti_entry;
4959
4960 /* Only in ET_EXEC do we need PLTn with BTI. */
4961 if (bfd_link_pde (link_info))
4962 {
4963 globals->plt_entry_size = PLT_BTI_SMALL_ENTRY_SIZE;
4964 globals->plt_entry = elfNN_aarch64_small_plt_bti_entry;
4965 }
4966 }
4967 else if (plt_type == PLT_PAC)
4968 {
4969 globals->plt_entry_size = PLT_PAC_SMALL_ENTRY_SIZE;
4970 globals->plt_entry = elfNN_aarch64_small_plt_pac_entry;
4971 }
4972 }
4973
4974 /* Set option values needed during linking. */
4975 void
4976 bfd_elfNN_aarch64_set_options (struct bfd *output_bfd,
4977 struct bfd_link_info *link_info,
4978 int no_enum_warn,
4979 int no_wchar_warn, int pic_veneer,
4980 int fix_erratum_835769,
4981 erratum_84319_opts fix_erratum_843419,
4982 int no_apply_dynamic_relocs,
4983 aarch64_bti_pac_info bp_info)
4984 {
4985 struct elf_aarch64_link_hash_table *globals;
4986
4987 globals = elf_aarch64_hash_table (link_info);
4988 globals->pic_veneer = pic_veneer;
4989 globals->fix_erratum_835769 = fix_erratum_835769;
4990 /* If the default options are used, then ERRAT_ADR will be set by
4991 default, which enables the ADRP->ADR workaround for erratum
4992 843419. */
4993 globals->fix_erratum_843419 = fix_erratum_843419;
4994 globals->no_apply_dynamic_relocs = no_apply_dynamic_relocs;
4995
4996 BFD_ASSERT (is_aarch64_elf (output_bfd));
4997 elf_aarch64_tdata (output_bfd)->no_enum_size_warning = no_enum_warn;
4998 elf_aarch64_tdata (output_bfd)->no_wchar_size_warning = no_wchar_warn;
4999
5000 switch (bp_info.bti_type)
5001 {
5002 case BTI_WARN:
5003 elf_aarch64_tdata (output_bfd)->no_bti_warn = 0;
5004 elf_aarch64_tdata (output_bfd)->gnu_and_prop
5005 |= GNU_PROPERTY_AARCH64_FEATURE_1_BTI;
5006 break;
5007
5008 default:
5009 break;
5010 }
5011 elf_aarch64_tdata (output_bfd)->plt_type = bp_info.plt_type;
5012 setup_plt_values (link_info, bp_info.plt_type);
5013 }
5014
5015 static bfd_vma
5016 aarch64_calculate_got_entry_vma (struct elf_link_hash_entry *h,
5017 struct elf_aarch64_link_hash_table
5018 *globals, struct bfd_link_info *info,
5019 bfd_vma value, bfd *output_bfd,
5020 bool *unresolved_reloc_p)
5021 {
5022 bfd_vma off = (bfd_vma) - 1;
5023 asection *basegot = globals->root.sgot;
5024 bool dyn = globals->root.dynamic_sections_created;
5025
5026 if (h != NULL)
5027 {
5028 BFD_ASSERT (basegot != NULL);
5029 off = h->got.offset;
5030 BFD_ASSERT (off != (bfd_vma) - 1);
5031 if (!WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, bfd_link_pic (info), h)
5032 || (bfd_link_pic (info)
5033 && SYMBOL_REFERENCES_LOCAL (info, h))
5034 || (ELF_ST_VISIBILITY (h->other)
5035 && h->root.type == bfd_link_hash_undefweak))
5036 {
5037 /* This is actually a static link, or it is a -Bsymbolic link
5038 and the symbol is defined locally. We must initialize this
5039 entry in the global offset table. Since the offset must
5040 always be a multiple of 8 (4 in the case of ILP32), we use
5041 the least significant bit to record whether we have
5042 initialized it already.
5043 When doing a dynamic link, we create a .rel(a).got relocation
5044 entry to initialize the value. This is done in the
5045 finish_dynamic_symbol routine. */
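/* E.g. with got.offset == 0x18 the low bit is clear, so the entry at
   +0x18 is initialized with VALUE and got.offset becomes 0x19; any
   later reference sees the low bit set and simply clears it again.  */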
5046 if ((off & 1) != 0)
5047 off &= ~1;
5048 else
5049 {
5050 bfd_put_NN (output_bfd, value, basegot->contents + off);
5051 h->got.offset |= 1;
5052 }
5053 }
5054 else
5055 *unresolved_reloc_p = false;
5056
5057 off = off + basegot->output_section->vma + basegot->output_offset;
5058 }
5059
5060 return off;
5061 }
5062
5063 /* Change R_TYPE to a more efficient access model where possible,
5064 return the new reloc type. */
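/* For example (a sketch of the local-exec case), the general-dynamic pair

     adrp x0, :tlsgd:foo            ->  movz x0, :tprel_g1:foo
     add  x0, x0, :tlsgd_lo12:foo   ->  movk x0, :tprel_g0_nc:foo

   is why TLSGD_ADR_PAGE21 maps to TLSLE_MOVW_TPREL_G1 and
   TLSGD_ADD_LO12_NC maps to TLSLE_MOVW_TPREL_G0_NC below; the actual
   instruction rewriting is performed elsewhere during relocation.  */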
5065
5066 static bfd_reloc_code_real_type
5067 aarch64_tls_transition_without_check (bfd_reloc_code_real_type r_type,
5068 struct elf_link_hash_entry *h,
5069 struct bfd_link_info *info)
5070 {
5071 bool local_exec = bfd_link_executable (info)
5072 && SYMBOL_REFERENCES_LOCAL (info, h);
5073
5074 switch (r_type)
5075 {
5076 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
5077 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
5078 return (local_exec
5079 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1
5080 : BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21);
5081
5082 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
5083 return (local_exec
5084 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC
5085 : r_type);
5086
5087 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
5088 return (local_exec
5089 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1
5090 : BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19);
5091
5092 case BFD_RELOC_AARCH64_TLSDESC_LDR:
5093 return (local_exec
5094 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC
5095 : BFD_RELOC_AARCH64_NONE);
5096
5097 case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
5098 return (local_exec
5099 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC
5100 : BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC);
5101
5102 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
5103 return (local_exec
5104 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2
5105 : BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1);
5106
5107 case BFD_RELOC_AARCH64_TLSDESC_LDNN_LO12_NC:
5108 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
5109 return (local_exec
5110 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC
5111 : BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC);
5112
5113 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
5114 return local_exec ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1 : r_type;
5115
5116 case BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC:
5117 return local_exec ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC : r_type;
5118
5119 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
5120 return r_type;
5121
5122 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
5123 return (local_exec
5124 ? BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12
5125 : BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19);
5126
5127 case BFD_RELOC_AARCH64_TLSDESC_ADD:
5128 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
5129 case BFD_RELOC_AARCH64_TLSDESC_CALL:
5130 /* Instructions with these relocations will become NOPs. */
5131 return BFD_RELOC_AARCH64_NONE;
5132
5133 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
5134 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
5135 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
5136 return local_exec ? BFD_RELOC_AARCH64_NONE : r_type;
5137
5138 #if ARCH_SIZE == 64
5139 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
5140 return local_exec
5141 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC
5142 : BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC;
5143
5144 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
5145 return local_exec
5146 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2
5147 : BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1;
5148 #endif
5149
5150 default:
5151 break;
5152 }
5153
5154 return r_type;
5155 }
5156
5157 static unsigned int
5158 aarch64_reloc_got_type (bfd_reloc_code_real_type r_type)
5159 {
5160 switch (r_type)
5161 {
5162 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
5163 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
5164 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
5165 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
5166 case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
5167 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
5168 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
5169 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
5170 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
5171 return GOT_NORMAL;
5172
5173 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
5174 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
5175 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
5176 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
5177 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
5178 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
5179 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
5180 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
5181 return GOT_TLS_GD;
5182
5183 case BFD_RELOC_AARCH64_TLSDESC_ADD:
5184 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
5185 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
5186 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
5187 case BFD_RELOC_AARCH64_TLSDESC_CALL:
5188 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
5189 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
5190 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
5191 case BFD_RELOC_AARCH64_TLSDESC_LDR:
5192 case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
5193 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
5194 return GOT_TLSDESC_GD;
5195
5196 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
5197 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
5198 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
5199 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
5200 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
5201 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
5202 return GOT_TLS_IE;
5203
5204 default:
5205 break;
5206 }
5207 return GOT_UNKNOWN;
5208 }
5209
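/* Return TRUE if the TLS access sequence containing relocation R_TYPE
   against the symbol given by H (or by R_SYMNDX for a local symbol) may be
   relaxed.  A GD or TLSDESC sequence whose symbol already has an IE-style
   GOT entry can always be relaxed to IE; any other relaxation is only
   attempted when linking an executable and the symbol is not an undefined
   weak symbol.  */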
5210 static bool
5211 aarch64_can_relax_tls (bfd *input_bfd,
5212 struct bfd_link_info *info,
5213 bfd_reloc_code_real_type r_type,
5214 struct elf_link_hash_entry *h,
5215 unsigned long r_symndx)
5216 {
5217 unsigned int symbol_got_type;
5218 unsigned int reloc_got_type;
5219
5220 if (! IS_AARCH64_TLS_RELAX_RELOC (r_type))
5221 return false;
5222
5223 symbol_got_type = elfNN_aarch64_symbol_got_type (h, input_bfd, r_symndx);
5224 reloc_got_type = aarch64_reloc_got_type (r_type);
5225
5226 if (symbol_got_type == GOT_TLS_IE && GOT_TLS_GD_ANY_P (reloc_got_type))
5227 return true;
5228
5229 if (!bfd_link_executable (info))
5230 return false;
5231
5232 if (h && h->root.type == bfd_link_hash_undefweak)
5233 return false;
5234
5235 return true;
5236 }
5237
5238 /* Given the relocation code R_TYPE, return the relaxed bfd reloc
5239 enumerator. */
5240
5241 static bfd_reloc_code_real_type
5242 aarch64_tls_transition (bfd *input_bfd,
5243 struct bfd_link_info *info,
5244 unsigned int r_type,
5245 struct elf_link_hash_entry *h,
5246 unsigned long r_symndx)
5247 {
5248 bfd_reloc_code_real_type bfd_r_type
5249 = elfNN_aarch64_bfd_reloc_from_type (input_bfd, r_type);
5250
5251 if (! aarch64_can_relax_tls (input_bfd, info, bfd_r_type, h, r_symndx))
5252 return bfd_r_type;
5253
5254 return aarch64_tls_transition_without_check (bfd_r_type, h, info);
5255 }
5256
5257 /* Return the base VMA address which should be subtracted from real addresses
5258 when resolving R_AARCH64_TLS_DTPREL relocations. */
5259
5260 static bfd_vma
5261 dtpoff_base (struct bfd_link_info *info)
5262 {
5263 /* If tls_sec is NULL, we should have signalled an error already. */
5264 BFD_ASSERT (elf_hash_table (info)->tls_sec != NULL);
5265 return elf_hash_table (info)->tls_sec->vma;
5266 }
5267
5268 /* Return the base VMA address which should be subtracted from real addresses
5269 when resolving R_AARCH64_TLS_GOTTPREL64 relocations. */
5270
5271 static bfd_vma
5272 tpoff_base (struct bfd_link_info *info)
5273 {
5274 struct elf_link_hash_table *htab = elf_hash_table (info);
5275
5276 /* If tls_sec is NULL, we should have signalled an error already. */
5277 BFD_ASSERT (htab->tls_sec != NULL);
5278
5279 bfd_vma base = align_power ((bfd_vma) TCB_SIZE,
5280 htab->tls_sec->alignment_power);
5281 return htab->tls_sec->vma - base;
5282 }
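/* For example, with the 16-byte AArch64 TCB and a TLS segment aligned to at
   most 16 bytes, tpoff_base returns tls_sec->vma - 16, so a variable at the
   very start of the TLS segment resolves to thread-pointer offset 16, the
   first byte after the TCB.  */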
5283
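/* The helpers below keep per-symbol GOT and TLSDESC GOT offsets.  GOT
   entries are at least 4-byte aligned, so the least significant bit of a
   stored offset is free: the *_mark / *_mark_p routines use it to record
   that the entry has already been processed, and symbol_got_offset /
   symbol_tlsdesc_got_offset mask it back off.  */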
5284 static bfd_vma *
5285 symbol_got_offset_ref (bfd *input_bfd, struct elf_link_hash_entry *h,
5286 unsigned long r_symndx)
5287 {
5288 /* Return a pointer to the stored GOT offset for the symbol referred
5289 to by H, or by R_SYMNDX for a local symbol. */
5290 if (h != NULL)
5291 return &h->got.offset;
5292 else
5293 {
5294 /* local symbol */
5295 struct elf_aarch64_local_symbol *l;
5296
5297 l = elf_aarch64_locals (input_bfd);
5298 return &l[r_symndx].got_offset;
5299 }
5300 }
5301
5302 static void
5303 symbol_got_offset_mark (bfd *input_bfd, struct elf_link_hash_entry *h,
5304 unsigned long r_symndx)
5305 {
5306 bfd_vma *p;
5307 p = symbol_got_offset_ref (input_bfd, h, r_symndx);
5308 *p |= 1;
5309 }
5310
5311 static int
5312 symbol_got_offset_mark_p (bfd *input_bfd, struct elf_link_hash_entry *h,
5313 unsigned long r_symndx)
5314 {
5315 bfd_vma value;
5316 value = * symbol_got_offset_ref (input_bfd, h, r_symndx);
5317 return value & 1;
5318 }
5319
5320 static bfd_vma
5321 symbol_got_offset (bfd *input_bfd, struct elf_link_hash_entry *h,
5322 unsigned long r_symndx)
5323 {
5324 bfd_vma value;
5325 value = * symbol_got_offset_ref (input_bfd, h, r_symndx);
5326 value &= ~1;
5327 return value;
5328 }
5329
5330 static bfd_vma *
5331 symbol_tlsdesc_got_offset_ref (bfd *input_bfd, struct elf_link_hash_entry *h,
5332 unsigned long r_symndx)
5333 {
5334 /* Return a pointer to the stored TLSDESC GOT offset for the symbol
5335 referred to by H, or by R_SYMNDX for a local symbol. */
5336 if (h != NULL)
5337 {
5338 struct elf_aarch64_link_hash_entry *eh;
5339 eh = (struct elf_aarch64_link_hash_entry *) h;
5340 return &eh->tlsdesc_got_jump_table_offset;
5341 }
5342 else
5343 {
5344 /* local symbol */
5345 struct elf_aarch64_local_symbol *l;
5346
5347 l = elf_aarch64_locals (input_bfd);
5348 return &l[r_symndx].tlsdesc_got_jump_table_offset;
5349 }
5350 }
5351
5352 static void
5353 symbol_tlsdesc_got_offset_mark (bfd *input_bfd, struct elf_link_hash_entry *h,
5354 unsigned long r_symndx)
5355 {
5356 bfd_vma *p;
5357 p = symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx);
5358 *p |= 1;
5359 }
5360
5361 static int
5362 symbol_tlsdesc_got_offset_mark_p (bfd *input_bfd,
5363 struct elf_link_hash_entry *h,
5364 unsigned long r_symndx)
5365 {
5366 bfd_vma value;
5367 value = * symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx);
5368 return value & 1;
5369 }
5370
5371 static bfd_vma
5372 symbol_tlsdesc_got_offset (bfd *input_bfd, struct elf_link_hash_entry *h,
5373 unsigned long r_symndx)
5374 {
5375 bfd_vma value;
5376 value = * symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx);
5377 value &= ~1;
5378 return value;
5379 }
5380
5381 /* Data for make_branch_to_erratum_835769_stub(). */
5382
5383 struct erratum_835769_branch_to_stub_data
5384 {
5385 struct bfd_link_info *info;
5386 asection *output_section;
5387 bfd_byte *contents;
5388 };
5389
5390 /* Helper to insert branches to erratum 835769 stubs in the right
5391 places for a particular section. */
5392
5393 static bool
5394 make_branch_to_erratum_835769_stub (struct bfd_hash_entry *gen_entry,
5395 void *in_arg)
5396 {
5397 struct elf_aarch64_stub_hash_entry *stub_entry;
5398 struct erratum_835769_branch_to_stub_data *data;
5399 bfd_byte *contents;
5400 unsigned long branch_insn = 0;
5401 bfd_vma veneered_insn_loc, veneer_entry_loc;
5402 bfd_signed_vma branch_offset;
5403 unsigned int target;
5404 bfd *abfd;
5405
5406 stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;
5407 data = (struct erratum_835769_branch_to_stub_data *) in_arg;
5408
5409 if (stub_entry->target_section != data->output_section
5410 || stub_entry->stub_type != aarch64_stub_erratum_835769_veneer)
5411 return true;
5412
5413 contents = data->contents;
5414 veneered_insn_loc = stub_entry->target_section->output_section->vma
5415 + stub_entry->target_section->output_offset
5416 + stub_entry->target_value;
5417 veneer_entry_loc = stub_entry->stub_sec->output_section->vma
5418 + stub_entry->stub_sec->output_offset
5419 + stub_entry->stub_offset;
5420 branch_offset = veneer_entry_loc - veneered_insn_loc;
5421
5422 abfd = stub_entry->target_section->owner;
5423 if (!aarch64_valid_branch_p (veneer_entry_loc, veneered_insn_loc))
5424 _bfd_error_handler
5425 (_("%pB: error: erratum 835769 stub out "
5426 "of range (input file too large)"), abfd);
5427
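/* Patch the veneered instruction with an unconditional branch to the
   veneer: B is opcode 0x14000000 with the signed offset, in units of
   instructions (bytes >> 2), in its low 26 bits.  */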
5428 target = stub_entry->target_value;
5429 branch_insn = 0x14000000;
5430 branch_offset >>= 2;
5431 branch_offset &= 0x3ffffff;
5432 branch_insn |= branch_offset;
5433 bfd_putl32 (branch_insn, &contents[target]);
5434
5435 return true;
5436 }
5437
5438
5439 static bool
5440 _bfd_aarch64_erratum_843419_branch_to_stub (struct bfd_hash_entry *gen_entry,
5441 void *in_arg)
5442 {
5443 struct elf_aarch64_stub_hash_entry *stub_entry
5444 = (struct elf_aarch64_stub_hash_entry *) gen_entry;
5445 struct erratum_835769_branch_to_stub_data *data
5446 = (struct erratum_835769_branch_to_stub_data *) in_arg;
5447 struct bfd_link_info *info;
5448 struct elf_aarch64_link_hash_table *htab;
5449 bfd_byte *contents;
5450 asection *section;
5451 bfd *abfd;
5452 bfd_vma place;
5453 uint32_t insn;
5454
5455 info = data->info;
5456 contents = data->contents;
5457 section = data->output_section;
5458
5459 htab = elf_aarch64_hash_table (info);
5460
5461 if (stub_entry->target_section != section
5462 || stub_entry->stub_type != aarch64_stub_erratum_843419_veneer)
5463 return true;
5464
5465 BFD_ASSERT (((htab->fix_erratum_843419 & ERRAT_ADRP) && stub_entry->stub_sec)
5466 || (htab->fix_erratum_843419 & ERRAT_ADR));
5467
5468 /* Only update the stub section if we have one. We should always have one if
5469 we're allowed to use the ADRP erratum workaround, otherwise it is not
5470 required. */
5471 if (stub_entry->stub_sec)
5472 {
5473 insn = bfd_getl32 (contents + stub_entry->target_value);
5474 bfd_putl32 (insn,
5475 stub_entry->stub_sec->contents + stub_entry->stub_offset);
5476 }
5477
5478 place = (section->output_section->vma + section->output_offset
5479 + stub_entry->adrp_offset);
5480 insn = bfd_getl32 (contents + stub_entry->adrp_offset);
5481
5482 if (!_bfd_aarch64_adrp_p (insn))
5483 abort ();
5484
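/* Work out the immediate an ADR at this address would need to reach the
   ADRP's target: the ADRP page immediate converted to bytes minus the low
   twelve bits of the instruction's address.  */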
5485 bfd_signed_vma imm =
5486 (_bfd_aarch64_sign_extend
5487 ((bfd_vma) _bfd_aarch64_decode_adrp_imm (insn) << 12, 33)
5488 - (place & 0xfff));
5489
5490 if ((htab->fix_erratum_843419 & ERRAT_ADR)
5491 && (imm >= AARCH64_MIN_ADRP_IMM && imm <= AARCH64_MAX_ADRP_IMM))
5492 {
5493 insn = (_bfd_aarch64_reencode_adr_imm (AARCH64_ADR_OP, imm)
5494 | AARCH64_RT (insn));
5495 bfd_putl32 (insn, contents + stub_entry->adrp_offset);
5496 /* Stub is not needed, don't map it out. */
5497 stub_entry->stub_type = aarch64_stub_none;
5498 }
5499 else if (htab->fix_erratum_843419 & ERRAT_ADRP)
5500 {
5501 bfd_vma veneered_insn_loc;
5502 bfd_vma veneer_entry_loc;
5503 bfd_signed_vma branch_offset;
5504 uint32_t branch_insn;
5505
5506 veneered_insn_loc = stub_entry->target_section->output_section->vma
5507 + stub_entry->target_section->output_offset
5508 + stub_entry->target_value;
5509 veneer_entry_loc = stub_entry->stub_sec->output_section->vma
5510 + stub_entry->stub_sec->output_offset
5511 + stub_entry->stub_offset;
5512 branch_offset = veneer_entry_loc - veneered_insn_loc;
5513
5514 abfd = stub_entry->target_section->owner;
5515 if (!aarch64_valid_branch_p (veneer_entry_loc, veneered_insn_loc))
5516 _bfd_error_handler
5517 (_("%pB: error: erratum 843419 stub out "
5518 "of range (input file too large)"), abfd);
5519
5520 branch_insn = 0x14000000;
5521 branch_offset >>= 2;
5522 branch_offset &= 0x3ffffff;
5523 branch_insn |= branch_offset;
5524 bfd_putl32 (branch_insn, contents + stub_entry->target_value);
5525 }
5526 else
5527 {
5528 abfd = stub_entry->target_section->owner;
5529 _bfd_error_handler
5530 (_("%pB: error: erratum 843419 immediate 0x%" PRIx64
5531 " out of range for ADR (input file too large) and "
5532 "--fix-cortex-a53-843419=adr used. Run the linker with "
5533 "--fix-cortex-a53-843419=full instead"),
5534 abfd, (uint64_t) (bfd_vma) imm);
5535 bfd_set_error (bfd_error_bad_value);
5536 /* This function is called inside a hashtable traversal and the error
5537 handlers called above turn into non-fatal errors, which means that in
5538 this case ld returns exit code 0 and still produces a broken object file.
5539 To prevent this, issue a hard abort. */
5540 BFD_FAIL ();
5541 }
5542 return true;
5543 }
5544
5545
5546 static bool
5547 elfNN_aarch64_write_section (bfd *output_bfd ATTRIBUTE_UNUSED,
5548 struct bfd_link_info *link_info,
5549 asection *sec,
5550 bfd_byte *contents)
5551
5552 {
5553 struct elf_aarch64_link_hash_table *globals =
5554 elf_aarch64_hash_table (link_info);
5555
5556 if (globals == NULL)
5557 return false;
5558
5559 /* Fix code to point to erratum 835769 stubs. */
5560 if (globals->fix_erratum_835769)
5561 {
5562 struct erratum_835769_branch_to_stub_data data;
5563
5564 data.info = link_info;
5565 data.output_section = sec;
5566 data.contents = contents;
5567 bfd_hash_traverse (&globals->stub_hash_table,
5568 make_branch_to_erratum_835769_stub, &data);
5569 }
5570
5571 if (globals->fix_erratum_843419)
5572 {
5573 struct erratum_835769_branch_to_stub_data data;
5574
5575 data.info = link_info;
5576 data.output_section = sec;
5577 data.contents = contents;
5578 bfd_hash_traverse (&globals->stub_hash_table,
5579 _bfd_aarch64_erratum_843419_branch_to_stub, &data);
5580 }
5581
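/* Returning FALSE lets the generic linker code write out the (possibly
   patched) section contents itself.  */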
5582 return false;
5583 }
5584
5585 /* Return TRUE if RELOC is a relocation against the base of the GOT table. */
5586
5587 static bool
5588 aarch64_relocation_aginst_gp_p (bfd_reloc_code_real_type reloc)
5589 {
5590 return (reloc == BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14
5591 || reloc == BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15
5592 || reloc == BFD_RELOC_AARCH64_LD64_GOTOFF_LO15
5593 || reloc == BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC
5594 || reloc == BFD_RELOC_AARCH64_MOVW_GOTOFF_G1);
5595 }
5596
5597 /* Perform a relocation as part of a final link. The input relocation type
5598 should already have been TLS relaxed. */
5599
5600 static bfd_reloc_status_type
5601 elfNN_aarch64_final_link_relocate (reloc_howto_type *howto,
5602 bfd *input_bfd,
5603 bfd *output_bfd,
5604 asection *input_section,
5605 bfd_byte *contents,
5606 Elf_Internal_Rela *rel,
5607 bfd_vma value,
5608 struct bfd_link_info *info,
5609 asection *sym_sec,
5610 struct elf_link_hash_entry *h,
5611 bool *unresolved_reloc_p,
5612 bool save_addend,
5613 bfd_vma *saved_addend,
5614 Elf_Internal_Sym *sym)
5615 {
5616 Elf_Internal_Shdr *symtab_hdr;
5617 unsigned int r_type = howto->type;
5618 bfd_reloc_code_real_type bfd_r_type
5619 = elfNN_aarch64_bfd_reloc_from_howto (howto);
5620 unsigned long r_symndx;
5621 bfd_byte *hit_data = contents + rel->r_offset;
5622 bfd_vma place, off, got_entry_addr = 0;
5623 bfd_signed_vma signed_addend;
5624 struct elf_aarch64_link_hash_table *globals;
5625 bool weak_undef_p;
5626 bool relative_reloc;
5627 asection *base_got;
5628 bfd_vma orig_value = value;
5629 bool resolved_to_zero;
5630 bool abs_symbol_p;
5631
5632 globals = elf_aarch64_hash_table (info);
5633
5634 symtab_hdr = &elf_symtab_hdr (input_bfd);
5635
5636 BFD_ASSERT (is_aarch64_elf (input_bfd));
5637
5638 r_symndx = ELFNN_R_SYM (rel->r_info);
5639
5640 place = input_section->output_section->vma
5641 + input_section->output_offset + rel->r_offset;
5642
5643 /* Get addend, accumulating the addend for consecutive relocs
5644 which refer to the same offset. */
5645 signed_addend = saved_addend ? *saved_addend : 0;
5646 signed_addend += rel->r_addend;
5647
5648 weak_undef_p = (h ? h->root.type == bfd_link_hash_undefweak
5649 : bfd_is_und_section (sym_sec));
5650 abs_symbol_p = h != NULL && bfd_is_abs_symbol (&h->root);
5651
5652
5653 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle
5654 it here if it is defined in a non-shared object. */
5655 if (h != NULL
5656 && h->type == STT_GNU_IFUNC
5657 && h->def_regular)
5658 {
5659 asection *plt;
5660 const char *name;
5661 bfd_vma addend = 0;
5662
5663 if ((input_section->flags & SEC_ALLOC) == 0)
5664 {
5665 /* If this is a SHT_NOTE section without SHF_ALLOC, treat
5666 STT_GNU_IFUNC symbol as STT_FUNC. */
5667 if (elf_section_type (input_section) == SHT_NOTE)
5668 goto skip_ifunc;
5669
5670 /* Dynamic relocs are not propagated for SEC_DEBUGGING
5671 sections because such sections are not SEC_ALLOC and
5672 thus ld.so will not process them. */
5673 if ((input_section->flags & SEC_DEBUGGING) != 0)
5674 return bfd_reloc_ok;
5675
5676 if (h->root.root.string)
5677 name = h->root.root.string;
5678 else
5679 name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym, NULL);
5680 _bfd_error_handler
5681 /* xgettext:c-format */
5682 (_("%pB(%pA+%#" PRIx64 "): "
5683 "unresolvable %s relocation against symbol `%s'"),
5684 input_bfd, input_section, (uint64_t) rel->r_offset,
5685 howto->name, name);
5686 bfd_set_error (bfd_error_bad_value);
5687 return bfd_reloc_notsupported;
5688 }
5689 else if (h->plt.offset == (bfd_vma) -1)
5690 goto bad_ifunc_reloc;
5691
5692 /* STT_GNU_IFUNC symbol must go through PLT. */
5693 plt = globals->root.splt ? globals->root.splt : globals->root.iplt;
5694 value = (plt->output_section->vma + plt->output_offset + h->plt.offset);
5695
5696 switch (bfd_r_type)
5697 {
5698 default:
5699 bad_ifunc_reloc:
5700 if (h->root.root.string)
5701 name = h->root.root.string;
5702 else
5703 name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym,
5704 NULL);
5705 _bfd_error_handler
5706 /* xgettext:c-format */
5707 (_("%pB: relocation %s against STT_GNU_IFUNC "
5708 "symbol `%s' isn't handled by %s"), input_bfd,
5709 howto->name, name, __func__);
5710 bfd_set_error (bfd_error_bad_value);
5711 return bfd_reloc_notsupported;
5712
5713 case BFD_RELOC_AARCH64_NN:
5714 if (rel->r_addend != 0)
5715 {
5716 if (h->root.root.string)
5717 name = h->root.root.string;
5718 else
5719 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
5720 sym, NULL);
5721 _bfd_error_handler
5722 /* xgettext:c-format */
5723 (_("%pB: relocation %s against STT_GNU_IFUNC "
5724 "symbol `%s' has non-zero addend: %" PRId64),
5725 input_bfd, howto->name, name, (int64_t) rel->r_addend);
5726 bfd_set_error (bfd_error_bad_value);
5727 return bfd_reloc_notsupported;
5728 }
5729
5730 /* Generate dynamic relocation only when there is a
5731 non-GOT reference in a shared object. */
5732 if (bfd_link_pic (info) && h->non_got_ref)
5733 {
5734 Elf_Internal_Rela outrel;
5735 asection *sreloc;
5736
5737 /* Need a dynamic relocation to get the real function
5738 address. */
5739 outrel.r_offset = _bfd_elf_section_offset (output_bfd,
5740 info,
5741 input_section,
5742 rel->r_offset);
5743 if (outrel.r_offset == (bfd_vma) -1
5744 || outrel.r_offset == (bfd_vma) -2)
5745 abort ();
5746
5747 outrel.r_offset += (input_section->output_section->vma
5748 + input_section->output_offset);
5749
5750 if (h->dynindx == -1
5751 || h->forced_local
5752 || bfd_link_executable (info))
5753 {
5754 /* This symbol is resolved locally. */
5755 outrel.r_info = ELFNN_R_INFO (0, AARCH64_R (IRELATIVE));
5756 outrel.r_addend = (h->root.u.def.value
5757 + h->root.u.def.section->output_section->vma
5758 + h->root.u.def.section->output_offset);
5759 }
5760 else
5761 {
5762 outrel.r_info = ELFNN_R_INFO (h->dynindx, r_type);
5763 outrel.r_addend = 0;
5764 }
5765
5766 sreloc = globals->root.irelifunc;
5767 elf_append_rela (output_bfd, sreloc, &outrel);
5768
5769 /* If this reloc is against an external symbol, we
5770 do not want to fiddle with the addend. Otherwise,
5771 we need to include the symbol value so that it
5772 becomes an addend for the dynamic reloc. For an
5773 internal symbol, we have updated addend. */
5774 return bfd_reloc_ok;
5775 }
5776 /* FALLTHROUGH */
5777 case BFD_RELOC_AARCH64_CALL26:
5778 case BFD_RELOC_AARCH64_JUMP26:
5779 value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type,
5780 place, value,
5781 signed_addend,
5782 weak_undef_p);
5783 return _bfd_aarch64_elf_put_addend (input_bfd, hit_data, bfd_r_type,
5784 howto, value);
5785 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
5786 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
5787 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
5788 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
5789 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
5790 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
5791 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
5792 case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
5793 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
5794 base_got = globals->root.sgot;
5795 off = h->got.offset;
5796
5797 if (base_got == NULL)
5798 abort ();
5799
5800 if (off == (bfd_vma) -1)
5801 {
5802 bfd_vma plt_index;
5803
5804 /* We can't use h->got.offset here to save state, or
5805 even just remember the offset, as finish_dynamic_symbol
5806 would use that as offset into .got. */
5807
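/* Use the GOT slot belonging to the symbol's PLT entry instead.  The
   first three slots of .got.plt are reserved for the dynamic linker,
   hence the '+ 3' below; .igotplt has no such header.  */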
5808 if (globals->root.splt != NULL)
5809 {
5810 plt_index = ((h->plt.offset - globals->plt_header_size) /
5811 globals->plt_entry_size);
5812 off = (plt_index + 3) * GOT_ENTRY_SIZE;
5813 base_got = globals->root.sgotplt;
5814 }
5815 else
5816 {
5817 plt_index = h->plt.offset / globals->plt_entry_size;
5818 off = plt_index * GOT_ENTRY_SIZE;
5819 base_got = globals->root.igotplt;
5820 }
5821
5822 if (h->dynindx == -1
5823 || h->forced_local
5824 || info->symbolic)
5825 {
5826 /* This references the local definition. We must
5827 initialize this entry in the global offset table.
5828 Since the offset must always be a multiple of 8,
5829 we use the least significant bit to record
5830 whether we have initialized it already.
5831
5832 When doing a dynamic link, we create a .rela.got
5833 relocation entry to initialize the value. This
5834 is done in the finish_dynamic_symbol routine. */
5835 if ((off & 1) != 0)
5836 off &= ~1;
5837 else
5838 {
5839 bfd_put_NN (output_bfd, value,
5840 base_got->contents + off);
5841 /* Note that this is harmless as -1 | 1 still is -1. */
5842 h->got.offset |= 1;
5843 }
5844 }
5845 value = (base_got->output_section->vma
5846 + base_got->output_offset + off);
5847 }
5848 else
5849 value = aarch64_calculate_got_entry_vma (h, globals, info,
5850 value, output_bfd,
5851 unresolved_reloc_p);
5852
5853 if (aarch64_relocation_aginst_gp_p (bfd_r_type))
5854 addend = (globals->root.sgot->output_section->vma
5855 + globals->root.sgot->output_offset);
5856
5857 value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type,
5858 place, value,
5859 addend, weak_undef_p);
5860 return _bfd_aarch64_elf_put_addend (input_bfd, hit_data, bfd_r_type, howto, value);
5861 case BFD_RELOC_AARCH64_ADD_LO12:
5862 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
5863 break;
5864 }
5865 }
5866
5867 skip_ifunc:
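/* resolved_to_zero is TRUE for an undefined weak symbol that resolves to
   zero at run time and needs no dynamic relocation.  */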
5868 resolved_to_zero = (h != NULL
5869 && UNDEFWEAK_NO_DYNAMIC_RELOC (info, h));
5870
5871 switch (bfd_r_type)
5872 {
5873 case BFD_RELOC_AARCH64_NONE:
5874 case BFD_RELOC_AARCH64_TLSDESC_ADD:
5875 case BFD_RELOC_AARCH64_TLSDESC_CALL:
5876 case BFD_RELOC_AARCH64_TLSDESC_LDR:
5877 *unresolved_reloc_p = false;
5878 return bfd_reloc_ok;
5879
5880 case BFD_RELOC_AARCH64_NN:
5881
5882 /* When generating a shared object or relocatable executable, these
5883 relocations are copied into the output file to be resolved at
5884 run time. */
5885 if (((bfd_link_pic (info)
5886 || globals->root.is_relocatable_executable)
5887 && (input_section->flags & SEC_ALLOC)
5888 && (h == NULL
5889 || (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
5890 && !resolved_to_zero)
5891 || h->root.type != bfd_link_hash_undefweak))
5892 /* Or, when creating an executable, we may need to keep relocations
5893 for symbols satisfied by a dynamic library if we manage to avoid
5894 copy relocs for the symbol. */
5895 || (ELIMINATE_COPY_RELOCS
5896 && !bfd_link_pic (info)
5897 && h != NULL
5898 && (input_section->flags & SEC_ALLOC)
5899 && h->dynindx != -1
5900 && !h->non_got_ref
5901 && ((h->def_dynamic
5902 && !h->def_regular)
5903 || h->root.type == bfd_link_hash_undefweak
5904 || h->root.type == bfd_link_hash_undefined)))
5905 {
5906 Elf_Internal_Rela outrel;
5907 bfd_byte *loc;
5908 bool skip, relocate;
5909 asection *sreloc;
5910
5911 *unresolved_reloc_p = false;
5912
5913 skip = false;
5914 relocate = false;
5915
5916 outrel.r_addend = signed_addend;
5917 outrel.r_offset =
5918 _bfd_elf_section_offset (output_bfd, info, input_section,
5919 rel->r_offset);
5920 if (outrel.r_offset == (bfd_vma) - 1)
5921 skip = true;
5922 else if (outrel.r_offset == (bfd_vma) - 2)
5923 {
5924 skip = true;
5925 relocate = true;
5926 }
5927 else if (abs_symbol_p)
5928 {
5929 /* Local absolute symbol. */
5930 skip = (h->forced_local || (h->dynindx == -1));
5931 relocate = skip;
5932 }
5933
5934 outrel.r_offset += (input_section->output_section->vma
5935 + input_section->output_offset);
5936
5937 if (skip)
5938 memset (&outrel, 0, sizeof outrel);
5939 else if (h != NULL
5940 && h->dynindx != -1
5941 && (!bfd_link_pic (info)
5942 || !(bfd_link_pie (info) || SYMBOLIC_BIND (info, h))
5943 || !h->def_regular))
5944 outrel.r_info = ELFNN_R_INFO (h->dynindx, r_type);
5945 else
5946 {
5947 int symbol;
5948
5949 /* On SVR4-ish systems, the dynamic loader cannot
5950 relocate the text and data segments independently,
5951 so the symbol does not matter. */
5952 symbol = 0;
5953 relocate = !globals->no_apply_dynamic_relocs;
5954 outrel.r_info = ELFNN_R_INFO (symbol, AARCH64_R (RELATIVE));
5955 outrel.r_addend += value;
5956 }
5957
5958 sreloc = elf_section_data (input_section)->sreloc;
5959 if (sreloc == NULL || sreloc->contents == NULL)
5960 return bfd_reloc_notsupported;
5961
5962 loc = sreloc->contents + sreloc->reloc_count++ * RELOC_SIZE (globals);
5963 bfd_elfNN_swap_reloca_out (output_bfd, &outrel, loc);
5964
5965 if (sreloc->reloc_count * RELOC_SIZE (globals) > sreloc->size)
5966 {
5967 /* Sanity check that we have previously allocated
5968 sufficient space in the relocation section for the
5969 number of relocations we actually want to emit. */
5970 abort ();
5971 }
5972
5973 /* If this reloc is against an external symbol, we do not want to
5974 fiddle with the addend. Otherwise, we need to include the symbol
5975 value so that it becomes an addend for the dynamic reloc. */
5976 if (!relocate)
5977 return bfd_reloc_ok;
5978
5979 return _bfd_final_link_relocate (howto, input_bfd, input_section,
5980 contents, rel->r_offset, value,
5981 signed_addend);
5982 }
5983 else
5984 value += signed_addend;
5985 break;
5986
5987 case BFD_RELOC_AARCH64_CALL26:
5988 case BFD_RELOC_AARCH64_JUMP26:
5989 {
5990 asection *splt = globals->root.splt;
5991 bool via_plt_p =
5992 splt != NULL && h != NULL && h->plt.offset != (bfd_vma) - 1;
5993
5994 /* A call to an undefined weak symbol is converted to a jump to
5995 the next instruction unless a PLT entry will be created.
5996 The jump to the next instruction is optimized as a NOP.
5997 Do the same for local undefined symbols. */
5998 if (weak_undef_p && ! via_plt_p)
5999 {
6000 bfd_putl32 (INSN_NOP, hit_data);
6001 return bfd_reloc_ok;
6002 }
6003
6004 /* If the call goes through a PLT entry, make sure to
6005 check distance to the right destination address. */
6006 if (via_plt_p)
6007 value = (splt->output_section->vma
6008 + splt->output_offset + h->plt.offset);
6009
6010 /* Check if a stub has to be inserted because the destination
6011 is too far away. */
6012 struct elf_aarch64_stub_hash_entry *stub_entry = NULL;
6013
6014 /* If the branch destination is directed to a PLT stub, "value" is
6015 already the final destination; otherwise we must add signed_addend,
6016 which may be non-zero, e.g. for a call to a local function symbol
6017 that has been turned into "sec_sym + sec_off" with sec_off kept in
6018 signed_addend. */
6019 if (! aarch64_valid_branch_p (via_plt_p ? value : value + signed_addend,
6020 place))
6021 /* The target is out of reach, so redirect the branch to
6022 the local stub for this function. */
6023 stub_entry = elfNN_aarch64_get_stub_entry (input_section, sym_sec, h,
6024 rel, globals);
6025 if (stub_entry != NULL)
6026 {
6027 value = (stub_entry->stub_offset
6028 + stub_entry->stub_sec->output_offset
6029 + stub_entry->stub_sec->output_section->vma);
6030
6031 /* We have redirected the destination to stub entry address,
6032 so ignore any addend record in the original rela entry. */
6033 signed_addend = 0;
6034 }
6035 }
6036 value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type,
6037 place, value,
6038 signed_addend, weak_undef_p);
6039 *unresolved_reloc_p = false;
6040 break;
6041
6042 case BFD_RELOC_AARCH64_16_PCREL:
6043 case BFD_RELOC_AARCH64_32_PCREL:
6044 case BFD_RELOC_AARCH64_64_PCREL:
6045 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
6046 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
6047 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
6048 case BFD_RELOC_AARCH64_LD_LO19_PCREL:
6049 case BFD_RELOC_AARCH64_MOVW_PREL_G0:
6050 case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
6051 case BFD_RELOC_AARCH64_MOVW_PREL_G1:
6052 case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
6053 case BFD_RELOC_AARCH64_MOVW_PREL_G2:
6054 case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
6055 case BFD_RELOC_AARCH64_MOVW_PREL_G3:
6056 if (bfd_link_pic (info)
6057 && (input_section->flags & SEC_ALLOC) != 0
6058 && (input_section->flags & SEC_READONLY) != 0
6059 && !_bfd_elf_symbol_refs_local_p (h, info, 1))
6060 {
6061 int howto_index = bfd_r_type - BFD_RELOC_AARCH64_RELOC_START;
6062
6063 _bfd_error_handler
6064 /* xgettext:c-format */
6065 (_("%pB: relocation %s against symbol `%s' which may bind "
6066 "externally can not be used when making a shared object; "
6067 "recompile with -fPIC"),
6068 input_bfd, elfNN_aarch64_howto_table[howto_index].name,
6069 h->root.root.string);
6070 bfd_set_error (bfd_error_bad_value);
6071 return bfd_reloc_notsupported;
6072 }
6073 value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type,
6074 place, value,
6075 signed_addend,
6076 weak_undef_p);
6077 break;
6078
6079 case BFD_RELOC_AARCH64_BRANCH19:
6080 case BFD_RELOC_AARCH64_TSTBR14:
6081 if (h && h->root.type == bfd_link_hash_undefined)
6082 {
6083 _bfd_error_handler
6084 /* xgettext:c-format */
6085 (_("%pB: conditional branch to undefined symbol `%s' "
6086 "not allowed"), input_bfd, h->root.root.string);
6087 bfd_set_error (bfd_error_bad_value);
6088 return bfd_reloc_notsupported;
6089 }
6090 /* Fall through. */
6091
6092 case BFD_RELOC_AARCH64_16:
6093 #if ARCH_SIZE == 64
6094 case BFD_RELOC_AARCH64_32:
6095 #endif
6096 case BFD_RELOC_AARCH64_ADD_LO12:
6097 case BFD_RELOC_AARCH64_LDST128_LO12:
6098 case BFD_RELOC_AARCH64_LDST16_LO12:
6099 case BFD_RELOC_AARCH64_LDST32_LO12:
6100 case BFD_RELOC_AARCH64_LDST64_LO12:
6101 case BFD_RELOC_AARCH64_LDST8_LO12:
6102 case BFD_RELOC_AARCH64_MOVW_G0:
6103 case BFD_RELOC_AARCH64_MOVW_G0_NC:
6104 case BFD_RELOC_AARCH64_MOVW_G0_S:
6105 case BFD_RELOC_AARCH64_MOVW_G1:
6106 case BFD_RELOC_AARCH64_MOVW_G1_NC:
6107 case BFD_RELOC_AARCH64_MOVW_G1_S:
6108 case BFD_RELOC_AARCH64_MOVW_G2:
6109 case BFD_RELOC_AARCH64_MOVW_G2_NC:
6110 case BFD_RELOC_AARCH64_MOVW_G2_S:
6111 case BFD_RELOC_AARCH64_MOVW_G3:
6112 value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type,
6113 place, value,
6114 signed_addend, weak_undef_p);
6115 break;
6116
6117 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
6118 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
6119 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
6120 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
6121 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
6122 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
6123 case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
6124 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
6125 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
6126 if (globals->root.sgot == NULL)
6127 BFD_ASSERT (h != NULL);
6128
6129 relative_reloc = false;
6130 if (h != NULL)
6131 {
6132 bfd_vma addend = 0;
6133
6134 /* If a symbol is not dynamic and is not undefined weak, bind it
6135 locally and generate a RELATIVE relocation when linking PIC.
6136
6137 NOTE: one symbol may be referenced by several relocations; we
6138 should generate only one RELATIVE relocation for that symbol,
6139 so check the GOT offset mark first. */
6140 if (h->dynindx == -1
6141 && !h->forced_local
6142 && h->root.type != bfd_link_hash_undefweak
6143 && bfd_link_pic (info)
6144 && !symbol_got_offset_mark_p (input_bfd, h, r_symndx))
6145 relative_reloc = true;
6146
6147 value = aarch64_calculate_got_entry_vma (h, globals, info, value,
6148 output_bfd,
6149 unresolved_reloc_p);
6150 /* Record the GOT entry address which will be used when generating
6151 RELATIVE relocation. */
6152 if (relative_reloc)
6153 got_entry_addr = value;
6154
6155 if (aarch64_relocation_aginst_gp_p (bfd_r_type))
6156 addend = (globals->root.sgot->output_section->vma
6157 + globals->root.sgot->output_offset);
6158 value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type,
6159 place, value,
6160 addend, weak_undef_p);
6161 }
6162 else
6163 {
6164 bfd_vma addend = 0;
6165 struct elf_aarch64_local_symbol *locals
6166 = elf_aarch64_locals (input_bfd);
6167
6168 if (locals == NULL)
6169 {
6170 int howto_index = bfd_r_type - BFD_RELOC_AARCH64_RELOC_START;
6171 _bfd_error_handler
6172 /* xgettext:c-format */
6173 (_("%pB: local symbol descriptor table be NULL when applying "
6174 "relocation %s against local symbol"),
6175 input_bfd, elfNN_aarch64_howto_table[howto_index].name);
6176 abort ();
6177 }
6178
6179 off = symbol_got_offset (input_bfd, h, r_symndx);
6180 base_got = globals->root.sgot;
6181 got_entry_addr = (base_got->output_section->vma
6182 + base_got->output_offset + off);
6183
6184 if (!symbol_got_offset_mark_p (input_bfd, h, r_symndx))
6185 {
6186 bfd_put_64 (output_bfd, value, base_got->contents + off);
6187
6188 /* For a local symbol we have already applied the absolute
6189 relocation at static link time. For a shared library, however,
6190 the GOT entry content must also be adjusted by the shared object's
6191 runtime base address, so generate an R_AARCH64_RELATIVE reloc for
6192 the dynamic linker. */
6193 if (bfd_link_pic (info))
6194 relative_reloc = true;
6195
6196 symbol_got_offset_mark (input_bfd, h, r_symndx);
6197 }
6198
6199 /* Update the relocation value to the GOT entry address, as we have
6200 transformed the direct data access into an indirect access through the GOT. */
6201 value = got_entry_addr;
6202
6203 if (aarch64_relocation_aginst_gp_p (bfd_r_type))
6204 addend = base_got->output_section->vma + base_got->output_offset;
6205
6206 value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type,
6207 place, value,
6208 addend, weak_undef_p);
6209 }
6210
6211 if (relative_reloc)
6212 {
6213 asection *s;
6214 Elf_Internal_Rela outrel;
6215
6216 s = globals->root.srelgot;
6217 if (s == NULL)
6218 abort ();
6219
6220 outrel.r_offset = got_entry_addr;
6221 outrel.r_info = ELFNN_R_INFO (0, AARCH64_R (RELATIVE));
6222 outrel.r_addend = orig_value;
6223 elf_append_rela (output_bfd, s, &outrel);
6224 }
6225 break;
6226
6227 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
6228 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
6229 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
6230 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
6231 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
6232 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
6233 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
6234 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
6235 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
6236 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
6237 if (globals->root.sgot == NULL)
6238 return bfd_reloc_notsupported;
6239
6240 value = (symbol_got_offset (input_bfd, h, r_symndx)
6241 + globals->root.sgot->output_section->vma
6242 + globals->root.sgot->output_offset);
6243
6244 value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type,
6245 place, value,
6246 0, weak_undef_p);
6247 *unresolved_reloc_p = false;
6248 break;
6249
6250 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
6251 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
6252 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
6253 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
6254 if (globals->root.sgot == NULL)
6255 return bfd_reloc_notsupported;
6256
6257 value = symbol_got_offset (input_bfd, h, r_symndx);
6258 value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type,
6259 place, value,
6260 0, weak_undef_p);
6261 *unresolved_reloc_p = false;
6262 break;
6263
6264 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
6265 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
6266 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
6267 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
6268 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
6269 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
6270 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
6271 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
6272 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
6273 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
6274 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
6275 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
6276 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
6277 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
6278 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
6279 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
6280 {
6281 if (!(weak_undef_p || elf_hash_table (info)->tls_sec))
6282 {
6283 int howto_index = bfd_r_type - BFD_RELOC_AARCH64_RELOC_START;
6284 _bfd_error_handler
6285 /* xgettext:c-format */
6286 (_("%pB: TLS relocation %s against undefined symbol `%s'"),
6287 input_bfd, elfNN_aarch64_howto_table[howto_index].name,
6288 h->root.root.string);
6289 bfd_set_error (bfd_error_bad_value);
6290 return bfd_reloc_notsupported;
6291 }
6292
6293 bfd_vma def_value
6294 = weak_undef_p ? 0 : signed_addend - dtpoff_base (info);
6295 value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type,
6296 place, value,
6297 def_value, weak_undef_p);
6298 break;
6299 }
6300
6301 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
6302 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
6303 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
6304 case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
6305 case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
6306 case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
6307 case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
6308 case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
6309 case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
6310 case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
6311 case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
6312 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
6313 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
6314 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
6315 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
6316 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
6317 {
6318 if (!(weak_undef_p || elf_hash_table (info)->tls_sec))
6319 {
6320 int howto_index = bfd_r_type - BFD_RELOC_AARCH64_RELOC_START;
6321 _bfd_error_handler
6322 /* xgettext:c-format */
6323 (_("%pB: TLS relocation %s against undefined symbol `%s'"),
6324 input_bfd, elfNN_aarch64_howto_table[howto_index].name,
6325 h->root.root.string);
6326 bfd_set_error (bfd_error_bad_value);
6327 return bfd_reloc_notsupported;
6328 }
6329
6330 bfd_vma def_value
6331 = weak_undef_p ? 0 : signed_addend - tpoff_base (info);
6332 value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type,
6333 place, value,
6334 def_value, weak_undef_p);
6335 *unresolved_reloc_p = false;
6336 break;
6337 }
6338
6339 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
6340 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
6341 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
6342 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
6343 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
6344 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
6345 if (globals->root.sgot == NULL)
6346 return bfd_reloc_notsupported;
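/* TLSDESC GOT entries live in .got.plt, after the reserved jump-table
   slots, so the entry's address is its recorded offset plus the start of
   that region.  */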
6347 value = (symbol_tlsdesc_got_offset (input_bfd, h, r_symndx)
6348 + globals->root.sgotplt->output_section->vma
6349 + globals->root.sgotplt->output_offset
6350 + globals->sgotplt_jump_table_size);
6351
6352 value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type,
6353 place, value,
6354 0, weak_undef_p);
6355 *unresolved_reloc_p = false;
6356 break;
6357
6358 case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
6359 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
6360 if (globals->root.sgot == NULL)
6361 return bfd_reloc_notsupported;
6362
6363 value = (symbol_tlsdesc_got_offset (input_bfd, h, r_symndx)
6364 + globals->root.sgotplt->output_section->vma
6365 + globals->root.sgotplt->output_offset
6366 + globals->sgotplt_jump_table_size);
6367
6368 value -= (globals->root.sgot->output_section->vma
6369 + globals->root.sgot->output_offset);
6370
6371 value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type,
6372 place, value,
6373 0, weak_undef_p);
6374 *unresolved_reloc_p = false;
6375 break;
6376
6377 default:
6378 return bfd_reloc_notsupported;
6379 }
6380
6381 if (saved_addend)
6382 *saved_addend = value;
6383
6384 /* Only apply the final relocation in a sequence. */
6385 if (save_addend)
6386 return bfd_reloc_continue;
6387
6388 return _bfd_aarch64_elf_put_addend (input_bfd, hit_data, bfd_r_type,
6389 howto, value);
6390 }
6391
6392 /* LP64 and ILP32 operate on x- and w-registers respectively.
6393 The following definitions take into account the difference between
6394 the corresponding machine encodings. R means an x-register if the
6395 target arch is LP64, and a w-register if the target is ILP32. */
6396
6397 #if ARCH_SIZE == 64
6398 # define add_R0_R0 (0x91000000)
6399 # define add_R0_R0_R1 (0x8b000020)
6400 # define add_R0_R1 (0x91400020)
6401 # define ldr_R0 (0x58000000)
6402 # define ldr_R0_mask(i) (i & 0xffffffe0)
6403 # define ldr_R0_x0 (0xf9400000)
6404 # define ldr_hw_R0 (0xf2a00000)
6405 # define movk_R0 (0xf2800000)
6406 # define movz_R0 (0xd2a00000)
6407 # define movz_hw_R0 (0xd2c00000)
6408 #else /*ARCH_SIZE == 32 */
6409 # define add_R0_R0 (0x11000000)
6410 # define add_R0_R0_R1 (0x0b000020)
6411 # define add_R0_R1 (0x11400020)
6412 # define ldr_R0 (0x18000000)
6413 # define ldr_R0_mask(i) (i & 0xbfffffe0)
6414 # define ldr_R0_x0 (0xb9400000)
6415 # define ldr_hw_R0 (0x72a00000)
6416 # define movk_R0 (0x72800000)
6417 # define movz_R0 (0x52a00000)
6418 # define movz_hw_R0 (0x52c00000)
6419 #endif
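/* ldr_R0_mask clears the destination register field of a load so that it
   targets R0 while preserving the rest of the encoding; the ILP32 variant
   also clears bit 30 so the load becomes a 32-bit (w-register) access.  */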
6420
6421 /* Structure to hold payload for _bfd_aarch64_erratum_843419_clear_stub,
6422 it is used to identify the stub information to reset. */
6423
6424 struct erratum_843419_branch_to_stub_clear_data
6425 {
6426 bfd_vma adrp_offset;
6427 asection *output_section;
6428 };
6429
6430 /* Clear the erratum information for GEN_ENTRY if the ADRP_OFFSET and
6431 section inside IN_ARG matches. The clearing is done by setting the
6432 stub_type to none. */
6433
6434 static bool
6435 _bfd_aarch64_erratum_843419_clear_stub (struct bfd_hash_entry *gen_entry,
6436 void *in_arg)
6437 {
6438 struct elf_aarch64_stub_hash_entry *stub_entry
6439 = (struct elf_aarch64_stub_hash_entry *) gen_entry;
6440 struct erratum_843419_branch_to_stub_clear_data *data
6441 = (struct erratum_843419_branch_to_stub_clear_data *) in_arg;
6442
6443 if (stub_entry->target_section != data->output_section
6444 || stub_entry->stub_type != aarch64_stub_erratum_843419_veneer
6445 || stub_entry->adrp_offset != data->adrp_offset)
6446 return true;
6447
6448 /* Change the stub type instead of removing the entry: removing it from the
6449 hash table would be slower, and we have already reserved the memory for the
6450 entry, so there wouldn't be much gain. Changing the stub also keeps around
6451 a record of what was there before. */
6452 stub_entry->stub_type = aarch64_stub_none;
6453
6454 /* We're done and there could have been only one matching stub at that
6455 particular offset, so abort further traversal. */
6456 return false;
6457 }
6458
6459 /* TLS relaxation may rewrite an adrp sequence that matches the erratum 843419
6460 sequence. In this case the erratum no longer applies and we need to remove
6461 the entry from pending stub generation. This clears the matching adrp insn
6462 at ADRP_OFFSET in INPUT_SECTION in the stub table defined in GLOBALS. */
6463
6464 static void
6465 clear_erratum_843419_entry (struct elf_aarch64_link_hash_table *globals,
6466 bfd_vma adrp_offset, asection *input_section)
6467 {
6468 if (globals->fix_erratum_843419 & ERRAT_ADRP)
6469 {
6470 struct erratum_843419_branch_to_stub_clear_data data;
6471 data.adrp_offset = adrp_offset;
6472 data.output_section = input_section;
6473
6474 bfd_hash_traverse (&globals->stub_hash_table,
6475 _bfd_aarch64_erratum_843419_clear_stub, &data);
6476 }
6477 }
6478
6479 /* Handle TLS relaxations. Relaxing is possible for symbols that use
6480 R_AARCH64_TLSDESC_{ADR_PAGE21, LD64_LO12, ADD_LO12} during a static
6481 link.
6482
6483 Return bfd_reloc_ok if we're done, bfd_reloc_continue if the caller
6484 is to then call final_link_relocate. Return other values in the
6485 case of error. */
6486
6487 static bfd_reloc_status_type
6488 elfNN_aarch64_tls_relax (struct elf_aarch64_link_hash_table *globals,
6489 bfd *input_bfd, asection *input_section,
6490 bfd_byte *contents, Elf_Internal_Rela *rel,
6491 struct elf_link_hash_entry *h,
6492 struct bfd_link_info *info)
6493 {
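/* local_exec is true when the symbol's value is known to lie in the
   executable being linked, so the sequence can be relaxed all the way to
   the local-exec (LE) model; otherwise at most an initial-exec (IE) form
   is possible.  */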
6494 bool local_exec = bfd_link_executable (info)
6495 && SYMBOL_REFERENCES_LOCAL (info, h);
6496 unsigned int r_type = ELFNN_R_TYPE (rel->r_info);
6497 unsigned long insn;
6498
6499 BFD_ASSERT (globals && input_bfd && contents && rel);
6500
6501 switch (elfNN_aarch64_bfd_reloc_from_type (input_bfd, r_type))
6502 {
6503 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
6504 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
6505 if (local_exec)
6506 {
6507 /* GD->LE relaxation:
6508 adrp x0, :tlsgd:var => movz R0, :tprel_g1:var
6509 or
6510 adrp x0, :tlsdesc:var => movz R0, :tprel_g1:var
6511
6512 Where R is x for LP64, and w for ILP32. */
6513 bfd_putl32 (movz_R0, contents + rel->r_offset);
6514 /* We have relaxed the adrp into a mov, we may have to clear any
6515 pending erratum fixes. */
6516 clear_erratum_843419_entry (globals, rel->r_offset, input_section);
6517 return bfd_reloc_continue;
6518 }
6519 else
6520 {
6521 /* GD->IE relaxation:
6522 adrp x0, :tlsgd:var => adrp x0, :gottprel:var
6523 or
6524 adrp x0, :tlsdesc:var => adrp x0, :gottprel:var
6525 */
6526 return bfd_reloc_continue;
6527 }
6528
6529 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
6530 BFD_ASSERT (0);
6531 break;
6532
6533 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
6534 if (local_exec)
6535 {
6536 /* Tiny TLSDESC->LE relaxation:
6537 ldr x1, :tlsdesc:var => movz R0, #:tprel_g1:var
6538 adr x0, :tlsdesc:var => movk R0, #:tprel_g0_nc:var
6539 .tlsdesccall var
6540 blr x1 => nop
6541
6542 Where R is x for LP64, and w for ILP32. */
6543 BFD_ASSERT (ELFNN_R_TYPE (rel[1].r_info) == AARCH64_R (TLSDESC_ADR_PREL21));
6544 BFD_ASSERT (ELFNN_R_TYPE (rel[2].r_info) == AARCH64_R (TLSDESC_CALL));
6545
6546 rel[1].r_info = ELFNN_R_INFO (ELFNN_R_SYM (rel->r_info),
6547 AARCH64_R (TLSLE_MOVW_TPREL_G0_NC));
6548 rel[2].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
6549
6550 bfd_putl32 (movz_R0, contents + rel->r_offset);
6551 bfd_putl32 (movk_R0, contents + rel->r_offset + 4);
6552 bfd_putl32 (INSN_NOP, contents + rel->r_offset + 8);
6553 return bfd_reloc_continue;
6554 }
6555 else
6556 {
6557 /* Tiny TLSDESC->IE relaxation:
6558 ldr x1, :tlsdesc:var => ldr x0, :gottprel:var
6559 adr x0, :tlsdesc:var => nop
6560 .tlsdesccall var
6561 blr x1 => nop
6562 */
6563 BFD_ASSERT (ELFNN_R_TYPE (rel[1].r_info) == AARCH64_R (TLSDESC_ADR_PREL21));
6564 BFD_ASSERT (ELFNN_R_TYPE (rel[2].r_info) == AARCH64_R (TLSDESC_CALL));
6565
6566 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
6567 rel[2].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
6568
6569 bfd_putl32 (ldr_R0, contents + rel->r_offset);
6570 bfd_putl32 (INSN_NOP, contents + rel->r_offset + 4);
6571 bfd_putl32 (INSN_NOP, contents + rel->r_offset + 8);
6572 return bfd_reloc_continue;
6573 }
6574
6575 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
6576 if (local_exec)
6577 {
6578 /* Tiny GD->LE relaxation:
6579 adr x0, :tlsgd:var => mrs x1, tpidr_el0
6580 bl __tls_get_addr => add R0, R1, #:tprel_hi12:x, lsl #12
6581 nop => add R0, R0, #:tprel_lo12_nc:x
6582
6583 Where R is x for LP64, and w for ILP32. */
6584
6585 /* First kill the tls_get_addr reloc on the bl instruction. */
6586 BFD_ASSERT (rel->r_offset + 4 == rel[1].r_offset);
6587
6588 bfd_putl32 (0xd53bd041, contents + rel->r_offset + 0);
6589 bfd_putl32 (add_R0_R1, contents + rel->r_offset + 4);
6590 bfd_putl32 (add_R0_R0, contents + rel->r_offset + 8);
6591
6592 rel[1].r_info = ELFNN_R_INFO (ELFNN_R_SYM (rel->r_info),
6593 AARCH64_R (TLSLE_ADD_TPREL_LO12_NC));
6594 rel[1].r_offset = rel->r_offset + 8;
6595
6596 /* Move the current relocation to the second instruction in
6597 the sequence. */
6598 rel->r_offset += 4;
6599 rel->r_info = ELFNN_R_INFO (ELFNN_R_SYM (rel->r_info),
6600 AARCH64_R (TLSLE_ADD_TPREL_HI12));
6601 return bfd_reloc_continue;
6602 }
6603 else
6604 {
6605 /* Tiny GD->IE relaxation:
6606 adr x0, :tlsgd:var => ldr R0, :gottprel:var
6607 bl __tls_get_addr => mrs x1, tpidr_el0
6608 nop => add R0, R0, R1
6609
6610 Where R is x for LP64, and w for ILP32. */
6611
6612 /* First kill the tls_get_addr reloc on the bl instruction. */
6613 BFD_ASSERT (rel->r_offset + 4 == rel[1].r_offset);
6614 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
6615
6616 bfd_putl32 (ldr_R0, contents + rel->r_offset);
6617 bfd_putl32 (0xd53bd041, contents + rel->r_offset + 4);
6618 bfd_putl32 (add_R0_R0_R1, contents + rel->r_offset + 8);
6619 return bfd_reloc_continue;
6620 }
6621
6622 #if ARCH_SIZE == 64
6623 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
6624 BFD_ASSERT (ELFNN_R_TYPE (rel[1].r_info) == AARCH64_R (TLSGD_MOVW_G0_NC));
6625 BFD_ASSERT (rel->r_offset + 12 == rel[2].r_offset);
6626 BFD_ASSERT (ELFNN_R_TYPE (rel[2].r_info) == AARCH64_R (CALL26));
6627
6628 if (local_exec)
6629 {
6630 /* Large GD->LE relaxation:
6631 movz x0, #:tlsgd_g1:var => movz x0, #:tprel_g2:var, lsl #32
6632 movk x0, #:tlsgd_g0_nc:var => movk x0, #:tprel_g1_nc:var, lsl #16
6633 add x0, gp, x0 => movk x0, #:tprel_g0_nc:var
6634 bl __tls_get_addr => mrs x1, tpidr_el0
6635 nop => add x0, x0, x1
6636 */
6637 rel[2].r_info = ELFNN_R_INFO (ELFNN_R_SYM (rel->r_info),
6638 AARCH64_R (TLSLE_MOVW_TPREL_G0_NC));
6639 rel[2].r_offset = rel->r_offset + 8;
6640
6641 bfd_putl32 (movz_hw_R0, contents + rel->r_offset + 0);
6642 bfd_putl32 (ldr_hw_R0, contents + rel->r_offset + 4);
6643 bfd_putl32 (movk_R0, contents + rel->r_offset + 8);
6644 bfd_putl32 (0xd53bd041, contents + rel->r_offset + 12);
6645 bfd_putl32 (add_R0_R0_R1, contents + rel->r_offset + 16);
6646 }
6647 else
6648 {
6649 /* Large GD->IE relaxation:
6650 movz x0, #:tlsgd_g1:var => movz x0, #:gottprel_g1:var, lsl #16
6651 movk x0, #:tlsgd_g0_nc:var => movk x0, #:gottprel_g0_nc:var
6652 add x0, gp, x0 => ldr x0, [gp, x0]
6653 bl __tls_get_addr => mrs x1, tpidr_el0
6654 nop => add x0, x0, x1
6655 */
6656 rel[2].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
6657 bfd_putl32 (0xd2a80000, contents + rel->r_offset + 0);
6658 bfd_putl32 (ldr_R0, contents + rel->r_offset + 8);
6659 bfd_putl32 (0xd53bd041, contents + rel->r_offset + 12);
6660 bfd_putl32 (add_R0_R0_R1, contents + rel->r_offset + 16);
6661 }
6662 return bfd_reloc_continue;
6663
6664 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
6665 return bfd_reloc_continue;
6666 #endif
6667
6668 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
6669 return bfd_reloc_continue;
6670
6671 case BFD_RELOC_AARCH64_TLSDESC_LDNN_LO12_NC:
6672 if (local_exec)
6673 {
6674 /* GD->LE relaxation:
6675 ldr xd, [x0, #:tlsdesc_lo12:var] => movk x0, :tprel_g0_nc:var
6676
6677 Where R is x for lp64 mode, and w for ILP32 mode. */
6678 bfd_putl32 (movk_R0, contents + rel->r_offset);
6679 return bfd_reloc_continue;
6680 }
6681 else
6682 {
6683 /* GD->IE relaxation:
6684 ldr xd, [x0, #:tlsdesc_lo12:var] => ldr R0, [x0, #:gottprel_lo12:var]
6685
6686 Where R is x for lp64 mode, and w for ILP32 mode. */
6687 insn = bfd_getl32 (contents + rel->r_offset);
6688 bfd_putl32 (ldr_R0_mask (insn), contents + rel->r_offset);
6689 return bfd_reloc_continue;
6690 }
6691
6692 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
6693 if (local_exec)
6694 {
6695 /* GD->LE relaxation
6696 add x0, #:tlsgd_lo12:var => movk R0, :tprel_g0_nc:var
6697 bl __tls_get_addr => mrs x1, tpidr_el0
6698 nop => add R0, R1, R0
6699
6700 Where R is x for lp64 mode, and w for ILP32 mode. */
6701
6702 /* First kill the tls_get_addr reloc on the bl instruction. */
6703 BFD_ASSERT (rel->r_offset + 4 == rel[1].r_offset);
6704 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
6705
6706 bfd_putl32 (movk_R0, contents + rel->r_offset);
6707 bfd_putl32 (0xd53bd041, contents + rel->r_offset + 4);
6708 bfd_putl32 (add_R0_R0_R1, contents + rel->r_offset + 8);
6709 return bfd_reloc_continue;
6710 }
6711 else
6712 {
6713 /* GD->IE relaxation
6714 ADD x0, #:tlsgd_lo12:var => ldr R0, [x0, #:gottprel_lo12:var]
6715 BL __tls_get_addr => mrs x1, tpidr_el0
6716 R_AARCH64_CALL26
6717 NOP => add R0, R1, R0
6718
6719 Where R is x for lp64 mode, and w for ilp32 mode. */
6720
6721 BFD_ASSERT (ELFNN_R_TYPE (rel[1].r_info) == AARCH64_R (CALL26));
6722
6723 /* Remove the relocation on the BL instruction. */
6724 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
6725
6726 /* We choose to fixup the BL and NOP instructions using the
6727 offset from the second relocation to allow flexibility in
6728 scheduling instructions between the ADD and BL. */
6729 bfd_putl32 (ldr_R0_x0, contents + rel->r_offset);
6730 bfd_putl32 (0xd53bd041, contents + rel[1].r_offset);
6731 bfd_putl32 (add_R0_R0_R1, contents + rel[1].r_offset + 4);
6732 return bfd_reloc_continue;
6733 }
6734
6735 case BFD_RELOC_AARCH64_TLSDESC_ADD:
6736 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
6737 case BFD_RELOC_AARCH64_TLSDESC_CALL:
6738 /* GD->IE/LE relaxation:
6739 add x0, x0, #:tlsdesc_lo12:var => nop
6740 blr xd => nop
6741 */
6742 bfd_putl32 (INSN_NOP, contents + rel->r_offset);
6743 return bfd_reloc_ok;
6744
6745 case BFD_RELOC_AARCH64_TLSDESC_LDR:
6746 if (local_exec)
6747 {
6748 /* GD->LE relaxation:
6749 ldr xd, [gp, xn] => movk R0, #:tprel_g0_nc:var
6750
6751 Where R is x for lp64 mode, and w for ILP32 mode. */
6752 bfd_putl32 (movk_R0, contents + rel->r_offset);
6753 return bfd_reloc_continue;
6754 }
6755 else
6756 {
6757 /* GD->IE relaxation:
6758 ldr xd, [gp, xn] => ldr R0, [gp, xn]
6759
6760 Where R is x for lp64 mode, and w for ILP32 mode. */
6761 insn = bfd_getl32 (contents + rel->r_offset);
6762 bfd_putl32 (ldr_R0_mask (insn), contents + rel->r_offset);
6763 return bfd_reloc_ok;
6764 }
6765
6766 case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
6767 /* GD->LE relaxation:
6768 movk xd, #:tlsdesc_off_g0_nc:var => movk R0, #:tprel_g1_nc:var, lsl #16
6769 GD->IE relaxation:
6770 movk xd, #:tlsdesc_off_g0_nc:var => movk Rd, #:gottprel_g0_nc:var
6771
6772 Where R is x for lp64 mode, and w for ILP32 mode. */
6773 if (local_exec)
6774 bfd_putl32 (ldr_hw_R0, contents + rel->r_offset);
6775 return bfd_reloc_continue;
6776
6777 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
6778 if (local_exec)
6779 {
6780 /* GD->LE relaxation:
6781 movz xd, #:tlsdesc_off_g1:var => movz R0, #:tprel_g2:var, lsl #32
6782
6783 Where R is x for lp64 mode, and w for ILP32 mode. */
6784 bfd_putl32 (movz_hw_R0, contents + rel->r_offset);
6785 return bfd_reloc_continue;
6786 }
6787 else
6788 {
6789 /* GD->IE relaxation:
6790 movz xd, #:tlsdesc_off_g1:var => movz Rd, #:gottprel_g1:var, lsl #16
6791
6792 Where R is x for lp64 mode, and w for ILP32 mode. */
6793 insn = bfd_getl32 (contents + rel->r_offset);
6794 bfd_putl32 (movz_R0 | (insn & 0x1f), contents + rel->r_offset);
6795 return bfd_reloc_continue;
6796 }
6797
6798 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
6799 /* IE->LE relaxation:
6800 adrp xd, :gottprel:var => movz Rd, :tprel_g1:var
6801
6802 Where R is x for lp64 mode, and w for ILP32 mode. */
6803 if (local_exec)
6804 {
6805 insn = bfd_getl32 (contents + rel->r_offset);
6806 bfd_putl32 (movz_R0 | (insn & 0x1f), contents + rel->r_offset);
6807 /* We have relaxed the adrp into a mov, we may have to clear any
6808 pending erratum fixes. */
6809 clear_erratum_843419_entry (globals, rel->r_offset, input_section);
6810 }
6811 return bfd_reloc_continue;
6812
6813 case BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC:
6814 /* IE->LE relaxation:
6815 ldr xd, [xm, #:gottprel_lo12:var] => movk Rd, :tprel_g0_nc:var
6816
6817 Where R is x for lp64 mode, and w for ILP32 mode. */
6818 if (local_exec)
6819 {
6820 insn = bfd_getl32 (contents + rel->r_offset);
6821 bfd_putl32 (movk_R0 | (insn & 0x1f), contents + rel->r_offset);
6822 }
6823 return bfd_reloc_continue;
6824
6825 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
6826 /* LD->LE relaxation (tiny):
6827 adr x0, :tlsldm:x => mrs x0, tpidr_el0
6828 bl __tls_get_addr => add R0, R0, TCB_SIZE
6829
6830 Where R is x for lp64 mode, and w for ilp32 mode. */
6831 if (local_exec)
6832 {
6833 BFD_ASSERT (rel->r_offset + 4 == rel[1].r_offset);
6834 BFD_ASSERT (ELFNN_R_TYPE (rel[1].r_info) == AARCH64_R (CALL26));
6835 /* No need of CALL26 relocation for tls_get_addr. */
6836 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
6837 bfd_putl32 (0xd53bd040, contents + rel->r_offset + 0);
6838 bfd_putl32 (add_R0_R0 | (TCB_SIZE << 10),
6839 contents + rel->r_offset + 4);
6840 return bfd_reloc_ok;
6841 }
6842 return bfd_reloc_continue;
6843
6844 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
6845 /* LD->LE relaxation (small):
6846 adrp x0, :tlsldm:x => mrs x0, tpidr_el0
6847 */
6848 if (local_exec)
6849 {
6850 bfd_putl32 (0xd53bd040, contents + rel->r_offset);
6851 return bfd_reloc_ok;
6852 }
6853 return bfd_reloc_continue;
6854
6855 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
6856 /* LD->LE relaxation (small):
6857 add x0, #:tlsldm_lo12:x => add R0, R0, TCB_SIZE
6858 bl __tls_get_addr => nop
6859
6860 Where R is x for lp64 mode, and w for ilp32 mode. */
6861 if (local_exec)
6862 {
6863 BFD_ASSERT (rel->r_offset + 4 == rel[1].r_offset);
6864 BFD_ASSERT (ELFNN_R_TYPE (rel[1].r_info) == AARCH64_R (CALL26));
6865 /* No need of CALL26 relocation for tls_get_addr. */
6866 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
6867 bfd_putl32 (add_R0_R0 | (TCB_SIZE << 10),
6868 contents + rel->r_offset + 0);
6869 bfd_putl32 (INSN_NOP, contents + rel->r_offset + 4);
6870 return bfd_reloc_ok;
6871 }
6872 return bfd_reloc_continue;
6873
6874 default:
6875 return bfd_reloc_continue;
6876 }
6877
6878 return bfd_reloc_ok;
6879 }
6880
6881 /* Relocate an AArch64 ELF section. */
6882
6883 static int
6884 elfNN_aarch64_relocate_section (bfd *output_bfd,
6885 struct bfd_link_info *info,
6886 bfd *input_bfd,
6887 asection *input_section,
6888 bfd_byte *contents,
6889 Elf_Internal_Rela *relocs,
6890 Elf_Internal_Sym *local_syms,
6891 asection **local_sections)
6892 {
6893 Elf_Internal_Shdr *symtab_hdr;
6894 struct elf_link_hash_entry **sym_hashes;
6895 Elf_Internal_Rela *rel;
6896 Elf_Internal_Rela *relend;
6897 const char *name;
6898 struct elf_aarch64_link_hash_table *globals;
6899 bool save_addend = false;
6900 bfd_vma addend = 0;
6901
6902 globals = elf_aarch64_hash_table (info);
6903
6904 symtab_hdr = &elf_symtab_hdr (input_bfd);
6905 sym_hashes = elf_sym_hashes (input_bfd);
6906
6907 rel = relocs;
6908 relend = relocs + input_section->reloc_count;
6909 for (; rel < relend; rel++)
6910 {
6911 unsigned int r_type;
6912 bfd_reloc_code_real_type bfd_r_type;
6913 bfd_reloc_code_real_type relaxed_bfd_r_type;
6914 reloc_howto_type *howto;
6915 unsigned long r_symndx;
6916 Elf_Internal_Sym *sym;
6917 asection *sec;
6918 struct elf_link_hash_entry *h;
6919 bfd_vma relocation;
6920 bfd_reloc_status_type r;
6921 arelent bfd_reloc;
6922 char sym_type;
6923 bool unresolved_reloc = false;
6924 char *error_message = NULL;
6925
6926 r_symndx = ELFNN_R_SYM (rel->r_info);
6927 r_type = ELFNN_R_TYPE (rel->r_info);
6928
6929 bfd_reloc.howto = elfNN_aarch64_howto_from_type (input_bfd, r_type);
6930 howto = bfd_reloc.howto;
6931
6932 if (howto == NULL)
6933 return _bfd_unrecognized_reloc (input_bfd, input_section, r_type);
6934
6935 bfd_r_type = elfNN_aarch64_bfd_reloc_from_howto (howto);
6936
6937 h = NULL;
6938 sym = NULL;
6939 sec = NULL;
6940
6941 if (r_symndx < symtab_hdr->sh_info)
6942 {
6943 sym = local_syms + r_symndx;
6944 sym_type = ELFNN_ST_TYPE (sym->st_info);
6945 sec = local_sections[r_symndx];
6946
6947 /* An object file might have a reference to a local
6948 undefined symbol. This is a daft object file, but we
6949 should at least do something about it. NONE and NULL
6950 relocations do not use the symbol and are explicitly
6951 allowed to use an undefined one, so allow those.
6952 Likewise for relocations against STN_UNDEF. */
6953 if (r_type != R_AARCH64_NONE && r_type != R_AARCH64_NULL
6954 && r_symndx != STN_UNDEF
6955 && bfd_is_und_section (sec)
6956 && ELF_ST_BIND (sym->st_info) != STB_WEAK)
6957 (*info->callbacks->undefined_symbol)
6958 (info, bfd_elf_string_from_elf_section
6959 (input_bfd, symtab_hdr->sh_link, sym->st_name),
6960 input_bfd, input_section, rel->r_offset, true);
6961
6962 relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
6963
6964 /* Relocate against local STT_GNU_IFUNC symbol. */
6965 if (!bfd_link_relocatable (info)
6966 && ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC)
6967 {
6968 h = elfNN_aarch64_get_local_sym_hash (globals, input_bfd,
6969 rel, false);
6970 if (h == NULL)
6971 abort ();
6972
6973 /* Set STT_GNU_IFUNC symbol value. */
6974 h->root.u.def.value = sym->st_value;
6975 h->root.u.def.section = sec;
6976 }
6977 }
6978 else
6979 {
6980 bool warned, ignored;
6981
6982 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
6983 r_symndx, symtab_hdr, sym_hashes,
6984 h, sec, relocation,
6985 unresolved_reloc, warned, ignored);
6986
6987 sym_type = h->type;
6988 }
6989
6990 if (sec != NULL && discarded_section (sec))
6991 RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
6992 rel, 1, relend, howto, 0, contents);
6993
6994 if (bfd_link_relocatable (info))
6995 continue;
6996
6997 if (h != NULL)
6998 name = h->root.root.string;
6999 else
7000 {
7001 name = (bfd_elf_string_from_elf_section
7002 (input_bfd, symtab_hdr->sh_link, sym->st_name));
7003 if (name == NULL || *name == '\0')
7004 name = bfd_section_name (sec);
7005 }
7006
7007 if (r_symndx != 0
7008 && r_type != R_AARCH64_NONE
7009 && r_type != R_AARCH64_NULL
7010 && (h == NULL
7011 || h->root.type == bfd_link_hash_defined
7012 || h->root.type == bfd_link_hash_defweak)
7013 && IS_AARCH64_TLS_RELOC (bfd_r_type) != (sym_type == STT_TLS))
7014 {
7015 _bfd_error_handler
7016 ((sym_type == STT_TLS
7017 /* xgettext:c-format */
7018 ? _("%pB(%pA+%#" PRIx64 "): %s used with TLS symbol %s")
7019 /* xgettext:c-format */
7020 : _("%pB(%pA+%#" PRIx64 "): %s used with non-TLS symbol %s")),
7021 input_bfd,
7022 input_section, (uint64_t) rel->r_offset, howto->name, name);
7023 }
7024
7025 /* We relax only if we can see that there can be a valid transition
7026 from one reloc type to another.
7027 We call elfNN_aarch64_final_link_relocate unless we're completely
7028 done, i.e., the relaxation produced the final output we want. */
7029
7030 relaxed_bfd_r_type = aarch64_tls_transition (input_bfd, info, r_type,
7031 h, r_symndx);
7032 if (relaxed_bfd_r_type != bfd_r_type)
7033 {
7034 bfd_r_type = relaxed_bfd_r_type;
7035 howto = elfNN_aarch64_howto_from_bfd_reloc (bfd_r_type);
7036 BFD_ASSERT (howto != NULL);
7037 r_type = howto->type;
7038 r = elfNN_aarch64_tls_relax (globals, input_bfd, input_section,
7039 contents, rel, h, info);
7040 unresolved_reloc = 0;
7041 }
7042 else
7043 r = bfd_reloc_continue;
7044
7045 /* There may be multiple consecutive relocations for the
7046 same offset. In that case we are supposed to treat the
7047 output of each relocation as the addend for the next. */
7048 if (rel + 1 < relend
7049 && rel->r_offset == rel[1].r_offset
7050 && ELFNN_R_TYPE (rel[1].r_info) != R_AARCH64_NONE
7051 && ELFNN_R_TYPE (rel[1].r_info) != R_AARCH64_NULL)
7052 save_addend = true;
7053 else
7054 save_addend = false;
7055
7056 if (r == bfd_reloc_continue)
7057 r = elfNN_aarch64_final_link_relocate (howto, input_bfd, output_bfd,
7058 input_section, contents, rel,
7059 relocation, info, sec,
7060 h, &unresolved_reloc,
7061 save_addend, &addend, sym);
7062
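  /* With the relocation itself applied, initialize any GOT slots and emit
  the dynamic relocations required by the TLS access model in use.  Each
  GOT offset is marked once processed, so an entry shared by several
  relocs is only initialized once.  */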
7063 switch (elfNN_aarch64_bfd_reloc_from_type (input_bfd, r_type))
7064 {
7065 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
7066 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
7067 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
7068 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
7069 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
7070 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
7071 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
7072 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
7073 if (! symbol_got_offset_mark_p (input_bfd, h, r_symndx))
7074 {
7075 bool need_relocs = false;
7076 bfd_byte *loc;
7077 int indx;
7078 bfd_vma off;
7079
7080 off = symbol_got_offset (input_bfd, h, r_symndx);
7081 indx = h && h->dynindx != -1 ? h->dynindx : 0;
7082
7083 need_relocs =
7084 (!bfd_link_executable (info) || indx != 0) &&
7085 (h == NULL
7086 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
7087 || h->root.type != bfd_link_hash_undefweak);
7088
7089 BFD_ASSERT (globals->root.srelgot != NULL);
7090
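  /* A GD/LD GOT entry is a pair of slots.  When dynamic relocs are
  needed, the first slot gets a TLS_DTPMOD reloc and the second (at
  OFF + GOT_ENTRY_SIZE) gets either a known DTPREL value, zero for
  local dynamic, or its own TLS_DTPREL reloc.  Otherwise both values
  are known at link time and are stored directly below.  */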
7091 if (need_relocs)
7092 {
7093 Elf_Internal_Rela rela;
7094 rela.r_info = ELFNN_R_INFO (indx, AARCH64_R (TLS_DTPMOD));
7095 rela.r_addend = 0;
7096 rela.r_offset = globals->root.sgot->output_section->vma +
7097 globals->root.sgot->output_offset + off;
7098
7099
7100 loc = globals->root.srelgot->contents;
7101 loc += globals->root.srelgot->reloc_count++
7102 * RELOC_SIZE (htab);
7103 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
7104
7105 bfd_reloc_code_real_type real_type =
7106 elfNN_aarch64_bfd_reloc_from_type (input_bfd, r_type);
7107
7108 if (real_type == BFD_RELOC_AARCH64_TLSLD_ADR_PREL21
7109 || real_type == BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21
7110 || real_type == BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC)
7111 {
7112 /* For local dynamic, don't generate DTPREL in any case.
7113 Initialize the DTPREL slot to zero, so that we get the module
7114 base address when the runtime TLS resolver is invoked. */
7115 bfd_put_NN (output_bfd, 0,
7116 globals->root.sgot->contents + off
7117 + GOT_ENTRY_SIZE);
7118 }
7119 else if (indx == 0)
7120 {
7121 bfd_put_NN (output_bfd,
7122 relocation - dtpoff_base (info),
7123 globals->root.sgot->contents + off
7124 + GOT_ENTRY_SIZE);
7125 }
7126 else
7127 {
7128 /* This TLS symbol is global. We emit a
7129 relocation to fix up the TLS offset at load
7130 time. */
7131 rela.r_info =
7132 ELFNN_R_INFO (indx, AARCH64_R (TLS_DTPREL));
7133 rela.r_addend = 0;
7134 rela.r_offset =
7135 (globals->root.sgot->output_section->vma
7136 + globals->root.sgot->output_offset + off
7137 + GOT_ENTRY_SIZE);
7138
7139 loc = globals->root.srelgot->contents;
7140 loc += globals->root.srelgot->reloc_count++
7141 * RELOC_SIZE (globals);
7142 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
7143 bfd_put_NN (output_bfd, (bfd_vma) 0,
7144 globals->root.sgot->contents + off
7145 + GOT_ENTRY_SIZE);
7146 }
7147 }
7148 else
7149 {
7150 bfd_put_NN (output_bfd, (bfd_vma) 1,
7151 globals->root.sgot->contents + off);
7152 bfd_put_NN (output_bfd,
7153 relocation - dtpoff_base (info),
7154 globals->root.sgot->contents + off
7155 + GOT_ENTRY_SIZE);
7156 }
7157
7158 symbol_got_offset_mark (input_bfd, h, r_symndx);
7159 }
7160 break;
7161
7162 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
7163 case BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC:
7164 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
7165 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
7166 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
7167 if (! symbol_got_offset_mark_p (input_bfd, h, r_symndx))
7168 {
7169 bool need_relocs = false;
7170 bfd_byte *loc;
7171 int indx;
7172 bfd_vma off;
7173
7174 off = symbol_got_offset (input_bfd, h, r_symndx);
7175
7176 indx = h && h->dynindx != -1 ? h->dynindx : 0;
7177
7178 need_relocs =
7179 (!bfd_link_executable (info) || indx != 0) &&
7180 (h == NULL
7181 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
7182 || h->root.type != bfd_link_hash_undefweak);
7183
7184 BFD_ASSERT (globals->root.srelgot != NULL);
7185
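  /* An IE GOT entry holds the thread-pointer-relative offset of the
  variable: emit a TLS_TPREL dynamic reloc when the value has to be
  computed at load time, otherwise store the static TP offset
  directly.  */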
7186 if (need_relocs)
7187 {
7188 Elf_Internal_Rela rela;
7189
7190 if (indx == 0)
7191 rela.r_addend = relocation - dtpoff_base (info);
7192 else
7193 rela.r_addend = 0;
7194
7195 rela.r_info = ELFNN_R_INFO (indx, AARCH64_R (TLS_TPREL));
7196 rela.r_offset = globals->root.sgot->output_section->vma +
7197 globals->root.sgot->output_offset + off;
7198
7199 loc = globals->root.srelgot->contents;
7200 loc += globals->root.srelgot->reloc_count++
7201 * RELOC_SIZE (htab);
7202
7203 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
7204
7205 bfd_put_NN (output_bfd, rela.r_addend,
7206 globals->root.sgot->contents + off);
7207 }
7208 else
7209 bfd_put_NN (output_bfd, relocation - tpoff_base (info),
7210 globals->root.sgot->contents + off);
7211
7212 symbol_got_offset_mark (input_bfd, h, r_symndx);
7213 }
7214 break;
7215
7216 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
7217 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
7218 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
7219 case BFD_RELOC_AARCH64_TLSDESC_LDNN_LO12_NC:
7220 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
7221 case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
7222 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
7223 if (! symbol_tlsdesc_got_offset_mark_p (input_bfd, h, r_symndx))
7224 {
7225 bool need_relocs = false;
7226 int indx = h && h->dynindx != -1 ? h->dynindx : 0;
7227 bfd_vma off = symbol_tlsdesc_got_offset (input_bfd, h, r_symndx);
7228
7229 need_relocs = (h == NULL
7230 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
7231 || h->root.type != bfd_link_hash_undefweak);
7232
7233 BFD_ASSERT (globals->root.srelgot != NULL);
7234 BFD_ASSERT (globals->root.sgot != NULL);
7235
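  /* TLS descriptors live in .got.plt, after the reserved jump-table
  slots.  Both words of the descriptor are zeroed here and an
  R_AARCH64_TLSDESC reloc is emitted so that the dynamic linker can
  fill them in at load time (or lazily).  */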
7236 if (need_relocs)
7237 {
7238 bfd_byte *loc;
7239 Elf_Internal_Rela rela;
7240 rela.r_info = ELFNN_R_INFO (indx, AARCH64_R (TLSDESC));
7241
7242 rela.r_addend = 0;
7243 rela.r_offset = (globals->root.sgotplt->output_section->vma
7244 + globals->root.sgotplt->output_offset
7245 + off + globals->sgotplt_jump_table_size);
7246
7247 if (indx == 0)
7248 rela.r_addend = relocation - dtpoff_base (info);
7249
7250 /* Allocate the next available slot in the PLT reloc
7251 section to hold our R_AARCH64_TLSDESC; the next
7252 available slot is determined from reloc_count,
7253 which we step. But note, reloc_count was
7254 artificially moved down while allocating slots for
7255 real PLT relocs such that all of the PLT relocs
7256 will fit above the initial reloc_count and the
7257 extra stuff will fit below. */
7258 loc = globals->root.srelplt->contents;
7259 loc += globals->root.srelplt->reloc_count++
7260 * RELOC_SIZE (globals);
7261
7262 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
7263
7264 bfd_put_NN (output_bfd, (bfd_vma) 0,
7265 globals->root.sgotplt->contents + off +
7266 globals->sgotplt_jump_table_size);
7267 bfd_put_NN (output_bfd, (bfd_vma) 0,
7268 globals->root.sgotplt->contents + off +
7269 globals->sgotplt_jump_table_size +
7270 GOT_ENTRY_SIZE);
7271 }
7272
7273 symbol_tlsdesc_got_offset_mark (input_bfd, h, r_symndx);
7274 }
7275 break;
7276 default:
7277 break;
7278 }
7279
7280 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
7281 because such sections are not SEC_ALLOC and thus ld.so will
7282 not process them. */
7283 if (unresolved_reloc
7284 && !((input_section->flags & SEC_DEBUGGING) != 0
7285 && h->def_dynamic)
7286 && _bfd_elf_section_offset (output_bfd, info, input_section,
7287 +rel->r_offset) != (bfd_vma) - 1)
7288 {
7289 _bfd_error_handler
7290 /* xgettext:c-format */
7291 (_("%pB(%pA+%#" PRIx64 "): "
7292 "unresolvable %s relocation against symbol `%s'"),
7293 input_bfd, input_section, (uint64_t) rel->r_offset, howto->name,
7294 h->root.root.string);
7295 return false;
7296 }
7297
7298 if (r != bfd_reloc_ok && r != bfd_reloc_continue)
7299 {
7300 bfd_reloc_code_real_type real_r_type
7301 = elfNN_aarch64_bfd_reloc_from_type (input_bfd, r_type);
7302
7303 switch (r)
7304 {
7305 case bfd_reloc_overflow:
7306 (*info->callbacks->reloc_overflow)
7307 (info, (h ? &h->root : NULL), name, howto->name, (bfd_vma) 0,
7308 input_bfd, input_section, rel->r_offset);
7309 if (real_r_type == BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15
7310 || real_r_type == BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14)
7311 {
7312 (*info->callbacks->warning)
7313 (info,
7314 _("too many GOT entries for -fpic, "
7315 "please recompile with -fPIC"),
7316 name, input_bfd, input_section, rel->r_offset);
7317 return false;
7318 }
7319 /* Overflow can occur when a variable is referenced with a type
7320 that has a larger alignment than the type with which it was
7321 declared, e.g.:
7322 file1.c: extern int foo; int a (void) { return foo; }
7323 file2.c: char bar, foo, baz;
7324 If the variable is placed into a data section at an offset
7325 that is incompatible with the larger alignment requirement
7326 overflow will occur. (Strictly speaking this is not overflow
7327 but rather an alignment problem, but the bfd_reloc_ error
7328 enum does not have a value to cover that situation).
7329
7330 Try to catch this situation here and provide a more helpful
7331 error message to the user. */
7332 if (addend & (((bfd_vma) 1 << howto->rightshift) - 1)
7333 /* FIXME: Are we testing all of the appropriate reloc
7334 types here ? */
7335 && (real_r_type == BFD_RELOC_AARCH64_LD_LO19_PCREL
7336 || real_r_type == BFD_RELOC_AARCH64_LDST16_LO12
7337 || real_r_type == BFD_RELOC_AARCH64_LDST32_LO12
7338 || real_r_type == BFD_RELOC_AARCH64_LDST64_LO12
7339 || real_r_type == BFD_RELOC_AARCH64_LDST128_LO12))
7340 {
7341 info->callbacks->warning
7342 (info, _("one possible cause of this error is that the \
7343 symbol is being referenced in the indicated code as if it had a larger \
7344 alignment than was declared where it was defined"),
7345 name, input_bfd, input_section, rel->r_offset);
7346 }
7347 break;
7348
7349 case bfd_reloc_undefined:
7350 (*info->callbacks->undefined_symbol)
7351 (info, name, input_bfd, input_section, rel->r_offset, true);
7352 break;
7353
7354 case bfd_reloc_outofrange:
7355 error_message = _("out of range");
7356 goto common_error;
7357
7358 case bfd_reloc_notsupported:
7359 error_message = _("unsupported relocation");
7360 goto common_error;
7361
7362 case bfd_reloc_dangerous:
7363 /* error_message should already be set. */
7364 goto common_error;
7365
7366 default:
7367 error_message = _("unknown error");
7368 /* Fall through. */
7369
7370 common_error:
7371 BFD_ASSERT (error_message != NULL);
7372 (*info->callbacks->reloc_dangerous)
7373 (info, error_message, input_bfd, input_section, rel->r_offset);
7374 break;
7375 }
7376 }
7377
7378 if (!save_addend)
7379 addend = 0;
7380 }
7381
7382 return true;
7383 }
7384
7385 /* Set the right machine number. */
7386
7387 static bool
7388 elfNN_aarch64_object_p (bfd *abfd)
7389 {
7390 #if ARCH_SIZE == 32
7391 bfd_default_set_arch_mach (abfd, bfd_arch_aarch64, bfd_mach_aarch64_ilp32);
7392 #else
7393 bfd_default_set_arch_mach (abfd, bfd_arch_aarch64, bfd_mach_aarch64);
7394 #endif
7395 return true;
7396 }
7397
7398 /* Function to keep AArch64 specific flags in the ELF header. */
7399
7400 static bool
7401 elfNN_aarch64_set_private_flags (bfd *abfd, flagword flags)
7402 {
7403 if (elf_flags_init (abfd) && elf_elfheader (abfd)->e_flags != flags)
7404 {
7405 }
7406 else
7407 {
7408 elf_elfheader (abfd)->e_flags = flags;
7409 elf_flags_init (abfd) = true;
7410 }
7411
7412 return true;
7413 }
7414
7415 /* Merge backend specific data from an object file to the output
7416 object file when linking. */
7417
7418 static bool
7419 elfNN_aarch64_merge_private_bfd_data (bfd *ibfd, struct bfd_link_info *info)
7420 {
7421 bfd *obfd = info->output_bfd;
7422 flagword out_flags;
7423 flagword in_flags;
7424 bool flags_compatible = true;
7425 asection *sec;
7426
7427 /* Check if we have the same endianness. */
7428 if (!_bfd_generic_verify_endian_match (ibfd, info))
7429 return false;
7430
7431 if (!is_aarch64_elf (ibfd) || !is_aarch64_elf (obfd))
7432 return true;
7433
7434 /* The input BFD must have had its flags initialised. */
7435 /* The following seems bogus to me -- The flags are initialized in
7436 the assembler but I don't think an elf_flags_init field is
7437 written into the object. */
7438 /* BFD_ASSERT (elf_flags_init (ibfd)); */
7439
7440 in_flags = elf_elfheader (ibfd)->e_flags;
7441 out_flags = elf_elfheader (obfd)->e_flags;
7442
7443 if (!elf_flags_init (obfd))
7444 {
7445 /* If the input is the default architecture and had the default
7446 flags then do not bother setting the flags for the output
7447 architecture; instead allow future merges to do this. If no
7448 future merges ever set these flags then they will retain their
7449 uninitialised values, which, surprise surprise, correspond
7450 to the default values. */
7451 if (bfd_get_arch_info (ibfd)->the_default
7452 && elf_elfheader (ibfd)->e_flags == 0)
7453 return true;
7454
7455 elf_flags_init (obfd) = true;
7456 elf_elfheader (obfd)->e_flags = in_flags;
7457
7458 if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
7459 && bfd_get_arch_info (obfd)->the_default)
7460 return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd),
7461 bfd_get_mach (ibfd));
7462
7463 return true;
7464 }
7465
7466 /* Identical flags must be compatible. */
7467 if (in_flags == out_flags)
7468 return true;
7469
7470 /* Check to see if the input BFD actually contains any sections. If
7471 not, its flags may not have been initialised either, but it
7472 cannot actually cause any incompatibility. Do not short-circuit
7473 dynamic objects; their section list may be emptied by
7474 elf_link_add_object_symbols.
7475
7476 Also check to see if there are no code sections in the input.
7477 In this case there is no need to check for code specific flags.
7478 XXX - do we need to worry about floating-point format compatibility
7479 in data sections? */
7480 if (!(ibfd->flags & DYNAMIC))
7481 {
7482 bool null_input_bfd = true;
7483 bool only_data_sections = true;
7484
7485 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
7486 {
7487 if ((bfd_section_flags (sec)
7488 & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
7489 == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
7490 only_data_sections = false;
7491
7492 null_input_bfd = false;
7493 break;
7494 }
7495
7496 if (null_input_bfd || only_data_sections)
7497 return true;
7498 }
7499
7500 return flags_compatible;
7501 }
7502
7503 /* Display the flags field. */
7504
7505 static bool
7506 elfNN_aarch64_print_private_bfd_data (bfd *abfd, void *ptr)
7507 {
7508 FILE *file = (FILE *) ptr;
7509 unsigned long flags;
7510
7511 BFD_ASSERT (abfd != NULL && ptr != NULL);
7512
7513 /* Print normal ELF private data. */
7514 _bfd_elf_print_private_bfd_data (abfd, ptr);
7515
7516 flags = elf_elfheader (abfd)->e_flags;
7517 /* Ignore init flag - it may not be set, despite the flags field
7518 containing valid data. */
7519
7520 /* xgettext:c-format */
7521 fprintf (file, _("private flags = 0x%lx:"), elf_elfheader (abfd)->e_flags);
7522
7523 if (flags)
7524 fprintf (file, _(" <Unrecognised flag bits set>"));
7525
7526 fputc ('\n', file);
7527
7528 return true;
7529 }
7530
7531 /* Return true if we need copy relocation against EH. */
7532
7533 static bool
7534 need_copy_relocation_p (struct elf_aarch64_link_hash_entry *eh)
7535 {
7536 struct elf_dyn_relocs *p;
7537 asection *s;
7538
7539 for (p = eh->root.dyn_relocs; p != NULL; p = p->next)
7540 {
7541 /* If there is any pc-relative reference, we need to keep the copy
7542 relocation: propagating the relocation into the runtime is not
7543 supported by current glibc. */
7544 if (p->pc_count)
7545 return true;
7546
7547 s = p->sec->output_section;
7548 /* Need copy relocation if it's against read-only section. */
7549 if (s != NULL && (s->flags & SEC_READONLY) != 0)
7550 return true;
7551 }
7552
7553 return false;
7554 }
7555
7556 /* Adjust a symbol defined by a dynamic object and referenced by a
7557 regular object. The current definition is in some section of the
7558 dynamic object, but we're not including those sections. We have to
7559 change the definition to something the rest of the link can
7560 understand. */
7561
7562 static bool
7563 elfNN_aarch64_adjust_dynamic_symbol (struct bfd_link_info *info,
7564 struct elf_link_hash_entry *h)
7565 {
7566 struct elf_aarch64_link_hash_table *htab;
7567 asection *s, *srel;
7568
7569 /* If this is a function, put it in the procedure linkage table. We
7570 will fill in the contents of the procedure linkage table later,
7571 when we know the address of the .got section. */
7572 if (h->type == STT_FUNC || h->type == STT_GNU_IFUNC || h->needs_plt)
7573 {
7574 if (h->plt.refcount <= 0
7575 || (h->type != STT_GNU_IFUNC
7576 && (SYMBOL_CALLS_LOCAL (info, h)
7577 || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
7578 && h->root.type == bfd_link_hash_undefweak))))
7579 {
7580 /* This case can occur if we saw a CALL26 reloc in
7581 an input file, but the symbol wasn't referred to
7582 by a dynamic object or all references were
7583 garbage collected. In that case we can resolve the
7584 calls directly, so no PLT entry is needed. */
7585 h->plt.offset = (bfd_vma) - 1;
7586 h->needs_plt = 0;
7587 }
7588
7589 return true;
7590 }
7591 else
7592 /* Otherwise, reset to -1. */
7593 h->plt.offset = (bfd_vma) - 1;
7594
7595
7596 /* If this is a weak symbol, and there is a real definition, the
7597 processor independent code will have arranged for us to see the
7598 real definition first, and we can just use the same value. */
7599 if (h->is_weakalias)
7600 {
7601 struct elf_link_hash_entry *def = weakdef (h);
7602 BFD_ASSERT (def->root.type == bfd_link_hash_defined);
7603 h->root.u.def.section = def->root.u.def.section;
7604 h->root.u.def.value = def->root.u.def.value;
7605 if (ELIMINATE_COPY_RELOCS || info->nocopyreloc)
7606 h->non_got_ref = def->non_got_ref;
7607 return true;
7608 }
7609
7610 /* If we are creating a shared library, we must presume that the
7611 only references to the symbol are via the global offset table.
7612 For such cases we need not do anything here; the relocations will
7613 be handled correctly by relocate_section. */
7614 if (bfd_link_pic (info))
7615 return true;
7616
7617 /* If there are no references to this symbol that do not use the
7618 GOT, we don't need to generate a copy reloc. */
7619 if (!h->non_got_ref)
7620 return true;
7621
7622 /* If -z nocopyreloc was given, we won't generate them either. */
7623 if (info->nocopyreloc)
7624 {
7625 h->non_got_ref = 0;
7626 return true;
7627 }
7628
7629 if (ELIMINATE_COPY_RELOCS)
7630 {
7631 struct elf_aarch64_link_hash_entry *eh;
7632 /* If we don't find any dynamic relocs in read-only sections, then
7633 we'll be keeping the dynamic relocs and avoiding the copy reloc. */
7634 eh = (struct elf_aarch64_link_hash_entry *) h;
7635 if (!need_copy_relocation_p (eh))
7636 {
7637 h->non_got_ref = 0;
7638 return true;
7639 }
7640 }
7641
7642 /* We must allocate the symbol in our .dynbss section, which will
7643 become part of the .bss section of the executable. There will be
7644 an entry for this symbol in the .dynsym section. The dynamic
7645 object will contain position independent code, so all references
7646 from the dynamic object to this symbol will go through the global
7647 offset table. The dynamic linker will use the .dynsym entry to
7648 determine the address it must put in the global offset table, so
7649 both the dynamic object and the regular object will refer to the
7650 same memory location for the variable. */
7651
7652 htab = elf_aarch64_hash_table (info);
7653
7654 /* We must generate a R_AARCH64_COPY reloc to tell the dynamic linker
7655 to copy the initial value out of the dynamic object and into the
7656 runtime process image. */
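  /* Symbols defined in a read-only section of the dynamic object are
  copied into the sdynrelro section (.data.rel.ro) so that they can be
  mapped read-only again once relocation is done; everything else goes
  into .dynbss.  */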
7657 if ((h->root.u.def.section->flags & SEC_READONLY) != 0)
7658 {
7659 s = htab->root.sdynrelro;
7660 srel = htab->root.sreldynrelro;
7661 }
7662 else
7663 {
7664 s = htab->root.sdynbss;
7665 srel = htab->root.srelbss;
7666 }
7667 if ((h->root.u.def.section->flags & SEC_ALLOC) != 0 && h->size != 0)
7668 {
7669 srel->size += RELOC_SIZE (htab);
7670 h->needs_copy = 1;
7671 }
7672
7673 return _bfd_elf_adjust_dynamic_copy (info, h, s);
7674
7675 }
7676
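/* Ensure ABFD has an array of NUMBER struct elf_aarch64_local_symbol
   entries (one per local symbol), zero-allocating it on first use.  */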
7677 static bool
7678 elfNN_aarch64_allocate_local_symbols (bfd *abfd, unsigned number)
7679 {
7680 struct elf_aarch64_local_symbol *locals;
7681 locals = elf_aarch64_locals (abfd);
7682 if (locals == NULL)
7683 {
7684 locals = (struct elf_aarch64_local_symbol *)
7685 bfd_zalloc (abfd, number * sizeof (struct elf_aarch64_local_symbol));
7686 if (locals == NULL)
7687 return false;
7688 elf_aarch64_locals (abfd) = locals;
7689 }
7690 return true;
7691 }
7692
7693 /* Create the .got section to hold the global offset table. */
7694
7695 static bool
7696 aarch64_elf_create_got_section (bfd *abfd, struct bfd_link_info *info)
7697 {
7698 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
7699 flagword flags;
7700 asection *s;
7701 struct elf_link_hash_entry *h;
7702 struct elf_link_hash_table *htab = elf_hash_table (info);
7703
7704 /* This function may be called more than once. */
7705 if (htab->sgot != NULL)
7706 return true;
7707
7708 flags = bed->dynamic_sec_flags;
7709
7710 s = bfd_make_section_anyway_with_flags (abfd,
7711 (bed->rela_plts_and_copies_p
7712 ? ".rela.got" : ".rel.got"),
7713 (bed->dynamic_sec_flags
7714 | SEC_READONLY));
7715 if (s == NULL
7716 || !bfd_set_section_alignment (s, bed->s->log_file_align))
7717 return false;
7718 htab->srelgot = s;
7719
7720 s = bfd_make_section_anyway_with_flags (abfd, ".got", flags);
7721 if (s == NULL
7722 || !bfd_set_section_alignment (s, bed->s->log_file_align))
7723 return false;
7724 htab->sgot = s;
7725 htab->sgot->size += GOT_ENTRY_SIZE;
7726
7727 if (bed->want_got_sym)
7728 {
7729 /* Define the symbol _GLOBAL_OFFSET_TABLE_ at the start of the .got
7730 (or .got.plt) section. We don't do this in the linker script
7731 because we don't want to define the symbol if we are not creating
7732 a global offset table. */
7733 h = _bfd_elf_define_linkage_sym (abfd, info, s,
7734 "_GLOBAL_OFFSET_TABLE_");
7735 elf_hash_table (info)->hgot = h;
7736 if (h == NULL)
7737 return false;
7738 }
7739
7740 if (bed->want_got_plt)
7741 {
7742 s = bfd_make_section_anyway_with_flags (abfd, ".got.plt", flags);
7743 if (s == NULL
7744 || !bfd_set_section_alignment (s, bed->s->log_file_align))
7745 return false;
7746 htab->sgotplt = s;
7747 }
7748
7749 /* The first bit of the global offset table is the header. */
7750 s->size += bed->got_header_size;
7751
7752 return true;
7753 }
7754
7755 /* Look through the relocs for a section during the first phase. */
7756
7757 static bool
7758 elfNN_aarch64_check_relocs (bfd *abfd, struct bfd_link_info *info,
7759 asection *sec, const Elf_Internal_Rela *relocs)
7760 {
7761 Elf_Internal_Shdr *symtab_hdr;
7762 struct elf_link_hash_entry **sym_hashes;
7763 const Elf_Internal_Rela *rel;
7764 const Elf_Internal_Rela *rel_end;
7765 asection *sreloc;
7766
7767 struct elf_aarch64_link_hash_table *htab;
7768
7769 if (bfd_link_relocatable (info))
7770 return true;
7771
7772 BFD_ASSERT (is_aarch64_elf (abfd));
7773
7774 htab = elf_aarch64_hash_table (info);
7775 sreloc = NULL;
7776
7777 symtab_hdr = &elf_symtab_hdr (abfd);
7778 sym_hashes = elf_sym_hashes (abfd);
7779
7780 rel_end = relocs + sec->reloc_count;
7781 for (rel = relocs; rel < rel_end; rel++)
7782 {
7783 struct elf_link_hash_entry *h;
7784 unsigned int r_symndx;
7785 unsigned int r_type;
7786 bfd_reloc_code_real_type bfd_r_type;
7787 Elf_Internal_Sym *isym;
7788
7789 r_symndx = ELFNN_R_SYM (rel->r_info);
7790 r_type = ELFNN_R_TYPE (rel->r_info);
7791
7792 if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr))
7793 {
7794 /* xgettext:c-format */
7795 _bfd_error_handler (_("%pB: bad symbol index: %d"), abfd, r_symndx);
7796 return false;
7797 }
7798
7799 if (r_symndx < symtab_hdr->sh_info)
7800 {
7801 /* A local symbol. */
7802 isym = bfd_sym_from_r_symndx (&htab->root.sym_cache,
7803 abfd, r_symndx);
7804 if (isym == NULL)
7805 return false;
7806
7807 /* Check relocation against local STT_GNU_IFUNC symbol. */
7808 if (ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
7809 {
7810 h = elfNN_aarch64_get_local_sym_hash (htab, abfd, rel,
7811 true);
7812 if (h == NULL)
7813 return false;
7814
7815 /* Fake a STT_GNU_IFUNC symbol. */
7816 h->type = STT_GNU_IFUNC;
7817 h->def_regular = 1;
7818 h->ref_regular = 1;
7819 h->forced_local = 1;
7820 h->root.type = bfd_link_hash_defined;
7821 }
7822 else
7823 h = NULL;
7824 }
7825 else
7826 {
7827 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
7828 while (h->root.type == bfd_link_hash_indirect
7829 || h->root.type == bfd_link_hash_warning)
7830 h = (struct elf_link_hash_entry *) h->root.u.i.link;
7831 }
7832
7833 /* Could be done earlier, if h were already available. */
7834 bfd_r_type = aarch64_tls_transition (abfd, info, r_type, h, r_symndx);
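  /* BFD_R_TYPE now reflects any TLS transition that will be applied at
  final link time, so the GOT accounting below matches what
  relocate_section will eventually emit.  */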
7835
7836 if (h != NULL)
7837 {
7838 /* If a relocation refers to _GLOBAL_OFFSET_TABLE_, create the .got.
7839 This shows up in particular in an R_AARCH64_PREL64 in large model
7840 when calculating the pc-relative address to .got section which is
7841 used to initialize the gp register. */
7842 if (h->root.root.string
7843 && strcmp (h->root.root.string, "_GLOBAL_OFFSET_TABLE_") == 0)
7844 {
7845 if (htab->root.dynobj == NULL)
7846 htab->root.dynobj = abfd;
7847
7848 if (! aarch64_elf_create_got_section (htab->root.dynobj, info))
7849 return false;
7850
7851 BFD_ASSERT (h == htab->root.hgot);
7852 }
7853
7854 /* Create the ifunc sections for static executables. If we
7855 never see an indirect function symbol nor are we building
7856 a static executable, those sections will be empty and
7857 won't appear in output. */
7858 switch (bfd_r_type)
7859 {
7860 default:
7861 break;
7862
7863 case BFD_RELOC_AARCH64_ADD_LO12:
7864 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
7865 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
7866 case BFD_RELOC_AARCH64_CALL26:
7867 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
7868 case BFD_RELOC_AARCH64_JUMP26:
7869 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
7870 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
7871 case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
7872 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
7873 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
7874 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
7875 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
7876 case BFD_RELOC_AARCH64_NN:
7877 if (htab->root.dynobj == NULL)
7878 htab->root.dynobj = abfd;
7879 if (!_bfd_elf_create_ifunc_sections (htab->root.dynobj, info))
7880 return false;
7881 break;
7882 }
7883
7884 /* It is referenced by a non-shared object. */
7885 h->ref_regular = 1;
7886 }
7887
7888 switch (bfd_r_type)
7889 {
7890 case BFD_RELOC_AARCH64_16:
7891 #if ARCH_SIZE == 64
7892 case BFD_RELOC_AARCH64_32:
7893 #endif
7894 if (bfd_link_pic (info) && (sec->flags & SEC_ALLOC) != 0)
7895 {
7896 if (h != NULL
7897 /* This is an absolute symbol. It represents a value instead
7898 of an address. */
7899 && (bfd_is_abs_symbol (&h->root)
7900 /* This is an undefined symbol. */
7901 || h->root.type == bfd_link_hash_undefined))
7902 break;
7903
7904 /* For local symbols and global symbols defined in a non-ABS section,
7905 it is assumed that the value is an address. */
7906 int howto_index = bfd_r_type - BFD_RELOC_AARCH64_RELOC_START;
7907 _bfd_error_handler
7908 /* xgettext:c-format */
7909 (_("%pB: relocation %s against `%s' can not be used when making "
7910 "a shared object"),
7911 abfd, elfNN_aarch64_howto_table[howto_index].name,
7912 (h) ? h->root.root.string : "a local symbol");
7913 bfd_set_error (bfd_error_bad_value);
7914 return false;
7915 }
7916 else
7917 break;
7918
7919 case BFD_RELOC_AARCH64_MOVW_G0_NC:
7920 case BFD_RELOC_AARCH64_MOVW_G1_NC:
7921 case BFD_RELOC_AARCH64_MOVW_G2_NC:
7922 case BFD_RELOC_AARCH64_MOVW_G3:
7923 if (bfd_link_pic (info))
7924 {
7925 int howto_index = bfd_r_type - BFD_RELOC_AARCH64_RELOC_START;
7926 _bfd_error_handler
7927 /* xgettext:c-format */
7928 (_("%pB: relocation %s against `%s' can not be used when making "
7929 "a shared object; recompile with -fPIC"),
7930 abfd, elfNN_aarch64_howto_table[howto_index].name,
7931 (h) ? h->root.root.string : "a local symbol");
7932 bfd_set_error (bfd_error_bad_value);
7933 return false;
7934 }
7935 /* Fall through. */
7936
7937 case BFD_RELOC_AARCH64_16_PCREL:
7938 case BFD_RELOC_AARCH64_32_PCREL:
7939 case BFD_RELOC_AARCH64_64_PCREL:
7940 case BFD_RELOC_AARCH64_ADD_LO12:
7941 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
7942 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
7943 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
7944 case BFD_RELOC_AARCH64_LDST128_LO12:
7945 case BFD_RELOC_AARCH64_LDST16_LO12:
7946 case BFD_RELOC_AARCH64_LDST32_LO12:
7947 case BFD_RELOC_AARCH64_LDST64_LO12:
7948 case BFD_RELOC_AARCH64_LDST8_LO12:
7949 case BFD_RELOC_AARCH64_LD_LO19_PCREL:
7950 if (h == NULL || bfd_link_pic (info))
7951 break;
7952 /* Fall through. */
7953
7954 case BFD_RELOC_AARCH64_NN:
7955
7956 /* We don't need to handle relocs into sections not going into
7957 the "real" output. */
7958 if ((sec->flags & SEC_ALLOC) == 0)
7959 break;
7960
7961 if (h != NULL)
7962 {
7963 if (!bfd_link_pic (info))
7964 h->non_got_ref = 1;
7965
7966 h->plt.refcount += 1;
7967 h->pointer_equality_needed = 1;
7968 }
7969
7970 /* No need to do anything if we're not creating a shared
7971 object. */
7972 if (!(bfd_link_pic (info)
7973 /* If on the other hand, we are creating an executable, we
7974 may need to keep relocations for symbols satisfied by a
7975 dynamic library if we manage to avoid copy relocs for the
7976 symbol.
7977
7978 NOTE: Currently, there is no support of copy relocs
7979 elimination on pc-relative relocation types, because there is
7980 no dynamic relocation support for them in glibc. We still
7981 record the dynamic symbol reference for them. This is
7982 because one symbol may be referenced by both absolute
7983 relocation (for example, BFD_RELOC_AARCH64_NN) and
7984 pc-relative relocation. We need full symbol reference
7985 information to make correct decision later in
7986 elfNN_aarch64_adjust_dynamic_symbol. */
7987 || (ELIMINATE_COPY_RELOCS
7988 && !bfd_link_pic (info)
7989 && h != NULL
7990 && (h->root.type == bfd_link_hash_defweak
7991 || !h->def_regular))))
7992 break;
7993
7994 {
7995 struct elf_dyn_relocs *p;
7996 struct elf_dyn_relocs **head;
7997 int howto_index = bfd_r_type - BFD_RELOC_AARCH64_RELOC_START;
7998
7999 /* We must copy these reloc types into the output file.
8000 Create a reloc section in dynobj and make room for
8001 this reloc. */
8002 if (sreloc == NULL)
8003 {
8004 if (htab->root.dynobj == NULL)
8005 htab->root.dynobj = abfd;
8006
8007 sreloc = _bfd_elf_make_dynamic_reloc_section
8008 (sec, htab->root.dynobj, LOG_FILE_ALIGN, abfd, /*rela? */ true);
8009
8010 if (sreloc == NULL)
8011 return false;
8012 }
8013
8014 /* If this is a global symbol, we count the number of
8015 relocations we need for this symbol. */
8016 if (h != NULL)
8017 {
8018 head = &h->dyn_relocs;
8019 }
8020 else
8021 {
8022 /* Track dynamic relocs needed for local syms too.
8023 We really need local syms available to do this
8024 easily. Oh well. */
8025
8026 asection *s;
8027 void **vpp;
8028
8029 isym = bfd_sym_from_r_symndx (&htab->root.sym_cache,
8030 abfd, r_symndx);
8031 if (isym == NULL)
8032 return false;
8033
8034 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
8035 if (s == NULL)
8036 s = sec;
8037
8038 /* Beware of type punned pointers vs strict aliasing
8039 rules. */
8040 vpp = &(elf_section_data (s)->local_dynrel);
8041 head = (struct elf_dyn_relocs **) vpp;
8042 }
8043
8044 p = *head;
8045 if (p == NULL || p->sec != sec)
8046 {
8047 size_t amt = sizeof *p;
8048 p = ((struct elf_dyn_relocs *)
8049 bfd_zalloc (htab->root.dynobj, amt));
8050 if (p == NULL)
8051 return false;
8052 p->next = *head;
8053 *head = p;
8054 p->sec = sec;
8055 }
8056
8057 p->count += 1;
8058
8059 if (elfNN_aarch64_howto_table[howto_index].pc_relative)
8060 p->pc_count += 1;
8061 }
8062 break;
8063
8064 /* RR: We probably want to keep a consistency check that
8065 there are no dangling GOT_PAGE relocs. */
8066 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
8067 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
8068 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
8069 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
8070 case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
8071 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
8072 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
8073 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
8074 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
8075 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
8076 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
8077 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
8078 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
8079 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
8080 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
8081 case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
8082 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
8083 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
8084 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
8085 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
8086 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
8087 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
8088 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
8089 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
8090 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
8091 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
8092 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
8093 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
8094 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
8095 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
8096 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
8097 {
8098 unsigned got_type;
8099 unsigned old_got_type;
8100
8101 got_type = aarch64_reloc_got_type (bfd_r_type);
8102
8103 if (h)
8104 {
8105 h->got.refcount += 1;
8106 old_got_type = elf_aarch64_hash_entry (h)->got_type;
8107 }
8108 else
8109 {
8110 struct elf_aarch64_local_symbol *locals;
8111
8112 if (!elfNN_aarch64_allocate_local_symbols
8113 (abfd, symtab_hdr->sh_info))
8114 return false;
8115
8116 locals = elf_aarch64_locals (abfd);
8117 BFD_ASSERT (r_symndx < symtab_hdr->sh_info);
8118 locals[r_symndx].got_refcount += 1;
8119 old_got_type = locals[r_symndx].got_type;
8120 }
8121
8122 /* If a variable is accessed with both general dynamic TLS
8123 methods, two slots may be created. */
8124 if (GOT_TLS_GD_ANY_P (old_got_type) && GOT_TLS_GD_ANY_P (got_type))
8125 got_type |= old_got_type;
8126
8127 /* We will already have issued an error message if there
8128 is a TLS/non-TLS mismatch, based on the symbol type.
8129 So just combine any TLS types needed. */
8130 if (old_got_type != GOT_UNKNOWN && old_got_type != GOT_NORMAL
8131 && got_type != GOT_NORMAL)
8132 got_type |= old_got_type;
8133
8134 /* If the symbol is accessed by both IE and GD methods, we
8135 are able to relax. Turn off the GD flag, without
8136 disturbing any other TLS types that may be
8137 involved. */
8138 if ((got_type & GOT_TLS_IE) && GOT_TLS_GD_ANY_P (got_type))
8139 got_type &= ~ (GOT_TLSDESC_GD | GOT_TLS_GD);
8140
8141 if (old_got_type != got_type)
8142 {
8143 if (h != NULL)
8144 elf_aarch64_hash_entry (h)->got_type = got_type;
8145 else
8146 {
8147 struct elf_aarch64_local_symbol *locals;
8148 locals = elf_aarch64_locals (abfd);
8149 BFD_ASSERT (r_symndx < symtab_hdr->sh_info);
8150 locals[r_symndx].got_type = got_type;
8151 }
8152 }
8153
8154 if (htab->root.dynobj == NULL)
8155 htab->root.dynobj = abfd;
8156 if (! aarch64_elf_create_got_section (htab->root.dynobj, info))
8157 return false;
8158 break;
8159 }
8160
8161 case BFD_RELOC_AARCH64_CALL26:
8162 case BFD_RELOC_AARCH64_JUMP26:
8163 /* If this is a local symbol then we resolve it
8164 directly without creating a PLT entry. */
8165 if (h == NULL)
8166 continue;
8167
8168 h->needs_plt = 1;
8169 if (h->plt.refcount <= 0)
8170 h->plt.refcount = 1;
8171 else
8172 h->plt.refcount += 1;
8173 break;
8174
8175 default:
8176 break;
8177 }
8178 }
8179
8180 return true;
8181 }
8182
8183 /* Treat mapping symbols as special target symbols. */
8184
8185 static bool
8186 elfNN_aarch64_is_target_special_symbol (bfd *abfd ATTRIBUTE_UNUSED,
8187 asymbol *sym)
8188 {
8189 return bfd_is_aarch64_special_symbol_name (sym->name,
8190 BFD_AARCH64_SPECIAL_SYM_TYPE_ANY);
8191 }
8192
8193 /* If the ELF symbol SYM might be a function in SEC, return the
8194 function size and set *CODE_OFF to the function's entry point,
8195 otherwise return zero. */
8196
8197 static bfd_size_type
8198 elfNN_aarch64_maybe_function_sym (const asymbol *sym, asection *sec,
8199 bfd_vma *code_off)
8200 {
8201 bfd_size_type size;
8202 elf_symbol_type * elf_sym = (elf_symbol_type *) sym;
8203
8204 if ((sym->flags & (BSF_SECTION_SYM | BSF_FILE | BSF_OBJECT
8205 | BSF_THREAD_LOCAL | BSF_RELC | BSF_SRELC)) != 0
8206 || sym->section != sec)
8207 return 0;
8208
8209 size = (sym->flags & BSF_SYNTHETIC) ? 0 : elf_sym->internal_elf_sym.st_size;
8210
8211 if (!(sym->flags & BSF_SYNTHETIC))
8212 switch (ELF_ST_TYPE (elf_sym->internal_elf_sym.st_info))
8213 {
8214 case STT_NOTYPE:
8215 /* Ignore symbols created by the annobin plugin for gcc and clang.
8216 These symbols are hidden, local, notype and have a size of 0. */
8217 if (size == 0
8218 && sym->flags & BSF_LOCAL
8219 && ELF_ST_VISIBILITY (elf_sym->internal_elf_sym.st_other) == STV_HIDDEN)
8220 return 0;
8221 /* Fall through. */
8222 case STT_FUNC:
8223 /* FIXME: Allow STT_GNU_IFUNC as well ? */
8224 break;
8225 default:
8226 return 0;
8227 }
8228
8229 if ((sym->flags & BSF_LOCAL)
8230 && bfd_is_aarch64_special_symbol_name (sym->name,
8231 BFD_AARCH64_SPECIAL_SYM_TYPE_ANY))
8232 return 0;
8233
8234 *code_off = sym->value;
8235
8236 /* Do not return 0 for the function's size: zero means "not a
function" to our callers. */
8237 return size ? size : 1;
8238 }
8239
8240 static bool
8241 elfNN_aarch64_find_inliner_info (bfd *abfd,
8242 const char **filename_ptr,
8243 const char **functionname_ptr,
8244 unsigned int *line_ptr)
8245 {
8246 bool found;
8247 found = _bfd_dwarf2_find_inliner_info
8248 (abfd, filename_ptr,
8249 functionname_ptr, line_ptr, &elf_tdata (abfd)->dwarf2_find_line_info);
8250 return found;
8251 }
8252
8253
8254 static bool
8255 elfNN_aarch64_init_file_header (bfd *abfd, struct bfd_link_info *link_info)
8256 {
8257 Elf_Internal_Ehdr *i_ehdrp; /* ELF file header, internal form. */
8258
8259 if (!_bfd_elf_init_file_header (abfd, link_info))
8260 return false;
8261
8262 i_ehdrp = elf_elfheader (abfd);
8263 i_ehdrp->e_ident[EI_ABIVERSION] = AARCH64_ELF_ABI_VERSION;
8264 return true;
8265 }
8266
8267 static enum elf_reloc_type_class
8268 elfNN_aarch64_reloc_type_class (const struct bfd_link_info *info ATTRIBUTE_UNUSED,
8269 const asection *rel_sec ATTRIBUTE_UNUSED,
8270 const Elf_Internal_Rela *rela)
8271 {
8272 struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info);
8273
8274 if (htab->root.dynsym != NULL
8275 && htab->root.dynsym->contents != NULL)
8276 {
8277 /* Check relocation against STT_GNU_IFUNC symbol if there are
8278 dynamic symbols. */
8279 bfd *abfd = info->output_bfd;
8280 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
8281 unsigned long r_symndx = ELFNN_R_SYM (rela->r_info);
8282 if (r_symndx != STN_UNDEF)
8283 {
8284 Elf_Internal_Sym sym;
8285 if (!bed->s->swap_symbol_in (abfd,
8286 (htab->root.dynsym->contents
8287 + r_symndx * bed->s->sizeof_sym),
8288 0, &sym))
8289 {
8290 /* xgettext:c-format */
8291 _bfd_error_handler (_("%pB symbol number %lu references"
8292 " nonexistent SHT_SYMTAB_SHNDX section"),
8293 abfd, r_symndx);
8294 /* Ideally an error class should be returned here. */
8295 }
8296 else if (ELF_ST_TYPE (sym.st_info) == STT_GNU_IFUNC)
8297 return reloc_class_ifunc;
8298 }
8299 }
8300
8301 switch ((int) ELFNN_R_TYPE (rela->r_info))
8302 {
8303 case AARCH64_R (IRELATIVE):
8304 return reloc_class_ifunc;
8305 case AARCH64_R (RELATIVE):
8306 return reloc_class_relative;
8307 case AARCH64_R (JUMP_SLOT):
8308 return reloc_class_plt;
8309 case AARCH64_R (COPY):
8310 return reloc_class_copy;
8311 default:
8312 return reloc_class_normal;
8313 }
8314 }
8315
8316 /* Handle an AArch64 specific section when reading an object file. This is
8317 called when bfd_section_from_shdr finds a section with an unknown
8318 type. */
8319
8320 static bool
8321 elfNN_aarch64_section_from_shdr (bfd *abfd,
8322 Elf_Internal_Shdr *hdr,
8323 const char *name, int shindex)
8324 {
8325 /* There ought to be a place to keep ELF backend specific flags, but
8326 at the moment there isn't one. We just keep track of the
8327 sections by their name, instead. Fortunately, the ABI gives
8328 names for all the AArch64 specific sections, so we will probably get
8329 away with this. */
8330 switch (hdr->sh_type)
8331 {
8332 case SHT_AARCH64_ATTRIBUTES:
8333 break;
8334
8335 default:
8336 return false;
8337 }
8338
8339 if (!_bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
8340 return false;
8341
8342 return true;
8343 }
8344
8345 /* Process any AArch64-specific program segment types. */
8346
8347 static bool
8348 elfNN_aarch64_section_from_phdr (bfd *abfd ATTRIBUTE_UNUSED,
8349 Elf_Internal_Phdr *hdr,
8350 int hdr_index ATTRIBUTE_UNUSED,
8351 const char *name ATTRIBUTE_UNUSED)
8352 {
8353 /* Right now we only handle the PT_AARCH64_MEMTAG_MTE segment type. */
8354 if (hdr == NULL || hdr->p_type != PT_AARCH64_MEMTAG_MTE)
8355 return false;
8356
8357 if (hdr->p_filesz > 0)
8358 {
8359 /* Sections created from memory tag p_type's are always named
8360 "memtag". This makes it easier for tools (for example, GDB)
8361 to find them. */
8362 asection *newsect = bfd_make_section_anyway (abfd, "memtag");
8363
8364 if (newsect == NULL)
8365 return false;
8366
8367 unsigned int opb = bfd_octets_per_byte (abfd, NULL);
8368
8369 /* p_vaddr holds the original start address of the tagged memory
8370 range. */
8371 newsect->vma = hdr->p_vaddr / opb;
8372
8373 /* p_filesz holds the storage size of the packed tags. */
8374 newsect->size = hdr->p_filesz;
8375 newsect->filepos = hdr->p_offset;
8376
8377 /* p_memsz holds the size of the memory range that contains tags. The
8378 section's rawsize field is reused for this purpose. */
8379 newsect->rawsize = hdr->p_memsz;
8380
8381 /* Make sure the section's flags have SEC_HAS_CONTENTS set, otherwise
8382 BFD will return all zeroes when attempting to get contents from this
8383 section. */
8384 newsect->flags |= SEC_HAS_CONTENTS;
8385 }
8386
8387 return true;
8388 }
8389
8390 /* Implements the bfd_elf_modify_headers hook for aarch64. */
8391
8392 static bool
8393 elfNN_aarch64_modify_headers (bfd *abfd,
8394 struct bfd_link_info *info)
8395 {
8396 struct elf_segment_map *m;
8397 unsigned int segment_count = 0;
8398 Elf_Internal_Phdr *p;
8399
8400 for (m = elf_seg_map (abfd); m != NULL; m = m->next, segment_count++)
8401 {
8402 /* We are only interested in the memory tag segment that will be dumped
8403 to a core file. If we have no memory tags or this isn't a core file we
8404 are dealing with, just skip this segment. */
8405 if (m->p_type != PT_AARCH64_MEMTAG_MTE
8406 || bfd_get_format (abfd) != bfd_core)
8407 continue;
8408
8409 /* For memory tag segments in core files, the size of the file contents
8410 is smaller than the size of the memory range. Adjust the memory size
8411 accordingly. The real memory size is held in the section's rawsize
8412 field. */
8413 if (m->count > 0)
8414 {
8415 p = elf_tdata (abfd)->phdr;
8416 p += m->idx;
8417 p->p_memsz = m->sections[0]->rawsize;
8418 p->p_flags = 0;
8419 p->p_paddr = 0;
8420 p->p_align = 0;
8421 }
8422 }
8423
8424 /* Give the generic code a chance to handle the headers. */
8425 return _bfd_elf_modify_headers (abfd, info);
8426 }
8427
8428 /* A structure used to record a list of sections, independently
8429 of the next and prev fields in the asection structure. */
8430 typedef struct section_list
8431 {
8432 asection *sec;
8433 struct section_list *next;
8434 struct section_list *prev;
8435 }
8436 section_list;
8437
8438 /* Unfortunately we need to keep a list of sections for which
8439 an _aarch64_elf_section_data structure has been allocated. This
8440 is because it is possible for functions like elfNN_aarch64_write_section
8441 to be called on a section which has had an elf_data_structure
8442 allocated for it (and so the used_by_bfd field is valid) but
8443 for which the AArch64 extended version of this structure - the
8444 _aarch64_elf_section_data structure - has not been allocated. */
8445 static section_list *sections_with_aarch64_elf_section_data = NULL;
8446
8447 static void
8448 record_section_with_aarch64_elf_section_data (asection *sec)
8449 {
8450 struct section_list *entry;
8451
8452 entry = bfd_malloc (sizeof (*entry));
8453 if (entry == NULL)
8454 return;
8455 entry->sec = sec;
8456 entry->next = sections_with_aarch64_elf_section_data;
8457 entry->prev = NULL;
8458 if (entry->next != NULL)
8459 entry->next->prev = entry;
8460 sections_with_aarch64_elf_section_data = entry;
8461 }
8462
8463 static struct section_list *
8464 find_aarch64_elf_section_entry (asection *sec)
8465 {
8466 struct section_list *entry;
8467 static struct section_list *last_entry = NULL;
8468
8469 /* This is a short cut for the typical case where the sections are added
8470 to the sections_with_aarch64_elf_section_data list in forward order and
8471 then looked up here in backwards order. This makes a real difference
8472 to the ld-srec/sec64k.exp linker test. */
8473 entry = sections_with_aarch64_elf_section_data;
8474 if (last_entry != NULL)
8475 {
8476 if (last_entry->sec == sec)
8477 entry = last_entry;
8478 else if (last_entry->next != NULL && last_entry->next->sec == sec)
8479 entry = last_entry->next;
8480 }
8481
8482 for (; entry; entry = entry->next)
8483 if (entry->sec == sec)
8484 break;
8485
8486 if (entry)
8487 /* Record the entry prior to this one - it is the entry we are
8488 most likely to want to locate next time. Also this way if we
8489 have been called from
8490 unrecord_section_with_aarch64_elf_section_data () we will not
8491 be caching a pointer that is about to be freed. */
8492 last_entry = entry->prev;
8493
8494 return entry;
8495 }
8496
8497 static void
8498 unrecord_section_with_aarch64_elf_section_data (asection *sec)
8499 {
8500 struct section_list *entry;
8501
8502 entry = find_aarch64_elf_section_entry (sec);
8503
8504 if (entry)
8505 {
8506 if (entry->prev != NULL)
8507 entry->prev->next = entry->next;
8508 if (entry->next != NULL)
8509 entry->next->prev = entry->prev;
8510 if (entry == sections_with_aarch64_elf_section_data)
8511 sections_with_aarch64_elf_section_data = entry->next;
8512 free (entry);
8513 }
8514 }
8515
8516
8517 typedef struct
8518 {
8519 void *finfo;
8520 struct bfd_link_info *info;
8521 asection *sec;
8522 int sec_shndx;
8523 int (*func) (void *, const char *, Elf_Internal_Sym *,
8524 asection *, struct elf_link_hash_entry *);
8525 } output_arch_syminfo;
8526
8527 enum map_symbol_type
8528 {
8529 AARCH64_MAP_INSN,
8530 AARCH64_MAP_DATA
8531 };
8532
8533
8534 /* Output a single mapping symbol. */
8535
8536 static bool
8537 elfNN_aarch64_output_map_sym (output_arch_syminfo *osi,
8538 enum map_symbol_type type, bfd_vma offset)
8539 {
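  /* Mapping symbol names defined by the AArch64 ELF ABI: "$x" marks the
  start of a sequence of A64 instructions, "$d" the start of data.
  Indexed by enum map_symbol_type.  */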
8540 static const char *names[2] = { "$x", "$d" };
8541 Elf_Internal_Sym sym;
8542
8543 sym.st_value = (osi->sec->output_section->vma
8544 + osi->sec->output_offset + offset);
8545 sym.st_size = 0;
8546 sym.st_other = 0;
8547 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
8548 sym.st_shndx = osi->sec_shndx;
8549 return osi->func (osi->finfo, names[type], &sym, osi->sec, NULL) == 1;
8550 }
8551
8552 /* Output a single local symbol for a generated stub. */
8553
8554 static bool
8555 elfNN_aarch64_output_stub_sym (output_arch_syminfo *osi, const char *name,
8556 bfd_vma offset, bfd_vma size)
8557 {
8558 Elf_Internal_Sym sym;
8559
8560 sym.st_value = (osi->sec->output_section->vma
8561 + osi->sec->output_offset + offset);
8562 sym.st_size = size;
8563 sym.st_other = 0;
8564 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
8565 sym.st_shndx = osi->sec_shndx;
8566 return osi->func (osi->finfo, name, &sym, osi->sec, NULL) == 1;
8567 }
8568
8569 static bool
8570 aarch64_map_one_stub (struct bfd_hash_entry *gen_entry, void *in_arg)
8571 {
8572 struct elf_aarch64_stub_hash_entry *stub_entry;
8573 asection *stub_sec;
8574 bfd_vma addr;
8575 char *stub_name;
8576 output_arch_syminfo *osi;
8577
8578 /* Massage our args to the form they really have. */
8579 stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;
8580 osi = (output_arch_syminfo *) in_arg;
8581
8582 stub_sec = stub_entry->stub_sec;
8583
8584 /* Ensure this stub is attached to the current section being
8585 processed. */
8586 if (stub_sec != osi->sec)
8587 return true;
8588
8589 addr = (bfd_vma) stub_entry->stub_offset;
8590
8591 stub_name = stub_entry->output_name;
8592
8593 switch (stub_entry->stub_type)
8594 {
8595 case aarch64_stub_adrp_branch:
8596 if (!elfNN_aarch64_output_stub_sym (osi, stub_name, addr,
8597 sizeof (aarch64_adrp_branch_stub)))
8598 return false;
8599 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
8600 return false;
8601 break;
8602 case aarch64_stub_long_branch:
8603 if (!elfNN_aarch64_output_stub_sym
8604 (osi, stub_name, addr, sizeof (aarch64_long_branch_stub)))
8605 return false;
8606 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
8607 return false;
8608 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_DATA, addr + 16))
8609 return false;
8610 break;
8611 case aarch64_stub_bti_direct_branch:
8612 if (!elfNN_aarch64_output_stub_sym (osi, stub_name, addr,
8613 sizeof (aarch64_bti_direct_branch_stub)))
8614 return false;
8615 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
8616 return false;
8617 break;
8618 case aarch64_stub_erratum_835769_veneer:
8619 if (!elfNN_aarch64_output_stub_sym (osi, stub_name, addr,
8620 sizeof (aarch64_erratum_835769_stub)))
8621 return false;
8622 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
8623 return false;
8624 break;
8625 case aarch64_stub_erratum_843419_veneer:
8626 if (!elfNN_aarch64_output_stub_sym (osi, stub_name, addr,
8627 sizeof (aarch64_erratum_843419_stub)))
8628 return false;
8629 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
8630 return false;
8631 break;
8632 case aarch64_stub_none:
8633 break;
8634
8635 default:
8636 abort ();
8637 }
8638
8639 return true;
8640 }
8641
8642 /* Output mapping symbols for linker generated sections. */
8643
8644 static bool
8645 elfNN_aarch64_output_arch_local_syms (bfd *output_bfd,
8646 struct bfd_link_info *info,
8647 void *finfo,
8648 int (*func) (void *, const char *,
8649 Elf_Internal_Sym *,
8650 asection *,
8651 struct elf_link_hash_entry
8652 *))
8653 {
8654 output_arch_syminfo osi;
8655 struct elf_aarch64_link_hash_table *htab;
8656
8657 if (info->strip == strip_all
8658 && !info->emitrelocations
8659 && !bfd_link_relocatable (info))
8660 return true;
8661
8662 htab = elf_aarch64_hash_table (info);
8663
8664 osi.finfo = finfo;
8665 osi.info = info;
8666 osi.func = func;
8667
8668 /* Long calls stubs. */
8669 if (htab->stub_bfd && htab->stub_bfd->sections)
8670 {
8671 asection *stub_sec;
8672
8673 for (stub_sec = htab->stub_bfd->sections;
8674 stub_sec != NULL; stub_sec = stub_sec->next)
8675 {
8676 /* Ignore non-stub sections. */
8677 if (!strstr (stub_sec->name, STUB_SUFFIX))
8678 continue;
8679
8680 osi.sec = stub_sec;
8681
8682 osi.sec_shndx = _bfd_elf_section_from_bfd_section
8683 (output_bfd, osi.sec->output_section);
8684
8685 /* The first instruction in a stub is always a branch. */
8686 if (!elfNN_aarch64_output_map_sym (&osi, AARCH64_MAP_INSN, 0))
8687 return false;
8688
8689 bfd_hash_traverse (&htab->stub_hash_table, aarch64_map_one_stub,
8690 &osi);
8691 }
8692 }
8693
8694 /* Finally, output mapping symbols for the PLT. */
8695 if (!htab->root.splt || htab->root.splt->size == 0)
8696 return true;
8697
8698 osi.sec_shndx = _bfd_elf_section_from_bfd_section
8699 (output_bfd, htab->root.splt->output_section);
8700 osi.sec = htab->root.splt;
8701
8702 elfNN_aarch64_output_map_sym (&osi, AARCH64_MAP_INSN, 0);
8703
8704 return true;
8705
8706 }
8707
8708 /* Allocate target specific section data. */
8709
8710 static bool
8711 elfNN_aarch64_new_section_hook (bfd *abfd, asection *sec)
8712 {
8713 if (!sec->used_by_bfd)
8714 {
8715 _aarch64_elf_section_data *sdata;
8716 size_t amt = sizeof (*sdata);
8717
8718 sdata = bfd_zalloc (abfd, amt);
8719 if (sdata == NULL)
8720 return false;
8721 sec->used_by_bfd = sdata;
8722 }
8723
8724 record_section_with_aarch64_elf_section_data (sec);
8725
8726 return _bfd_elf_new_section_hook (abfd, sec);
8727 }
8728
8729
8730 static void
8731 unrecord_section_via_map_over_sections (bfd *abfd ATTRIBUTE_UNUSED,
8732 asection *sec,
8733 void *ignore ATTRIBUTE_UNUSED)
8734 {
8735 unrecord_section_with_aarch64_elf_section_data (sec);
8736 }
8737
8738 static bool
8739 elfNN_aarch64_close_and_cleanup (bfd *abfd)
8740 {
8741 if (abfd->sections)
8742 bfd_map_over_sections (abfd,
8743 unrecord_section_via_map_over_sections, NULL);
8744
8745 return _bfd_elf_close_and_cleanup (abfd);
8746 }
8747
8748 static bool
8749 elfNN_aarch64_bfd_free_cached_info (bfd *abfd)
8750 {
8751 if (abfd->sections)
8752 bfd_map_over_sections (abfd,
8753 unrecord_section_via_map_over_sections, NULL);
8754
8755 return _bfd_free_cached_info (abfd);
8756 }
8757
8758 /* Create dynamic sections. This is different from the ARM backend in that
8759 the got, plt, gotplt and their relocation sections are all created in the
8760 standard part of the bfd elf backend. */
8761
8762 static bool
8763 elfNN_aarch64_create_dynamic_sections (bfd *dynobj,
8764 struct bfd_link_info *info)
8765 {
8766 /* We need to create the .got section. */
8767 if (!aarch64_elf_create_got_section (dynobj, info))
8768 return false;
8769
8770 return _bfd_elf_create_dynamic_sections (dynobj, info);
8771 }
8772
8773
8774 /* Allocate space in .plt, .got and associated reloc sections for
8775 dynamic relocs. */
8776
8777 static bool
8778 elfNN_aarch64_allocate_dynrelocs (struct elf_link_hash_entry *h, void *inf)
8779 {
8780 struct bfd_link_info *info;
8781 struct elf_aarch64_link_hash_table *htab;
8782 struct elf_aarch64_link_hash_entry *eh;
8783 struct elf_dyn_relocs *p;
8784
8785 /* An example of a bfd_link_hash_indirect symbol is a versioned
8786 symbol. For example: __gxx_personality_v0(bfd_link_hash_indirect)
8787 -> __gxx_personality_v0(bfd_link_hash_defined)
8788
8789 There is no need to process bfd_link_hash_indirect symbols here
8790 because we will also be presented with the concrete instance of
8791 the symbol and elfNN_aarch64_copy_indirect_symbol () will have been
8792 called to copy all relevant data from the generic to the concrete
8793 symbol instance. */
8794 if (h->root.type == bfd_link_hash_indirect)
8795 return true;
8796
8797 if (h->root.type == bfd_link_hash_warning)
8798 h = (struct elf_link_hash_entry *) h->root.u.i.link;
8799
8800 info = (struct bfd_link_info *) inf;
8801 htab = elf_aarch64_hash_table (info);
8802
8803 /* Since an STT_GNU_IFUNC symbol must go through the PLT, we handle it
8804 here if it is defined and referenced in a non-shared object. */
8805 if (h->type == STT_GNU_IFUNC
8806 && h->def_regular)
8807 return true;
8808 else if (htab->root.dynamic_sections_created && h->plt.refcount > 0)
8809 {
8810 /* Make sure this symbol is output as a dynamic symbol.
8811 Undefined weak syms won't yet be marked as dynamic. */
8812 if (h->dynindx == -1 && !h->forced_local
8813 && h->root.type == bfd_link_hash_undefweak)
8814 {
8815 if (!bfd_elf_link_record_dynamic_symbol (info, h))
8816 return false;
8817 }
8818
8819 if (bfd_link_pic (info) || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
8820 {
8821 asection *s = htab->root.splt;
8822
8823 /* If this is the first .plt entry, make room for the special
8824 first entry. */
8825 if (s->size == 0)
8826 s->size += htab->plt_header_size;
8827
8828 h->plt.offset = s->size;
8829
8830 /* If this symbol is not defined in a regular file, and we are
8831 not generating a shared library, then set the symbol to this
8832 location in the .plt. This is required to make function
8833 pointers compare as equal between the normal executable and
8834 the shared library. */
8835 if (!bfd_link_pic (info) && !h->def_regular)
8836 {
8837 h->root.u.def.section = s;
8838 h->root.u.def.value = h->plt.offset;
8839 }
8840
8841 /* Make room for this entry. For now we only create the
8842 small model PLT entries. We later need to find a way
8843 of relaxing into these from the large model PLT entries. */
8844 s->size += htab->plt_entry_size;
8845
8846 /* We also need to make an entry in the .got.plt section, which
8847 will be placed in the .got section by the linker script. */
8848 htab->root.sgotplt->size += GOT_ENTRY_SIZE;
8849
8850 /* We also need to make an entry in the .rela.plt section. */
8851 htab->root.srelplt->size += RELOC_SIZE (htab);
8852
8853 /* We need to ensure that all GOT entries that serve the PLT
8854 are consecutive with the special GOT slots [0] [1] and
8855 [2]. Any additional relocations, such as
8856 R_AARCH64_TLSDESC, must be placed after the PLT related
8857 entries. We abuse the reloc_count such that during
8858 sizing we adjust reloc_count to indicate the number of
8859 PLT related reserved entries. In subsequent phases, when
8860 filling in the contents of the reloc entries, PLT related
8861 entries are placed by computing their PLT index (0
8862 .. reloc_count), while other, non-PLT relocs are placed
8863 at the slot indicated by reloc_count, and reloc_count is
8864 updated. */
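
/* A sketch of the resulting .got.plt layout (illustrative only, assuming
the usual three reserved slots): slots [0..2] are reserved for the
dynamic linker; then one GOT_ENTRY_SIZE jump slot per PLT entry, in
PLT index order (reloc_count of them); then two GOT_ENTRY_SIZE slots
per R_AARCH64_TLSDESC entry, after all of the PLT related slots.  */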
8865
8866 htab->root.srelplt->reloc_count++;
8867
8868 /* Mark the DSO in case R_<CLS>_JUMP_SLOT relocs against
8869 variant PCS symbols are present. */
8870 if (h->other & STO_AARCH64_VARIANT_PCS)
8871 htab->variant_pcs = 1;
8872
8873 }
8874 else
8875 {
8876 h->plt.offset = (bfd_vma) - 1;
8877 h->needs_plt = 0;
8878 }
8879 }
8880 else
8881 {
8882 h->plt.offset = (bfd_vma) - 1;
8883 h->needs_plt = 0;
8884 }
8885
8886 eh = (struct elf_aarch64_link_hash_entry *) h;
8887 eh->tlsdesc_got_jump_table_offset = (bfd_vma) - 1;
8888
8889 if (h->got.refcount > 0)
8890 {
8891 bool dyn;
8892 unsigned got_type = elf_aarch64_hash_entry (h)->got_type;
8893
8894 h->got.offset = (bfd_vma) - 1;
8895
8896 dyn = htab->root.dynamic_sections_created;
8897
8898 /* Make sure this symbol is output as a dynamic symbol.
8899 Undefined weak syms won't yet be marked as dynamic. */
8900 if (dyn && h->dynindx == -1 && !h->forced_local
8901 && h->root.type == bfd_link_hash_undefweak)
8902 {
8903 if (!bfd_elf_link_record_dynamic_symbol (info, h))
8904 return false;
8905 }
8906
8907 if (got_type == GOT_UNKNOWN)
8908 {
8909 }
8910 else if (got_type == GOT_NORMAL)
8911 {
8912 h->got.offset = htab->root.sgot->size;
8913 htab->root.sgot->size += GOT_ENTRY_SIZE;
8914 if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
8915 || h->root.type != bfd_link_hash_undefweak)
8916 && (bfd_link_pic (info)
8917 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h))
8918 /* Undefined weak symbol in static PIE resolves to 0 without
8919 any dynamic relocations. */
8920 && !UNDEFWEAK_NO_DYNAMIC_RELOC (info, h))
8921 {
8922 htab->root.srelgot->size += RELOC_SIZE (htab);
8923 }
8924 }
8925 else
8926 {
8927 int indx;
8928 if (got_type & GOT_TLSDESC_GD)
8929 {
8930 eh->tlsdesc_got_jump_table_offset =
8931 (htab->root.sgotplt->size
8932 - aarch64_compute_jump_table_size (htab));
8933 htab->root.sgotplt->size += GOT_ENTRY_SIZE * 2;
8934 h->got.offset = (bfd_vma) - 2;
8935 }
8936
8937 if (got_type & GOT_TLS_GD)
8938 {
8939 h->got.offset = htab->root.sgot->size;
8940 htab->root.sgot->size += GOT_ENTRY_SIZE * 2;
8941 }
8942
8943 if (got_type & GOT_TLS_IE)
8944 {
8945 h->got.offset = htab->root.sgot->size;
8946 htab->root.sgot->size += GOT_ENTRY_SIZE;
8947 }
8948
8949 indx = h && h->dynindx != -1 ? h->dynindx : 0;
8950 if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
8951 || h->root.type != bfd_link_hash_undefweak)
8952 && (!bfd_link_executable (info)
8953 || indx != 0
8954 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
8955 {
8956 if (got_type & GOT_TLSDESC_GD)
8957 {
8958 htab->root.srelplt->size += RELOC_SIZE (htab);
8959 /* Note reloc_count not incremented here! We have
8960 already adjusted reloc_count for this relocation
8961 type. */
8962
8963 /* TLSDESC PLT is now needed, but not yet determined. */
8964 htab->root.tlsdesc_plt = (bfd_vma) - 1;
8965 }
8966
8967 if (got_type & GOT_TLS_GD)
8968 htab->root.srelgot->size += RELOC_SIZE (htab) * 2;
8969
8970 if (got_type & GOT_TLS_IE)
8971 htab->root.srelgot->size += RELOC_SIZE (htab);
8972 }
8973 }
8974 }
8975 else
8976 {
8977 h->got.offset = (bfd_vma) - 1;
8978 }
8979
8980 if (h->dyn_relocs == NULL)
8981 return true;
8982
8983 for (p = h->dyn_relocs; p != NULL; p = p->next)
8984 if (eh->def_protected)
8985 {
8986 /* Disallow copy relocations against a protected symbol. */
8987 asection *s = p->sec->output_section;
8988 if (s != NULL && (s->flags & SEC_READONLY) != 0)
8989 {
8990 info->callbacks->einfo
8991 /* xgettext:c-format */
8992 (_ ("%F%P: %pB: copy relocation against non-copyable "
8993 "protected symbol `%s'\n"),
8994 p->sec->owner, h->root.root.string);
8995 return false;
8996 }
8997 }
8998
8999 /* In the shared -Bsymbolic case, discard space allocated for
9000 dynamic pc-relative relocs against symbols which turn out to be
9001 defined in regular objects. For the normal shared case, discard
9002 space for pc-relative relocs that have become local due to symbol
9003 visibility changes. */
9004
9005 if (bfd_link_pic (info))
9006 {
9007 /* Relocs that use pc_count are those that appear on a call
9008 insn, or certain REL relocs that can be generated via assembly.
9009 We want calls to protected symbols to resolve directly to the
9010 function rather than going via the plt. If people want
9011 function pointer comparisons to work as expected then they
9012 should avoid writing weird assembly. */
9013 if (SYMBOL_CALLS_LOCAL (info, h))
9014 {
9015 struct elf_dyn_relocs **pp;
9016
9017 for (pp = &h->dyn_relocs; (p = *pp) != NULL;)
9018 {
9019 p->count -= p->pc_count;
9020 p->pc_count = 0;
9021 if (p->count == 0)
9022 *pp = p->next;
9023 else
9024 pp = &p->next;
9025 }
9026 }
9027
9028 /* Also discard relocs on undefined weak syms with non-default
9029 visibility. */
9030 if (h->dyn_relocs != NULL && h->root.type == bfd_link_hash_undefweak)
9031 {
9032 if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
9033 || UNDEFWEAK_NO_DYNAMIC_RELOC (info, h))
9034 h->dyn_relocs = NULL;
9035
9036 /* Make sure undefined weak symbols are output as dynamic
9037 symbols in PIEs. */
9038 else if (h->dynindx == -1
9039 && !h->forced_local
9040 && h->root.type == bfd_link_hash_undefweak
9041 && !bfd_elf_link_record_dynamic_symbol (info, h))
9042 return false;
9043 }
9044
9045 }
9046 else if (ELIMINATE_COPY_RELOCS)
9047 {
9048 /* For the non-shared case, discard space for relocs against
9049 symbols which turn out to need copy relocs or are not
9050 dynamic. */
9051
9052 if (!h->non_got_ref
9053 && ((h->def_dynamic
9054 && !h->def_regular)
9055 || (htab->root.dynamic_sections_created
9056 && (h->root.type == bfd_link_hash_undefweak
9057 || h->root.type == bfd_link_hash_undefined))))
9058 {
9059 /* Make sure this symbol is output as a dynamic symbol.
9060 Undefined weak syms won't yet be marked as dynamic. */
9061 if (h->dynindx == -1
9062 && !h->forced_local
9063 && h->root.type == bfd_link_hash_undefweak
9064 && !bfd_elf_link_record_dynamic_symbol (info, h))
9065 return false;
9066
9067 /* If that succeeded, we know we'll be keeping all the
9068 relocs. */
9069 if (h->dynindx != -1)
9070 goto keep;
9071 }
9072
9073 h->dyn_relocs = NULL;
9074
9075 keep:;
9076 }
9077
9078 /* Finally, allocate space. */
9079 for (p = h->dyn_relocs; p != NULL; p = p->next)
9080 {
9081 asection *sreloc;
9082
9083 sreloc = elf_section_data (p->sec)->sreloc;
9084
9085 BFD_ASSERT (sreloc != NULL);
9086
9087 sreloc->size += p->count * RELOC_SIZE (htab);
9088 }
9089
9090 return true;
9091 }
9092
9093 /* Allocate space in .plt, .got and associated reloc sections for
9094 ifunc dynamic relocs. */
9095
9096 static bool
9097 elfNN_aarch64_allocate_ifunc_dynrelocs (struct elf_link_hash_entry *h,
9098 void *inf)
9099 {
9100 struct bfd_link_info *info;
9101 struct elf_aarch64_link_hash_table *htab;
9102
9103 /* An example of a bfd_link_hash_indirect symbol is a versioned
9104 symbol. For example: __gxx_personality_v0(bfd_link_hash_indirect)
9105 -> __gxx_personality_v0(bfd_link_hash_defined)
9106
9107 There is no need to process bfd_link_hash_indirect symbols here
9108 because we will also be presented with the concrete instance of
9109 the symbol and elfNN_aarch64_copy_indirect_symbol () will have been
9110 called to copy all relevant data from the generic to the concrete
9111 symbol instance. */
9112 if (h->root.type == bfd_link_hash_indirect)
9113 return true;
9114
9115 if (h->root.type == bfd_link_hash_warning)
9116 h = (struct elf_link_hash_entry *) h->root.u.i.link;
9117
9118 info = (struct bfd_link_info *) inf;
9119 htab = elf_aarch64_hash_table (info);
9120
9121 /* Since an STT_GNU_IFUNC symbol must go through the PLT, we handle it
9122 here if it is defined and referenced in a non-shared object. */
9123 if (h->type == STT_GNU_IFUNC
9124 && h->def_regular)
9125 return _bfd_elf_allocate_ifunc_dyn_relocs (info, h,
9126 &h->dyn_relocs,
9127 htab->plt_entry_size,
9128 htab->plt_header_size,
9129 GOT_ENTRY_SIZE,
9130 false);
9131 return true;
9132 }
9133
9134 /* Allocate space in .plt, .got and associated reloc sections for
9135 local ifunc dynamic relocs. */
9136
9137 static int
9138 elfNN_aarch64_allocate_local_ifunc_dynrelocs (void **slot, void *inf)
9139 {
9140 struct elf_link_hash_entry *h
9141 = (struct elf_link_hash_entry *) *slot;
9142
9143 if (h->type != STT_GNU_IFUNC
9144 || !h->def_regular
9145 || !h->ref_regular
9146 || !h->forced_local
9147 || h->root.type != bfd_link_hash_defined)
9148 abort ();
9149
9150 return elfNN_aarch64_allocate_ifunc_dynrelocs (h, inf);
9151 }
9152
9153 /* This is the most important function of all. Innocuously named,
9154 though!  */
9155
9156 static bool
9157 elfNN_aarch64_size_dynamic_sections (bfd *output_bfd ATTRIBUTE_UNUSED,
9158 struct bfd_link_info *info)
9159 {
9160 struct elf_aarch64_link_hash_table *htab;
9161 bfd *dynobj;
9162 asection *s;
9163 bool relocs;
9164 bfd *ibfd;
9165
9166 htab = elf_aarch64_hash_table ((info));
9167 dynobj = htab->root.dynobj;
9168
9169 BFD_ASSERT (dynobj != NULL);
9170
9171 if (htab->root.dynamic_sections_created)
9172 {
9173 if (bfd_link_executable (info) && !info->nointerp)
9174 {
9175 s = bfd_get_linker_section (dynobj, ".interp");
9176 if (s == NULL)
9177 abort ();
9178 s->size = sizeof ELF_DYNAMIC_INTERPRETER;
9179 s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER;
9180 }
9181 }
9182
9183 /* Set up .got offsets for local syms, and space for local dynamic
9184 relocs. */
9185 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
9186 {
9187 struct elf_aarch64_local_symbol *locals = NULL;
9188 Elf_Internal_Shdr *symtab_hdr;
9189 asection *srel;
9190 unsigned int i;
9191
9192 if (!is_aarch64_elf (ibfd))
9193 continue;
9194
9195 for (s = ibfd->sections; s != NULL; s = s->next)
9196 {
9197 struct elf_dyn_relocs *p;
9198
9199 for (p = (struct elf_dyn_relocs *)
9200 (elf_section_data (s)->local_dynrel); p != NULL; p = p->next)
9201 {
9202 if (!bfd_is_abs_section (p->sec)
9203 && bfd_is_abs_section (p->sec->output_section))
9204 {
9205 /* Input section has been discarded, either because
9206 it is a copy of a linkonce section or due to
9207 linker script /DISCARD/, so we'll be discarding
9208 the relocs too. */
9209 }
9210 else if (p->count != 0)
9211 {
9212 srel = elf_section_data (p->sec)->sreloc;
9213 srel->size += p->count * RELOC_SIZE (htab);
9214 if ((p->sec->output_section->flags & SEC_READONLY) != 0)
9215 info->flags |= DF_TEXTREL;
9216 }
9217 }
9218 }
9219
9220 locals = elf_aarch64_locals (ibfd);
9221 if (!locals)
9222 continue;
9223
9224 symtab_hdr = &elf_symtab_hdr (ibfd);
9225 srel = htab->root.srelgot;
9226 for (i = 0; i < symtab_hdr->sh_info; i++)
9227 {
9228 locals[i].got_offset = (bfd_vma) - 1;
9229 locals[i].tlsdesc_got_jump_table_offset = (bfd_vma) - 1;
9230 if (locals[i].got_refcount > 0)
9231 {
9232 unsigned got_type = locals[i].got_type;
9233 if (got_type & GOT_TLSDESC_GD)
9234 {
9235 locals[i].tlsdesc_got_jump_table_offset =
9236 (htab->root.sgotplt->size
9237 - aarch64_compute_jump_table_size (htab));
9238 htab->root.sgotplt->size += GOT_ENTRY_SIZE * 2;
9239 locals[i].got_offset = (bfd_vma) - 2;
9240 }
9241
9242 if (got_type & GOT_TLS_GD)
9243 {
9244 locals[i].got_offset = htab->root.sgot->size;
9245 htab->root.sgot->size += GOT_ENTRY_SIZE * 2;
9246 }
9247
9248 if (got_type & GOT_TLS_IE
9249 || got_type & GOT_NORMAL)
9250 {
9251 locals[i].got_offset = htab->root.sgot->size;
9252 htab->root.sgot->size += GOT_ENTRY_SIZE;
9253 }
9254
9255 if (got_type == GOT_UNKNOWN)
9256 {
9257 }
9258
9259 if (bfd_link_pic (info))
9260 {
9261 if (got_type & GOT_TLSDESC_GD)
9262 {
9263 htab->root.srelplt->size += RELOC_SIZE (htab);
9264 /* Note RELOC_COUNT not incremented here! */
9265 htab->root.tlsdesc_plt = (bfd_vma) - 1;
9266 }
9267
9268 if (got_type & GOT_TLS_GD)
9269 htab->root.srelgot->size += RELOC_SIZE (htab) * 2;
9270
9271 if (got_type & GOT_TLS_IE
9272 || got_type & GOT_NORMAL)
9273 htab->root.srelgot->size += RELOC_SIZE (htab);
9274 }
9275 }
9276 else
9277 {
9278 locals[i].got_refcount = (bfd_vma) - 1;
9279 }
9280 }
9281 }
9282
9283
9284 /* Allocate global sym .plt and .got entries, and space for global
9285 sym dynamic relocs. */
9286 elf_link_hash_traverse (&htab->root, elfNN_aarch64_allocate_dynrelocs,
9287 info);
9288
9289 /* Allocate global ifunc sym .plt and .got entries, and space for global
9290 ifunc sym dynamic relocs. */
9291 elf_link_hash_traverse (&htab->root, elfNN_aarch64_allocate_ifunc_dynrelocs,
9292 info);
9293
9294 /* Allocate .plt and .got entries, and space for local ifunc symbols. */
9295 htab_traverse (htab->loc_hash_table,
9296 elfNN_aarch64_allocate_local_ifunc_dynrelocs,
9297 info);
9298
9299 /* For every jump slot reserved in the sgotplt, reloc_count is
9300 incremented. However, when we reserve space for TLS descriptors,
9301 it's not incremented, so in order to compute the space reserved
9302 for them, it suffices to multiply the reloc count by the jump
9303 slot size. */
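
/* Illustrative example (not authoritative): if four PLT jump slots have
been reserved, srelplt->reloc_count is 4 and the jump table occupies
4 * GOT_ENTRY_SIZE bytes of .got.plt; any .got.plt space beyond the
reserved slots plus that region belongs to TLS descriptor entries.  */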
9304
9305 if (htab->root.srelplt)
9306 htab->sgotplt_jump_table_size = aarch64_compute_jump_table_size (htab);
9307
9308 if (htab->root.tlsdesc_plt)
9309 {
9310 if (htab->root.splt->size == 0)
9311 htab->root.splt->size += htab->plt_header_size;
9312
9313 /* If we're not using lazy TLS relocations, don't generate the
9314 GOT and PLT entries that would otherwise be required. */
9315 if ((info->flags & DF_BIND_NOW))
9316 htab->root.tlsdesc_plt = 0;
9317 else
9318 {
9319 htab->root.tlsdesc_plt = htab->root.splt->size;
9320 htab->root.splt->size += htab->tlsdesc_plt_entry_size;
9321
9322 htab->root.tlsdesc_got = htab->root.sgot->size;
9323 htab->root.sgot->size += GOT_ENTRY_SIZE;
9324 }
9325 }
9326
9327 /* Init mapping symbol information for later use in distinguishing
9328 between code and data while scanning for errata. */
9329 if (htab->fix_erratum_835769 || htab->fix_erratum_843419)
9330 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
9331 {
9332 if (!is_aarch64_elf (ibfd))
9333 continue;
9334 bfd_elfNN_aarch64_init_maps (ibfd);
9335 }
9336
9337 /* We have now determined the sizes of the various dynamic sections.
9338 Allocate memory for them. */
9339 relocs = false;
9340 for (s = dynobj->sections; s != NULL; s = s->next)
9341 {
9342 if ((s->flags & SEC_LINKER_CREATED) == 0)
9343 continue;
9344
9345 if (s == htab->root.splt
9346 || s == htab->root.sgot
9347 || s == htab->root.sgotplt
9348 || s == htab->root.iplt
9349 || s == htab->root.igotplt
9350 || s == htab->root.sdynbss
9351 || s == htab->root.sdynrelro)
9352 {
9353 /* Strip this section if we don't need it; see the
9354 comment below. */
9355 }
9356 else if (startswith (bfd_section_name (s), ".rela"))
9357 {
9358 if (s->size != 0 && s != htab->root.srelplt)
9359 relocs = true;
9360
9361 /* We use the reloc_count field as a counter if we need
9362 to copy relocs into the output file. */
9363 if (s != htab->root.srelplt)
9364 s->reloc_count = 0;
9365 }
9366 else
9367 {
9368 /* It's not one of our sections, so don't allocate space. */
9369 continue;
9370 }
9371
9372 if (s->size == 0)
9373 {
9374 /* If we don't need this section, strip it from the
9375 output file. This is mostly to handle .rela.bss and
9376 .rela.plt. We must create both sections in
9377 create_dynamic_sections, because they must be created
9378 before the linker maps input sections to output
9379 sections. The linker does that before
9380 adjust_dynamic_symbol is called, and it is that
9381 function which decides whether anything needs to go
9382 into these sections. */
9383 s->flags |= SEC_EXCLUDE;
9384 continue;
9385 }
9386
9387 if ((s->flags & SEC_HAS_CONTENTS) == 0)
9388 continue;
9389
9390 /* Allocate memory for the section contents. We use bfd_zalloc
9391 here in case unused entries are not reclaimed before the
9392 section's contents are written out. This should not happen,
9393 but this way if it does, we get a R_AARCH64_NONE reloc instead
9394 of garbage. */
9395 s->contents = (bfd_byte *) bfd_zalloc (dynobj, s->size);
9396 if (s->contents == NULL)
9397 return false;
9398 }
9399
9400 if (htab->root.dynamic_sections_created)
9401 {
9402 /* Add some entries to the .dynamic section. We fill in the
9403 values later, in elfNN_aarch64_finish_dynamic_sections, but we
9404 must add the entries now so that we get the correct size for
9405 the .dynamic section. The DT_DEBUG entry is filled in by the
9406 dynamic linker and used by the debugger. */
9407 #define add_dynamic_entry(TAG, VAL) \
9408 _bfd_elf_add_dynamic_entry (info, TAG, VAL)
9409
9410 if (!_bfd_elf_add_dynamic_tags (output_bfd, info, relocs))
9411 return false;
9412
9413 if (htab->root.splt->size != 0)
9414 {
9415 if (htab->variant_pcs
9416 && !add_dynamic_entry (DT_AARCH64_VARIANT_PCS, 0))
9417 return false;
9418
9419 if ((elf_aarch64_tdata (output_bfd)->plt_type == PLT_BTI_PAC)
9420 && (!add_dynamic_entry (DT_AARCH64_BTI_PLT, 0)
9421 || !add_dynamic_entry (DT_AARCH64_PAC_PLT, 0)))
9422 return false;
9423
9424 else if ((elf_aarch64_tdata (output_bfd)->plt_type == PLT_BTI)
9425 && !add_dynamic_entry (DT_AARCH64_BTI_PLT, 0))
9426 return false;
9427
9428 else if ((elf_aarch64_tdata (output_bfd)->plt_type == PLT_PAC)
9429 && !add_dynamic_entry (DT_AARCH64_PAC_PLT, 0))
9430 return false;
9431 }
9432 }
9433 #undef add_dynamic_entry
9434
9435 return true;
9436 }
9437
9438 static inline void
9439 elf_aarch64_update_plt_entry (bfd *output_bfd,
9440 bfd_reloc_code_real_type r_type,
9441 bfd_byte *plt_entry, bfd_vma value)
9442 {
9443 reloc_howto_type *howto = elfNN_aarch64_howto_from_bfd_reloc (r_type);
9444
9445 /* FIXME: We should check the return value from this function call. */
9446 (void) _bfd_aarch64_elf_put_addend (output_bfd, plt_entry, r_type, howto, value);
9447 }
9448
9449 static void
9450 elfNN_aarch64_create_small_pltn_entry (struct elf_link_hash_entry *h,
9451 struct elf_aarch64_link_hash_table
9452 *htab, bfd *output_bfd,
9453 struct bfd_link_info *info)
9454 {
9455 bfd_byte *plt_entry;
9456 bfd_vma plt_index;
9457 bfd_vma got_offset;
9458 bfd_vma gotplt_entry_address;
9459 bfd_vma plt_entry_address;
9460 Elf_Internal_Rela rela;
9461 bfd_byte *loc;
9462 asection *plt, *gotplt, *relplt;
9463
9464 /* When building a static executable, use .iplt, .igot.plt and
9465 .rela.iplt sections for STT_GNU_IFUNC symbols. */
9466 if (htab->root.splt != NULL)
9467 {
9468 plt = htab->root.splt;
9469 gotplt = htab->root.sgotplt;
9470 relplt = htab->root.srelplt;
9471 }
9472 else
9473 {
9474 plt = htab->root.iplt;
9475 gotplt = htab->root.igotplt;
9476 relplt = htab->root.irelplt;
9477 }
9478
9479 /* Get the index in the procedure linkage table which
9480 corresponds to this symbol. This is the index of this symbol
9481 in all the symbols for which we are making plt entries. The
9482 first entry in the procedure linkage table is reserved.
9483
9484 Get the offset into the .got table of the entry that
9485 corresponds to this function. Each .got entry is GOT_ENTRY_SIZE
9486 bytes. The first three are reserved for the dynamic linker.
9487
9488 For static executables, we don't reserve anything. */
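
/* An illustrative example (a sketch; the concrete sizes depend on the
selected PLT flavour): the first real PLT entry sits at
h->plt.offset == htab->plt_header_size, giving plt_index 0 and
got_offset 3 * GOT_ENTRY_SIZE, i.e. the .got.plt slot immediately
after the three reserved ones.  */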
9489
9490 if (plt == htab->root.splt)
9491 {
9492 plt_index = (h->plt.offset - htab->plt_header_size) / htab->plt_entry_size;
9493 got_offset = (plt_index + 3) * GOT_ENTRY_SIZE;
9494 }
9495 else
9496 {
9497 plt_index = h->plt.offset / htab->plt_entry_size;
9498 got_offset = plt_index * GOT_ENTRY_SIZE;
9499 }
9500
9501 plt_entry = plt->contents + h->plt.offset;
9502 plt_entry_address = plt->output_section->vma
9503 + plt->output_offset + h->plt.offset;
9504 gotplt_entry_address = gotplt->output_section->vma +
9505 gotplt->output_offset + got_offset;
9506
9507 /* Copy in the boiler-plate for the PLTn entry. */
9508 memcpy (plt_entry, htab->plt_entry, htab->plt_entry_size);
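
/* A rough sketch of what the patching below produces (illustrative only;
the exact template comes from htab->plt_entry):

adrp x16, <page of the .got.plt slot>    patched with PG (got) - PG (plt)
ldr  x17, [x16, #<lo12 of the slot>]     patched with PG_OFFSET (got)
add  x16, x16, #<lo12 of the slot>       patched with PG_OFFSET (got)

where PG yields the 4K page base of an address and PG_OFFSET its low
12 bits.  */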
9509
9510 /* The first instruction in a BTI enabled PLT stub is a BTI
9511 instruction, so skip it. */
9512 if (elf_aarch64_tdata (output_bfd)->plt_type & PLT_BTI
9513 && elf_elfheader (output_bfd)->e_type == ET_EXEC)
9514 plt_entry = plt_entry + 4;
9515
9516 /* Fill in the top 21 bits for this: ADRP x16, PLT_GOT + n * 8.
9517 ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
9518 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADR_HI21_PCREL,
9519 plt_entry,
9520 PG (gotplt_entry_address) -
9521 PG (plt_entry_address));
9522
9523 /* Fill in the lo12 bits for the load from the pltgot. */
9524 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_LDSTNN_LO12,
9525 plt_entry + 4,
9526 PG_OFFSET (gotplt_entry_address));
9527
9528 /* Fill in the lo12 bits for the add from the pltgot entry. */
9529 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADD_LO12,
9530 plt_entry + 8,
9531 PG_OFFSET (gotplt_entry_address));
9532
9533 /* All the GOTPLT entries are essentially initialized to PLT0. */
9534 bfd_put_NN (output_bfd,
9535 plt->output_section->vma + plt->output_offset,
9536 gotplt->contents + got_offset);
9537
9538 rela.r_offset = gotplt_entry_address;
9539
9540 if (h->dynindx == -1
9541 || ((bfd_link_executable (info)
9542 || ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
9543 && h->def_regular
9544 && h->type == STT_GNU_IFUNC))
9545 {
9546 /* If an STT_GNU_IFUNC symbol is locally defined, generate
9547 R_AARCH64_IRELATIVE instead of R_AARCH64_JUMP_SLOT. */
9548 rela.r_info = ELFNN_R_INFO (0, AARCH64_R (IRELATIVE));
9549 rela.r_addend = (h->root.u.def.value
9550 + h->root.u.def.section->output_section->vma
9551 + h->root.u.def.section->output_offset);
9552 }
9553 else
9554 {
9555 /* Fill in the entry in the .rela.plt section. */
9556 rela.r_info = ELFNN_R_INFO (h->dynindx, AARCH64_R (JUMP_SLOT));
9557 rela.r_addend = 0;
9558 }
9559
9560 /* Compute the relocation entry to use based on the PLT index and do
9561 not adjust reloc_count. The reloc_count has already been adjusted
9562 to account for this entry. */
9563 loc = relplt->contents + plt_index * RELOC_SIZE (htab);
9564 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
9565 }
9566
9567 /* Size sections even though they're not dynamic. We use this hook to
9568 set up _TLS_MODULE_BASE_, if needed. */
9569
9570 static bool
9571 elfNN_aarch64_always_size_sections (bfd *output_bfd,
9572 struct bfd_link_info *info)
9573 {
9574 asection *tls_sec;
9575
9576 if (bfd_link_relocatable (info))
9577 return true;
9578
9579 tls_sec = elf_hash_table (info)->tls_sec;
9580
9581 if (tls_sec)
9582 {
9583 struct elf_link_hash_entry *tlsbase;
9584
9585 tlsbase = elf_link_hash_lookup (elf_hash_table (info),
9586 "_TLS_MODULE_BASE_", true, true, false);
9587
9588 if (tlsbase)
9589 {
9590 struct bfd_link_hash_entry *h = NULL;
9591 const struct elf_backend_data *bed =
9592 get_elf_backend_data (output_bfd);
9593
9594 if (!(_bfd_generic_link_add_one_symbol
9595 (info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL,
9596 tls_sec, 0, NULL, false, bed->collect, &h)))
9597 return false;
9598
9599 tlsbase->type = STT_TLS;
9600 tlsbase = (struct elf_link_hash_entry *) h;
9601 tlsbase->def_regular = 1;
9602 tlsbase->other = STV_HIDDEN;
9603 (*bed->elf_backend_hide_symbol) (info, tlsbase, true);
9604 }
9605 }
9606
9607 return true;
9608 }
9609
9610 /* Finish up dynamic symbol handling. We set the contents of various
9611 dynamic sections here. */
9612
9613 static bool
9614 elfNN_aarch64_finish_dynamic_symbol (bfd *output_bfd,
9615 struct bfd_link_info *info,
9616 struct elf_link_hash_entry *h,
9617 Elf_Internal_Sym *sym)
9618 {
9619 struct elf_aarch64_link_hash_table *htab;
9620 htab = elf_aarch64_hash_table (info);
9621
9622 if (h->plt.offset != (bfd_vma) - 1)
9623 {
9624 asection *plt, *gotplt, *relplt;
9625
9626 /* This symbol has an entry in the procedure linkage table. Set
9627 it up. */
9628
9629 /* When building a static executable, use .iplt, .igot.plt and
9630 .rela.iplt sections for STT_GNU_IFUNC symbols. */
9631 if (htab->root.splt != NULL)
9632 {
9633 plt = htab->root.splt;
9634 gotplt = htab->root.sgotplt;
9635 relplt = htab->root.srelplt;
9636 }
9637 else
9638 {
9639 plt = htab->root.iplt;
9640 gotplt = htab->root.igotplt;
9641 relplt = htab->root.irelplt;
9642 }
9643
9644 /* This symbol has an entry in the procedure linkage table. Set
9645 it up. */
9646 if ((h->dynindx == -1
9647 && !((h->forced_local || bfd_link_executable (info))
9648 && h->def_regular
9649 && h->type == STT_GNU_IFUNC))
9650 || plt == NULL
9651 || gotplt == NULL
9652 || relplt == NULL)
9653 return false;
9654
9655 elfNN_aarch64_create_small_pltn_entry (h, htab, output_bfd, info);
9656 if (!h->def_regular)
9657 {
9658 /* Mark the symbol as undefined, rather than as defined in
9659 the .plt section. */
9660 sym->st_shndx = SHN_UNDEF;
9661 /* If the symbol is weak we need to clear the value.
9662 Otherwise, the PLT entry would provide a definition for
9663 the symbol even if the symbol wasn't defined anywhere,
9664 and so the symbol would never be NULL. Leave the value if
9665 there were any relocations where pointer equality matters
9666 (this is a clue for the dynamic linker, to make function
9667 pointer comparisons work between an application and shared
9668 library). */
9669 if (!h->ref_regular_nonweak || !h->pointer_equality_needed)
9670 sym->st_value = 0;
9671 }
9672 }
9673
9674 if (h->got.offset != (bfd_vma) - 1
9675 && elf_aarch64_hash_entry (h)->got_type == GOT_NORMAL
9676 /* Undefined weak symbol in static PIE resolves to 0 without
9677 any dynamic relocations. */
9678 && !UNDEFWEAK_NO_DYNAMIC_RELOC (info, h))
9679 {
9680 Elf_Internal_Rela rela;
9681 bfd_byte *loc;
9682
9683 /* This symbol has an entry in the global offset table. Set it
9684 up. */
9685 if (htab->root.sgot == NULL || htab->root.srelgot == NULL)
9686 abort ();
9687
9688 rela.r_offset = (htab->root.sgot->output_section->vma
9689 + htab->root.sgot->output_offset
9690 + (h->got.offset & ~(bfd_vma) 1));
9691
9692 if (h->def_regular
9693 && h->type == STT_GNU_IFUNC)
9694 {
9695 if (bfd_link_pic (info))
9696 {
9697 /* Generate R_AARCH64_GLOB_DAT. */
9698 goto do_glob_dat;
9699 }
9700 else
9701 {
9702 asection *plt;
9703
9704 if (!h->pointer_equality_needed)
9705 abort ();
9706
9707 /* For a non-shared object we can't use .got.plt, which contains
9708 the real function address, if we need pointer equality. We load
9709 the GOT entry with the address of the PLT entry instead. */
9710 plt = htab->root.splt ? htab->root.splt : htab->root.iplt;
9711 bfd_put_NN (output_bfd, (plt->output_section->vma
9712 + plt->output_offset
9713 + h->plt.offset),
9714 htab->root.sgot->contents
9715 + (h->got.offset & ~(bfd_vma) 1));
9716 return true;
9717 }
9718 }
9719 else if (bfd_link_pic (info) && SYMBOL_REFERENCES_LOCAL (info, h))
9720 {
9721 if (!(h->def_regular || ELF_COMMON_DEF_P (h)))
9722 return false;
9723
9724 BFD_ASSERT ((h->got.offset & 1) != 0);
9725 rela.r_info = ELFNN_R_INFO (0, AARCH64_R (RELATIVE));
9726 rela.r_addend = (h->root.u.def.value
9727 + h->root.u.def.section->output_section->vma
9728 + h->root.u.def.section->output_offset);
9729 }
9730 else
9731 {
9732 do_glob_dat:
9733 BFD_ASSERT ((h->got.offset & 1) == 0);
9734 bfd_put_NN (output_bfd, (bfd_vma) 0,
9735 htab->root.sgot->contents + h->got.offset);
9736 rela.r_info = ELFNN_R_INFO (h->dynindx, AARCH64_R (GLOB_DAT));
9737 rela.r_addend = 0;
9738 }
9739
9740 loc = htab->root.srelgot->contents;
9741 loc += htab->root.srelgot->reloc_count++ * RELOC_SIZE (htab);
9742 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
9743 }
9744
9745 if (h->needs_copy)
9746 {
9747 Elf_Internal_Rela rela;
9748 asection *s;
9749 bfd_byte *loc;
9750
9751 /* This symbol needs a copy reloc. Set it up. */
9752 if (h->dynindx == -1
9753 || (h->root.type != bfd_link_hash_defined
9754 && h->root.type != bfd_link_hash_defweak)
9755 || htab->root.srelbss == NULL)
9756 abort ();
9757
9758 rela.r_offset = (h->root.u.def.value
9759 + h->root.u.def.section->output_section->vma
9760 + h->root.u.def.section->output_offset);
9761 rela.r_info = ELFNN_R_INFO (h->dynindx, AARCH64_R (COPY));
9762 rela.r_addend = 0;
9763 if (h->root.u.def.section == htab->root.sdynrelro)
9764 s = htab->root.sreldynrelro;
9765 else
9766 s = htab->root.srelbss;
9767 loc = s->contents + s->reloc_count++ * RELOC_SIZE (htab);
9768 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
9769 }
9770
9771 /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute. SYM may
9772 be NULL for local symbols. */
9773 if (sym != NULL
9774 && (h == elf_hash_table (info)->hdynamic
9775 || h == elf_hash_table (info)->hgot))
9776 sym->st_shndx = SHN_ABS;
9777
9778 return true;
9779 }
9780
9781 /* Finish up local dynamic symbol handling. We set the contents of
9782 various dynamic sections here. */
9783
9784 static int
9785 elfNN_aarch64_finish_local_dynamic_symbol (void **slot, void *inf)
9786 {
9787 struct elf_link_hash_entry *h
9788 = (struct elf_link_hash_entry *) *slot;
9789 struct bfd_link_info *info
9790 = (struct bfd_link_info *) inf;
9791
9792 return elfNN_aarch64_finish_dynamic_symbol (info->output_bfd,
9793 info, h, NULL);
9794 }
9795
9796 static void
9797 elfNN_aarch64_init_small_plt0_entry (bfd *output_bfd ATTRIBUTE_UNUSED,
9798 struct elf_aarch64_link_hash_table
9799 *htab)
9800 {
9801 /* Fill in PLT0. Fixme:RR Note this doesn't distinguish between
9802 small and large PLTs and at the moment just generates
9803 the small PLT. */
9804
9805 /* PLT0 of the small PLT looks like this in ELF64 -
9806 stp x16, x30, [sp, #-16]! // Save the reloc and lr on stack.
9807 adrp x16, PLT_GOT + 16 // Get the page base of the GOTPLT
9808 ldr x17, [x16, #:lo12:PLT_GOT+16] // Load the address of the
9809 // symbol resolver
9810 add x16, x16, #:lo12:PLT_GOT+16 // Load the lo12 bits of the
9811 // GOTPLT entry for this.
9812 br x17
9813 PLT0 will be slightly different in ELF32 due to different got entry
9814 size. */
9815 bfd_vma plt_got_2nd_ent; /* Address of GOT[2]. */
9816 bfd_vma plt_base;
9817
9818
9819 memcpy (htab->root.splt->contents, htab->plt0_entry,
9820 htab->plt_header_size);
9821
9822 /* PR 26312: Explicitly set the sh_entsize to 0 so that
9823 consumers do not think that the section contains fixed
9824 sized objects. */
9825 elf_section_data (htab->root.splt->output_section)->this_hdr.sh_entsize = 0;
9826
9827 plt_got_2nd_ent = (htab->root.sgotplt->output_section->vma
9828 + htab->root.sgotplt->output_offset
9829 + GOT_ENTRY_SIZE * 2);
9830
9831 plt_base = htab->root.splt->output_section->vma +
9832 htab->root.splt->output_offset;
9833
9834 /* The first instruction in a BTI enabled PLT stub is a BTI
9835 instruction, so skip it. */
9836 bfd_byte *plt0_entry = htab->root.splt->contents;
9837 if (elf_aarch64_tdata (output_bfd)->plt_type & PLT_BTI)
9838 plt0_entry = plt0_entry + 4;
9839
9840 /* Fill in the top 21 bits for this: ADRP x16, PLT_GOT + n * 8.
9841 ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
9842 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADR_HI21_PCREL,
9843 plt0_entry + 4,
9844 PG (plt_got_2nd_ent) - PG (plt_base + 4));
9845
9846 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_LDSTNN_LO12,
9847 plt0_entry + 8,
9848 PG_OFFSET (plt_got_2nd_ent));
9849
9850 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADD_LO12,
9851 plt0_entry + 12,
9852 PG_OFFSET (plt_got_2nd_ent));
9853 }
9854
9855 static bool
9856 elfNN_aarch64_finish_dynamic_sections (bfd *output_bfd,
9857 struct bfd_link_info *info)
9858 {
9859 struct elf_aarch64_link_hash_table *htab;
9860 bfd *dynobj;
9861 asection *sdyn;
9862
9863 htab = elf_aarch64_hash_table (info);
9864 dynobj = htab->root.dynobj;
9865 sdyn = bfd_get_linker_section (dynobj, ".dynamic");
9866
9867 if (htab->root.dynamic_sections_created)
9868 {
9869 ElfNN_External_Dyn *dyncon, *dynconend;
9870
9871 if (sdyn == NULL || htab->root.sgot == NULL)
9872 abort ();
9873
9874 dyncon = (ElfNN_External_Dyn *) sdyn->contents;
9875 dynconend = (ElfNN_External_Dyn *) (sdyn->contents + sdyn->size);
9876 for (; dyncon < dynconend; dyncon++)
9877 {
9878 Elf_Internal_Dyn dyn;
9879 asection *s;
9880
9881 bfd_elfNN_swap_dyn_in (dynobj, dyncon, &dyn);
9882
9883 switch (dyn.d_tag)
9884 {
9885 default:
9886 continue;
9887
9888 case DT_PLTGOT:
9889 s = htab->root.sgotplt;
9890 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
9891 break;
9892
9893 case DT_JMPREL:
9894 s = htab->root.srelplt;
9895 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
9896 break;
9897
9898 case DT_PLTRELSZ:
9899 s = htab->root.srelplt;
9900 dyn.d_un.d_val = s->size;
9901 break;
9902
9903 case DT_TLSDESC_PLT:
9904 s = htab->root.splt;
9905 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
9906 + htab->root.tlsdesc_plt;
9907 break;
9908
9909 case DT_TLSDESC_GOT:
9910 s = htab->root.sgot;
9911 BFD_ASSERT (htab->root.tlsdesc_got != (bfd_vma)-1);
9912 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
9913 + htab->root.tlsdesc_got;
9914 break;
9915 }
9916
9917 bfd_elfNN_swap_dyn_out (output_bfd, &dyn, dyncon);
9918 }
9919
9920 }
9921
9922 /* Fill in the special first entry in the procedure linkage table. */
9923 if (htab->root.splt && htab->root.splt->size > 0)
9924 {
9925 elfNN_aarch64_init_small_plt0_entry (output_bfd, htab);
9926
9927 if (htab->root.tlsdesc_plt && !(info->flags & DF_BIND_NOW))
9928 {
9929 BFD_ASSERT (htab->root.tlsdesc_got != (bfd_vma)-1);
9930 bfd_put_NN (output_bfd, (bfd_vma) 0,
9931 htab->root.sgot->contents + htab->root.tlsdesc_got);
9932
9933 const bfd_byte *entry = elfNN_aarch64_tlsdesc_small_plt_entry;
9934 htab->tlsdesc_plt_entry_size = PLT_TLSDESC_ENTRY_SIZE;
9935
9936 aarch64_plt_type type = elf_aarch64_tdata (output_bfd)->plt_type;
9937 if (type == PLT_BTI || type == PLT_BTI_PAC)
9938 {
9939 entry = elfNN_aarch64_tlsdesc_small_plt_bti_entry;
9940 }
9941
9942 memcpy (htab->root.splt->contents + htab->root.tlsdesc_plt,
9943 entry, htab->tlsdesc_plt_entry_size);
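
/* A sketch of the instructions patched below (illustrative; offsets are
relative to plt_entry once any leading BTI instruction has been
skipped):
+4   adrp x2, <page of DT_TLSDESC_GOT>
+8   adrp x3, <page of the .got.plt>
+12  ldr  x2, [x2, #<lo12 of DT_TLSDESC_GOT>]
+16  add  x3, x3, #<lo12 of the .got.plt>  */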
9944
9945 {
9946 bfd_vma adrp1_addr =
9947 htab->root.splt->output_section->vma
9948 + htab->root.splt->output_offset
9949 + htab->root.tlsdesc_plt + 4;
9950
9951 bfd_vma adrp2_addr = adrp1_addr + 4;
9952
9953 bfd_vma got_addr =
9954 htab->root.sgot->output_section->vma
9955 + htab->root.sgot->output_offset;
9956
9957 bfd_vma pltgot_addr =
9958 htab->root.sgotplt->output_section->vma
9959 + htab->root.sgotplt->output_offset;
9960
9961 bfd_vma dt_tlsdesc_got = got_addr + htab->root.tlsdesc_got;
9962
9963 bfd_byte *plt_entry =
9964 htab->root.splt->contents + htab->root.tlsdesc_plt;
9965
9966 /* The first instruction in a BTI enabled PLT stub is a BTI
9967 instruction, so skip it. */
9968 if (type & PLT_BTI)
9969 {
9970 plt_entry = plt_entry + 4;
9971 adrp1_addr = adrp1_addr + 4;
9972 adrp2_addr = adrp2_addr + 4;
9973 }
9974
9975 /* adrp x2, DT_TLSDESC_GOT */
9976 elf_aarch64_update_plt_entry (output_bfd,
9977 BFD_RELOC_AARCH64_ADR_HI21_PCREL,
9978 plt_entry + 4,
9979 (PG (dt_tlsdesc_got)
9980 - PG (adrp1_addr)));
9981
9982 /* adrp x3, 0 */
9983 elf_aarch64_update_plt_entry (output_bfd,
9984 BFD_RELOC_AARCH64_ADR_HI21_PCREL,
9985 plt_entry + 8,
9986 (PG (pltgot_addr)
9987 - PG (adrp2_addr)));
9988
9989 /* ldr x2, [x2, #0] */
9990 elf_aarch64_update_plt_entry (output_bfd,
9991 BFD_RELOC_AARCH64_LDSTNN_LO12,
9992 plt_entry + 12,
9993 PG_OFFSET (dt_tlsdesc_got));
9994
9995 /* add x3, x3, 0 */
9996 elf_aarch64_update_plt_entry (output_bfd,
9997 BFD_RELOC_AARCH64_ADD_LO12,
9998 plt_entry + 16,
9999 PG_OFFSET (pltgot_addr));
10000 }
10001 }
10002 }
10003
10004 if (htab->root.sgotplt)
10005 {
10006 if (bfd_is_abs_section (htab->root.sgotplt->output_section))
10007 {
10008 _bfd_error_handler
10009 (_("discarded output section: `%pA'"), htab->root.sgotplt);
10010 return false;
10011 }
10012
10013 /* Fill in the first three entries in the global offset table. */
10014 if (htab->root.sgotplt->size > 0)
10015 {
10016 bfd_put_NN (output_bfd, (bfd_vma) 0, htab->root.sgotplt->contents);
10017
10018 /* Write GOT[1] and GOT[2], needed for the dynamic linker. */
10019 bfd_put_NN (output_bfd,
10020 (bfd_vma) 0,
10021 htab->root.sgotplt->contents + GOT_ENTRY_SIZE);
10022 bfd_put_NN (output_bfd,
10023 (bfd_vma) 0,
10024 htab->root.sgotplt->contents + GOT_ENTRY_SIZE * 2);
10025 }
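
/* A sketch of the reserved entries (illustrative): the three .got.plt
slots zeroed above are for the dynamic linker, which conventionally
fills slots [1] and [2] with the link map and the lazy resolver entry
point, while .got[0] below receives the address of the .dynamic
section directly.  */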
10026
10027 if (htab->root.sgot)
10028 {
10029 if (htab->root.sgot->size > 0)
10030 {
10031 bfd_vma addr =
10032 sdyn ? sdyn->output_section->vma + sdyn->output_offset : 0;
10033 bfd_put_NN (output_bfd, addr, htab->root.sgot->contents);
10034 }
10035 }
10036
10037 elf_section_data (htab->root.sgotplt->output_section)->
10038 this_hdr.sh_entsize = GOT_ENTRY_SIZE;
10039 }
10040
10041 if (htab->root.sgot && htab->root.sgot->size > 0)
10042 elf_section_data (htab->root.sgot->output_section)->this_hdr.sh_entsize
10043 = GOT_ENTRY_SIZE;
10044
10045 /* Fill PLT and GOT entries for local STT_GNU_IFUNC symbols. */
10046 htab_traverse (htab->loc_hash_table,
10047 elfNN_aarch64_finish_local_dynamic_symbol,
10048 info);
10049
10050 return true;
10051 }
10052
10053 /* Check if BTI or PAC enabled PLTs are needed. Returns the type needed. */
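/* For example (a sketch): an executable linked with -z force-bti and
-z pac-plt carries both DT_AARCH64_BTI_PLT and DT_AARCH64_PAC_PLT
dynamic tags, and the scan below reports PLT_BTI_PAC for it.  */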
10054 static aarch64_plt_type
10055 get_plt_type (bfd *abfd)
10056 {
10057 aarch64_plt_type ret = PLT_NORMAL;
10058 bfd_byte *contents, *extdyn, *extdynend;
10059 asection *sec = bfd_get_section_by_name (abfd, ".dynamic");
10060 if (!sec
10061 || (sec->flags & SEC_HAS_CONTENTS) == 0
10062 || sec->size < sizeof (ElfNN_External_Dyn)
10063 || !bfd_malloc_and_get_section (abfd, sec, &contents))
10064 return ret;
10065 extdyn = contents;
10066 extdynend = contents + sec->size - sizeof (ElfNN_External_Dyn);
10067 for (; extdyn <= extdynend; extdyn += sizeof (ElfNN_External_Dyn))
10068 {
10069 Elf_Internal_Dyn dyn;
10070 bfd_elfNN_swap_dyn_in (abfd, extdyn, &dyn);
10071
10072 /* Let's check the processor specific dynamic array tags. */
10073 bfd_vma tag = dyn.d_tag;
10074 if (tag < DT_LOPROC || tag > DT_HIPROC)
10075 continue;
10076
10077 switch (tag)
10078 {
10079 case DT_AARCH64_BTI_PLT:
10080 ret |= PLT_BTI;
10081 break;
10082
10083 case DT_AARCH64_PAC_PLT:
10084 ret |= PLT_PAC;
10085 break;
10086
10087 default: break;
10088 }
10089 }
10090 free (contents);
10091 return ret;
10092 }
10093
10094 static long
10095 elfNN_aarch64_get_synthetic_symtab (bfd *abfd,
10096 long symcount,
10097 asymbol **syms,
10098 long dynsymcount,
10099 asymbol **dynsyms,
10100 asymbol **ret)
10101 {
10102 elf_aarch64_tdata (abfd)->plt_type = get_plt_type (abfd);
10103 return _bfd_elf_get_synthetic_symtab (abfd, symcount, syms,
10104 dynsymcount, dynsyms, ret);
10105 }
10106
10107 /* Return address for Ith PLT stub in section PLT, for relocation REL
10108 or (bfd_vma) -1 if it should not be included. */
10109
10110 static bfd_vma
10111 elfNN_aarch64_plt_sym_val (bfd_vma i, const asection *plt,
10112 const arelent *rel ATTRIBUTE_UNUSED)
10113 {
10114 size_t plt0_size = PLT_ENTRY_SIZE;
10115 size_t pltn_size = PLT_SMALL_ENTRY_SIZE;
10116
10117 if (elf_aarch64_tdata (plt->owner)->plt_type == PLT_BTI_PAC)
10118 {
10119 if (elf_elfheader (plt->owner)->e_type == ET_EXEC)
10120 pltn_size = PLT_BTI_PAC_SMALL_ENTRY_SIZE;
10121 else
10122 pltn_size = PLT_PAC_SMALL_ENTRY_SIZE;
10123 }
10124 else if (elf_aarch64_tdata (plt->owner)->plt_type == PLT_BTI)
10125 {
10126 if (elf_elfheader (plt->owner)->e_type == ET_EXEC)
10127 pltn_size = PLT_BTI_SMALL_ENTRY_SIZE;
10128 }
10129 else if (elf_aarch64_tdata (plt->owner)->plt_type == PLT_PAC)
10130 {
10131 pltn_size = PLT_PAC_SMALL_ENTRY_SIZE;
10132 }
10133
10134 return plt->vma + plt0_size + i * pltn_size;
10135 }
10136
10137 /* Returns TRUE if NAME is an AArch64 mapping symbol.
10138 The ARM ELF standard defines $x (for A64 code) and $d (for data).
10139 It also allows a period-initiated suffix to be added to the symbol, i.e.:
10140 "$[adtx]\.[:sym_char]+". */
10141
10142 static bool
10143 is_aarch64_mapping_symbol (const char * name)
10144 {
10145 return name != NULL /* Paranoia. */
10146 && name[0] == '$' /* Note: if objcopy --prefix-symbols has been used then
10147 the mapping symbols could have acquired a prefix.
10148 We do not support this here, since such symbols no
10149 longer conform to the ARM ELF ABI. */
10150 && (name[1] == 'd' || name[1] == 'x')
10151 && (name[2] == 0 || name[2] == '.');
10152 /* FIXME: Strictly speaking the symbol is only a valid mapping symbol if
10153 any characters that follow the period are legal characters for the body
10154 of a symbol's name. For now we just assume that this is the case. */
10155 }
10156
10157 /* Make sure that mapping symbols in object files are not removed via the
10158 "strip --strip-unneeded" tool. These symbols might needed in order to
10159 correctly generate linked files. Once an object file has been linked,
10160 it should be safe to remove them. */
10161
10162 static void
10163 elfNN_aarch64_backend_symbol_processing (bfd *abfd, asymbol *sym)
10164 {
10165 if (((abfd->flags & (EXEC_P | DYNAMIC)) == 0)
10166 && sym->section != bfd_abs_section_ptr
10167 && is_aarch64_mapping_symbol (sym->name))
10168 sym->flags |= BSF_KEEP;
10169 }
10170
10171 /* Implement elf_backend_setup_gnu_properties for AArch64. It serves as a
10172 wrapper function for _bfd_aarch64_elf_link_setup_gnu_properties to account
10173 for the effect of GNU properties of the output_bfd. */
10174 static bfd *
10175 elfNN_aarch64_link_setup_gnu_properties (struct bfd_link_info *info)
10176 {
10177 uint32_t prop = elf_aarch64_tdata (info->output_bfd)->gnu_and_prop;
10178 bfd *pbfd = _bfd_aarch64_elf_link_setup_gnu_properties (info, &prop);
10179 elf_aarch64_tdata (info->output_bfd)->gnu_and_prop = prop;
10180 elf_aarch64_tdata (info->output_bfd)->plt_type
10181 |= (prop & GNU_PROPERTY_AARCH64_FEATURE_1_BTI) ? PLT_BTI : 0;
10182 setup_plt_values (info, elf_aarch64_tdata (info->output_bfd)->plt_type);
10183 return pbfd;
10184 }
10185
10186 /* Implement elf_backend_merge_gnu_properties for AArch64. It serves as a
10187 wrapper function for _bfd_aarch64_elf_merge_gnu_properties to account
10188 for the effect of GNU properties of the output_bfd. */
10189 static bool
10190 elfNN_aarch64_merge_gnu_properties (struct bfd_link_info *info,
10191 bfd *abfd, bfd *bbfd,
10192 elf_property *aprop,
10193 elf_property *bprop)
10194 {
10195 uint32_t prop
10196 = elf_aarch64_tdata (info->output_bfd)->gnu_and_prop;
10197
10198 /* If the output has been marked with BTI via a command line argument,
10199 give out a warning if necessary. */
10200 /* Properties are merged per type, hence only check for warnings when merging
10201 GNU_PROPERTY_AARCH64_FEATURE_1_AND. */
10202 if (((aprop && aprop->pr_type == GNU_PROPERTY_AARCH64_FEATURE_1_AND)
10203 || (bprop && bprop->pr_type == GNU_PROPERTY_AARCH64_FEATURE_1_AND))
10204 && (prop & GNU_PROPERTY_AARCH64_FEATURE_1_BTI)
10205 && (!elf_aarch64_tdata (info->output_bfd)->no_bti_warn))
10206 {
10207 if ((aprop && !(aprop->u.number & GNU_PROPERTY_AARCH64_FEATURE_1_BTI))
10208 || !aprop)
10209 {
10210 _bfd_error_handler (_("%pB: warning: BTI turned on by -z force-bti when "
10211 "all inputs do not have BTI in NOTE section."),
10212 abfd);
10213 }
10214 if ((bprop && !(bprop->u.number & GNU_PROPERTY_AARCH64_FEATURE_1_BTI))
10215 || !bprop)
10216 {
10217 _bfd_error_handler (_("%pB: warning: BTI turned on by -z force-bti when "
10218 "all inputs do not have BTI in NOTE section."),
10219 bbfd);
10220 }
10221 }
10222
10223 return _bfd_aarch64_elf_merge_gnu_properties (info, abfd, aprop,
10224 bprop, prop);
10225 }
10226
10227 /* We use this so we can override certain functions
10228 (though currently we don't). */
10229
10230 const struct elf_size_info elfNN_aarch64_size_info =
10231 {
10232 sizeof (ElfNN_External_Ehdr),
10233 sizeof (ElfNN_External_Phdr),
10234 sizeof (ElfNN_External_Shdr),
10235 sizeof (ElfNN_External_Rel),
10236 sizeof (ElfNN_External_Rela),
10237 sizeof (ElfNN_External_Sym),
10238 sizeof (ElfNN_External_Dyn),
10239 sizeof (Elf_External_Note),
10240 4, /* Hash table entry size. */
10241 1, /* Internal relocs per external relocs. */
10242 ARCH_SIZE, /* Arch size. */
10243 LOG_FILE_ALIGN, /* Log_file_align. */
10244 ELFCLASSNN, EV_CURRENT,
10245 bfd_elfNN_write_out_phdrs,
10246 bfd_elfNN_write_shdrs_and_ehdr,
10247 bfd_elfNN_checksum_contents,
10248 bfd_elfNN_write_relocs,
10249 bfd_elfNN_swap_symbol_in,
10250 bfd_elfNN_swap_symbol_out,
10251 bfd_elfNN_slurp_reloc_table,
10252 bfd_elfNN_slurp_symbol_table,
10253 bfd_elfNN_swap_dyn_in,
10254 bfd_elfNN_swap_dyn_out,
10255 bfd_elfNN_swap_reloc_in,
10256 bfd_elfNN_swap_reloc_out,
10257 bfd_elfNN_swap_reloca_in,
10258 bfd_elfNN_swap_reloca_out
10259 };
10260
10261 #define ELF_ARCH bfd_arch_aarch64
10262 #define ELF_MACHINE_CODE EM_AARCH64
10263 #define ELF_MAXPAGESIZE 0x10000
10264 #define ELF_COMMONPAGESIZE 0x1000
10265
10266 #define bfd_elfNN_close_and_cleanup \
10267 elfNN_aarch64_close_and_cleanup
10268
10269 #define bfd_elfNN_bfd_free_cached_info \
10270 elfNN_aarch64_bfd_free_cached_info
10271
10272 #define bfd_elfNN_bfd_is_target_special_symbol \
10273 elfNN_aarch64_is_target_special_symbol
10274
10275 #define bfd_elfNN_bfd_link_hash_table_create \
10276 elfNN_aarch64_link_hash_table_create
10277
10278 #define bfd_elfNN_bfd_merge_private_bfd_data \
10279 elfNN_aarch64_merge_private_bfd_data
10280
10281 #define bfd_elfNN_bfd_print_private_bfd_data \
10282 elfNN_aarch64_print_private_bfd_data
10283
10284 #define bfd_elfNN_bfd_reloc_type_lookup \
10285 elfNN_aarch64_reloc_type_lookup
10286
10287 #define bfd_elfNN_bfd_reloc_name_lookup \
10288 elfNN_aarch64_reloc_name_lookup
10289
10290 #define bfd_elfNN_bfd_set_private_flags \
10291 elfNN_aarch64_set_private_flags
10292
10293 #define bfd_elfNN_find_inliner_info \
10294 elfNN_aarch64_find_inliner_info
10295
10296 #define bfd_elfNN_get_synthetic_symtab \
10297 elfNN_aarch64_get_synthetic_symtab
10298
10299 #define bfd_elfNN_mkobject \
10300 elfNN_aarch64_mkobject
10301
10302 #define bfd_elfNN_new_section_hook \
10303 elfNN_aarch64_new_section_hook
10304
10305 #define elf_backend_adjust_dynamic_symbol \
10306 elfNN_aarch64_adjust_dynamic_symbol
10307
10308 #define elf_backend_always_size_sections \
10309 elfNN_aarch64_always_size_sections
10310
10311 #define elf_backend_check_relocs \
10312 elfNN_aarch64_check_relocs
10313
10314 #define elf_backend_copy_indirect_symbol \
10315 elfNN_aarch64_copy_indirect_symbol
10316
10317 #define elf_backend_merge_symbol_attribute \
10318 elfNN_aarch64_merge_symbol_attribute
10319
10320 /* Create .dynbss, and .rela.bss sections in DYNOBJ, and set up shortcuts
10321 to them in our hash. */
10322 #define elf_backend_create_dynamic_sections \
10323 elfNN_aarch64_create_dynamic_sections
10324
10325 #define elf_backend_init_index_section \
10326 _bfd_elf_init_2_index_sections
10327
10328 #define elf_backend_finish_dynamic_sections \
10329 elfNN_aarch64_finish_dynamic_sections
10330
10331 #define elf_backend_finish_dynamic_symbol \
10332 elfNN_aarch64_finish_dynamic_symbol
10333
10334 #define elf_backend_object_p \
10335 elfNN_aarch64_object_p
10336
10337 #define elf_backend_output_arch_local_syms \
10338 elfNN_aarch64_output_arch_local_syms
10339
10340 #define elf_backend_maybe_function_sym \
10341 elfNN_aarch64_maybe_function_sym
10342
10343 #define elf_backend_plt_sym_val \
10344 elfNN_aarch64_plt_sym_val
10345
10346 #define elf_backend_init_file_header \
10347 elfNN_aarch64_init_file_header
10348
10349 #define elf_backend_relocate_section \
10350 elfNN_aarch64_relocate_section
10351
10352 #define elf_backend_reloc_type_class \
10353 elfNN_aarch64_reloc_type_class
10354
10355 #define elf_backend_section_from_shdr \
10356 elfNN_aarch64_section_from_shdr
10357
10358 #define elf_backend_section_from_phdr \
10359 elfNN_aarch64_section_from_phdr
10360
10361 #define elf_backend_modify_headers \
10362 elfNN_aarch64_modify_headers
10363
10364 #define elf_backend_size_dynamic_sections \
10365 elfNN_aarch64_size_dynamic_sections
10366
10367 #define elf_backend_size_info \
10368 elfNN_aarch64_size_info
10369
10370 #define elf_backend_write_section \
10371 elfNN_aarch64_write_section
10372
10373 #define elf_backend_symbol_processing \
10374 elfNN_aarch64_backend_symbol_processing
10375
10376 #define elf_backend_setup_gnu_properties \
10377 elfNN_aarch64_link_setup_gnu_properties
10378
10379 #define elf_backend_merge_gnu_properties \
10380 elfNN_aarch64_merge_gnu_properties
10381
10382 #define elf_backend_can_refcount 1
10383 #define elf_backend_can_gc_sections 1
10384 #define elf_backend_plt_readonly 1
10385 #define elf_backend_want_got_plt 1
10386 #define elf_backend_want_plt_sym 0
10387 #define elf_backend_want_dynrelro 1
10388 #define elf_backend_may_use_rel_p 0
10389 #define elf_backend_may_use_rela_p 1
10390 #define elf_backend_default_use_rela_p 1
10391 #define elf_backend_rela_normal 1
10392 #define elf_backend_dtrel_excludes_plt 1
10393 #define elf_backend_got_header_size (GOT_ENTRY_SIZE * 3)
10394 #define elf_backend_default_execstack 0
10395 #define elf_backend_extern_protected_data 0
10396 #define elf_backend_hash_symbol elf_aarch64_hash_symbol
10397
10398 #undef elf_backend_obj_attrs_section
10399 #define elf_backend_obj_attrs_section ".ARM.attributes"
10400
10401 #include "elfNN-target.h"
10402
10403 /* CloudABI support. */
10404
10405 #undef TARGET_LITTLE_SYM
10406 #define TARGET_LITTLE_SYM aarch64_elfNN_le_cloudabi_vec
10407 #undef TARGET_LITTLE_NAME
10408 #define TARGET_LITTLE_NAME "elfNN-littleaarch64-cloudabi"
10409 #undef TARGET_BIG_SYM
10410 #define TARGET_BIG_SYM aarch64_elfNN_be_cloudabi_vec
10411 #undef TARGET_BIG_NAME
10412 #define TARGET_BIG_NAME "elfNN-bigaarch64-cloudabi"
10413
10414 #undef ELF_OSABI
10415 #define ELF_OSABI ELFOSABI_CLOUDABI
10416
10417 #undef elfNN_bed
10418 #define elfNN_bed elfNN_aarch64_cloudabi_bed
10419
10420 #include "elfNN-target.h"