[binutils, ARM, 5/16] BF insns infrastructure with new global reloc R_ARM_THM_BF16
[binutils-gdb.git] / bfd / elf32-arm.c
1 /* 32-bit ELF support for ARM
2 Copyright (C) 1998-2019 Free Software Foundation, Inc.
3
4 This file is part of BFD, the Binary File Descriptor library.
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
19 MA 02110-1301, USA. */
20
21 #include "sysdep.h"
22 #include <limits.h>
23
24 #include "bfd.h"
25 #include "libiberty.h"
26 #include "libbfd.h"
27 #include "elf-bfd.h"
28 #include "elf-nacl.h"
29 #include "elf-vxworks.h"
30 #include "elf/arm.h"
31
32 /* Return the relocation section associated with NAME.  HTAB is the
33 bfd's elf32_arm_link_hash_table; its use_rel flag selects REL vs. RELA. */
34 #define RELOC_SECTION(HTAB, NAME) \
35 ((HTAB)->use_rel ? ".rel" NAME : ".rela" NAME)
36
37 /* Return size of a relocation entry.  HTAB is the bfd's
38 elf32_arm_link_hash_table (use_rel selects REL vs. RELA entries). */
39 #define RELOC_SIZE(HTAB) \
40 ((HTAB)->use_rel \
41 ? sizeof (Elf32_External_Rel) \
42 : sizeof (Elf32_External_Rela))
43
44 /* Return function to swap relocations in.  HTAB is the bfd's
45 elf32_arm_link_hash_table (use_rel selects the REL vs. RELA swapper). */
46 #define SWAP_RELOC_IN(HTAB) \
47 ((HTAB)->use_rel \
48 ? bfd_elf32_swap_reloc_in \
49 : bfd_elf32_swap_reloca_in)
50
51 /* Return function to swap relocations out.  HTAB is the bfd's
52 elf32_arm_link_hash_table (use_rel selects the REL vs. RELA swapper). */
53 #define SWAP_RELOC_OUT(HTAB) \
54 ((HTAB)->use_rel \
55 ? bfd_elf32_swap_reloc_out \
56 : bfd_elf32_swap_reloca_out)
57
58 #define elf_info_to_howto NULL /* Only the REL-flavour hook below is provided.  */
59 #define elf_info_to_howto_rel elf32_arm_info_to_howto
60
61 #define ARM_ELF_ABI_VERSION 0
62 #define ARM_ELF_OS_ABI_VERSION ELFOSABI_ARM /* EI_OSABI value for ARM objects.  */
63
64 /* The Adjusted Place, as defined by AAELF: X with its bottom two bits cleared (word-aligned). */
65 #define Pa(X) ((X) & 0xfffffffc)
66
67 static bfd_boolean elf32_arm_write_section (bfd *output_bfd, /* Forward declaration; the definition appears later in this file.  */
68 struct bfd_link_info *link_info,
69 asection *sec,
70 bfd_byte *contents);
71
72 /* Note: code such as elf32_arm_reloc_type_lookup expects to use e.g.
73 R_ARM_PC24 as an index into this, and find the R_ARM_PC24 HOWTO
74 in that slot. */
75
76 static reloc_howto_type elf32_arm_howto_table_1[] =
77 {
78 /* No relocation. */
79 HOWTO (R_ARM_NONE, /* type */
80 0, /* rightshift */
81 3, /* size (0 = byte, 1 = short, 2 = long) */
82 0, /* bitsize */
83 FALSE, /* pc_relative */
84 0, /* bitpos */
85 complain_overflow_dont,/* complain_on_overflow */
86 bfd_elf_generic_reloc, /* special_function */
87 "R_ARM_NONE", /* name */
88 FALSE, /* partial_inplace */
89 0, /* src_mask */
90 0, /* dst_mask */
91 FALSE), /* pcrel_offset */
92
93 HOWTO (R_ARM_PC24, /* type */
94 2, /* rightshift */
95 2, /* size (0 = byte, 1 = short, 2 = long) */
96 24, /* bitsize */
97 TRUE, /* pc_relative */
98 0, /* bitpos */
99 complain_overflow_signed,/* complain_on_overflow */
100 bfd_elf_generic_reloc, /* special_function */
101 "R_ARM_PC24", /* name */
102 FALSE, /* partial_inplace */
103 0x00ffffff, /* src_mask */
104 0x00ffffff, /* dst_mask */
105 TRUE), /* pcrel_offset */
106
107 /* 32 bit absolute */
108 HOWTO (R_ARM_ABS32, /* type */
109 0, /* rightshift */
110 2, /* size (0 = byte, 1 = short, 2 = long) */
111 32, /* bitsize */
112 FALSE, /* pc_relative */
113 0, /* bitpos */
114 complain_overflow_bitfield,/* complain_on_overflow */
115 bfd_elf_generic_reloc, /* special_function */
116 "R_ARM_ABS32", /* name */
117 FALSE, /* partial_inplace */
118 0xffffffff, /* src_mask */
119 0xffffffff, /* dst_mask */
120 FALSE), /* pcrel_offset */
121
122 /* standard 32bit pc-relative reloc */
123 HOWTO (R_ARM_REL32, /* type */
124 0, /* rightshift */
125 2, /* size (0 = byte, 1 = short, 2 = long) */
126 32, /* bitsize */
127 TRUE, /* pc_relative */
128 0, /* bitpos */
129 complain_overflow_bitfield,/* complain_on_overflow */
130 bfd_elf_generic_reloc, /* special_function */
131 "R_ARM_REL32", /* name */
132 FALSE, /* partial_inplace */
133 0xffffffff, /* src_mask */
134 0xffffffff, /* dst_mask */
135 TRUE), /* pcrel_offset */
136
137 /* 8 bit absolute - R_ARM_LDR_PC_G0 in AAELF */
138 HOWTO (R_ARM_LDR_PC_G0, /* type */
139 0, /* rightshift */
140 0, /* size (0 = byte, 1 = short, 2 = long) */
141 32, /* bitsize */
142 TRUE, /* pc_relative */
143 0, /* bitpos */
144 complain_overflow_dont,/* complain_on_overflow */
145 bfd_elf_generic_reloc, /* special_function */
146 "R_ARM_LDR_PC_G0", /* name */
147 FALSE, /* partial_inplace */
148 0xffffffff, /* src_mask */
149 0xffffffff, /* dst_mask */
150 TRUE), /* pcrel_offset */
151
152 /* 16 bit absolute */
153 HOWTO (R_ARM_ABS16, /* type */
154 0, /* rightshift */
155 1, /* size (0 = byte, 1 = short, 2 = long) */
156 16, /* bitsize */
157 FALSE, /* pc_relative */
158 0, /* bitpos */
159 complain_overflow_bitfield,/* complain_on_overflow */
160 bfd_elf_generic_reloc, /* special_function */
161 "R_ARM_ABS16", /* name */
162 FALSE, /* partial_inplace */
163 0x0000ffff, /* src_mask */
164 0x0000ffff, /* dst_mask */
165 FALSE), /* pcrel_offset */
166
167 /* 12 bit absolute */
168 HOWTO (R_ARM_ABS12, /* type */
169 0, /* rightshift */
170 2, /* size (0 = byte, 1 = short, 2 = long) */
171 12, /* bitsize */
172 FALSE, /* pc_relative */
173 0, /* bitpos */
174 complain_overflow_bitfield,/* complain_on_overflow */
175 bfd_elf_generic_reloc, /* special_function */
176 "R_ARM_ABS12", /* name */
177 FALSE, /* partial_inplace */
178 0x00000fff, /* src_mask */
179 0x00000fff, /* dst_mask */
180 FALSE), /* pcrel_offset */
181
182 HOWTO (R_ARM_THM_ABS5, /* type */
183 6, /* rightshift */
184 1, /* size (0 = byte, 1 = short, 2 = long) */
185 5, /* bitsize */
186 FALSE, /* pc_relative */
187 0, /* bitpos */
188 complain_overflow_bitfield,/* complain_on_overflow */
189 bfd_elf_generic_reloc, /* special_function */
190 "R_ARM_THM_ABS5", /* name */
191 FALSE, /* partial_inplace */
192 0x000007e0, /* src_mask */
193 0x000007e0, /* dst_mask */
194 FALSE), /* pcrel_offset */
195
196 /* 8 bit absolute */
197 HOWTO (R_ARM_ABS8, /* type */
198 0, /* rightshift */
199 0, /* size (0 = byte, 1 = short, 2 = long) */
200 8, /* bitsize */
201 FALSE, /* pc_relative */
202 0, /* bitpos */
203 complain_overflow_bitfield,/* complain_on_overflow */
204 bfd_elf_generic_reloc, /* special_function */
205 "R_ARM_ABS8", /* name */
206 FALSE, /* partial_inplace */
207 0x000000ff, /* src_mask */
208 0x000000ff, /* dst_mask */
209 FALSE), /* pcrel_offset */
210
211 HOWTO (R_ARM_SBREL32, /* type */
212 0, /* rightshift */
213 2, /* size (0 = byte, 1 = short, 2 = long) */
214 32, /* bitsize */
215 FALSE, /* pc_relative */
216 0, /* bitpos */
217 complain_overflow_dont,/* complain_on_overflow */
218 bfd_elf_generic_reloc, /* special_function */
219 "R_ARM_SBREL32", /* name */
220 FALSE, /* partial_inplace */
221 0xffffffff, /* src_mask */
222 0xffffffff, /* dst_mask */
223 FALSE), /* pcrel_offset */
224
225 HOWTO (R_ARM_THM_CALL, /* type */
226 1, /* rightshift */
227 2, /* size (0 = byte, 1 = short, 2 = long) */
228 24, /* bitsize */
229 TRUE, /* pc_relative */
230 0, /* bitpos */
231 complain_overflow_signed,/* complain_on_overflow */
232 bfd_elf_generic_reloc, /* special_function */
233 "R_ARM_THM_CALL", /* name */
234 FALSE, /* partial_inplace */
235 0x07ff2fff, /* src_mask */
236 0x07ff2fff, /* dst_mask */
237 TRUE), /* pcrel_offset */
238
239 HOWTO (R_ARM_THM_PC8, /* type */
240 1, /* rightshift */
241 1, /* size (0 = byte, 1 = short, 2 = long) */
242 8, /* bitsize */
243 TRUE, /* pc_relative */
244 0, /* bitpos */
245 complain_overflow_signed,/* complain_on_overflow */
246 bfd_elf_generic_reloc, /* special_function */
247 "R_ARM_THM_PC8", /* name */
248 FALSE, /* partial_inplace */
249 0x000000ff, /* src_mask */
250 0x000000ff, /* dst_mask */
251 TRUE), /* pcrel_offset */
252
253 HOWTO (R_ARM_BREL_ADJ, /* type */
254 1, /* rightshift */
255 1, /* size (0 = byte, 1 = short, 2 = long) */
256 32, /* bitsize */
257 FALSE, /* pc_relative */
258 0, /* bitpos */
259 complain_overflow_signed,/* complain_on_overflow */
260 bfd_elf_generic_reloc, /* special_function */
261 "R_ARM_BREL_ADJ", /* name */
262 FALSE, /* partial_inplace */
263 0xffffffff, /* src_mask */
264 0xffffffff, /* dst_mask */
265 FALSE), /* pcrel_offset */
266
267 HOWTO (R_ARM_TLS_DESC, /* type */
268 0, /* rightshift */
269 2, /* size (0 = byte, 1 = short, 2 = long) */
270 32, /* bitsize */
271 FALSE, /* pc_relative */
272 0, /* bitpos */
273 complain_overflow_bitfield,/* complain_on_overflow */
274 bfd_elf_generic_reloc, /* special_function */
275 "R_ARM_TLS_DESC", /* name */
276 FALSE, /* partial_inplace */
277 0xffffffff, /* src_mask */
278 0xffffffff, /* dst_mask */
279 FALSE), /* pcrel_offset */
280
281 HOWTO (R_ARM_THM_SWI8, /* type */
282 0, /* rightshift */
283 0, /* size (0 = byte, 1 = short, 2 = long) */
284 0, /* bitsize */
285 FALSE, /* pc_relative */
286 0, /* bitpos */
287 complain_overflow_signed,/* complain_on_overflow */
288 bfd_elf_generic_reloc, /* special_function */
289 "R_ARM_SWI8", /* name */
290 FALSE, /* partial_inplace */
291 0x00000000, /* src_mask */
292 0x00000000, /* dst_mask */
293 FALSE), /* pcrel_offset */
294
295 /* BLX instruction for the ARM. */
296 HOWTO (R_ARM_XPC25, /* type */
297 2, /* rightshift */
298 2, /* size (0 = byte, 1 = short, 2 = long) */
299 24, /* bitsize */
300 TRUE, /* pc_relative */
301 0, /* bitpos */
302 complain_overflow_signed,/* complain_on_overflow */
303 bfd_elf_generic_reloc, /* special_function */
304 "R_ARM_XPC25", /* name */
305 FALSE, /* partial_inplace */
306 0x00ffffff, /* src_mask */
307 0x00ffffff, /* dst_mask */
308 TRUE), /* pcrel_offset */
309
310 /* BLX instruction for the Thumb. */
311 HOWTO (R_ARM_THM_XPC22, /* type */
312 2, /* rightshift */
313 2, /* size (0 = byte, 1 = short, 2 = long) */
314 24, /* bitsize */
315 TRUE, /* pc_relative */
316 0, /* bitpos */
317 complain_overflow_signed,/* complain_on_overflow */
318 bfd_elf_generic_reloc, /* special_function */
319 "R_ARM_THM_XPC22", /* name */
320 FALSE, /* partial_inplace */
321 0x07ff2fff, /* src_mask */
322 0x07ff2fff, /* dst_mask */
323 TRUE), /* pcrel_offset */
324
325 /* Dynamic TLS relocations. */
326
327 HOWTO (R_ARM_TLS_DTPMOD32, /* type */
328 0, /* rightshift */
329 2, /* size (0 = byte, 1 = short, 2 = long) */
330 32, /* bitsize */
331 FALSE, /* pc_relative */
332 0, /* bitpos */
333 complain_overflow_bitfield,/* complain_on_overflow */
334 bfd_elf_generic_reloc, /* special_function */
335 "R_ARM_TLS_DTPMOD32", /* name */
336 TRUE, /* partial_inplace */
337 0xffffffff, /* src_mask */
338 0xffffffff, /* dst_mask */
339 FALSE), /* pcrel_offset */
340
341 HOWTO (R_ARM_TLS_DTPOFF32, /* type */
342 0, /* rightshift */
343 2, /* size (0 = byte, 1 = short, 2 = long) */
344 32, /* bitsize */
345 FALSE, /* pc_relative */
346 0, /* bitpos */
347 complain_overflow_bitfield,/* complain_on_overflow */
348 bfd_elf_generic_reloc, /* special_function */
349 "R_ARM_TLS_DTPOFF32", /* name */
350 TRUE, /* partial_inplace */
351 0xffffffff, /* src_mask */
352 0xffffffff, /* dst_mask */
353 FALSE), /* pcrel_offset */
354
355 HOWTO (R_ARM_TLS_TPOFF32, /* type */
356 0, /* rightshift */
357 2, /* size (0 = byte, 1 = short, 2 = long) */
358 32, /* bitsize */
359 FALSE, /* pc_relative */
360 0, /* bitpos */
361 complain_overflow_bitfield,/* complain_on_overflow */
362 bfd_elf_generic_reloc, /* special_function */
363 "R_ARM_TLS_TPOFF32", /* name */
364 TRUE, /* partial_inplace */
365 0xffffffff, /* src_mask */
366 0xffffffff, /* dst_mask */
367 FALSE), /* pcrel_offset */
368
369 /* Relocs used in ARM Linux */
370
371 HOWTO (R_ARM_COPY, /* type */
372 0, /* rightshift */
373 2, /* size (0 = byte, 1 = short, 2 = long) */
374 32, /* bitsize */
375 FALSE, /* pc_relative */
376 0, /* bitpos */
377 complain_overflow_bitfield,/* complain_on_overflow */
378 bfd_elf_generic_reloc, /* special_function */
379 "R_ARM_COPY", /* name */
380 TRUE, /* partial_inplace */
381 0xffffffff, /* src_mask */
382 0xffffffff, /* dst_mask */
383 FALSE), /* pcrel_offset */
384
385 HOWTO (R_ARM_GLOB_DAT, /* type */
386 0, /* rightshift */
387 2, /* size (0 = byte, 1 = short, 2 = long) */
388 32, /* bitsize */
389 FALSE, /* pc_relative */
390 0, /* bitpos */
391 complain_overflow_bitfield,/* complain_on_overflow */
392 bfd_elf_generic_reloc, /* special_function */
393 "R_ARM_GLOB_DAT", /* name */
394 TRUE, /* partial_inplace */
395 0xffffffff, /* src_mask */
396 0xffffffff, /* dst_mask */
397 FALSE), /* pcrel_offset */
398
399 HOWTO (R_ARM_JUMP_SLOT, /* type */
400 0, /* rightshift */
401 2, /* size (0 = byte, 1 = short, 2 = long) */
402 32, /* bitsize */
403 FALSE, /* pc_relative */
404 0, /* bitpos */
405 complain_overflow_bitfield,/* complain_on_overflow */
406 bfd_elf_generic_reloc, /* special_function */
407 "R_ARM_JUMP_SLOT", /* name */
408 TRUE, /* partial_inplace */
409 0xffffffff, /* src_mask */
410 0xffffffff, /* dst_mask */
411 FALSE), /* pcrel_offset */
412
413 HOWTO (R_ARM_RELATIVE, /* type */
414 0, /* rightshift */
415 2, /* size (0 = byte, 1 = short, 2 = long) */
416 32, /* bitsize */
417 FALSE, /* pc_relative */
418 0, /* bitpos */
419 complain_overflow_bitfield,/* complain_on_overflow */
420 bfd_elf_generic_reloc, /* special_function */
421 "R_ARM_RELATIVE", /* name */
422 TRUE, /* partial_inplace */
423 0xffffffff, /* src_mask */
424 0xffffffff, /* dst_mask */
425 FALSE), /* pcrel_offset */
426
427 HOWTO (R_ARM_GOTOFF32, /* type */
428 0, /* rightshift */
429 2, /* size (0 = byte, 1 = short, 2 = long) */
430 32, /* bitsize */
431 FALSE, /* pc_relative */
432 0, /* bitpos */
433 complain_overflow_bitfield,/* complain_on_overflow */
434 bfd_elf_generic_reloc, /* special_function */
435 "R_ARM_GOTOFF32", /* name */
436 TRUE, /* partial_inplace */
437 0xffffffff, /* src_mask */
438 0xffffffff, /* dst_mask */
439 FALSE), /* pcrel_offset */
440
441 HOWTO (R_ARM_GOTPC, /* type */
442 0, /* rightshift */
443 2, /* size (0 = byte, 1 = short, 2 = long) */
444 32, /* bitsize */
445 TRUE, /* pc_relative */
446 0, /* bitpos */
447 complain_overflow_bitfield,/* complain_on_overflow */
448 bfd_elf_generic_reloc, /* special_function */
449 "R_ARM_GOTPC", /* name */
450 TRUE, /* partial_inplace */
451 0xffffffff, /* src_mask */
452 0xffffffff, /* dst_mask */
453 TRUE), /* pcrel_offset */
454
455 HOWTO (R_ARM_GOT32, /* type */
456 0, /* rightshift */
457 2, /* size (0 = byte, 1 = short, 2 = long) */
458 32, /* bitsize */
459 FALSE, /* pc_relative */
460 0, /* bitpos */
461 complain_overflow_bitfield,/* complain_on_overflow */
462 bfd_elf_generic_reloc, /* special_function */
463 "R_ARM_GOT32", /* name */
464 TRUE, /* partial_inplace */
465 0xffffffff, /* src_mask */
466 0xffffffff, /* dst_mask */
467 FALSE), /* pcrel_offset */
468
469 HOWTO (R_ARM_PLT32, /* type */
470 2, /* rightshift */
471 2, /* size (0 = byte, 1 = short, 2 = long) */
472 24, /* bitsize */
473 TRUE, /* pc_relative */
474 0, /* bitpos */
475 complain_overflow_bitfield,/* complain_on_overflow */
476 bfd_elf_generic_reloc, /* special_function */
477 "R_ARM_PLT32", /* name */
478 FALSE, /* partial_inplace */
479 0x00ffffff, /* src_mask */
480 0x00ffffff, /* dst_mask */
481 TRUE), /* pcrel_offset */
482
483 HOWTO (R_ARM_CALL, /* type */
484 2, /* rightshift */
485 2, /* size (0 = byte, 1 = short, 2 = long) */
486 24, /* bitsize */
487 TRUE, /* pc_relative */
488 0, /* bitpos */
489 complain_overflow_signed,/* complain_on_overflow */
490 bfd_elf_generic_reloc, /* special_function */
491 "R_ARM_CALL", /* name */
492 FALSE, /* partial_inplace */
493 0x00ffffff, /* src_mask */
494 0x00ffffff, /* dst_mask */
495 TRUE), /* pcrel_offset */
496
497 HOWTO (R_ARM_JUMP24, /* type */
498 2, /* rightshift */
499 2, /* size (0 = byte, 1 = short, 2 = long) */
500 24, /* bitsize */
501 TRUE, /* pc_relative */
502 0, /* bitpos */
503 complain_overflow_signed,/* complain_on_overflow */
504 bfd_elf_generic_reloc, /* special_function */
505 "R_ARM_JUMP24", /* name */
506 FALSE, /* partial_inplace */
507 0x00ffffff, /* src_mask */
508 0x00ffffff, /* dst_mask */
509 TRUE), /* pcrel_offset */
510
511 HOWTO (R_ARM_THM_JUMP24, /* type */
512 1, /* rightshift */
513 2, /* size (0 = byte, 1 = short, 2 = long) */
514 24, /* bitsize */
515 TRUE, /* pc_relative */
516 0, /* bitpos */
517 complain_overflow_signed,/* complain_on_overflow */
518 bfd_elf_generic_reloc, /* special_function */
519 "R_ARM_THM_JUMP24", /* name */
520 FALSE, /* partial_inplace */
521 0x07ff2fff, /* src_mask */
522 0x07ff2fff, /* dst_mask */
523 TRUE), /* pcrel_offset */
524
525 HOWTO (R_ARM_BASE_ABS, /* type */
526 0, /* rightshift */
527 2, /* size (0 = byte, 1 = short, 2 = long) */
528 32, /* bitsize */
529 FALSE, /* pc_relative */
530 0, /* bitpos */
531 complain_overflow_dont,/* complain_on_overflow */
532 bfd_elf_generic_reloc, /* special_function */
533 "R_ARM_BASE_ABS", /* name */
534 FALSE, /* partial_inplace */
535 0xffffffff, /* src_mask */
536 0xffffffff, /* dst_mask */
537 FALSE), /* pcrel_offset */
538
539 HOWTO (R_ARM_ALU_PCREL7_0, /* type */
540 0, /* rightshift */
541 2, /* size (0 = byte, 1 = short, 2 = long) */
542 12, /* bitsize */
543 TRUE, /* pc_relative */
544 0, /* bitpos */
545 complain_overflow_dont,/* complain_on_overflow */
546 bfd_elf_generic_reloc, /* special_function */
547 "R_ARM_ALU_PCREL_7_0", /* name */
548 FALSE, /* partial_inplace */
549 0x00000fff, /* src_mask */
550 0x00000fff, /* dst_mask */
551 TRUE), /* pcrel_offset */
552
553 HOWTO (R_ARM_ALU_PCREL15_8, /* type */
554 0, /* rightshift */
555 2, /* size (0 = byte, 1 = short, 2 = long) */
556 12, /* bitsize */
557 TRUE, /* pc_relative */
558 8, /* bitpos */
559 complain_overflow_dont,/* complain_on_overflow */
560 bfd_elf_generic_reloc, /* special_function */
561 "R_ARM_ALU_PCREL_15_8",/* name */
562 FALSE, /* partial_inplace */
563 0x00000fff, /* src_mask */
564 0x00000fff, /* dst_mask */
565 TRUE), /* pcrel_offset */
566
567 HOWTO (R_ARM_ALU_PCREL23_15, /* type */
568 0, /* rightshift */
569 2, /* size (0 = byte, 1 = short, 2 = long) */
570 12, /* bitsize */
571 TRUE, /* pc_relative */
572 16, /* bitpos */
573 complain_overflow_dont,/* complain_on_overflow */
574 bfd_elf_generic_reloc, /* special_function */
575 "R_ARM_ALU_PCREL_23_15",/* name */
576 FALSE, /* partial_inplace */
577 0x00000fff, /* src_mask */
578 0x00000fff, /* dst_mask */
579 TRUE), /* pcrel_offset */
580
581 HOWTO (R_ARM_LDR_SBREL_11_0, /* type */
582 0, /* rightshift */
583 2, /* size (0 = byte, 1 = short, 2 = long) */
584 12, /* bitsize */
585 FALSE, /* pc_relative */
586 0, /* bitpos */
587 complain_overflow_dont,/* complain_on_overflow */
588 bfd_elf_generic_reloc, /* special_function */
589 "R_ARM_LDR_SBREL_11_0",/* name */
590 FALSE, /* partial_inplace */
591 0x00000fff, /* src_mask */
592 0x00000fff, /* dst_mask */
593 FALSE), /* pcrel_offset */
594
595 HOWTO (R_ARM_ALU_SBREL_19_12, /* type */
596 0, /* rightshift */
597 2, /* size (0 = byte, 1 = short, 2 = long) */
598 8, /* bitsize */
599 FALSE, /* pc_relative */
600 12, /* bitpos */
601 complain_overflow_dont,/* complain_on_overflow */
602 bfd_elf_generic_reloc, /* special_function */
603 "R_ARM_ALU_SBREL_19_12",/* name */
604 FALSE, /* partial_inplace */
605 0x000ff000, /* src_mask */
606 0x000ff000, /* dst_mask */
607 FALSE), /* pcrel_offset */
608
609 HOWTO (R_ARM_ALU_SBREL_27_20, /* type */
610 0, /* rightshift */
611 2, /* size (0 = byte, 1 = short, 2 = long) */
612 8, /* bitsize */
613 FALSE, /* pc_relative */
614 20, /* bitpos */
615 complain_overflow_dont,/* complain_on_overflow */
616 bfd_elf_generic_reloc, /* special_function */
617 "R_ARM_ALU_SBREL_27_20",/* name */
618 FALSE, /* partial_inplace */
619 0x0ff00000, /* src_mask */
620 0x0ff00000, /* dst_mask */
621 FALSE), /* pcrel_offset */
622
623 HOWTO (R_ARM_TARGET1, /* type */
624 0, /* rightshift */
625 2, /* size (0 = byte, 1 = short, 2 = long) */
626 32, /* bitsize */
627 FALSE, /* pc_relative */
628 0, /* bitpos */
629 complain_overflow_dont,/* complain_on_overflow */
630 bfd_elf_generic_reloc, /* special_function */
631 "R_ARM_TARGET1", /* name */
632 FALSE, /* partial_inplace */
633 0xffffffff, /* src_mask */
634 0xffffffff, /* dst_mask */
635 FALSE), /* pcrel_offset */
636
637 HOWTO (R_ARM_ROSEGREL32, /* type */
638 0, /* rightshift */
639 2, /* size (0 = byte, 1 = short, 2 = long) */
640 32, /* bitsize */
641 FALSE, /* pc_relative */
642 0, /* bitpos */
643 complain_overflow_dont,/* complain_on_overflow */
644 bfd_elf_generic_reloc, /* special_function */
645 "R_ARM_ROSEGREL32", /* name */
646 FALSE, /* partial_inplace */
647 0xffffffff, /* src_mask */
648 0xffffffff, /* dst_mask */
649 FALSE), /* pcrel_offset */
650
651 HOWTO (R_ARM_V4BX, /* type */
652 0, /* rightshift */
653 2, /* size (0 = byte, 1 = short, 2 = long) */
654 32, /* bitsize */
655 FALSE, /* pc_relative */
656 0, /* bitpos */
657 complain_overflow_dont,/* complain_on_overflow */
658 bfd_elf_generic_reloc, /* special_function */
659 "R_ARM_V4BX", /* name */
660 FALSE, /* partial_inplace */
661 0xffffffff, /* src_mask */
662 0xffffffff, /* dst_mask */
663 FALSE), /* pcrel_offset */
664
665 HOWTO (R_ARM_TARGET2, /* type */
666 0, /* rightshift */
667 2, /* size (0 = byte, 1 = short, 2 = long) */
668 32, /* bitsize */
669 FALSE, /* pc_relative */
670 0, /* bitpos */
671 complain_overflow_signed,/* complain_on_overflow */
672 bfd_elf_generic_reloc, /* special_function */
673 "R_ARM_TARGET2", /* name */
674 FALSE, /* partial_inplace */
675 0xffffffff, /* src_mask */
676 0xffffffff, /* dst_mask */
677 TRUE), /* pcrel_offset */
678
679 HOWTO (R_ARM_PREL31, /* type */
680 0, /* rightshift */
681 2, /* size (0 = byte, 1 = short, 2 = long) */
682 31, /* bitsize */
683 TRUE, /* pc_relative */
684 0, /* bitpos */
685 complain_overflow_signed,/* complain_on_overflow */
686 bfd_elf_generic_reloc, /* special_function */
687 "R_ARM_PREL31", /* name */
688 FALSE, /* partial_inplace */
689 0x7fffffff, /* src_mask */
690 0x7fffffff, /* dst_mask */
691 TRUE), /* pcrel_offset */
692
693 HOWTO (R_ARM_MOVW_ABS_NC, /* type */
694 0, /* rightshift */
695 2, /* size (0 = byte, 1 = short, 2 = long) */
696 16, /* bitsize */
697 FALSE, /* pc_relative */
698 0, /* bitpos */
699 complain_overflow_dont,/* complain_on_overflow */
700 bfd_elf_generic_reloc, /* special_function */
701 "R_ARM_MOVW_ABS_NC", /* name */
702 FALSE, /* partial_inplace */
703 0x000f0fff, /* src_mask */
704 0x000f0fff, /* dst_mask */
705 FALSE), /* pcrel_offset */
706
707 HOWTO (R_ARM_MOVT_ABS, /* type */
708 0, /* rightshift */
709 2, /* size (0 = byte, 1 = short, 2 = long) */
710 16, /* bitsize */
711 FALSE, /* pc_relative */
712 0, /* bitpos */
713 complain_overflow_bitfield,/* complain_on_overflow */
714 bfd_elf_generic_reloc, /* special_function */
715 "R_ARM_MOVT_ABS", /* name */
716 FALSE, /* partial_inplace */
717 0x000f0fff, /* src_mask */
718 0x000f0fff, /* dst_mask */
719 FALSE), /* pcrel_offset */
720
721 HOWTO (R_ARM_MOVW_PREL_NC, /* type */
722 0, /* rightshift */
723 2, /* size (0 = byte, 1 = short, 2 = long) */
724 16, /* bitsize */
725 TRUE, /* pc_relative */
726 0, /* bitpos */
727 complain_overflow_dont,/* complain_on_overflow */
728 bfd_elf_generic_reloc, /* special_function */
729 "R_ARM_MOVW_PREL_NC", /* name */
730 FALSE, /* partial_inplace */
731 0x000f0fff, /* src_mask */
732 0x000f0fff, /* dst_mask */
733 TRUE), /* pcrel_offset */
734
735 HOWTO (R_ARM_MOVT_PREL, /* type */
736 0, /* rightshift */
737 2, /* size (0 = byte, 1 = short, 2 = long) */
738 16, /* bitsize */
739 TRUE, /* pc_relative */
740 0, /* bitpos */
741 complain_overflow_bitfield,/* complain_on_overflow */
742 bfd_elf_generic_reloc, /* special_function */
743 "R_ARM_MOVT_PREL", /* name */
744 FALSE, /* partial_inplace */
745 0x000f0fff, /* src_mask */
746 0x000f0fff, /* dst_mask */
747 TRUE), /* pcrel_offset */
748
749 HOWTO (R_ARM_THM_MOVW_ABS_NC, /* type */
750 0, /* rightshift */
751 2, /* size (0 = byte, 1 = short, 2 = long) */
752 16, /* bitsize */
753 FALSE, /* pc_relative */
754 0, /* bitpos */
755 complain_overflow_dont,/* complain_on_overflow */
756 bfd_elf_generic_reloc, /* special_function */
757 "R_ARM_THM_MOVW_ABS_NC",/* name */
758 FALSE, /* partial_inplace */
759 0x040f70ff, /* src_mask */
760 0x040f70ff, /* dst_mask */
761 FALSE), /* pcrel_offset */
762
763 HOWTO (R_ARM_THM_MOVT_ABS, /* type */
764 0, /* rightshift */
765 2, /* size (0 = byte, 1 = short, 2 = long) */
766 16, /* bitsize */
767 FALSE, /* pc_relative */
768 0, /* bitpos */
769 complain_overflow_bitfield,/* complain_on_overflow */
770 bfd_elf_generic_reloc, /* special_function */
771 "R_ARM_THM_MOVT_ABS", /* name */
772 FALSE, /* partial_inplace */
773 0x040f70ff, /* src_mask */
774 0x040f70ff, /* dst_mask */
775 FALSE), /* pcrel_offset */
776
777 HOWTO (R_ARM_THM_MOVW_PREL_NC,/* type */
778 0, /* rightshift */
779 2, /* size (0 = byte, 1 = short, 2 = long) */
780 16, /* bitsize */
781 TRUE, /* pc_relative */
782 0, /* bitpos */
783 complain_overflow_dont,/* complain_on_overflow */
784 bfd_elf_generic_reloc, /* special_function */
785 "R_ARM_THM_MOVW_PREL_NC",/* name */
786 FALSE, /* partial_inplace */
787 0x040f70ff, /* src_mask */
788 0x040f70ff, /* dst_mask */
789 TRUE), /* pcrel_offset */
790
791 HOWTO (R_ARM_THM_MOVT_PREL, /* type */
792 0, /* rightshift */
793 2, /* size (0 = byte, 1 = short, 2 = long) */
794 16, /* bitsize */
795 TRUE, /* pc_relative */
796 0, /* bitpos */
797 complain_overflow_bitfield,/* complain_on_overflow */
798 bfd_elf_generic_reloc, /* special_function */
799 "R_ARM_THM_MOVT_PREL", /* name */
800 FALSE, /* partial_inplace */
801 0x040f70ff, /* src_mask */
802 0x040f70ff, /* dst_mask */
803 TRUE), /* pcrel_offset */
804
805 HOWTO (R_ARM_THM_JUMP19, /* type */
806 1, /* rightshift */
807 2, /* size (0 = byte, 1 = short, 2 = long) */
808 19, /* bitsize */
809 TRUE, /* pc_relative */
810 0, /* bitpos */
811 complain_overflow_signed,/* complain_on_overflow */
812 bfd_elf_generic_reloc, /* special_function */
813 "R_ARM_THM_JUMP19", /* name */
814 FALSE, /* partial_inplace */
815 0x043f2fff, /* src_mask */
816 0x043f2fff, /* dst_mask */
817 TRUE), /* pcrel_offset */
818
819 HOWTO (R_ARM_THM_JUMP6, /* type */
820 1, /* rightshift */
821 1, /* size (0 = byte, 1 = short, 2 = long) */
822 6, /* bitsize */
823 TRUE, /* pc_relative */
824 0, /* bitpos */
825 complain_overflow_unsigned,/* complain_on_overflow */
826 bfd_elf_generic_reloc, /* special_function */
827 "R_ARM_THM_JUMP6", /* name */
828 FALSE, /* partial_inplace */
829 0x02f8, /* src_mask */
830 0x02f8, /* dst_mask */
831 TRUE), /* pcrel_offset */
832
833 /* These are declared as 13-bit signed relocations because we can
834 address -4095 .. 4095(base) by altering ADDW to SUBW or vice
835 versa. */
836 HOWTO (R_ARM_THM_ALU_PREL_11_0,/* type */
837 0, /* rightshift */
838 2, /* size (0 = byte, 1 = short, 2 = long) */
839 13, /* bitsize */
840 TRUE, /* pc_relative */
841 0, /* bitpos */
842 complain_overflow_dont,/* complain_on_overflow */
843 bfd_elf_generic_reloc, /* special_function */
844 "R_ARM_THM_ALU_PREL_11_0",/* name */
845 FALSE, /* partial_inplace */
846 0xffffffff, /* src_mask */
847 0xffffffff, /* dst_mask */
848 TRUE), /* pcrel_offset */
849
850 HOWTO (R_ARM_THM_PC12, /* type */
851 0, /* rightshift */
852 2, /* size (0 = byte, 1 = short, 2 = long) */
853 13, /* bitsize */
854 TRUE, /* pc_relative */
855 0, /* bitpos */
856 complain_overflow_dont,/* complain_on_overflow */
857 bfd_elf_generic_reloc, /* special_function */
858 "R_ARM_THM_PC12", /* name */
859 FALSE, /* partial_inplace */
860 0xffffffff, /* src_mask */
861 0xffffffff, /* dst_mask */
862 TRUE), /* pcrel_offset */
863
864 HOWTO (R_ARM_ABS32_NOI, /* type */
865 0, /* rightshift */
866 2, /* size (0 = byte, 1 = short, 2 = long) */
867 32, /* bitsize */
868 FALSE, /* pc_relative */
869 0, /* bitpos */
870 complain_overflow_dont,/* complain_on_overflow */
871 bfd_elf_generic_reloc, /* special_function */
872 "R_ARM_ABS32_NOI", /* name */
873 FALSE, /* partial_inplace */
874 0xffffffff, /* src_mask */
875 0xffffffff, /* dst_mask */
876 FALSE), /* pcrel_offset */
877
878 HOWTO (R_ARM_REL32_NOI, /* type */
879 0, /* rightshift */
880 2, /* size (0 = byte, 1 = short, 2 = long) */
881 32, /* bitsize */
882 TRUE, /* pc_relative */
883 0, /* bitpos */
884 complain_overflow_dont,/* complain_on_overflow */
885 bfd_elf_generic_reloc, /* special_function */
886 "R_ARM_REL32_NOI", /* name */
887 FALSE, /* partial_inplace */
888 0xffffffff, /* src_mask */
889 0xffffffff, /* dst_mask */
890 FALSE), /* pcrel_offset */
891
892 /* Group relocations. */
893
894 HOWTO (R_ARM_ALU_PC_G0_NC, /* type */
895 0, /* rightshift */
896 2, /* size (0 = byte, 1 = short, 2 = long) */
897 32, /* bitsize */
898 TRUE, /* pc_relative */
899 0, /* bitpos */
900 complain_overflow_dont,/* complain_on_overflow */
901 bfd_elf_generic_reloc, /* special_function */
902 "R_ARM_ALU_PC_G0_NC", /* name */
903 FALSE, /* partial_inplace */
904 0xffffffff, /* src_mask */
905 0xffffffff, /* dst_mask */
906 TRUE), /* pcrel_offset */
907
908 HOWTO (R_ARM_ALU_PC_G0, /* type */
909 0, /* rightshift */
910 2, /* size (0 = byte, 1 = short, 2 = long) */
911 32, /* bitsize */
912 TRUE, /* pc_relative */
913 0, /* bitpos */
914 complain_overflow_dont,/* complain_on_overflow */
915 bfd_elf_generic_reloc, /* special_function */
916 "R_ARM_ALU_PC_G0", /* name */
917 FALSE, /* partial_inplace */
918 0xffffffff, /* src_mask */
919 0xffffffff, /* dst_mask */
920 TRUE), /* pcrel_offset */
921
922 HOWTO (R_ARM_ALU_PC_G1_NC, /* type */
923 0, /* rightshift */
924 2, /* size (0 = byte, 1 = short, 2 = long) */
925 32, /* bitsize */
926 TRUE, /* pc_relative */
927 0, /* bitpos */
928 complain_overflow_dont,/* complain_on_overflow */
929 bfd_elf_generic_reloc, /* special_function */
930 "R_ARM_ALU_PC_G1_NC", /* name */
931 FALSE, /* partial_inplace */
932 0xffffffff, /* src_mask */
933 0xffffffff, /* dst_mask */
934 TRUE), /* pcrel_offset */
935
936 HOWTO (R_ARM_ALU_PC_G1, /* type */
937 0, /* rightshift */
938 2, /* size (0 = byte, 1 = short, 2 = long) */
939 32, /* bitsize */
940 TRUE, /* pc_relative */
941 0, /* bitpos */
942 complain_overflow_dont,/* complain_on_overflow */
943 bfd_elf_generic_reloc, /* special_function */
944 "R_ARM_ALU_PC_G1", /* name */
945 FALSE, /* partial_inplace */
946 0xffffffff, /* src_mask */
947 0xffffffff, /* dst_mask */
948 TRUE), /* pcrel_offset */
949
950 HOWTO (R_ARM_ALU_PC_G2, /* type */
951 0, /* rightshift */
952 2, /* size (0 = byte, 1 = short, 2 = long) */
953 32, /* bitsize */
954 TRUE, /* pc_relative */
955 0, /* bitpos */
956 complain_overflow_dont,/* complain_on_overflow */
957 bfd_elf_generic_reloc, /* special_function */
958 "R_ARM_ALU_PC_G2", /* name */
959 FALSE, /* partial_inplace */
960 0xffffffff, /* src_mask */
961 0xffffffff, /* dst_mask */
962 TRUE), /* pcrel_offset */
963
964 HOWTO (R_ARM_LDR_PC_G1, /* type */
965 0, /* rightshift */
966 2, /* size (0 = byte, 1 = short, 2 = long) */
967 32, /* bitsize */
968 TRUE, /* pc_relative */
969 0, /* bitpos */
970 complain_overflow_dont,/* complain_on_overflow */
971 bfd_elf_generic_reloc, /* special_function */
972 "R_ARM_LDR_PC_G1", /* name */
973 FALSE, /* partial_inplace */
974 0xffffffff, /* src_mask */
975 0xffffffff, /* dst_mask */
976 TRUE), /* pcrel_offset */
977
978 HOWTO (R_ARM_LDR_PC_G2, /* type */
979 0, /* rightshift */
980 2, /* size (0 = byte, 1 = short, 2 = long) */
981 32, /* bitsize */
982 TRUE, /* pc_relative */
983 0, /* bitpos */
984 complain_overflow_dont,/* complain_on_overflow */
985 bfd_elf_generic_reloc, /* special_function */
986 "R_ARM_LDR_PC_G2", /* name */
987 FALSE, /* partial_inplace */
988 0xffffffff, /* src_mask */
989 0xffffffff, /* dst_mask */
990 TRUE), /* pcrel_offset */
991
992 HOWTO (R_ARM_LDRS_PC_G0, /* type */
993 0, /* rightshift */
994 2, /* size (0 = byte, 1 = short, 2 = long) */
995 32, /* bitsize */
996 TRUE, /* pc_relative */
997 0, /* bitpos */
998 complain_overflow_dont,/* complain_on_overflow */
999 bfd_elf_generic_reloc, /* special_function */
1000 "R_ARM_LDRS_PC_G0", /* name */
1001 FALSE, /* partial_inplace */
1002 0xffffffff, /* src_mask */
1003 0xffffffff, /* dst_mask */
1004 TRUE), /* pcrel_offset */
1005
1006 HOWTO (R_ARM_LDRS_PC_G1, /* type */
1007 0, /* rightshift */
1008 2, /* size (0 = byte, 1 = short, 2 = long) */
1009 32, /* bitsize */
1010 TRUE, /* pc_relative */
1011 0, /* bitpos */
1012 complain_overflow_dont,/* complain_on_overflow */
1013 bfd_elf_generic_reloc, /* special_function */
1014 "R_ARM_LDRS_PC_G1", /* name */
1015 FALSE, /* partial_inplace */
1016 0xffffffff, /* src_mask */
1017 0xffffffff, /* dst_mask */
1018 TRUE), /* pcrel_offset */
1019
1020 HOWTO (R_ARM_LDRS_PC_G2, /* type */
1021 0, /* rightshift */
1022 2, /* size (0 = byte, 1 = short, 2 = long) */
1023 32, /* bitsize */
1024 TRUE, /* pc_relative */
1025 0, /* bitpos */
1026 complain_overflow_dont,/* complain_on_overflow */
1027 bfd_elf_generic_reloc, /* special_function */
1028 "R_ARM_LDRS_PC_G2", /* name */
1029 FALSE, /* partial_inplace */
1030 0xffffffff, /* src_mask */
1031 0xffffffff, /* dst_mask */
1032 TRUE), /* pcrel_offset */
1033
1034 HOWTO (R_ARM_LDC_PC_G0, /* type */
1035 0, /* rightshift */
1036 2, /* size (0 = byte, 1 = short, 2 = long) */
1037 32, /* bitsize */
1038 TRUE, /* pc_relative */
1039 0, /* bitpos */
1040 complain_overflow_dont,/* complain_on_overflow */
1041 bfd_elf_generic_reloc, /* special_function */
1042 "R_ARM_LDC_PC_G0", /* name */
1043 FALSE, /* partial_inplace */
1044 0xffffffff, /* src_mask */
1045 0xffffffff, /* dst_mask */
1046 TRUE), /* pcrel_offset */
1047
1048 HOWTO (R_ARM_LDC_PC_G1, /* type */
1049 0, /* rightshift */
1050 2, /* size (0 = byte, 1 = short, 2 = long) */
1051 32, /* bitsize */
1052 TRUE, /* pc_relative */
1053 0, /* bitpos */
1054 complain_overflow_dont,/* complain_on_overflow */
1055 bfd_elf_generic_reloc, /* special_function */
1056 "R_ARM_LDC_PC_G1", /* name */
1057 FALSE, /* partial_inplace */
1058 0xffffffff, /* src_mask */
1059 0xffffffff, /* dst_mask */
1060 TRUE), /* pcrel_offset */
1061
1062 HOWTO (R_ARM_LDC_PC_G2, /* type */
1063 0, /* rightshift */
1064 2, /* size (0 = byte, 1 = short, 2 = long) */
1065 32, /* bitsize */
1066 TRUE, /* pc_relative */
1067 0, /* bitpos */
1068 complain_overflow_dont,/* complain_on_overflow */
1069 bfd_elf_generic_reloc, /* special_function */
1070 "R_ARM_LDC_PC_G2", /* name */
1071 FALSE, /* partial_inplace */
1072 0xffffffff, /* src_mask */
1073 0xffffffff, /* dst_mask */
1074 TRUE), /* pcrel_offset */
1075
1076 HOWTO (R_ARM_ALU_SB_G0_NC, /* type */
1077 0, /* rightshift */
1078 2, /* size (0 = byte, 1 = short, 2 = long) */
1079 32, /* bitsize */
1080 TRUE, /* pc_relative */
1081 0, /* bitpos */
1082 complain_overflow_dont,/* complain_on_overflow */
1083 bfd_elf_generic_reloc, /* special_function */
1084 "R_ARM_ALU_SB_G0_NC", /* name */
1085 FALSE, /* partial_inplace */
1086 0xffffffff, /* src_mask */
1087 0xffffffff, /* dst_mask */
1088 TRUE), /* pcrel_offset */
1089
1090 HOWTO (R_ARM_ALU_SB_G0, /* type */
1091 0, /* rightshift */
1092 2, /* size (0 = byte, 1 = short, 2 = long) */
1093 32, /* bitsize */
1094 TRUE, /* pc_relative */
1095 0, /* bitpos */
1096 complain_overflow_dont,/* complain_on_overflow */
1097 bfd_elf_generic_reloc, /* special_function */
1098 "R_ARM_ALU_SB_G0", /* name */
1099 FALSE, /* partial_inplace */
1100 0xffffffff, /* src_mask */
1101 0xffffffff, /* dst_mask */
1102 TRUE), /* pcrel_offset */
1103
1104 HOWTO (R_ARM_ALU_SB_G1_NC, /* type */
1105 0, /* rightshift */
1106 2, /* size (0 = byte, 1 = short, 2 = long) */
1107 32, /* bitsize */
1108 TRUE, /* pc_relative */
1109 0, /* bitpos */
1110 complain_overflow_dont,/* complain_on_overflow */
1111 bfd_elf_generic_reloc, /* special_function */
1112 "R_ARM_ALU_SB_G1_NC", /* name */
1113 FALSE, /* partial_inplace */
1114 0xffffffff, /* src_mask */
1115 0xffffffff, /* dst_mask */
1116 TRUE), /* pcrel_offset */
1117
1118 HOWTO (R_ARM_ALU_SB_G1, /* type */
1119 0, /* rightshift */
1120 2, /* size (0 = byte, 1 = short, 2 = long) */
1121 32, /* bitsize */
1122 TRUE, /* pc_relative */
1123 0, /* bitpos */
1124 complain_overflow_dont,/* complain_on_overflow */
1125 bfd_elf_generic_reloc, /* special_function */
1126 "R_ARM_ALU_SB_G1", /* name */
1127 FALSE, /* partial_inplace */
1128 0xffffffff, /* src_mask */
1129 0xffffffff, /* dst_mask */
1130 TRUE), /* pcrel_offset */
1131
1132 HOWTO (R_ARM_ALU_SB_G2, /* type */
1133 0, /* rightshift */
1134 2, /* size (0 = byte, 1 = short, 2 = long) */
1135 32, /* bitsize */
1136 TRUE, /* pc_relative */
1137 0, /* bitpos */
1138 complain_overflow_dont,/* complain_on_overflow */
1139 bfd_elf_generic_reloc, /* special_function */
1140 "R_ARM_ALU_SB_G2", /* name */
1141 FALSE, /* partial_inplace */
1142 0xffffffff, /* src_mask */
1143 0xffffffff, /* dst_mask */
1144 TRUE), /* pcrel_offset */
1145
1146 HOWTO (R_ARM_LDR_SB_G0, /* type */
1147 0, /* rightshift */
1148 2, /* size (0 = byte, 1 = short, 2 = long) */
1149 32, /* bitsize */
1150 TRUE, /* pc_relative */
1151 0, /* bitpos */
1152 complain_overflow_dont,/* complain_on_overflow */
1153 bfd_elf_generic_reloc, /* special_function */
1154 "R_ARM_LDR_SB_G0", /* name */
1155 FALSE, /* partial_inplace */
1156 0xffffffff, /* src_mask */
1157 0xffffffff, /* dst_mask */
1158 TRUE), /* pcrel_offset */
1159
1160 HOWTO (R_ARM_LDR_SB_G1, /* type */
1161 0, /* rightshift */
1162 2, /* size (0 = byte, 1 = short, 2 = long) */
1163 32, /* bitsize */
1164 TRUE, /* pc_relative */
1165 0, /* bitpos */
1166 complain_overflow_dont,/* complain_on_overflow */
1167 bfd_elf_generic_reloc, /* special_function */
1168 "R_ARM_LDR_SB_G1", /* name */
1169 FALSE, /* partial_inplace */
1170 0xffffffff, /* src_mask */
1171 0xffffffff, /* dst_mask */
1172 TRUE), /* pcrel_offset */
1173
1174 HOWTO (R_ARM_LDR_SB_G2, /* type */
1175 0, /* rightshift */
1176 2, /* size (0 = byte, 1 = short, 2 = long) */
1177 32, /* bitsize */
1178 TRUE, /* pc_relative */
1179 0, /* bitpos */
1180 complain_overflow_dont,/* complain_on_overflow */
1181 bfd_elf_generic_reloc, /* special_function */
1182 "R_ARM_LDR_SB_G2", /* name */
1183 FALSE, /* partial_inplace */
1184 0xffffffff, /* src_mask */
1185 0xffffffff, /* dst_mask */
1186 TRUE), /* pcrel_offset */
1187
1188 HOWTO (R_ARM_LDRS_SB_G0, /* type */
1189 0, /* rightshift */
1190 2, /* size (0 = byte, 1 = short, 2 = long) */
1191 32, /* bitsize */
1192 TRUE, /* pc_relative */
1193 0, /* bitpos */
1194 complain_overflow_dont,/* complain_on_overflow */
1195 bfd_elf_generic_reloc, /* special_function */
1196 "R_ARM_LDRS_SB_G0", /* name */
1197 FALSE, /* partial_inplace */
1198 0xffffffff, /* src_mask */
1199 0xffffffff, /* dst_mask */
1200 TRUE), /* pcrel_offset */
1201
1202 HOWTO (R_ARM_LDRS_SB_G1, /* type */
1203 0, /* rightshift */
1204 2, /* size (0 = byte, 1 = short, 2 = long) */
1205 32, /* bitsize */
1206 TRUE, /* pc_relative */
1207 0, /* bitpos */
1208 complain_overflow_dont,/* complain_on_overflow */
1209 bfd_elf_generic_reloc, /* special_function */
1210 "R_ARM_LDRS_SB_G1", /* name */
1211 FALSE, /* partial_inplace */
1212 0xffffffff, /* src_mask */
1213 0xffffffff, /* dst_mask */
1214 TRUE), /* pcrel_offset */
1215
1216 HOWTO (R_ARM_LDRS_SB_G2, /* type */
1217 0, /* rightshift */
1218 2, /* size (0 = byte, 1 = short, 2 = long) */
1219 32, /* bitsize */
1220 TRUE, /* pc_relative */
1221 0, /* bitpos */
1222 complain_overflow_dont,/* complain_on_overflow */
1223 bfd_elf_generic_reloc, /* special_function */
1224 "R_ARM_LDRS_SB_G2", /* name */
1225 FALSE, /* partial_inplace */
1226 0xffffffff, /* src_mask */
1227 0xffffffff, /* dst_mask */
1228 TRUE), /* pcrel_offset */
1229
1230 HOWTO (R_ARM_LDC_SB_G0, /* type */
1231 0, /* rightshift */
1232 2, /* size (0 = byte, 1 = short, 2 = long) */
1233 32, /* bitsize */
1234 TRUE, /* pc_relative */
1235 0, /* bitpos */
1236 complain_overflow_dont,/* complain_on_overflow */
1237 bfd_elf_generic_reloc, /* special_function */
1238 "R_ARM_LDC_SB_G0", /* name */
1239 FALSE, /* partial_inplace */
1240 0xffffffff, /* src_mask */
1241 0xffffffff, /* dst_mask */
1242 TRUE), /* pcrel_offset */
1243
1244 HOWTO (R_ARM_LDC_SB_G1, /* type */
1245 0, /* rightshift */
1246 2, /* size (0 = byte, 1 = short, 2 = long) */
1247 32, /* bitsize */
1248 TRUE, /* pc_relative */
1249 0, /* bitpos */
1250 complain_overflow_dont,/* complain_on_overflow */
1251 bfd_elf_generic_reloc, /* special_function */
1252 "R_ARM_LDC_SB_G1", /* name */
1253 FALSE, /* partial_inplace */
1254 0xffffffff, /* src_mask */
1255 0xffffffff, /* dst_mask */
1256 TRUE), /* pcrel_offset */
1257
1258 HOWTO (R_ARM_LDC_SB_G2, /* type */
1259 0, /* rightshift */
1260 2, /* size (0 = byte, 1 = short, 2 = long) */
1261 32, /* bitsize */
1262 TRUE, /* pc_relative */
1263 0, /* bitpos */
1264 complain_overflow_dont,/* complain_on_overflow */
1265 bfd_elf_generic_reloc, /* special_function */
1266 "R_ARM_LDC_SB_G2", /* name */
1267 FALSE, /* partial_inplace */
1268 0xffffffff, /* src_mask */
1269 0xffffffff, /* dst_mask */
1270 TRUE), /* pcrel_offset */
1271
1272 /* End of group relocations. */
1273
1274 HOWTO (R_ARM_MOVW_BREL_NC, /* type */
1275 0, /* rightshift */
1276 2, /* size (0 = byte, 1 = short, 2 = long) */
1277 16, /* bitsize */
1278 FALSE, /* pc_relative */
1279 0, /* bitpos */
1280 complain_overflow_dont,/* complain_on_overflow */
1281 bfd_elf_generic_reloc, /* special_function */
1282 "R_ARM_MOVW_BREL_NC", /* name */
1283 FALSE, /* partial_inplace */
1284 0x0000ffff, /* src_mask */
1285 0x0000ffff, /* dst_mask */
1286 FALSE), /* pcrel_offset */
1287
1288 HOWTO (R_ARM_MOVT_BREL, /* type */
1289 0, /* rightshift */
1290 2, /* size (0 = byte, 1 = short, 2 = long) */
1291 16, /* bitsize */
1292 FALSE, /* pc_relative */
1293 0, /* bitpos */
1294 complain_overflow_bitfield,/* complain_on_overflow */
1295 bfd_elf_generic_reloc, /* special_function */
1296 "R_ARM_MOVT_BREL", /* name */
1297 FALSE, /* partial_inplace */
1298 0x0000ffff, /* src_mask */
1299 0x0000ffff, /* dst_mask */
1300 FALSE), /* pcrel_offset */
1301
1302 HOWTO (R_ARM_MOVW_BREL, /* type */
1303 0, /* rightshift */
1304 2, /* size (0 = byte, 1 = short, 2 = long) */
1305 16, /* bitsize */
1306 FALSE, /* pc_relative */
1307 0, /* bitpos */
1308 complain_overflow_dont,/* complain_on_overflow */
1309 bfd_elf_generic_reloc, /* special_function */
1310 "R_ARM_MOVW_BREL", /* name */
1311 FALSE, /* partial_inplace */
1312 0x0000ffff, /* src_mask */
1313 0x0000ffff, /* dst_mask */
1314 FALSE), /* pcrel_offset */
1315
1316 HOWTO (R_ARM_THM_MOVW_BREL_NC,/* type */
1317 0, /* rightshift */
1318 2, /* size (0 = byte, 1 = short, 2 = long) */
1319 16, /* bitsize */
1320 FALSE, /* pc_relative */
1321 0, /* bitpos */
1322 complain_overflow_dont,/* complain_on_overflow */
1323 bfd_elf_generic_reloc, /* special_function */
1324 "R_ARM_THM_MOVW_BREL_NC",/* name */
1325 FALSE, /* partial_inplace */
1326 0x040f70ff, /* src_mask */
1327 0x040f70ff, /* dst_mask */
1328 FALSE), /* pcrel_offset */
1329
1330 HOWTO (R_ARM_THM_MOVT_BREL, /* type */
1331 0, /* rightshift */
1332 2, /* size (0 = byte, 1 = short, 2 = long) */
1333 16, /* bitsize */
1334 FALSE, /* pc_relative */
1335 0, /* bitpos */
1336 complain_overflow_bitfield,/* complain_on_overflow */
1337 bfd_elf_generic_reloc, /* special_function */
1338 "R_ARM_THM_MOVT_BREL", /* name */
1339 FALSE, /* partial_inplace */
1340 0x040f70ff, /* src_mask */
1341 0x040f70ff, /* dst_mask */
1342 FALSE), /* pcrel_offset */
1343
1344 HOWTO (R_ARM_THM_MOVW_BREL, /* type */
1345 0, /* rightshift */
1346 2, /* size (0 = byte, 1 = short, 2 = long) */
1347 16, /* bitsize */
1348 FALSE, /* pc_relative */
1349 0, /* bitpos */
1350 complain_overflow_dont,/* complain_on_overflow */
1351 bfd_elf_generic_reloc, /* special_function */
1352 "R_ARM_THM_MOVW_BREL", /* name */
1353 FALSE, /* partial_inplace */
1354 0x040f70ff, /* src_mask */
1355 0x040f70ff, /* dst_mask */
1356 FALSE), /* pcrel_offset */
1357
1358 HOWTO (R_ARM_TLS_GOTDESC, /* type */
1359 0, /* rightshift */
1360 2, /* size (0 = byte, 1 = short, 2 = long) */
1361 32, /* bitsize */
1362 FALSE, /* pc_relative */
1363 0, /* bitpos */
1364 complain_overflow_bitfield,/* complain_on_overflow */
1365 NULL, /* special_function */
1366 "R_ARM_TLS_GOTDESC", /* name */
1367 TRUE, /* partial_inplace */
1368 0xffffffff, /* src_mask */
1369 0xffffffff, /* dst_mask */
1370 FALSE), /* pcrel_offset */
1371
1372 HOWTO (R_ARM_TLS_CALL, /* type */
1373 0, /* rightshift */
1374 2, /* size (0 = byte, 1 = short, 2 = long) */
1375 24, /* bitsize */
1376 FALSE, /* pc_relative */
1377 0, /* bitpos */
1378 complain_overflow_dont,/* complain_on_overflow */
1379 bfd_elf_generic_reloc, /* special_function */
1380 "R_ARM_TLS_CALL", /* name */
1381 FALSE, /* partial_inplace */
1382 0x00ffffff, /* src_mask */
1383 0x00ffffff, /* dst_mask */
1384 FALSE), /* pcrel_offset */
1385
1386 HOWTO (R_ARM_TLS_DESCSEQ, /* type */
1387 0, /* rightshift */
1388 2, /* size (0 = byte, 1 = short, 2 = long) */
1389 0, /* bitsize */
1390 FALSE, /* pc_relative */
1391 0, /* bitpos */
1392 complain_overflow_bitfield,/* complain_on_overflow */
1393 bfd_elf_generic_reloc, /* special_function */
1394 "R_ARM_TLS_DESCSEQ", /* name */
1395 FALSE, /* partial_inplace */
1396 0x00000000, /* src_mask */
1397 0x00000000, /* dst_mask */
1398 FALSE), /* pcrel_offset */
1399
1400 HOWTO (R_ARM_THM_TLS_CALL, /* type */
1401 0, /* rightshift */
1402 2, /* size (0 = byte, 1 = short, 2 = long) */
1403 24, /* bitsize */
1404 FALSE, /* pc_relative */
1405 0, /* bitpos */
1406 complain_overflow_dont,/* complain_on_overflow */
1407 bfd_elf_generic_reloc, /* special_function */
1408 "R_ARM_THM_TLS_CALL", /* name */
1409 FALSE, /* partial_inplace */
1410 0x07ff07ff, /* src_mask */
1411 0x07ff07ff, /* dst_mask */
1412 FALSE), /* pcrel_offset */
1413
1414 HOWTO (R_ARM_PLT32_ABS, /* type */
1415 0, /* rightshift */
1416 2, /* size (0 = byte, 1 = short, 2 = long) */
1417 32, /* bitsize */
1418 FALSE, /* pc_relative */
1419 0, /* bitpos */
1420 complain_overflow_dont,/* complain_on_overflow */
1421 bfd_elf_generic_reloc, /* special_function */
1422 "R_ARM_PLT32_ABS", /* name */
1423 FALSE, /* partial_inplace */
1424 0xffffffff, /* src_mask */
1425 0xffffffff, /* dst_mask */
1426 FALSE), /* pcrel_offset */
1427
1428 HOWTO (R_ARM_GOT_ABS, /* type */
1429 0, /* rightshift */
1430 2, /* size (0 = byte, 1 = short, 2 = long) */
1431 32, /* bitsize */
1432 FALSE, /* pc_relative */
1433 0, /* bitpos */
1434 complain_overflow_dont,/* complain_on_overflow */
1435 bfd_elf_generic_reloc, /* special_function */
1436 "R_ARM_GOT_ABS", /* name */
1437 FALSE, /* partial_inplace */
1438 0xffffffff, /* src_mask */
1439 0xffffffff, /* dst_mask */
1440 FALSE), /* pcrel_offset */
1441
1442 HOWTO (R_ARM_GOT_PREL, /* type */
1443 0, /* rightshift */
1444 2, /* size (0 = byte, 1 = short, 2 = long) */
1445 32, /* bitsize */
1446 TRUE, /* pc_relative */
1447 0, /* bitpos */
1448 complain_overflow_dont, /* complain_on_overflow */
1449 bfd_elf_generic_reloc, /* special_function */
1450 "R_ARM_GOT_PREL", /* name */
1451 FALSE, /* partial_inplace */
1452 0xffffffff, /* src_mask */
1453 0xffffffff, /* dst_mask */
1454 TRUE), /* pcrel_offset */
1455
1456 HOWTO (R_ARM_GOT_BREL12, /* type */
1457 0, /* rightshift */
1458 2, /* size (0 = byte, 1 = short, 2 = long) */
1459 12, /* bitsize */
1460 FALSE, /* pc_relative */
1461 0, /* bitpos */
1462 complain_overflow_bitfield,/* complain_on_overflow */
1463 bfd_elf_generic_reloc, /* special_function */
1464 "R_ARM_GOT_BREL12", /* name */
1465 FALSE, /* partial_inplace */
1466 0x00000fff, /* src_mask */
1467 0x00000fff, /* dst_mask */
1468 FALSE), /* pcrel_offset */
1469
1470 HOWTO (R_ARM_GOTOFF12, /* type */
1471 0, /* rightshift */
1472 2, /* size (0 = byte, 1 = short, 2 = long) */
1473 12, /* bitsize */
1474 FALSE, /* pc_relative */
1475 0, /* bitpos */
1476 complain_overflow_bitfield,/* complain_on_overflow */
1477 bfd_elf_generic_reloc, /* special_function */
1478 "R_ARM_GOTOFF12", /* name */
1479 FALSE, /* partial_inplace */
1480 0x00000fff, /* src_mask */
1481 0x00000fff, /* dst_mask */
1482 FALSE), /* pcrel_offset */
1483
1484 EMPTY_HOWTO (R_ARM_GOTRELAX), /* reserved for future GOT-load optimizations */
1485
1486 /* GNU extension to record C++ vtable member usage */
1487 HOWTO (R_ARM_GNU_VTENTRY, /* type */
1488 0, /* rightshift */
1489 2, /* size (0 = byte, 1 = short, 2 = long) */
1490 0, /* bitsize */
1491 FALSE, /* pc_relative */
1492 0, /* bitpos */
1493 complain_overflow_dont, /* complain_on_overflow */
1494 _bfd_elf_rel_vtable_reloc_fn, /* special_function */
1495 "R_ARM_GNU_VTENTRY", /* name */
1496 FALSE, /* partial_inplace */
1497 0, /* src_mask */
1498 0, /* dst_mask */
1499 FALSE), /* pcrel_offset */
1500
1501 /* GNU extension to record C++ vtable hierarchy */
1502 HOWTO (R_ARM_GNU_VTINHERIT, /* type */
1503 0, /* rightshift */
1504 2, /* size (0 = byte, 1 = short, 2 = long) */
1505 0, /* bitsize */
1506 FALSE, /* pc_relative */
1507 0, /* bitpos */
1508 complain_overflow_dont, /* complain_on_overflow */
1509 NULL, /* special_function */
1510 "R_ARM_GNU_VTINHERIT", /* name */
1511 FALSE, /* partial_inplace */
1512 0, /* src_mask */
1513 0, /* dst_mask */
1514 FALSE), /* pcrel_offset */
1515
1516 HOWTO (R_ARM_THM_JUMP11, /* type */
1517 1, /* rightshift */
1518 1, /* size (0 = byte, 1 = short, 2 = long) */
1519 11, /* bitsize */
1520 TRUE, /* pc_relative */
1521 0, /* bitpos */
1522 complain_overflow_signed, /* complain_on_overflow */
1523 bfd_elf_generic_reloc, /* special_function */
1524 "R_ARM_THM_JUMP11", /* name */
1525 FALSE, /* partial_inplace */
1526 0x000007ff, /* src_mask */
1527 0x000007ff, /* dst_mask */
1528 TRUE), /* pcrel_offset */
1529
1530 HOWTO (R_ARM_THM_JUMP8, /* type */
1531 1, /* rightshift */
1532 1, /* size (0 = byte, 1 = short, 2 = long) */
1533 8, /* bitsize */
1534 TRUE, /* pc_relative */
1535 0, /* bitpos */
1536 complain_overflow_signed, /* complain_on_overflow */
1537 bfd_elf_generic_reloc, /* special_function */
1538 "R_ARM_THM_JUMP8", /* name */
1539 FALSE, /* partial_inplace */
1540 0x000000ff, /* src_mask */
1541 0x000000ff, /* dst_mask */
1542 TRUE), /* pcrel_offset */
1543
1544 /* TLS relocations */
1545 HOWTO (R_ARM_TLS_GD32, /* type */
1546 0, /* rightshift */
1547 2, /* size (0 = byte, 1 = short, 2 = long) */
1548 32, /* bitsize */
1549 FALSE, /* pc_relative */
1550 0, /* bitpos */
1551 complain_overflow_bitfield,/* complain_on_overflow */
1552 NULL, /* special_function */
1553 "R_ARM_TLS_GD32", /* name */
1554 TRUE, /* partial_inplace */
1555 0xffffffff, /* src_mask */
1556 0xffffffff, /* dst_mask */
1557 FALSE), /* pcrel_offset */
1558
1559 HOWTO (R_ARM_TLS_LDM32, /* type */
1560 0, /* rightshift */
1561 2, /* size (0 = byte, 1 = short, 2 = long) */
1562 32, /* bitsize */
1563 FALSE, /* pc_relative */
1564 0, /* bitpos */
1565 complain_overflow_bitfield,/* complain_on_overflow */
1566 bfd_elf_generic_reloc, /* special_function */
1567 "R_ARM_TLS_LDM32", /* name */
1568 TRUE, /* partial_inplace */
1569 0xffffffff, /* src_mask */
1570 0xffffffff, /* dst_mask */
1571 FALSE), /* pcrel_offset */
1572
1573 HOWTO (R_ARM_TLS_LDO32, /* type */
1574 0, /* rightshift */
1575 2, /* size (0 = byte, 1 = short, 2 = long) */
1576 32, /* bitsize */
1577 FALSE, /* pc_relative */
1578 0, /* bitpos */
1579 complain_overflow_bitfield,/* complain_on_overflow */
1580 bfd_elf_generic_reloc, /* special_function */
1581 "R_ARM_TLS_LDO32", /* name */
1582 TRUE, /* partial_inplace */
1583 0xffffffff, /* src_mask */
1584 0xffffffff, /* dst_mask */
1585 FALSE), /* pcrel_offset */
1586
1587 HOWTO (R_ARM_TLS_IE32, /* type */
1588 0, /* rightshift */
1589 2, /* size (0 = byte, 1 = short, 2 = long) */
1590 32, /* bitsize */
1591 FALSE, /* pc_relative */
1592 0, /* bitpos */
1593 complain_overflow_bitfield,/* complain_on_overflow */
1594 NULL, /* special_function */
1595 "R_ARM_TLS_IE32", /* name */
1596 TRUE, /* partial_inplace */
1597 0xffffffff, /* src_mask */
1598 0xffffffff, /* dst_mask */
1599 FALSE), /* pcrel_offset */
1600
1601 HOWTO (R_ARM_TLS_LE32, /* type */
1602 0, /* rightshift */
1603 2, /* size (0 = byte, 1 = short, 2 = long) */
1604 32, /* bitsize */
1605 FALSE, /* pc_relative */
1606 0, /* bitpos */
1607 complain_overflow_bitfield,/* complain_on_overflow */
1608 NULL, /* special_function */
1609 "R_ARM_TLS_LE32", /* name */
1610 TRUE, /* partial_inplace */
1611 0xffffffff, /* src_mask */
1612 0xffffffff, /* dst_mask */
1613 FALSE), /* pcrel_offset */
1614
1615 HOWTO (R_ARM_TLS_LDO12, /* type */
1616 0, /* rightshift */
1617 2, /* size (0 = byte, 1 = short, 2 = long) */
1618 12, /* bitsize */
1619 FALSE, /* pc_relative */
1620 0, /* bitpos */
1621 complain_overflow_bitfield,/* complain_on_overflow */
1622 bfd_elf_generic_reloc, /* special_function */
1623 "R_ARM_TLS_LDO12", /* name */
1624 FALSE, /* partial_inplace */
1625 0x00000fff, /* src_mask */
1626 0x00000fff, /* dst_mask */
1627 FALSE), /* pcrel_offset */
1628
1629 HOWTO (R_ARM_TLS_LE12, /* type */
1630 0, /* rightshift */
1631 2, /* size (0 = byte, 1 = short, 2 = long) */
1632 12, /* bitsize */
1633 FALSE, /* pc_relative */
1634 0, /* bitpos */
1635 complain_overflow_bitfield,/* complain_on_overflow */
1636 bfd_elf_generic_reloc, /* special_function */
1637 "R_ARM_TLS_LE12", /* name */
1638 FALSE, /* partial_inplace */
1639 0x00000fff, /* src_mask */
1640 0x00000fff, /* dst_mask */
1641 FALSE), /* pcrel_offset */
1642
1643 HOWTO (R_ARM_TLS_IE12GP, /* type */
1644 0, /* rightshift */
1645 2, /* size (0 = byte, 1 = short, 2 = long) */
1646 12, /* bitsize */
1647 FALSE, /* pc_relative */
1648 0, /* bitpos */
1649 complain_overflow_bitfield,/* complain_on_overflow */
1650 bfd_elf_generic_reloc, /* special_function */
1651 "R_ARM_TLS_IE12GP", /* name */
1652 FALSE, /* partial_inplace */
1653 0x00000fff, /* src_mask */
1654 0x00000fff, /* dst_mask */
1655 FALSE), /* pcrel_offset */
1656
1657 /* 112-127 private relocations. */
1658 EMPTY_HOWTO (112),
1659 EMPTY_HOWTO (113),
1660 EMPTY_HOWTO (114),
1661 EMPTY_HOWTO (115),
1662 EMPTY_HOWTO (116),
1663 EMPTY_HOWTO (117),
1664 EMPTY_HOWTO (118),
1665 EMPTY_HOWTO (119),
1666 EMPTY_HOWTO (120),
1667 EMPTY_HOWTO (121),
1668 EMPTY_HOWTO (122),
1669 EMPTY_HOWTO (123),
1670 EMPTY_HOWTO (124),
1671 EMPTY_HOWTO (125),
1672 EMPTY_HOWTO (126),
1673 EMPTY_HOWTO (127),
1674
1675 /* R_ARM_ME_TOO, obsolete. */
1676 EMPTY_HOWTO (128),
1677
1678 HOWTO (R_ARM_THM_TLS_DESCSEQ, /* type */
1679 0, /* rightshift */
1680 1, /* size (0 = byte, 1 = short, 2 = long) */
1681 0, /* bitsize */
1682 FALSE, /* pc_relative */
1683 0, /* bitpos */
1684 complain_overflow_bitfield,/* complain_on_overflow */
1685 bfd_elf_generic_reloc, /* special_function */
1686 "R_ARM_THM_TLS_DESCSEQ",/* name */
1687 FALSE, /* partial_inplace */
1688 0x00000000, /* src_mask */
1689 0x00000000, /* dst_mask */
1690 FALSE), /* pcrel_offset */
1691 EMPTY_HOWTO (130),
1692 EMPTY_HOWTO (131),
1693 HOWTO (R_ARM_THM_ALU_ABS_G0_NC,/* type. */
1694 0, /* rightshift. */
1695 1, /* size (0 = byte, 1 = short, 2 = long). */
1696 16, /* bitsize. */
1697 FALSE, /* pc_relative. */
1698 0, /* bitpos. */
1699 complain_overflow_bitfield,/* complain_on_overflow. */
1700 bfd_elf_generic_reloc, /* special_function. */
1701 "R_ARM_THM_ALU_ABS_G0_NC",/* name. */
1702 FALSE, /* partial_inplace. */
1703 0x00000000, /* src_mask. */
1704 0x00000000, /* dst_mask. */
1705 FALSE), /* pcrel_offset. */
1706 HOWTO (R_ARM_THM_ALU_ABS_G1_NC,/* type. */
1707 0, /* rightshift. */
1708 1, /* size (0 = byte, 1 = short, 2 = long). */
1709 16, /* bitsize. */
1710 FALSE, /* pc_relative. */
1711 0, /* bitpos. */
1712 complain_overflow_bitfield,/* complain_on_overflow. */
1713 bfd_elf_generic_reloc, /* special_function. */
1714 "R_ARM_THM_ALU_ABS_G1_NC",/* name. */
1715 FALSE, /* partial_inplace. */
1716 0x00000000, /* src_mask. */
1717 0x00000000, /* dst_mask. */
1718 FALSE), /* pcrel_offset. */
1719 HOWTO (R_ARM_THM_ALU_ABS_G2_NC,/* type. */
1720 0, /* rightshift. */
1721 1, /* size (0 = byte, 1 = short, 2 = long). */
1722 16, /* bitsize. */
1723 FALSE, /* pc_relative. */
1724 0, /* bitpos. */
1725 complain_overflow_bitfield,/* complain_on_overflow. */
1726 bfd_elf_generic_reloc, /* special_function. */
1727 "R_ARM_THM_ALU_ABS_G2_NC",/* name. */
1728 FALSE, /* partial_inplace. */
1729 0x00000000, /* src_mask. */
1730 0x00000000, /* dst_mask. */
1731 FALSE), /* pcrel_offset. */
1732 HOWTO (R_ARM_THM_ALU_ABS_G3_NC,/* type. */
1733 0, /* rightshift. */
1734 1, /* size (0 = byte, 1 = short, 2 = long). */
1735 16, /* bitsize. */
1736 FALSE, /* pc_relative. */
1737 0, /* bitpos. */
1738 complain_overflow_bitfield,/* complain_on_overflow. */
1739 bfd_elf_generic_reloc, /* special_function. */
1740 "R_ARM_THM_ALU_ABS_G3_NC",/* name. */
1741 FALSE, /* partial_inplace. */
1742 0x00000000, /* src_mask. */
1743 0x00000000, /* dst_mask. */
1744 FALSE), /* pcrel_offset. */
1745 /* Relocations for Armv8.1-M Mainline. */
1746 HOWTO (R_ARM_THM_BF16, /* type. */
1747 0, /* rightshift. */
1748 1, /* size (0 = byte, 1 = short, 2 = long). */
1749 16, /* bitsize. */
1750 TRUE, /* pc_relative. */
1751 0, /* bitpos. */
1752 complain_overflow_dont,/* do not complain_on_overflow. */
1753 bfd_elf_generic_reloc, /* special_function. */
1754 "R_ARM_THM_BF16", /* name. */
1755 FALSE, /* partial_inplace. */
1756 0x001f0ffe, /* src_mask. */
1757 0x001f0ffe, /* dst_mask. */
1758 TRUE), /* pcrel_offset. */
1759 };
1760
/* 160 onwards: */
static reloc_howto_type elf32_arm_howto_table_2[8] =
{
  /* This table is indexed by (r_type - R_ARM_IRELATIVE); keep the
     entries contiguous and in relocation-number order — see
     elf32_arm_howto_from_type.  */

  /* Dynamic relocation resolved at load time; full 32-bit in-place word.  */
  HOWTO (R_ARM_IRELATIVE,	/* type */
	 0,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 32,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_bitfield,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_IRELATIVE",	/* name */
	 TRUE,			/* partial_inplace */
	 0xffffffff,		/* src_mask */
	 0xffffffff,		/* dst_mask */
	 FALSE),		/* pcrel_offset */

  /* Function-descriptor relocations (presumably for the ARM FDPIC ABI,
     matching the *_FDPIC TLS entries below — confirm against the ABI).  */
  HOWTO (R_ARM_GOTFUNCDESC,	/* type */
	 0,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 32,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_bitfield,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_GOTFUNCDESC",	/* name */
	 FALSE,			/* partial_inplace */
	 0,			/* src_mask */
	 0xffffffff,		/* dst_mask */
	 FALSE),		/* pcrel_offset */

  HOWTO (R_ARM_GOTOFFFUNCDESC, /* type */
	 0,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 32,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_bitfield,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_GOTOFFFUNCDESC",/* name */
	 FALSE,			/* partial_inplace */
	 0,			/* src_mask */
	 0xffffffff,		/* dst_mask */
	 FALSE),		/* pcrel_offset */

  HOWTO (R_ARM_FUNCDESC,	/* type */
	 0,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 32,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_bitfield,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_FUNCDESC",	/* name */
	 FALSE,			/* partial_inplace */
	 0,			/* src_mask */
	 0xffffffff,		/* dst_mask */
	 FALSE),		/* pcrel_offset */

  /* NOTE(review): bitsize is 64 while size/dst_mask describe a single
     32-bit word — presumably the descriptor itself is two words and the
     second word is handled specially; confirm in the relocation code.  */
  HOWTO (R_ARM_FUNCDESC_VALUE, /* type */
	 0,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 64,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_bitfield,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_FUNCDESC_VALUE",/* name */
	 FALSE,			/* partial_inplace */
	 0,			/* src_mask */
	 0xffffffff,		/* dst_mask */
	 FALSE),		/* pcrel_offset */

  /* FDPIC variants of the TLS relocations.  */
  HOWTO (R_ARM_TLS_GD32_FDPIC, /* type */
	 0,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 32,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_bitfield,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_TLS_GD32_FDPIC",/* name */
	 FALSE,			/* partial_inplace */
	 0,			/* src_mask */
	 0xffffffff,		/* dst_mask */
	 FALSE),		/* pcrel_offset */

  HOWTO (R_ARM_TLS_LDM32_FDPIC, /* type */
	 0,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 32,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_bitfield,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_TLS_LDM32_FDPIC",/* name */
	 FALSE,			/* partial_inplace */
	 0,			/* src_mask */
	 0xffffffff,		/* dst_mask */
	 FALSE),		/* pcrel_offset */

  HOWTO (R_ARM_TLS_IE32_FDPIC, /* type */
	 0,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 32,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_bitfield,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_TLS_IE32_FDPIC",/* name */
	 FALSE,			/* partial_inplace */
	 0,			/* src_mask */
	 0xffffffff,		/* dst_mask */
	 FALSE),		/* pcrel_offset */
};
1869
/* 249-255 extended, currently unused, relocations: */
/* NOTE(review): the comment above says 249-255 but this table holds only
   four entries, indexed from R_ARM_RREL32 (see elf32_arm_howto_from_type);
   confirm the intended covered range.  */
static reloc_howto_type elf32_arm_howto_table_3[4] =
{
  /* All four entries are placeholders: zero size, zero masks, so applying
     them never modifies the section contents.  */
  HOWTO (R_ARM_RREL32,		/* type */
	 0,			/* rightshift */
	 0,			/* size (0 = byte, 1 = short, 2 = long) */
	 0,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_RREL32",	/* name */
	 FALSE,			/* partial_inplace */
	 0,			/* src_mask */
	 0,			/* dst_mask */
	 FALSE),		/* pcrel_offset */

  HOWTO (R_ARM_RABS32,		/* type */
	 0,			/* rightshift */
	 0,			/* size (0 = byte, 1 = short, 2 = long) */
	 0,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_RABS32",	/* name */
	 FALSE,			/* partial_inplace */
	 0,			/* src_mask */
	 0,			/* dst_mask */
	 FALSE),		/* pcrel_offset */

  HOWTO (R_ARM_RPC24,		/* type */
	 0,			/* rightshift */
	 0,			/* size (0 = byte, 1 = short, 2 = long) */
	 0,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_RPC24",		/* name */
	 FALSE,			/* partial_inplace */
	 0,			/* src_mask */
	 0,			/* dst_mask */
	 FALSE),		/* pcrel_offset */

  HOWTO (R_ARM_RBASE,		/* type */
	 0,			/* rightshift */
	 0,			/* size (0 = byte, 1 = short, 2 = long) */
	 0,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_RBASE",		/* name */
	 FALSE,			/* partial_inplace */
	 0,			/* src_mask */
	 0,			/* dst_mask */
	 FALSE)			/* pcrel_offset */
};
1929
1930 static reloc_howto_type *
1931 elf32_arm_howto_from_type (unsigned int r_type)
1932 {
1933 if (r_type < ARRAY_SIZE (elf32_arm_howto_table_1))
1934 return &elf32_arm_howto_table_1[r_type];
1935
1936 if (r_type >= R_ARM_IRELATIVE
1937 && r_type < R_ARM_IRELATIVE + ARRAY_SIZE (elf32_arm_howto_table_2))
1938 return &elf32_arm_howto_table_2[r_type - R_ARM_IRELATIVE];
1939
1940 if (r_type >= R_ARM_RREL32
1941 && r_type < R_ARM_RREL32 + ARRAY_SIZE (elf32_arm_howto_table_3))
1942 return &elf32_arm_howto_table_3[r_type - R_ARM_RREL32];
1943
1944 return NULL;
1945 }
1946
1947 static bfd_boolean
1948 elf32_arm_info_to_howto (bfd * abfd, arelent * bfd_reloc,
1949 Elf_Internal_Rela * elf_reloc)
1950 {
1951 unsigned int r_type;
1952
1953 r_type = ELF32_R_TYPE (elf_reloc->r_info);
1954 if ((bfd_reloc->howto = elf32_arm_howto_from_type (r_type)) == NULL)
1955 {
1956 /* xgettext:c-format */
1957 _bfd_error_handler (_("%pB: unsupported relocation type %#x"),
1958 abfd, r_type);
1959 bfd_set_error (bfd_error_bad_value);
1960 return FALSE;
1961 }
1962 return TRUE;
1963 }
1964
/* One entry in the BFD-code -> ELF-relocation-number mapping table.  */
struct elf32_arm_reloc_map
{
  bfd_reloc_code_real_type bfd_reloc_val;  /* BFD_RELOC_* code.  */
  unsigned char elf_reloc_val;		   /* Corresponding R_ARM_* number.  */
};
1970
1971 /* All entries in this list must also be present in elf32_arm_howto_table. */
1972 static const struct elf32_arm_reloc_map elf32_arm_reloc_map[] =
1973 {
1974 {BFD_RELOC_NONE, R_ARM_NONE},
1975 {BFD_RELOC_ARM_PCREL_BRANCH, R_ARM_PC24},
1976 {BFD_RELOC_ARM_PCREL_CALL, R_ARM_CALL},
1977 {BFD_RELOC_ARM_PCREL_JUMP, R_ARM_JUMP24},
1978 {BFD_RELOC_ARM_PCREL_BLX, R_ARM_XPC25},
1979 {BFD_RELOC_THUMB_PCREL_BLX, R_ARM_THM_XPC22},
1980 {BFD_RELOC_32, R_ARM_ABS32},
1981 {BFD_RELOC_32_PCREL, R_ARM_REL32},
1982 {BFD_RELOC_8, R_ARM_ABS8},
1983 {BFD_RELOC_16, R_ARM_ABS16},
1984 {BFD_RELOC_ARM_OFFSET_IMM, R_ARM_ABS12},
1985 {BFD_RELOC_ARM_THUMB_OFFSET, R_ARM_THM_ABS5},
1986 {BFD_RELOC_THUMB_PCREL_BRANCH25, R_ARM_THM_JUMP24},
1987 {BFD_RELOC_THUMB_PCREL_BRANCH23, R_ARM_THM_CALL},
1988 {BFD_RELOC_THUMB_PCREL_BRANCH12, R_ARM_THM_JUMP11},
1989 {BFD_RELOC_THUMB_PCREL_BRANCH20, R_ARM_THM_JUMP19},
1990 {BFD_RELOC_THUMB_PCREL_BRANCH9, R_ARM_THM_JUMP8},
1991 {BFD_RELOC_THUMB_PCREL_BRANCH7, R_ARM_THM_JUMP6},
1992 {BFD_RELOC_ARM_GLOB_DAT, R_ARM_GLOB_DAT},
1993 {BFD_RELOC_ARM_JUMP_SLOT, R_ARM_JUMP_SLOT},
1994 {BFD_RELOC_ARM_RELATIVE, R_ARM_RELATIVE},
1995 {BFD_RELOC_ARM_GOTOFF, R_ARM_GOTOFF32},
1996 {BFD_RELOC_ARM_GOTPC, R_ARM_GOTPC},
1997 {BFD_RELOC_ARM_GOT_PREL, R_ARM_GOT_PREL},
1998 {BFD_RELOC_ARM_GOT32, R_ARM_GOT32},
1999 {BFD_RELOC_ARM_PLT32, R_ARM_PLT32},
2000 {BFD_RELOC_ARM_TARGET1, R_ARM_TARGET1},
2001 {BFD_RELOC_ARM_ROSEGREL32, R_ARM_ROSEGREL32},
2002 {BFD_RELOC_ARM_SBREL32, R_ARM_SBREL32},
2003 {BFD_RELOC_ARM_PREL31, R_ARM_PREL31},
2004 {BFD_RELOC_ARM_TARGET2, R_ARM_TARGET2},
2005 {BFD_RELOC_ARM_PLT32, R_ARM_PLT32},
2006 {BFD_RELOC_ARM_TLS_GOTDESC, R_ARM_TLS_GOTDESC},
2007 {BFD_RELOC_ARM_TLS_CALL, R_ARM_TLS_CALL},
2008 {BFD_RELOC_ARM_THM_TLS_CALL, R_ARM_THM_TLS_CALL},
2009 {BFD_RELOC_ARM_TLS_DESCSEQ, R_ARM_TLS_DESCSEQ},
2010 {BFD_RELOC_ARM_THM_TLS_DESCSEQ, R_ARM_THM_TLS_DESCSEQ},
2011 {BFD_RELOC_ARM_TLS_DESC, R_ARM_TLS_DESC},
2012 {BFD_RELOC_ARM_TLS_GD32, R_ARM_TLS_GD32},
2013 {BFD_RELOC_ARM_TLS_LDO32, R_ARM_TLS_LDO32},
2014 {BFD_RELOC_ARM_TLS_LDM32, R_ARM_TLS_LDM32},
2015 {BFD_RELOC_ARM_TLS_DTPMOD32, R_ARM_TLS_DTPMOD32},
2016 {BFD_RELOC_ARM_TLS_DTPOFF32, R_ARM_TLS_DTPOFF32},
2017 {BFD_RELOC_ARM_TLS_TPOFF32, R_ARM_TLS_TPOFF32},
2018 {BFD_RELOC_ARM_TLS_IE32, R_ARM_TLS_IE32},
2019 {BFD_RELOC_ARM_TLS_LE32, R_ARM_TLS_LE32},
2020 {BFD_RELOC_ARM_IRELATIVE, R_ARM_IRELATIVE},
2021 {BFD_RELOC_ARM_GOTFUNCDESC, R_ARM_GOTFUNCDESC},
2022 {BFD_RELOC_ARM_GOTOFFFUNCDESC, R_ARM_GOTOFFFUNCDESC},
2023 {BFD_RELOC_ARM_FUNCDESC, R_ARM_FUNCDESC},
2024 {BFD_RELOC_ARM_FUNCDESC_VALUE, R_ARM_FUNCDESC_VALUE},
2025 {BFD_RELOC_ARM_TLS_GD32_FDPIC, R_ARM_TLS_GD32_FDPIC},
2026 {BFD_RELOC_ARM_TLS_LDM32_FDPIC, R_ARM_TLS_LDM32_FDPIC},
2027 {BFD_RELOC_ARM_TLS_IE32_FDPIC, R_ARM_TLS_IE32_FDPIC},
2028 {BFD_RELOC_VTABLE_INHERIT, R_ARM_GNU_VTINHERIT},
2029 {BFD_RELOC_VTABLE_ENTRY, R_ARM_GNU_VTENTRY},
2030 {BFD_RELOC_ARM_MOVW, R_ARM_MOVW_ABS_NC},
2031 {BFD_RELOC_ARM_MOVT, R_ARM_MOVT_ABS},
2032 {BFD_RELOC_ARM_MOVW_PCREL, R_ARM_MOVW_PREL_NC},
2033 {BFD_RELOC_ARM_MOVT_PCREL, R_ARM_MOVT_PREL},
2034 {BFD_RELOC_ARM_THUMB_MOVW, R_ARM_THM_MOVW_ABS_NC},
2035 {BFD_RELOC_ARM_THUMB_MOVT, R_ARM_THM_MOVT_ABS},
2036 {BFD_RELOC_ARM_THUMB_MOVW_PCREL, R_ARM_THM_MOVW_PREL_NC},
2037 {BFD_RELOC_ARM_THUMB_MOVT_PCREL, R_ARM_THM_MOVT_PREL},
2038 {BFD_RELOC_ARM_ALU_PC_G0_NC, R_ARM_ALU_PC_G0_NC},
2039 {BFD_RELOC_ARM_ALU_PC_G0, R_ARM_ALU_PC_G0},
2040 {BFD_RELOC_ARM_ALU_PC_G1_NC, R_ARM_ALU_PC_G1_NC},
2041 {BFD_RELOC_ARM_ALU_PC_G1, R_ARM_ALU_PC_G1},
2042 {BFD_RELOC_ARM_ALU_PC_G2, R_ARM_ALU_PC_G2},
2043 {BFD_RELOC_ARM_LDR_PC_G0, R_ARM_LDR_PC_G0},
2044 {BFD_RELOC_ARM_LDR_PC_G1, R_ARM_LDR_PC_G1},
2045 {BFD_RELOC_ARM_LDR_PC_G2, R_ARM_LDR_PC_G2},
2046 {BFD_RELOC_ARM_LDRS_PC_G0, R_ARM_LDRS_PC_G0},
2047 {BFD_RELOC_ARM_LDRS_PC_G1, R_ARM_LDRS_PC_G1},
2048 {BFD_RELOC_ARM_LDRS_PC_G2, R_ARM_LDRS_PC_G2},
2049 {BFD_RELOC_ARM_LDC_PC_G0, R_ARM_LDC_PC_G0},
2050 {BFD_RELOC_ARM_LDC_PC_G1, R_ARM_LDC_PC_G1},
2051 {BFD_RELOC_ARM_LDC_PC_G2, R_ARM_LDC_PC_G2},
2052 {BFD_RELOC_ARM_ALU_SB_G0_NC, R_ARM_ALU_SB_G0_NC},
2053 {BFD_RELOC_ARM_ALU_SB_G0, R_ARM_ALU_SB_G0},
2054 {BFD_RELOC_ARM_ALU_SB_G1_NC, R_ARM_ALU_SB_G1_NC},
2055 {BFD_RELOC_ARM_ALU_SB_G1, R_ARM_ALU_SB_G1},
2056 {BFD_RELOC_ARM_ALU_SB_G2, R_ARM_ALU_SB_G2},
2057 {BFD_RELOC_ARM_LDR_SB_G0, R_ARM_LDR_SB_G0},
2058 {BFD_RELOC_ARM_LDR_SB_G1, R_ARM_LDR_SB_G1},
2059 {BFD_RELOC_ARM_LDR_SB_G2, R_ARM_LDR_SB_G2},
2060 {BFD_RELOC_ARM_LDRS_SB_G0, R_ARM_LDRS_SB_G0},
2061 {BFD_RELOC_ARM_LDRS_SB_G1, R_ARM_LDRS_SB_G1},
2062 {BFD_RELOC_ARM_LDRS_SB_G2, R_ARM_LDRS_SB_G2},
2063 {BFD_RELOC_ARM_LDC_SB_G0, R_ARM_LDC_SB_G0},
2064 {BFD_RELOC_ARM_LDC_SB_G1, R_ARM_LDC_SB_G1},
2065 {BFD_RELOC_ARM_LDC_SB_G2, R_ARM_LDC_SB_G2},
2066 {BFD_RELOC_ARM_V4BX, R_ARM_V4BX},
2067 {BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC, R_ARM_THM_ALU_ABS_G3_NC},
2068 {BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC, R_ARM_THM_ALU_ABS_G2_NC},
2069 {BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC, R_ARM_THM_ALU_ABS_G1_NC},
2070 {BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC, R_ARM_THM_ALU_ABS_G0_NC},
2071 {BFD_RELOC_ARM_THUMB_BF17, R_ARM_THM_BF16}
2072 };
2073
2074 static reloc_howto_type *
2075 elf32_arm_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
2076 bfd_reloc_code_real_type code)
2077 {
2078 unsigned int i;
2079
2080 for (i = 0; i < ARRAY_SIZE (elf32_arm_reloc_map); i ++)
2081 if (elf32_arm_reloc_map[i].bfd_reloc_val == code)
2082 return elf32_arm_howto_from_type (elf32_arm_reloc_map[i].elf_reloc_val);
2083
2084 return NULL;
2085 }
2086
2087 static reloc_howto_type *
2088 elf32_arm_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
2089 const char *r_name)
2090 {
2091 unsigned int i;
2092
2093 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_1); i++)
2094 if (elf32_arm_howto_table_1[i].name != NULL
2095 && strcasecmp (elf32_arm_howto_table_1[i].name, r_name) == 0)
2096 return &elf32_arm_howto_table_1[i];
2097
2098 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_2); i++)
2099 if (elf32_arm_howto_table_2[i].name != NULL
2100 && strcasecmp (elf32_arm_howto_table_2[i].name, r_name) == 0)
2101 return &elf32_arm_howto_table_2[i];
2102
2103 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_3); i++)
2104 if (elf32_arm_howto_table_3[i].name != NULL
2105 && strcasecmp (elf32_arm_howto_table_3[i].name, r_name) == 0)
2106 return &elf32_arm_howto_table_3[i];
2107
2108 return NULL;
2109 }
2110
2111 /* Support for core dump NOTE sections. */
2112
2113 static bfd_boolean
2114 elf32_arm_nabi_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
2115 {
2116 int offset;
2117 size_t size;
2118
2119 switch (note->descsz)
2120 {
2121 default:
2122 return FALSE;
2123
2124 case 148: /* Linux/ARM 32-bit. */
2125 /* pr_cursig */
2126 elf_tdata (abfd)->core->signal = bfd_get_16 (abfd, note->descdata + 12);
2127
2128 /* pr_pid */
2129 elf_tdata (abfd)->core->lwpid = bfd_get_32 (abfd, note->descdata + 24);
2130
2131 /* pr_reg */
2132 offset = 72;
2133 size = 72;
2134
2135 break;
2136 }
2137
2138 /* Make a ".reg/999" section. */
2139 return _bfd_elfcore_make_pseudosection (abfd, ".reg",
2140 size, note->descpos + offset);
2141 }
2142
2143 static bfd_boolean
2144 elf32_arm_nabi_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
2145 {
2146 switch (note->descsz)
2147 {
2148 default:
2149 return FALSE;
2150
2151 case 124: /* Linux/ARM elf_prpsinfo. */
2152 elf_tdata (abfd)->core->pid
2153 = bfd_get_32 (abfd, note->descdata + 12);
2154 elf_tdata (abfd)->core->program
2155 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
2156 elf_tdata (abfd)->core->command
2157 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
2158 }
2159
2160 /* Note that for some reason, a spurious space is tacked
2161 onto the end of the args in some (at least one anyway)
2162 implementations, so strip it off if it exists. */
2163 {
2164 char *command = elf_tdata (abfd)->core->command;
2165 int n = strlen (command);
2166
2167 if (0 < n && command[n - 1] == ' ')
2168 command[n - 1] = '\0';
2169 }
2170
2171 return TRUE;
2172 }
2173
/* Write a core-file note of type NOTE_TYPE into BUF (of size *BUFSIZ).
   Variadic arguments depend on NOTE_TYPE:
     NT_PRPSINFO: (const char *fname, const char *psargs)
     NT_PRSTATUS: (long pid, int cursig, const void *gregs)
   Returns the updated buffer, or NULL for unsupported note types.
   The fixed offsets below mirror the Linux/ARM elf_prpsinfo (124 bytes)
   and elf_prstatus (148 bytes) layouts read back by the grok functions
   above.  */
static char *
elf32_arm_nabi_write_core_note (bfd *abfd, char *buf, int *bufsiz,
				int note_type, ...)
{
  switch (note_type)
    {
    default:
      return NULL;

    case NT_PRPSINFO:
      {
	/* NONSTRING: the 80-byte psargs field need not be NUL-terminated.  */
	char data[124] ATTRIBUTE_NONSTRING;
	va_list ap;

	va_start (ap, note_type);
	memset (data, 0, sizeof (data));
	strncpy (data + 28, va_arg (ap, const char *), 16);
#if GCC_VERSION == 8000 || GCC_VERSION == 8001
	DIAGNOSTIC_PUSH;
	/* GCC 8.0 and 8.1 warn about 80 equals destination size with
	   -Wstringop-truncation:
	   https://gcc.gnu.org/bugzilla/show_bug.cgi?id=85643
	 */
	DIAGNOSTIC_IGNORE_STRINGOP_TRUNCATION;
#endif
	strncpy (data + 44, va_arg (ap, const char *), 80);
#if GCC_VERSION == 8000 || GCC_VERSION == 8001
	DIAGNOSTIC_POP;
#endif
	va_end (ap);

	return elfcore_write_note (abfd, buf, bufsiz,
				   "CORE", note_type, data, sizeof (data));
      }

    case NT_PRSTATUS:
      {
	char data[148];
	va_list ap;
	long pid;
	int cursig;
	const void *greg;

	va_start (ap, note_type);
	memset (data, 0, sizeof (data));
	pid = va_arg (ap, long);
	bfd_put_32 (abfd, pid, data + 24);	/* pr_pid */
	cursig = va_arg (ap, int);
	bfd_put_16 (abfd, cursig, data + 12);	/* pr_cursig */
	greg = va_arg (ap, const void *);
	memcpy (data + 72, greg, 72);		/* pr_reg */
	va_end (ap);

	return elfcore_write_note (abfd, buf, bufsiz,
				   "CORE", note_type, data, sizeof (data));
      }
    }
}
2232
/* BFD target vector names for the little- and big-endian ARM ELF
   targets.  */
#define TARGET_LITTLE_SYM		arm_elf32_le_vec
#define TARGET_LITTLE_NAME		"elf32-littlearm"
#define TARGET_BIG_SYM			arm_elf32_be_vec
#define TARGET_BIG_NAME			"elf32-bigarm"

/* Core-note hooks implemented above.  */
#define elf_backend_grok_prstatus	elf32_arm_nabi_grok_prstatus
#define elf_backend_grok_psinfo		elf32_arm_nabi_grok_psinfo
#define elf_backend_write_core_note	elf32_arm_nabi_write_core_note

/* Convenience types for 32-bit ARM and 16-bit Thumb instruction words.  */
typedef unsigned long int insn32;
typedef unsigned short int insn16;

/* In lieu of proper flags, assume all EABIv4 or later objects are
   interworkable.  */
#define INTERWORK_FLAG(abfd)  \
  (EF_ARM_EABI_VERSION (elf_elfheader (abfd)->e_flags) >= EF_ARM_EABI_VER4 \
  || (elf_elfheader (abfd)->e_flags & EF_ARM_INTERWORK) \
  || ((abfd)->flags & BFD_LINKER_CREATED))

/* The linker script knows the section names for placement.
   The entry_names are used to do simple name mangling on the stubs.
   Given a function name, and its type, the stub can be found.  The
   name can be changed.  The only requirement is the %s be present.  */
#define THUMB2ARM_GLUE_SECTION_NAME ".glue_7t"
#define THUMB2ARM_GLUE_ENTRY_NAME   "__%s_from_thumb"

#define ARM2THUMB_GLUE_SECTION_NAME ".glue_7"
#define ARM2THUMB_GLUE_ENTRY_NAME   "__%s_from_arm"

#define VFP11_ERRATUM_VENEER_SECTION_NAME ".vfp11_veneer"
#define VFP11_ERRATUM_VENEER_ENTRY_NAME   "__vfp11_veneer_%x"

#define STM32L4XX_ERRATUM_VENEER_SECTION_NAME ".text.stm32l4xx_veneer"
#define STM32L4XX_ERRATUM_VENEER_ENTRY_NAME "__stm32l4xx_veneer_%x"

#define ARM_BX_GLUE_SECTION_NAME ".v4_bx"
#define ARM_BX_GLUE_ENTRY_NAME   "__bx_r%d"

#define STUB_ENTRY_NAME   "__%s_veneer"

/* Prefix applied to secure-entry function symbols for CMSE veneers.  */
#define CMSE_PREFIX "__acle_se_"

/* The name of the dynamic interpreter.  This is put in the .interp
   section.  */
#define ELF_DYNAMIC_INTERPRETER     "/usr/lib/ld.so.1"

/* FDPIC default stack size.  */
#define DEFAULT_STACK_SIZE 0x8000
2281
/* TLS call trampoline: jump through the descriptor found at lr + r0.  */
static const unsigned long tls_trampoline [] =
{
  0xe08e0000,		/* add r0, lr, r0 */
  0xe5901004,		/* ldr r1, [r0,#4] */
  0xe12fff11,		/* bx  r1 */
};

/* Lazy TLS-descriptor resolution trampoline; the two trailing words are
   GOT-relative offsets patched at link time.  */
static const unsigned long dl_tlsdesc_lazy_trampoline [] =
{
  0xe52d2004, /* push {r2} */
  0xe59f200c, /* ldr r2, [pc, #3f - . - 8] */
  0xe59f100c, /* ldr r1, [pc, #4f - . - 8] */
  0xe79f2002, /* 1: ldr r2, [pc, r2] */
  0xe081100f, /* 2: add r1, pc */
  0xe12fff12, /* bx r2 */
  0x00000014, /* 3: .word _GLOBAL_OFFSET_TABLE_ - 1b - 8
			+ dl_tlsdesc_lazy_resolver(GOT)   */
  0x00000018, /* 4: .word _GLOBAL_OFFSET_TABLE_ - 2b - 8 */
};

/* ARM FDPIC PLT entry.  */
/* The last 5 words contain PLT lazy fragment code and data.  */
static const bfd_vma elf32_arm_fdpic_plt_entry [] =
{
  0xe59fc008,    /* ldr     r12, .L1 */
  0xe08cc009,    /* add     r12, r12, r9 */
  0xe59c9004,    /* ldr     r9, [r12, #4] */
  0xe59cf000,    /* ldr     pc, [r12] */
  0x00000000,    /* .L1.    .word   foo(GOTOFFFUNCDESC) */
  0x00000000,    /* .L2.    .word   foo(funcdesc_value_reloc_offset) */
  0xe51fc00c,    /* ldr     r12, [pc, #-12] */
  0xe92d1000,    /* push    {r12} */
  0xe599c004,    /* ldr     r12, [r9, #4] */
  0xe599f000,    /* ldr     pc, [r9] */
};

/* Thumb FDPIC PLT entry.  */
/* The last 5 words contain PLT lazy fragment code and data.  */
static const bfd_vma elf32_arm_fdpic_thumb_plt_entry [] =
{
  0xc00cf8df,    /* ldr.w   r12, .L1 */
  0x0c09eb0c,    /* add.w   r12, r12, r9 */
  0x9004f8dc,    /* ldr.w   r9, [r12, #4] */
  0xf000f8dc,    /* ldr.w   pc, [r12] */
  0x00000000,    /* .L1     .word   foo(GOTOFFFUNCDESC) */
  0x00000000,    /* .L2     .word   foo(funcdesc_value_reloc_offset) */
  0xc008f85f,    /* ldr.w   r12, .L2 */
  0xcd04f84d,    /* push    {r12} */
  0xc004f8d9,    /* ldr.w   r12, [r9, #4] */
  0xf000f8d9,    /* ldr.w   pc, [r9] */
};
2333
#ifdef FOUR_WORD_PLT

/* The first entry in a procedure linkage table looks like
   this.  It is set up so that any shared library function that is
   called before the relocation has been set up calls the dynamic
   linker first.  */
static const bfd_vma elf32_arm_plt0_entry [] =
{
  0xe52de004,		/* str   lr, [sp, #-4]! */
  0xe59fe010,		/* ldr   lr, [pc, #16]  */
  0xe08fe00e,		/* add   lr, pc, lr     */
  0xe5bef008,		/* ldr   pc, [lr, #8]!  */
};

/* Subsequent entries in a procedure linkage table look like
   this.  The #NN immediates are filled in at link time.  */
static const bfd_vma elf32_arm_plt_entry [] =
{
  0xe28fc600,		/* add   ip, pc, #NN	*/
  0xe28cca00,		/* add	 ip, ip, #NN	*/
  0xe5bcf000,		/* ldr	 pc, [ip, #NN]! */
  0x00000000,		/* unused		*/
};

#else /* not FOUR_WORD_PLT */

/* The first entry in a procedure linkage table looks like
   this.  It is set up so that any shared library function that is
   called before the relocation has been set up calls the dynamic
   linker first.  */
static const bfd_vma elf32_arm_plt0_entry [] =
{
  0xe52de004,		/* str   lr, [sp, #-4]! */
  0xe59fe004,		/* ldr   lr, [pc, #4]   */
  0xe08fe00e,		/* add   lr, pc, lr     */
  0xe5bef008,		/* ldr   pc, [lr, #8]!  */
  0x00000000,		/* &GOT[0] - .          */
};

/* By default subsequent entries in a procedure linkage table look like
   this.  Offsets that don't fit into 28 bits will cause link error.  */
static const bfd_vma elf32_arm_plt_entry_short [] =
{
  0xe28fc600,		/* add   ip, pc, #0xNN00000 */
  0xe28cca00,		/* add	 ip, ip, #0xNN000   */
  0xe5bcf000,		/* ldr	 pc, [ip, #0xNNN]!  */
};

/* When explicitly asked, we'll use this "long" entry format
   which can cope with arbitrary displacements.  */
static const bfd_vma elf32_arm_plt_entry_long [] =
{
  0xe28fc200,		/* add	 ip, pc, #0xN0000000 */
  0xe28cc600,		/* add	 ip, ip, #0xNN00000  */
  0xe28cca00,		/* add	 ip, ip, #0xNN000    */
  0xe5bcf000,		/* ldr	 pc, [ip, #0xNNN]!   */
};

/* Whether to emit the long-format PLT entries above; set from the
   command line.  */
static bfd_boolean elf32_arm_use_long_plt_entry = FALSE;

#endif /* not FOUR_WORD_PLT */
2395
/* The first entry in a procedure linkage table looks like this.
   It is set up so that any shared library function that is called before the
   relocation has been set up calls the dynamic linker first.  */
static const bfd_vma elf32_thumb2_plt0_entry [] =
{
  /* NOTE: As this is a mixture of 16-bit and 32-bit instructions,
     an instruction maybe encoded to one or two array elements.  */
  0xf8dfb500,		/* push	   {lr}		 */
  0x44fee008,		/* ldr.w   lr, [pc, #8]	 */
			/* add	   lr, pc	 */
  0xff08f85e,		/* ldr.w   pc, [lr, #8]! */
  0x00000000,		/* &GOT[0] - .		 */
};

/* Subsequent entries in a procedure linkage table for thumb only target
   look like this.  The movw/movt immediates are filled in at link time.  */
static const bfd_vma elf32_thumb2_plt_entry [] =
{
  /* NOTE: As this is a mixture of 16-bit and 32-bit instructions,
     an instruction maybe encoded to one or two array elements.  */
  0x0c00f240,		/* movw    ip, #0xNNNN	  */
  0x0c00f2c0,		/* movt    ip, #0xNNNN	  */
  0xf8dc44fc,		/* add     ip, pc	  */
  0xbf00f000		/* ldr.w   pc, [ip]	  */
			/* nop			  */
};

/* The format of the first entry in the procedure linkage table
   for a VxWorks executable.  */
static const bfd_vma elf32_arm_vxworks_exec_plt0_entry[] =
{
  0xe52dc008,		/* str	  ip,[sp,#-8]!			*/
  0xe59fc000,		/* ldr	  ip,[pc]			*/
  0xe59cf008,		/* ldr	  pc,[ip,#8]			*/
  0x00000000,		/* .long  _GLOBAL_OFFSET_TABLE_		*/
};

/* The format of subsequent entries in a VxWorks executable.  */
static const bfd_vma elf32_arm_vxworks_exec_plt_entry[] =
{
  0xe59fc000,		/* ldr	  ip,[pc]			*/
  0xe59cf000,		/* ldr	  pc,[ip]			*/
  0x00000000,		/* .long  @got				*/
  0xe59fc000,		/* ldr	  ip,[pc]			*/
  0xea000000,		/* b	  _PLT				*/
  0x00000000,		/* .long  @pltindex*sizeof(Elf32_Rela)	*/
};

/* The format of entries in a VxWorks shared library.  */
static const bfd_vma elf32_arm_vxworks_shared_plt_entry[] =
{
  0xe59fc000,		/* ldr	  ip,[pc]			*/
  0xe79cf009,		/* ldr	  pc,[ip,r9]			*/
  0x00000000,		/* .long  @got				*/
  0xe59fc000,		/* ldr	  ip,[pc]			*/
  0xe599f008,		/* ldr	  pc,[r9,#8]			*/
  0x00000000,		/* .long  @pltindex*sizeof(Elf32_Rela)	*/
};
2454
/* An initial stub used if the PLT entry is referenced from Thumb code.  */
#define PLT_THUMB_STUB_SIZE 4
static const bfd_vma elf32_arm_plt_thumb_stub [] =
{
  0x4778,		/* bx pc */
  0x46c0		/* nop   */
};

/* The entries in a PLT when using a DLL-based target with multiple
   address spaces.  */
static const bfd_vma elf32_arm_symbian_plt_entry [] =
{
  0xe51ff004,	    /* ldr   pc, [pc, #-4] */
  0x00000000,	    /* dcd   R_ARM_GLOB_DAT(X) */
};

/* The first entry in a procedure linkage table looks like
   this.  It is set up so that any shared library function that is
   called before the relocation has been set up calls the dynamic
   linker first.  Code is grouped into 16-byte NaCl bundles, and
   indirect-branch targets are masked with bic to satisfy the
   sandbox.  */
static const bfd_vma elf32_arm_nacl_plt0_entry [] =
{
  /* First bundle: */
  0xe300c000,		/* movw	ip, #:lower16:&GOT[2]-.+8	*/
  0xe340c000,		/* movt	ip, #:upper16:&GOT[2]-.+8	*/
  0xe08cc00f,		/* add	ip, ip, pc			*/
  0xe52dc008,		/* str	ip, [sp, #-8]!			*/
  /* Second bundle: */
  0xe3ccc103,		/* bic	ip, ip, #0xc0000000		*/
  0xe59cc000,		/* ldr	ip, [ip]			*/
  0xe3ccc13f,		/* bic	ip, ip, #0xc000000f		*/
  0xe12fff1c,		/* bx	ip				*/
  /* Third bundle: */
  0xe320f000,		/* nop					*/
  0xe320f000,		/* nop					*/
  0xe320f000,		/* nop					*/
  /* .Lplt_tail: */
  0xe50dc004,		/* str	ip, [sp, #-4]			*/
  /* Fourth bundle: */
  0xe3ccc103,		/* bic	ip, ip, #0xc0000000		*/
  0xe59cc000,		/* ldr	ip, [ip]			*/
  0xe3ccc13f,		/* bic	ip, ip, #0xc000000f		*/
  0xe12fff1c,		/* bx	ip				*/
};
#define ARM_NACL_PLT_TAIL_OFFSET  (11 * 4)

/* Subsequent entries in a procedure linkage table look like this.  */
static const bfd_vma elf32_arm_nacl_plt_entry [] =
{
  0xe300c000,		/* movw	ip, #:lower16:&GOT[n]-.+8	*/
  0xe340c000,		/* movt	ip, #:upper16:&GOT[n]-.+8	*/
  0xe08cc00f,		/* add	ip, ip, pc			*/
  0xea000000,		/* b	.Lplt_tail			*/
};
2509
/* Maximum reachable offsets for the various ARM/Thumb branch encodings;
   the trailing +8 / +4 account for the PC-read offset of each state.  */
#define ARM_MAX_FWD_BRANCH_OFFSET  ((((1 << 23) - 1) << 2) + 8)
#define ARM_MAX_BWD_BRANCH_OFFSET  ((-((1 << 23) << 2)) + 8)
#define THM_MAX_FWD_BRANCH_OFFSET  ((1 << 22) -2 + 4)
#define THM_MAX_BWD_BRANCH_OFFSET  (-(1 << 22) + 4)
#define THM2_MAX_FWD_BRANCH_OFFSET (((1 << 24) - 2) + 4)
#define THM2_MAX_BWD_BRANCH_OFFSET (-(1 << 24) + 4)
#define THM2_MAX_FWD_COND_BRANCH_OFFSET (((1 << 20) -2) + 4)
#define THM2_MAX_BWD_COND_BRANCH_OFFSET (-(1 << 20) + 4)
2518
/* Kind of each element in a stub template: a 16- or 32-bit Thumb
   instruction, a 32-bit ARM instruction, or a literal data word.  */
enum stub_insn_type
{
  THUMB16_TYPE = 1,
  THUMB32_TYPE,
  ARM_TYPE,
  DATA_TYPE
};

/* Helpers for building stub templates; each expands to an insn_sequence
   initializer {data, type, r_type, reloc_addend}.  */
#define THUMB16_INSN(X)		{(X), THUMB16_TYPE, R_ARM_NONE, 0}
/* A bit of a hack.  A Thumb conditional branch, in which the proper condition
   is inserted in arm_build_one_stub().  */
#define THUMB16_BCOND_INSN(X)	{(X), THUMB16_TYPE, R_ARM_NONE, 1}
#define THUMB32_INSN(X)		{(X), THUMB32_TYPE, R_ARM_NONE, 0}
#define THUMB32_MOVT(X)		{(X), THUMB32_TYPE, R_ARM_THM_MOVT_ABS, 0}
#define THUMB32_MOVW(X)		{(X), THUMB32_TYPE, R_ARM_THM_MOVW_ABS_NC, 0}
#define THUMB32_B_INSN(X, Z)	{(X), THUMB32_TYPE, R_ARM_THM_JUMP24, (Z)}
#define ARM_INSN(X)		{(X), ARM_TYPE, R_ARM_NONE, 0}
#define ARM_REL_INSN(X, Z)	{(X), ARM_TYPE, R_ARM_JUMP24, (Z)}
#define DATA_WORD(X,Y,Z)	{(X), DATA_TYPE, (Y), (Z)}

/* One element of a stub template.  */
typedef struct
{
  bfd_vma	       data;		/* Insn encoding or literal word.  */
  enum stub_insn_type  type;
  unsigned int	       r_type;		/* Relocation applied to this word.  */
  int		       reloc_addend;
} insn_sequence;
2546
/* Arm/Thumb -> Arm/Thumb long branch stub.  On V5T and above, use blx
   to reach the stub if necessary.  */
static const insn_sequence elf32_arm_stub_long_branch_any_any[] =
{
  ARM_INSN (0xe51ff004),	    /* ldr   pc, [pc, #-4] */
  DATA_WORD (0, R_ARM_ABS32, 0),    /* dcd   R_ARM_ABS32(X) */
};

/* V4T Arm -> Thumb long branch stub.  Used on V4T where blx is not
   available.  */
static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb[] =
{
  ARM_INSN (0xe59fc000),	    /* ldr   ip, [pc, #0] */
  ARM_INSN (0xe12fff1c),	    /* bx    ip */
  DATA_WORD (0, R_ARM_ABS32, 0),    /* dcd   R_ARM_ABS32(X) */
};

/* Thumb -> Thumb long branch stub.  Used on M-profile architectures.  */
static const insn_sequence elf32_arm_stub_long_branch_thumb_only[] =
{
  THUMB16_INSN (0xb401),	     /* push {r0} */
  THUMB16_INSN (0x4802),	     /* ldr  r0, [pc, #8] */
  THUMB16_INSN (0x4684),	     /* mov  ip, r0 */
  THUMB16_INSN (0xbc01),	     /* pop  {r0} */
  THUMB16_INSN (0x4760),	     /* bx   ip */
  THUMB16_INSN (0xbf00),	     /* nop */
  DATA_WORD (0, R_ARM_ABS32, 0),     /* dcd  R_ARM_ABS32(X) */
};

/* Thumb -> Thumb long branch stub in thumb2 encoding.  Used on armv7.  */
static const insn_sequence elf32_arm_stub_long_branch_thumb2_only[] =
{
  THUMB32_INSN (0xf85ff000),	     /* ldr.w  pc, [pc, #-0] */
  DATA_WORD (0, R_ARM_ABS32, 0),     /* dcd  R_ARM_ABS32(x) */
};

/* Thumb -> Thumb long branch stub.  Used for PureCode sections on Thumb2
   M-profile architectures.  Loads no literal word, so suitable for
   execute-only memory.  */
static const insn_sequence elf32_arm_stub_long_branch_thumb2_only_pure[] =
{
  THUMB32_MOVW (0xf2400c00),	     /* mov.w ip, R_ARM_MOVW_ABS_NC */
  THUMB32_MOVT (0xf2c00c00),	     /* movt  ip, R_ARM_MOVT_ABS << 16 */
  THUMB16_INSN (0x4760),	     /* bx   ip */
};

/* V4T Thumb -> Thumb long branch stub.  Using the stack is not
   allowed.  */
static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb[] =
{
  THUMB16_INSN (0x4778),	     /* bx   pc */
  THUMB16_INSN (0x46c0),	     /* nop */
  ARM_INSN (0xe59fc000),	     /* ldr  ip, [pc, #0] */
  ARM_INSN (0xe12fff1c),	     /* bx   ip */
  DATA_WORD (0, R_ARM_ABS32, 0),     /* dcd  R_ARM_ABS32(X) */
};

/* V4T Thumb -> ARM long branch stub.  Used on V4T where blx is not
   available.  */
static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm[] =
{
  THUMB16_INSN (0x4778),	     /* bx   pc */
  THUMB16_INSN (0x46c0),	     /* nop */
  ARM_INSN (0xe51ff004),	     /* ldr   pc, [pc, #-4] */
  DATA_WORD (0, R_ARM_ABS32, 0),     /* dcd   R_ARM_ABS32(X) */
};

/* V4T Thumb -> ARM short branch stub.  Shorter variant of the above
   one, when the destination is close enough.  */
static const insn_sequence elf32_arm_stub_short_branch_v4t_thumb_arm[] =
{
  THUMB16_INSN (0x4778),	     /* bx   pc */
  THUMB16_INSN (0x46c0),	     /* nop */
  ARM_REL_INSN (0xea000000, -8),     /* b    (X-8) */
};
2621
/* ARM/Thumb -> ARM long branch stub, PIC.  On V5T and above, use
   blx to reach the stub if necessary.  */
static const insn_sequence elf32_arm_stub_long_branch_any_arm_pic[] =
{
  ARM_INSN (0xe59fc000),	     /* ldr   ip, [pc] */
  ARM_INSN (0xe08ff00c),	     /* add   pc, pc, ip */
  DATA_WORD (0, R_ARM_REL32, -4),    /* dcd   R_ARM_REL32(X-4) */
};

/* ARM/Thumb -> Thumb long branch stub, PIC.  On V5T and above, use
   blx to reach the stub if necessary.  We can not add into pc;
   it is not guaranteed to mode switch (different in ARMv6 and
   ARMv7).  */
static const insn_sequence elf32_arm_stub_long_branch_any_thumb_pic[] =
{
  ARM_INSN (0xe59fc004),	     /* ldr   ip, [pc, #4] */
  ARM_INSN (0xe08fc00c),	     /* add   ip, pc, ip */
  ARM_INSN (0xe12fff1c),	     /* bx    ip */
  DATA_WORD (0, R_ARM_REL32, 0),     /* dcd   R_ARM_REL32(X) */
};

/* V4T ARM -> Thumb long branch stub, PIC.  (The bx provides the mode
   switch on V4T, where adding into pc would not.)  */
static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb_pic[] =
{
  ARM_INSN (0xe59fc004),	     /* ldr   ip, [pc, #4] */
  ARM_INSN (0xe08fc00c),	     /* add   ip, pc, ip */
  ARM_INSN (0xe12fff1c),	     /* bx    ip */
  DATA_WORD (0, R_ARM_REL32, 0),     /* dcd   R_ARM_REL32(X) */
};

/* V4T Thumb -> ARM long branch stub, PIC.  */
static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm_pic[] =
{
  THUMB16_INSN (0x4778),	     /* bx   pc */
  THUMB16_INSN (0x46c0),	     /* nop */
  ARM_INSN (0xe59fc000),	     /* ldr  ip, [pc, #0] */
  ARM_INSN (0xe08cf00f),	     /* add  pc, ip, pc */
  DATA_WORD (0, R_ARM_REL32, -4),    /* dcd  R_ARM_REL32(X-4) */
};

/* Thumb -> Thumb long branch stub, PIC.  Used on M-profile
   architectures.  */
static const insn_sequence elf32_arm_stub_long_branch_thumb_only_pic[] =
{
  THUMB16_INSN (0xb401),	     /* push {r0} */
  THUMB16_INSN (0x4802),	     /* ldr  r0, [pc, #8] */
  THUMB16_INSN (0x46fc),	     /* mov  ip, pc */
  THUMB16_INSN (0x4484),	     /* add  ip, r0 */
  THUMB16_INSN (0xbc01),	     /* pop  {r0} */
  THUMB16_INSN (0x4760),	     /* bx   ip */
  DATA_WORD (0, R_ARM_REL32, 4),     /* dcd  R_ARM_REL32(X) */
};

/* V4T Thumb -> Thumb long branch stub, PIC.  Using the stack is not
   allowed.  */
static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb_pic[] =
{
  THUMB16_INSN (0x4778),	     /* bx   pc */
  THUMB16_INSN (0x46c0),	     /* nop */
  ARM_INSN (0xe59fc004),	     /* ldr  ip, [pc, #4] */
  ARM_INSN (0xe08fc00c),	     /* add   ip, pc, ip */
  ARM_INSN (0xe12fff1c),	     /* bx   ip */
  DATA_WORD (0, R_ARM_REL32, 0),     /* dcd  R_ARM_REL32(X) */
};

/* Thumb2/ARM -> TLS trampoline.  Lowest common denominator, which is a
   long PIC stub.  We can use r1 as a scratch -- and cannot use ip.  */
static const insn_sequence elf32_arm_stub_long_branch_any_tls_pic[] =
{
  ARM_INSN (0xe59f1000),	     /* ldr   r1, [pc] */
  ARM_INSN (0xe08ff001),	     /* add   pc, pc, r1 */
  DATA_WORD (0, R_ARM_REL32, -4),    /* dcd   R_ARM_REL32(X-4) */
};

/* V4T Thumb -> TLS trampoline.  lowest common denominator, which is a
   long PIC stub.  We can use r1 as a scratch -- and cannot use ip.  */
static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_tls_pic[] =
{
  THUMB16_INSN (0x4778),	     /* bx   pc */
  THUMB16_INSN (0x46c0),	     /* nop */
  ARM_INSN (0xe59f1000),	     /* ldr  r1, [pc, #0] */
  ARM_INSN (0xe081f00f),	     /* add  pc, r1, pc */
  DATA_WORD (0, R_ARM_REL32, -4),    /* dcd  R_ARM_REL32(X-4) */
};
2706
/* NaCl ARM -> ARM long branch stub.  Padded with bkpt/zero words to a
   full NaCl bundle.  */
static const insn_sequence elf32_arm_stub_long_branch_arm_nacl[] =
{
  ARM_INSN (0xe59fc00c),		/* ldr	ip, [pc, #12] */
  ARM_INSN (0xe3ccc13f),		/* bic	ip, ip, #0xc000000f */
  ARM_INSN (0xe12fff1c),		/* bx	ip */
  ARM_INSN (0xe320f000),		/* nop */
  ARM_INSN (0xe125be70),		/* bkpt	0x5be0 */
  DATA_WORD (0, R_ARM_ABS32, 0),	/* dcd	R_ARM_ABS32(X) */
  DATA_WORD (0, R_ARM_NONE, 0),		/* .word 0 */
  DATA_WORD (0, R_ARM_NONE, 0),		/* .word 0 */
};

/* NaCl ARM -> ARM long branch stub, PIC.  */
static const insn_sequence elf32_arm_stub_long_branch_arm_nacl_pic[] =
{
  ARM_INSN (0xe59fc00c),		/* ldr	ip, [pc, #12] */
  ARM_INSN (0xe08cc00f),		/* add	ip, ip, pc */
  ARM_INSN (0xe3ccc13f),		/* bic	ip, ip, #0xc000000f */
  ARM_INSN (0xe12fff1c),		/* bx	ip */
  ARM_INSN (0xe125be70),		/* bkpt	0x5be0 */
  DATA_WORD (0, R_ARM_REL32, 8),	/* dcd	R_ARM_REL32(X+8) */
  DATA_WORD (0, R_ARM_NONE, 0),		/* .word 0 */
  DATA_WORD (0, R_ARM_NONE, 0),		/* .word 0 */
};

/* Stub used for transition to secure state (aka SG veneer).  */
static const insn_sequence elf32_arm_stub_cmse_branch_thumb_only[] =
{
  THUMB32_INSN (0xe97fe97f),		/* sg.  */
  THUMB32_B_INSN (0xf000b800, -4),	/* b.w original_branch_dest.  */
};
2739
2740
/* Cortex-A8 erratum-workaround stubs.  */

/* Stub used for conditional branches (which may be beyond +/-1MB away, so we
   can't use a conditional branch to reach this stub).  */

static const insn_sequence elf32_arm_stub_a8_veneer_b_cond[] =
{
  THUMB16_BCOND_INSN (0xd001),		/* b<cond>.n true.  */
  THUMB32_B_INSN (0xf000b800, -4),	/* b.w insn_after_original_branch.  */
  THUMB32_B_INSN (0xf000b800, -4)	/* true: b.w original_branch_dest.  */
};

/* Stub used for b.w and bl.w instructions.  */

static const insn_sequence elf32_arm_stub_a8_veneer_b[] =
{
  THUMB32_B_INSN (0xf000b800, -4)	/* b.w original_branch_dest.  */
};

static const insn_sequence elf32_arm_stub_a8_veneer_bl[] =
{
  THUMB32_B_INSN (0xf000b800, -4)	/* b.w original_branch_dest.  */
};

/* Stub used for Thumb-2 blx.w instructions.  We modified the original blx.w
   instruction (which switches to ARM mode) to point to this stub.  Jump to the
   real destination using an ARM-mode branch.  */

static const insn_sequence elf32_arm_stub_a8_veneer_blx[] =
{
  ARM_REL_INSN (0xea000000, -8)		/* b original_branch_dest.  */
};
2773
2774 /* For each section group there can be a specially created linker section
2775 to hold the stubs for that group. The name of the stub section is based
2776 upon the name of another section within that group with the suffix below
2777 applied.
2778
2779 PR 13049: STUB_SUFFIX used to be ".stub", but this allowed the user to
2780 create what appeared to be a linker stub section when it actually
2781 contained user code/data. For example, consider this fragment:
2782
2783 const char * stubborn_problems[] = { "np" };
2784
2785 If this is compiled with "-fPIC -fdata-sections" then gcc produces a
2786 section called:
2787
2788 .data.rel.local.stubborn_problems
2789
2790 This then causes problems in arm32_arm_build_stubs() as it triggers:
2791
2792 // Ignore non-stub sections.
2793 if (!strstr (stub_sec->name, STUB_SUFFIX))
2794 continue;
2795
2796 And so the section would be ignored instead of being processed. Hence
2797 the change in definition of STUB_SUFFIX to a name that cannot be a valid
2798 C identifier. */
2799 #define STUB_SUFFIX ".__stub"
2800
/* One entry per long/short branch stub defined above.  This is an
   "X-macro" list: it is expanded twice, once just below (with DEF_STUB
   producing enumerators) to declare enum elf32_arm_stub_type, and once
   further down (with DEF_STUB producing template/size pairs) to build
   stub_definitions[].  The two expansions therefore stay in sync by
   construction.  */
#define DEF_STUBS \
  DEF_STUB(long_branch_any_any)	\
  DEF_STUB(long_branch_v4t_arm_thumb) \
  DEF_STUB(long_branch_thumb_only) \
  DEF_STUB(long_branch_v4t_thumb_thumb)	\
  DEF_STUB(long_branch_v4t_thumb_arm) \
  DEF_STUB(short_branch_v4t_thumb_arm) \
  DEF_STUB(long_branch_any_arm_pic) \
  DEF_STUB(long_branch_any_thumb_pic) \
  DEF_STUB(long_branch_v4t_thumb_thumb_pic) \
  DEF_STUB(long_branch_v4t_arm_thumb_pic) \
  DEF_STUB(long_branch_v4t_thumb_arm_pic) \
  DEF_STUB(long_branch_thumb_only_pic) \
  DEF_STUB(long_branch_any_tls_pic) \
  DEF_STUB(long_branch_v4t_thumb_tls_pic) \
  DEF_STUB(long_branch_arm_nacl) \
  DEF_STUB(long_branch_arm_nacl_pic) \
  DEF_STUB(cmse_branch_thumb_only) \
  DEF_STUB(a8_veneer_b_cond) \
  DEF_STUB(a8_veneer_b) \
  DEF_STUB(a8_veneer_bl) \
  DEF_STUB(a8_veneer_blx) \
  DEF_STUB(long_branch_thumb2_only) \
  DEF_STUB(long_branch_thumb2_only_pure)

/* First expansion: each DEF_STUB(x) becomes the enumerator arm_stub_x.  */
#define DEF_STUB(x) arm_stub_##x,
enum elf32_arm_stub_type
{
  arm_stub_none,
  DEF_STUBS
  max_stub_type
};
#undef DEF_STUB
2835
/* Note the first a8_veneer type: the a8_veneer_* stub types form a
   contiguous run in the enum starting here ("lwm" presumably = low
   water mark -- TODO confirm).  */
const unsigned arm_stub_a8_veneer_lwm = arm_stub_a8_veneer_b_cond;
2838
/* A stub template: the instruction sequence and its entry count.  */
typedef struct
{
  const insn_sequence* template_sequence;
  int template_size;
} stub_def;

/* Second expansion of DEF_STUBS: one template per stub type, indexed by
   enum elf32_arm_stub_type.  The leading {NULL, 0} corresponds to
   arm_stub_none.  */
#define DEF_STUB(x) {elf32_arm_stub_##x, ARRAY_SIZE(elf32_arm_stub_##x)},
static const stub_def stub_definitions[] =
{
  {NULL, 0},
  DEF_STUBS
};
2851
/* An entry in the stub hash table: one generated stub, recording where it
   lives, where it branches to, and the template used to build it.  */
struct elf32_arm_stub_hash_entry
{
  /* Base hash table entry structure.  */
  struct bfd_hash_entry root;

  /* The stub section.  */
  asection *stub_sec;

  /* Offset within stub_sec of the beginning of this stub.  */
  bfd_vma stub_offset;

  /* Given the symbol's value and its section we can determine its final
     value when building the stubs (so the stub knows where to jump).  */
  bfd_vma target_value;
  asection *target_section;

  /* Same as above but for the source of the branch to the stub.  Used for
     Cortex-A8 erratum workaround to patch it to branch to the stub.  As
     such, source section does not need to be recorded since Cortex-A8 erratum
     workaround stubs are only generated when both source and target are in the
     same section.  */
  bfd_vma source_value;

  /* The instruction which caused this stub to be generated (only valid for
     Cortex-A8 erratum workaround stubs at present).  */
  unsigned long orig_insn;

  /* The stub type.  */
  enum elf32_arm_stub_type stub_type;
  /* Its encoding size in bytes.  */
  int stub_size;
  /* Its template.  */
  const insn_sequence *stub_template;
  /* The size of the template (number of entries).  */
  int stub_template_size;

  /* The symbol table entry, if any, that this was derived from.  */
  struct elf32_arm_link_hash_entry *h;

  /* Type of branch.  */
  enum arm_st_branch_type branch_type;

  /* Where this stub is being called from, or, in the case of combined
     stub sections, the first input section in the group.  */
  asection *id_sec;

  /* The name for the local symbol at the start of this stub.  The
     stub name in the hash table has to be unique; this does not, so
     it can be friendlier.  */
  char *output_name;
};
2903
/* Used to build a map of a section.  This is required for mixed-endian
   code/data.  Each entry gives the VMA at which a region starts and its
   state character (presumably the mapping symbol class, e.g. ARM/Thumb
   code vs data -- see AAELF32 mapping symbols; TODO confirm).  */

typedef struct elf32_elf_section_map
{
  bfd_vma vma;
  char type;
}
elf32_arm_section_map;
2913
/* Information about a VFP11 erratum veneer, or a branch to such a veneer.  */

typedef enum
{
  VFP11_ERRATUM_BRANCH_TO_ARM_VENEER,
  VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER,
  VFP11_ERRATUM_ARM_VENEER,
  VFP11_ERRATUM_THUMB_VENEER
}
elf32_vfp11_erratum_type;

/* One node per veneer or per branch-to-veneer; the two kinds point at
   each other through the union.  */
typedef struct elf32_vfp11_erratum_list
{
  struct elf32_vfp11_erratum_list *next;
  bfd_vma vma;
  union
  {
    /* For a branch: the veneer it branches to, and the VFP insn that
       triggered the workaround.  */
    struct
    {
      struct elf32_vfp11_erratum_list *veneer;
      unsigned int vfp_insn;
    } b;
    /* For a veneer: the branch that targets it, and its id.  */
    struct
    {
      struct elf32_vfp11_erratum_list *branch;
      unsigned int id;
    } v;
  } u;
  elf32_vfp11_erratum_type type;
}
elf32_vfp11_erratum_list;
2945
/* Information about a STM32L4XX erratum veneer, or a branch to such a
   veneer.  Mirrors the VFP11 structures above.  */
typedef enum
{
  STM32L4XX_ERRATUM_BRANCH_TO_VENEER,
  STM32L4XX_ERRATUM_VENEER
}
elf32_stm32l4xx_erratum_type;

typedef struct elf32_stm32l4xx_erratum_list
{
  struct elf32_stm32l4xx_erratum_list *next;
  bfd_vma vma;
  union
  {
    /* For a branch: its veneer and the offending insn.  */
    struct
    {
      struct elf32_stm32l4xx_erratum_list *veneer;
      unsigned int insn;
    } b;
    /* For a veneer: the branch targeting it and its id.  */
    struct
    {
      struct elf32_stm32l4xx_erratum_list *branch;
      unsigned int id;
    } v;
  } u;
  elf32_stm32l4xx_erratum_type type;
}
elf32_stm32l4xx_erratum_list;
2975
/* Kinds of edit applied to an .ARM.exidx unwind table entry.  */
typedef enum
{
  DELETE_EXIDX_ENTRY,
  INSERT_EXIDX_CANTUNWIND_AT_END
}
arm_unwind_edit_type;

/* A (sorted) list of edits to apply to an unwind table.  */
typedef struct arm_unwind_table_edit
{
  arm_unwind_edit_type type;
  /* Note: we sometimes want to insert an unwind entry corresponding to a
     section different from the one we're currently writing out, so record the
     (text) section this edit relates to here.  */
  asection *linked_section;
  /* Index of the table entry the edit applies to.  */
  unsigned int index;
  struct arm_unwind_table_edit *next;
}
arm_unwind_table_edit;
2995
/* ARM-specific per-section data, extending the generic ELF section data.
   Retrieved via the elf32_arm_section_data() accessor below.  */
typedef struct _arm_elf_section_data
{
  /* Information about mapping symbols.  */
  struct bfd_elf_section_data elf;
  unsigned int mapcount;
  unsigned int mapsize;
  elf32_arm_section_map *map;
  /* Information about CPU errata.  */
  unsigned int erratumcount;
  elf32_vfp11_erratum_list *erratumlist;
  unsigned int stm32l4xx_erratumcount;
  elf32_stm32l4xx_erratum_list *stm32l4xx_erratumlist;
  unsigned int additional_reloc_count;
  /* Information about unwind tables.  Which arm of the union is valid
     depends on whether this is a text section or an .ARM.exidx section.  */
  union
  {
    /* Unwind info attached to a text section.  */
    struct
    {
      asection *arm_exidx_sec;
    } text;

    /* Unwind info attached to an .ARM.exidx section.  */
    struct
    {
      arm_unwind_table_edit *unwind_edit_list;
      arm_unwind_table_edit *unwind_edit_tail;
    } exidx;
  } u;
}
_arm_elf_section_data;

/* Downcast a section's elf_section_data to the ARM-specific variant.  */
#define elf32_arm_section_data(sec) \
  ((_arm_elf_section_data *) elf_section_data (sec))
3030
/* A fix which might be required for Cortex-A8 Thumb-2 branch/TLB erratum.
   These fixes are subject to a relaxation procedure (in elf32_arm_size_stubs),
   so may be created multiple times: we use an array of these entries whilst
   relaxing which we can refresh easily, then create stubs for each potentially
   erratum-triggering instruction once we've settled on a solution.  */

struct a8_erratum_fix
{
  bfd *input_bfd;
  asection *section;
  /* Location of the branch to be patched and the offset it targets.  */
  bfd_vma offset;
  bfd_vma target_offset;
  /* The original (unpatched) branch instruction.  */
  unsigned long orig_insn;
  char *stub_name;
  enum elf32_arm_stub_type stub_type;
  enum arm_st_branch_type branch_type;
};
3048
/* A table of relocs applied to branches which might trigger Cortex-A8
   erratum.  */

struct a8_erratum_reloc
{
  /* Branch source and destination addresses.  */
  bfd_vma from;
  bfd_vma destination;
  struct elf32_arm_link_hash_entry *hash;
  const char *sym_name;
  unsigned int r_type;
  enum arm_st_branch_type branch_type;
  /* TRUE if the branch already goes through a non-A8 long-branch stub.  */
  bfd_boolean non_a8_stub;
};
3062
/* The size of the thread control block, in bytes.  */
#define TCB_SIZE 8

/* ARM-specific information about a PLT entry, over and above the usual
   gotplt_union.  */
struct arm_plt_info
{
  /* We reference count Thumb references to a PLT entry separately,
     so that we can emit the Thumb trampoline only if needed.  */
  bfd_signed_vma thumb_refcount;

  /* Some references from Thumb code may be eliminated by BL->BLX
     conversion, so record them separately.  */
  bfd_signed_vma maybe_thumb_refcount;

  /* How many of the recorded PLT accesses were from non-call relocations.
     This information is useful when deciding whether anything takes the
     address of an STT_GNU_IFUNC PLT.  A value of 0 means that all
     non-call references to the function should resolve directly to the
     real runtime target.  */
  unsigned int noncall_refcount;

  /* Since PLT entries have variable size if the Thumb prologue is
     used, we need to record the index into .got.plt instead of
     recomputing it from the PLT offset.  */
  bfd_signed_vma got_offset;
};
3090
/* Information about an .iplt entry for a local STT_GNU_IFUNC symbol.
   Local symbols have no elf_link_hash_entry, so this carries the same
   PLT bookkeeping that global symbols keep in their hash entry.  */
struct arm_local_iplt_info
{
  /* The information that is usually found in the generic ELF part of
     the hash table entry.  */
  union gotplt_union root;

  /* The information that is usually found in the ARM-specific part of
     the hash table entry.  */
  struct arm_plt_info arm;

  /* A list of all potential dynamic relocations against this symbol.  */
  struct elf_dyn_relocs *dyn_relocs;
};
3105
/* Structure to handle FDPIC support for local functions: counts of
   funcdesc-style relocations seen, plus the allocated descriptor offset.  */
struct fdpic_local {
  unsigned int funcdesc_cnt;
  unsigned int gotofffuncdesc_cnt;
  /* Offset of the function descriptor; low bit doubles as an
     "already filled in" flag (see arm_elf_fill_funcdesc).  */
  int funcdesc_offset;
};
3112
/* ARM-specific per-BFD data, extending the generic ELF tdata.  The
   local_* arrays are parallel arrays indexed by local symbol number and
   are carved out of one allocation in
   elf32_arm_allocate_local_sym_info.  */
struct elf_arm_obj_tdata
{
  struct elf_obj_tdata root;

  /* tls_type for each local got entry.  */
  char *local_got_tls_type;

  /* GOTPLT entries for TLS descriptors.  */
  bfd_vma *local_tlsdesc_gotent;

  /* Information for local symbols that need entries in .iplt.  */
  struct arm_local_iplt_info **local_iplt;

  /* Zero to warn when linking objects with incompatible enum sizes.  */
  int no_enum_size_warning;

  /* Zero to warn when linking objects with incompatible wchar_t sizes.  */
  int no_wchar_size_warning;

  /* Maintains FDPIC counters and funcdesc info.  */
  struct fdpic_local *local_fdpic_cnts;
};
3135
/* Accessors for the ARM-specific tdata of BFD and its per-local-symbol
   arrays.  */
#define elf_arm_tdata(bfd) \
  ((struct elf_arm_obj_tdata *) (bfd)->tdata.any)

#define elf32_arm_local_got_tls_type(bfd) \
  (elf_arm_tdata (bfd)->local_got_tls_type)

#define elf32_arm_local_tlsdesc_gotent(bfd) \
  (elf_arm_tdata (bfd)->local_tlsdesc_gotent)

#define elf32_arm_local_iplt(bfd) \
  (elf_arm_tdata (bfd)->local_iplt)

#define elf32_arm_local_fdpic_cnts(bfd) \
  (elf_arm_tdata (bfd)->local_fdpic_cnts)

/* TRUE if BFD is an ELF BFD carrying ARM-specific tdata.  */
#define is_arm_elf(bfd) \
  (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
   && elf_tdata (bfd) != NULL \
   && elf_object_id (bfd) == ARM_ELF_DATA)
3155
/* Allocate the ARM-specific tdata (struct elf_arm_obj_tdata) for ABFD.
   Installed as the backend's bfd_mkobject hook.  */

static bfd_boolean
elf32_arm_mkobject (bfd *abfd)
{
  return bfd_elf_allocate_object (abfd, sizeof (struct elf_arm_obj_tdata),
				  ARM_ELF_DATA);
}
3162
/* Downcast a generic hash entry to the ARM-specific variant.  */
#define elf32_arm_hash_entry(ent) ((struct elf32_arm_link_hash_entry *)(ent))

/* Structure to handle FDPIC support for extern functions: relocation
   counts plus the allocated descriptor/GOT offsets (low bit of each
   offset doubles as an "already filled in" flag).  */
struct fdpic_global {
  unsigned int gotofffuncdesc_cnt;
  unsigned int gotfuncdesc_cnt;
  unsigned int funcdesc_cnt;
  int funcdesc_offset;
  int gotfuncdesc_offset;
};
3173
/* Arm ELF linker hash entry.  */
struct elf32_arm_link_hash_entry
{
  struct elf_link_hash_entry root;

  /* Track dynamic relocs copied for this symbol.  */
  struct elf_dyn_relocs *dyn_relocs;

  /* ARM-specific PLT information.  */
  struct arm_plt_info plt;

/* Bit-flags describing how the symbol's GOT entry is used; a symbol may
   need several kinds of TLS entry at once, hence a mask rather than an
   enum.  */
#define GOT_UNKNOWN	0
#define GOT_NORMAL	1
#define GOT_TLS_GD	2
#define GOT_TLS_IE	4
#define GOT_TLS_GDESC	8
#define GOT_TLS_GD_ANY_P(type)	((type & GOT_TLS_GD) || (type & GOT_TLS_GDESC))
  unsigned int tls_type : 8;

  /* True if the symbol's PLT entry is in .iplt rather than .plt.  */
  unsigned int is_iplt : 1;

  unsigned int unused : 23;

  /* Offset of the GOTPLT entry reserved for the TLS descriptor,
     starting at the end of the jump table.  */
  bfd_vma tlsdesc_got;

  /* The symbol marking the real symbol location for exported thumb
     symbols with Arm stubs.  */
  struct elf_link_hash_entry *export_glue;

  /* A pointer to the most recently used stub hash entry against this
     symbol.  */
  struct elf32_arm_stub_hash_entry *stub_cache;

  /* Counter for FDPIC relocations against this symbol.  */
  struct fdpic_global fdpic_cnts;
};
3213
/* Traverse an arm ELF linker hash table.  */
#define elf32_arm_link_hash_traverse(table, func, info)			\
  (elf_link_hash_traverse						\
   (&(table)->root,							\
    (bfd_boolean (*) (struct elf_link_hash_entry *, void *)) (func),	\
    (info)))

/* Get the ARM elf linker hash table from a link_info structure.
   Returns NULL if the hash table does not belong to this backend.  */
#define elf32_arm_hash_table(info) \
  (elf_hash_table_id ((struct elf_link_hash_table *) ((info)->hash)) \
  == ARM_ELF_DATA ? ((struct elf32_arm_link_hash_table *) ((info)->hash)) : NULL)

/* Typed wrapper around bfd_hash_lookup for the stub hash table.  */
#define arm_stub_hash_lookup(table, string, create, copy) \
  ((struct elf32_arm_stub_hash_entry *) \
   bfd_hash_lookup ((table), (string), (create), (copy)))
3229
/* Array to keep track of which stub sections have been created, and
   information on stub grouping.  */
struct map_stub
{
  /* This is the section to which stubs in the group will be
     attached.  */
  asection *link_sec;
  /* The stub section.  */
  asection *stub_sec;
};

/* Size in bytes of the TLS-descriptor part of the GOTPLT jump table
   (one 4-byte slot per allocated descriptor index).  */
#define elf32_arm_compute_jump_table_size(htab) \
  ((htab)->next_tls_desc_index * 4)
3243
/* ARM ELF linker hash table.  */
struct elf32_arm_link_hash_table
{
  /* The main hash table.  */
  struct elf_link_hash_table root;

  /* The size in bytes of the section containing the Thumb-to-ARM glue.  */
  bfd_size_type thumb_glue_size;

  /* The size in bytes of the section containing the ARM-to-Thumb glue.  */
  bfd_size_type arm_glue_size;

  /* The size in bytes of section containing the ARMv4 BX veneers.  */
  bfd_size_type bx_glue_size;

  /* Offsets of ARMv4 BX veneers.  Bit1 set if present, and Bit0 set when
     veneer has been populated.  */
  bfd_vma bx_glue_offset[15];

  /* The size in bytes of the section containing glue for VFP11 erratum
     veneers.  */
  bfd_size_type vfp11_erratum_glue_size;

  /* The size in bytes of the section containing glue for STM32L4XX erratum
     veneers.  */
  bfd_size_type stm32l4xx_erratum_glue_size;

  /* A table of fix locations for Cortex-A8 Thumb-2 branch/TLB erratum.  This
     holds Cortex-A8 erratum fix locations between elf32_arm_size_stubs() and
     elf32_arm_write_section().  */
  struct a8_erratum_fix *a8_erratum_fixes;
  unsigned int num_a8_erratum_fixes;

  /* An arbitrary input BFD chosen to hold the glue sections.  */
  bfd * bfd_of_glue_owner;

  /* Nonzero to output a BE8 image.  */
  int byteswap_code;

  /* Zero if R_ARM_TARGET1 means R_ARM_ABS32.
     Nonzero if R_ARM_TARGET1 means R_ARM_REL32.  */
  int target1_is_rel;

  /* The relocation to use for R_ARM_TARGET2 relocations.  */
  int target2_reloc;

  /* 0 = Ignore R_ARM_V4BX.
     1 = Convert BX to MOV PC.
     2 = Generate v4 interworking stubs.  */
  int fix_v4bx;

  /* Whether we should fix the Cortex-A8 Thumb-2 branch/TLB erratum.  */
  int fix_cortex_a8;

  /* Whether we should fix the ARM1176 BLX immediate issue.  */
  int fix_arm1176;

  /* Nonzero if the ARM/Thumb BLX instructions are available for use.  */
  int use_blx;

  /* What sort of code sequences we should look for which may trigger the
     VFP11 denorm erratum.  */
  bfd_arm_vfp11_fix vfp11_fix;

  /* Global counter for the number of fixes we have emitted.  */
  int num_vfp11_fixes;

  /* What sort of code sequences we should look for which may trigger the
     STM32L4XX erratum.  */
  bfd_arm_stm32l4xx_fix stm32l4xx_fix;

  /* Global counter for the number of fixes we have emitted.  */
  int num_stm32l4xx_fixes;

  /* Nonzero to force PIC branch veneers.  */
  int pic_veneer;

  /* The number of bytes in the initial entry in the PLT.  */
  bfd_size_type plt_header_size;

  /* The number of bytes in the subsequent PLT entries.  */
  bfd_size_type plt_entry_size;

  /* True if the target system is VxWorks.  */
  int vxworks_p;

  /* True if the target system is Symbian OS.  */
  int symbian_p;

  /* True if the target system is Native Client.  */
  int nacl_p;

  /* True if the target uses REL relocations.  */
  bfd_boolean use_rel;

  /* Nonzero if import library must be a secure gateway import library
     as per ARMv8-M Security Extensions.  */
  int cmse_implib;

  /* The import library whose symbols' address must remain stable in
     the import library generated.  */
  bfd *in_implib_bfd;

  /* The index of the next unused R_ARM_TLS_DESC slot in .rel.plt.  */
  bfd_vma next_tls_desc_index;

  /* How many R_ARM_TLS_DESC relocations were generated so far.  */
  bfd_vma num_tls_desc;

  /* The (unloaded but important) VxWorks .rela.plt.unloaded section.  */
  asection *srelplt2;

  /* The offset into splt of the PLT entry for the TLS descriptor
     resolver.  Special values are 0, if not necessary (or not found
     to be necessary yet), and -1 if needed but not determined
     yet.  */
  bfd_vma dt_tlsdesc_plt;

  /* The offset into sgot of the GOT entry used by the PLT entry
     above.  */
  bfd_vma dt_tlsdesc_got;

  /* Offset in .plt section of tls_arm_trampoline.  */
  bfd_vma tls_trampoline;

  /* Data for R_ARM_TLS_LDM32/R_ARM_TLS_LDM32_FDPIC relocations.  */
  union
  {
    bfd_signed_vma refcount;
    bfd_vma offset;
  } tls_ldm_got;

  /* Small local sym cache.  */
  struct sym_cache sym_cache;

  /* For convenience in allocate_dynrelocs.  */
  bfd * obfd;

  /* The amount of space used by the reserved portion of the sgotplt
     section, plus whatever space is used by the jump slots.  */
  bfd_vma sgotplt_jump_table_size;

  /* The stub hash table.  */
  struct bfd_hash_table stub_hash_table;

  /* Linker stub bfd.  */
  bfd *stub_bfd;

  /* Linker call-backs.  */
  asection * (*add_stub_section) (const char *, asection *, asection *,
				  unsigned int);
  void (*layout_sections_again) (void);

  /* Array to keep track of which stub sections have been created, and
     information on stub grouping.  */
  struct map_stub *stub_group;

  /* Input stub section holding secure gateway veneers.  */
  asection *cmse_stub_sec;

  /* Offset in cmse_stub_sec where new SG veneers (not in input import library)
     start to be allocated.  */
  bfd_vma new_cmse_stub_offset;

  /* Number of elements in stub_group.  */
  unsigned int top_id;

  /* Assorted information used by elf32_arm_size_stubs.  */
  unsigned int bfd_count;
  unsigned int top_index;
  asection **input_list;

  /* True if the target system uses FDPIC.  */
  int fdpic_p;

  /* Fixup section.  Used for FDPIC.  */
  asection *srofixup;
};
3422
/* Add an FDPIC read-only fixup: append the 32-bit address OFFSET to
   SROFIXUP.  The section's reloc_count field is reused as the running
   count of fixups emitted so far, giving the next free 4-byte slot.  */
static void
arm_elf_add_rofixup (bfd *output_bfd, asection *srofixup, bfd_vma offset)
{
  bfd_vma fixup_offset;

  fixup_offset = srofixup->reloc_count++ * 4;
  /* The section must have been sized to hold all fixups already.  */
  BFD_ASSERT (fixup_offset < srofixup->size);
  bfd_put_32 (output_bfd, offset, srofixup->contents + fixup_offset);
}
3433
/* Count trailing zero bits in MASK.  Result is undefined for MASK == 0
   with the builtin; the portable fallback returns the bit width.  */
static inline int
ctz (unsigned int mask)
{
#if GCC_VERSION >= 3004
  return __builtin_ctz (mask);
#else
  int shift = 0;

  while (shift < (int) (8 * sizeof (mask)) && (mask & 1) == 0)
    {
      mask >>= 1;
      shift++;
    }
  return shift;
#endif
}
3451
/* Count the set bits in MASK.  */
static inline int
elf32_arm_popcount (unsigned int mask)
{
#if GCC_VERSION >= 3004
  return __builtin_popcount (mask);
#else
  int count = 0;

  /* Kernighan's trick: each iteration clears the lowest set bit, so the
     loop runs once per set bit.  */
  while (mask != 0)
    {
      mask &= mask - 1;
      count++;
    }
  return count;
#endif
}
3470
3471 static void elf32_arm_add_dynreloc (bfd *output_bfd, struct bfd_link_info *info,
3472 asection *sreloc, Elf_Internal_Rela *rel);
3473
/* Write the two-word FDPIC function descriptor at OFFSET in .got, once.
   The low bit of *FUNCDESC_OFFSET is used as an "already filled in"
   flag and is set on completion, so repeated calls are no-ops.

   For PIC output, emit an R_ARM_FUNCDESC_VALUE dynamic relocation
   against DYNINDX at the descriptor's address and store ADDR and SEG as
   the (to-be-relocated) contents.  Otherwise, register read-only fixups
   for both descriptor words and store the final DYNRELOC_VALUE plus the
   GOT base (taken from the _GLOBAL_OFFSET_TABLE_ symbol hgot).  */
static void
arm_elf_fill_funcdesc(bfd *output_bfd,
		      struct bfd_link_info *info,
		      int *funcdesc_offset,
		      int dynindx,
		      int offset,
		      bfd_vma addr,
		      bfd_vma dynreloc_value,
		      bfd_vma seg)
{
  if ((*funcdesc_offset & 1) == 0)
    {
      struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
      asection *sgot = globals->root.sgot;

      if (bfd_link_pic(info))
	{
	  asection *srelgot = globals->root.srelgot;
	  Elf_Internal_Rela outrel;

	  outrel.r_info = ELF32_R_INFO (dynindx, R_ARM_FUNCDESC_VALUE);
	  outrel.r_offset = sgot->output_section->vma + sgot->output_offset + offset;
	  outrel.r_addend = 0;

	  elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
	  bfd_put_32 (output_bfd, addr, sgot->contents + offset);
	  bfd_put_32 (output_bfd, seg, sgot->contents + offset + 4);
	}
      else
	{
	  struct elf_link_hash_entry *hgot = globals->root.hgot;
	  bfd_vma got_value = hgot->root.u.def.value
	    + hgot->root.u.def.section->output_section->vma
	    + hgot->root.u.def.section->output_offset;

	  arm_elf_add_rofixup(output_bfd, globals->srofixup,
			      sgot->output_section->vma + sgot->output_offset
			      + offset);
	  arm_elf_add_rofixup(output_bfd, globals->srofixup,
			      sgot->output_section->vma + sgot->output_offset
			      + offset + 4);
	  bfd_put_32 (output_bfd, dynreloc_value, sgot->contents + offset);
	  bfd_put_32 (output_bfd, got_value, sgot->contents + offset + 4);
	}
      /* Mark the descriptor as filled in.  */
      *funcdesc_offset |= 1;
    }
}
3521
/* Create an entry in an ARM ELF linker hash table.  Follows the standard
   BFD hash "newfunc" protocol: allocate this subclass's storage if the
   caller did not, chain to the superclass initializer, then initialize
   the ARM-specific fields.  */

static struct bfd_hash_entry *
elf32_arm_link_hash_newfunc (struct bfd_hash_entry * entry,
			     struct bfd_hash_table * table,
			     const char * string)
{
  struct elf32_arm_link_hash_entry * ret =
    (struct elf32_arm_link_hash_entry *) entry;

  /* Allocate the structure if it has not already been allocated by a
     subclass.  */
  if (ret == NULL)
    ret = (struct elf32_arm_link_hash_entry *)
	bfd_hash_allocate (table, sizeof (struct elf32_arm_link_hash_entry));
  if (ret == NULL)
    return (struct bfd_hash_entry *) ret;

  /* Call the allocation method of the superclass.  */
  ret = ((struct elf32_arm_link_hash_entry *)
	 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret,
				     table, string));
  if (ret != NULL)
    {
      /* Initialize the ARM-specific portion of the entry.  Offsets use
	 -1 as "not yet assigned".  */
      ret->dyn_relocs = NULL;
      ret->tls_type = GOT_UNKNOWN;
      ret->tlsdesc_got = (bfd_vma) -1;
      ret->plt.thumb_refcount = 0;
      ret->plt.maybe_thumb_refcount = 0;
      ret->plt.noncall_refcount = 0;
      ret->plt.got_offset = -1;
      ret->is_iplt = FALSE;
      ret->export_glue = NULL;

      ret->stub_cache = NULL;

      ret->fdpic_cnts.gotofffuncdesc_cnt = 0;
      ret->fdpic_cnts.gotfuncdesc_cnt = 0;
      ret->fdpic_cnts.funcdesc_cnt = 0;
      ret->fdpic_cnts.funcdesc_offset = -1;
      ret->fdpic_cnts.gotfuncdesc_offset = -1;
    }

  return (struct bfd_hash_entry *) ret;
}
3567
3568 /* Ensure that we have allocated bookkeeping structures for ABFD's local
3569 symbols. */
3570
3571 static bfd_boolean
3572 elf32_arm_allocate_local_sym_info (bfd *abfd)
3573 {
3574 if (elf_local_got_refcounts (abfd) == NULL)
3575 {
3576 bfd_size_type num_syms;
3577 bfd_size_type size;
3578 char *data;
3579
3580 num_syms = elf_tdata (abfd)->symtab_hdr.sh_info;
3581 size = num_syms * (sizeof (bfd_signed_vma)
3582 + sizeof (struct arm_local_iplt_info *)
3583 + sizeof (bfd_vma)
3584 + sizeof (char)
3585 + sizeof (struct fdpic_local));
3586 data = bfd_zalloc (abfd, size);
3587 if (data == NULL)
3588 return FALSE;
3589
3590 elf32_arm_local_fdpic_cnts (abfd) = (struct fdpic_local *) data;
3591 data += num_syms * sizeof (struct fdpic_local);
3592
3593 elf_local_got_refcounts (abfd) = (bfd_signed_vma *) data;
3594 data += num_syms * sizeof (bfd_signed_vma);
3595
3596 elf32_arm_local_iplt (abfd) = (struct arm_local_iplt_info **) data;
3597 data += num_syms * sizeof (struct arm_local_iplt_info *);
3598
3599 elf32_arm_local_tlsdesc_gotent (abfd) = (bfd_vma *) data;
3600 data += num_syms * sizeof (bfd_vma);
3601
3602 elf32_arm_local_got_tls_type (abfd) = data;
3603 }
3604 return TRUE;
3605 }
3606
3607 /* Return the .iplt information for local symbol R_SYMNDX, which belongs
3608 to input bfd ABFD. Create the information if it doesn't already exist.
3609 Return null if an allocation fails. */
3610
3611 static struct arm_local_iplt_info *
3612 elf32_arm_create_local_iplt (bfd *abfd, unsigned long r_symndx)
3613 {
3614 struct arm_local_iplt_info **ptr;
3615
3616 if (!elf32_arm_allocate_local_sym_info (abfd))
3617 return NULL;
3618
3619 BFD_ASSERT (r_symndx < elf_tdata (abfd)->symtab_hdr.sh_info);
3620 ptr = &elf32_arm_local_iplt (abfd)[r_symndx];
3621 if (*ptr == NULL)
3622 *ptr = bfd_zalloc (abfd, sizeof (**ptr));
3623 return *ptr;
3624 }
3625
3626 /* Try to obtain PLT information for the symbol with index R_SYMNDX
3627 in ABFD's symbol table. If the symbol is global, H points to its
3628 hash table entry, otherwise H is null.
3629
3630 Return true if the symbol does have PLT information. When returning
3631 true, point *ROOT_PLT at the target-independent reference count/offset
3632 union and *ARM_PLT at the ARM-specific information. */
3633
3634 static bfd_boolean
3635 elf32_arm_get_plt_info (bfd *abfd, struct elf32_arm_link_hash_table *globals,
3636 struct elf32_arm_link_hash_entry *h,
3637 unsigned long r_symndx, union gotplt_union **root_plt,
3638 struct arm_plt_info **arm_plt)
3639 {
3640 struct arm_local_iplt_info *local_iplt;
3641
3642 if (globals->root.splt == NULL && globals->root.iplt == NULL)
3643 return FALSE;
3644
3645 if (h != NULL)
3646 {
3647 *root_plt = &h->root.plt;
3648 *arm_plt = &h->plt;
3649 return TRUE;
3650 }
3651
3652 if (elf32_arm_local_iplt (abfd) == NULL)
3653 return FALSE;
3654
3655 local_iplt = elf32_arm_local_iplt (abfd)[r_symndx];
3656 if (local_iplt == NULL)
3657 return FALSE;
3658
3659 *root_plt = &local_iplt->root;
3660 *arm_plt = &local_iplt->arm;
3661 return TRUE;
3662 }
3663
3664 static bfd_boolean using_thumb_only (struct elf32_arm_link_hash_table *globals);
3665
3666 /* Return true if the PLT described by ARM_PLT requires a Thumb stub
3667 before it. */
3668
3669 static bfd_boolean
3670 elf32_arm_plt_needs_thumb_stub_p (struct bfd_link_info *info,
3671 struct arm_plt_info *arm_plt)
3672 {
3673 struct elf32_arm_link_hash_table *htab;
3674
3675 htab = elf32_arm_hash_table (info);
3676
3677 return (!using_thumb_only(htab) && (arm_plt->thumb_refcount != 0
3678 || (!htab->use_blx && arm_plt->maybe_thumb_refcount != 0)));
3679 }
3680
/* Return a pointer to the head of the dynamic reloc list that should
   be used for local symbol ISYM, which is symbol number R_SYMNDX in
   ABFD's symbol table.  Return null if an error occurs.  */

static struct elf_dyn_relocs **
elf32_arm_get_local_dynreloc_list (bfd *abfd, unsigned long r_symndx,
				   Elf_Internal_Sym *isym)
{
  if (ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
    {
      /* IFUNC symbols keep their dynamic relocs in their .iplt record.  */
      struct arm_local_iplt_info *local_iplt;

      local_iplt = elf32_arm_create_local_iplt (abfd, r_symndx);
      if (local_iplt == NULL)
	return NULL;
      return &local_iplt->dyn_relocs;
    }
  else
    {
      /* Track dynamic relocs needed for local syms too.
	 We really need local syms available to do this
	 easily.  Oh well.  */
      asection *s;
      void *vpp;

      s = bfd_section_from_elf_index (abfd, isym->st_shndx);
      if (s == NULL)
	abort ();

      vpp = &elf_section_data (s)->local_dynrel;
      return (struct elf_dyn_relocs **) vpp;
    }
}
3714
/* Initialize an entry in the stub hash table.  Standard BFD hash
   "newfunc": allocate subclass storage if needed, chain to the base
   initializer, then reset the stub-specific fields.  */

static struct bfd_hash_entry *
stub_hash_newfunc (struct bfd_hash_entry *entry,
		   struct bfd_hash_table *table,
		   const char *string)
{
  /* Allocate the structure if it has not already been allocated by a
     subclass.  */
  if (entry == NULL)
    {
      entry = (struct bfd_hash_entry *)
	  bfd_hash_allocate (table, sizeof (struct elf32_arm_stub_hash_entry));
      if (entry == NULL)
	return entry;
    }

  /* Call the allocation method of the superclass.  */
  entry = bfd_hash_newfunc (entry, table, string);
  if (entry != NULL)
    {
      struct elf32_arm_stub_hash_entry *eh;

      /* Initialize the local fields.  (bfd_vma) -1 marks an offset not
	 yet assigned.  */
      eh = (struct elf32_arm_stub_hash_entry *) entry;
      eh->stub_sec = NULL;
      eh->stub_offset = (bfd_vma) -1;
      eh->source_value = 0;
      eh->target_value = 0;
      eh->target_section = NULL;
      eh->orig_insn = 0;
      eh->stub_type = arm_stub_none;
      eh->stub_size = 0;
      eh->stub_template = NULL;
      eh->stub_template_size = -1;
      eh->h = NULL;
      eh->id_sec = NULL;
      eh->output_name = NULL;
    }

  return entry;
}
3757
/* Create .got, .gotplt, and .rel(a).got sections in DYNOBJ, and set up
   shortcuts to them in our hash table.  For FDPIC targets also create
   the read-only .rofixup section.  */

static bfd_boolean
create_got_section (bfd *dynobj, struct bfd_link_info *info)
{
  struct elf32_arm_link_hash_table *htab;

  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return FALSE;

  /* BPABI objects never have a GOT, or associated sections.  */
  if (htab->symbian_p)
    return TRUE;

  if (! _bfd_elf_create_got_section (dynobj, info))
    return FALSE;

  /* Also create .rofixup.  */
  if (htab->fdpic_p)
    {
      htab->srofixup = bfd_make_section_with_flags (dynobj, ".rofixup",
						    (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS
						     | SEC_IN_MEMORY | SEC_LINKER_CREATED | SEC_READONLY));
      if (htab->srofixup == NULL || ! bfd_set_section_alignment (dynobj, htab->srofixup, 2))
	return FALSE;
    }

  return TRUE;
}
3789
/* Create the .iplt, .rel(a).iplt and .igot.plt sections in the dynamic
   object, if they do not exist yet, and record them in the hash table's
   root.  Returns FALSE on allocation failure.  */

static bfd_boolean
create_ifunc_sections (struct bfd_link_info *info)
{
  struct elf32_arm_link_hash_table *htab;
  const struct elf_backend_data *bed;
  bfd *dynobj;
  asection *s;
  flagword flags;

  htab = elf32_arm_hash_table (info);
  dynobj = htab->root.dynobj;
  bed = get_elf_backend_data (dynobj);
  flags = bed->dynamic_sec_flags;

  if (htab->root.iplt == NULL)
    {
      /* .iplt holds code, hence the extra READONLY | CODE flags.  */
      s = bfd_make_section_anyway_with_flags (dynobj, ".iplt",
					      flags | SEC_READONLY | SEC_CODE);
      if (s == NULL
	  || !bfd_set_section_alignment (dynobj, s, bed->plt_alignment))
	return FALSE;
      htab->root.iplt = s;
    }

  if (htab->root.irelplt == NULL)
    {
      s = bfd_make_section_anyway_with_flags (dynobj,
					      RELOC_SECTION (htab, ".iplt"),
					      flags | SEC_READONLY);
      if (s == NULL
	  || !bfd_set_section_alignment (dynobj, s, bed->s->log_file_align))
	return FALSE;
      htab->root.irelplt = s;
    }

  if (htab->root.igotplt == NULL)
    {
      s = bfd_make_section_anyway_with_flags (dynobj, ".igot.plt", flags);
      if (s == NULL
	  || !bfd_set_section_alignment (dynobj, s, bed->s->log_file_align))
	return FALSE;
      htab->root.igotplt = s;
    }
  return TRUE;
}
3837
3838 /* Determine if we're dealing with a Thumb only architecture. */
3839
3840 static bfd_boolean
3841 using_thumb_only (struct elf32_arm_link_hash_table *globals)
3842 {
3843 int arch;
3844 int profile = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3845 Tag_CPU_arch_profile);
3846
3847 if (profile)
3848 return profile == 'M';
3849
3850 arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);
3851
3852 /* Force return logic to be reviewed for each new architecture. */
3853 BFD_ASSERT (arch <= TAG_CPU_ARCH_V8_1M_MAIN);
3854
3855 if (arch == TAG_CPU_ARCH_V6_M
3856 || arch == TAG_CPU_ARCH_V6S_M
3857 || arch == TAG_CPU_ARCH_V7E_M
3858 || arch == TAG_CPU_ARCH_V8M_BASE
3859 || arch == TAG_CPU_ARCH_V8M_MAIN
3860 || arch == TAG_CPU_ARCH_V8_1M_MAIN)
3861 return TRUE;
3862
3863 return FALSE;
3864 }
3865
3866 /* Determine if we're dealing with a Thumb-2 object. */
3867
3868 static bfd_boolean
3869 using_thumb2 (struct elf32_arm_link_hash_table *globals)
3870 {
3871 int arch;
3872 int thumb_isa = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3873 Tag_THUMB_ISA_use);
3874
3875 if (thumb_isa)
3876 return thumb_isa == 2;
3877
3878 arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);
3879
3880 /* Force return logic to be reviewed for each new architecture. */
3881 BFD_ASSERT (arch <= TAG_CPU_ARCH_V8_1M_MAIN);
3882
3883 return (arch == TAG_CPU_ARCH_V6T2
3884 || arch == TAG_CPU_ARCH_V7
3885 || arch == TAG_CPU_ARCH_V7E_M
3886 || arch == TAG_CPU_ARCH_V8
3887 || arch == TAG_CPU_ARCH_V8R
3888 || arch == TAG_CPU_ARCH_V8M_MAIN
3889 || arch == TAG_CPU_ARCH_V8_1M_MAIN);
3890 }
3891
3892 /* Determine whether Thumb-2 BL instruction is available. */
3893
3894 static bfd_boolean
3895 using_thumb2_bl (struct elf32_arm_link_hash_table *globals)
3896 {
3897 int arch =
3898 bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);
3899
3900 /* Force return logic to be reviewed for each new architecture. */
3901 BFD_ASSERT (arch <= TAG_CPU_ARCH_V8_1M_MAIN);
3902
3903 /* Architecture was introduced after ARMv6T2 (eg. ARMv6-M). */
3904 return (arch == TAG_CPU_ARCH_V6T2
3905 || arch >= TAG_CPU_ARCH_V7);
3906 }
3907
3908 /* Create .plt, .rel(a).plt, .got, .got.plt, .rel(a).got, .dynbss, and
3909 .rel(a).bss sections in DYNOBJ, and set up shortcuts to them in our
3910 hash table. */
3911
/* Create .plt, .rel(a).plt, .got, .got.plt, .rel(a).got, .dynbss and
   .rel(a).bss in DYNOBJ, adjust the PLT layout parameters for the
   target variant (VxWorks, Thumb-only, FDPIC), and verify that the
   generic ELF code created the sections we rely on.  Returns FALSE on
   failure.  */

static bfd_boolean
elf32_arm_create_dynamic_sections (bfd *dynobj, struct bfd_link_info *info)
{
  struct elf32_arm_link_hash_table *htab;

  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return FALSE;

  /* Make sure a GOT exists first; the generic code below assumes it.  */
  if (!htab->root.sgot && !create_got_section (dynobj, info))
    return FALSE;

  if (!_bfd_elf_create_dynamic_sections (dynobj, info))
    return FALSE;

  if (htab->vxworks_p)
    {
      if (!elf_vxworks_create_dynamic_sections (dynobj, info, &htab->srelplt2))
	return FALSE;

      /* VxWorks uses different PLT templates for shared and executable
	 links; shared links have no PLT header.  */
      if (bfd_link_pic (info))
	{
	  htab->plt_header_size = 0;
	  htab->plt_entry_size
	    = 4 * ARRAY_SIZE (elf32_arm_vxworks_shared_plt_entry);
	}
      else
	{
	  htab->plt_header_size
	    = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt0_entry);
	  htab->plt_entry_size
	    = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt_entry);
	}

      if (elf_elfheader (dynobj))
	elf_elfheader (dynobj)->e_ident[EI_CLASS] = ELFCLASS32;
    }
  else
    {
      /* PR ld/16017
	 Test for thumb only architectures.  Note - we cannot just call
	 using_thumb_only() as the attributes in the output bfd have not been
	 initialised at this point, so instead we use the input bfd.  */
      bfd * saved_obfd = htab->obfd;

      /* Temporarily point the attribute lookups at DYNOBJ; restored
	 immediately below.  */
      htab->obfd = dynobj;
      if (using_thumb_only (htab))
	{
	  htab->plt_header_size = 4 * ARRAY_SIZE (elf32_thumb2_plt0_entry);
	  htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_thumb2_plt_entry);
	}
      htab->obfd = saved_obfd;
    }

  /* FDPIC overrides any sizes chosen above: no PLT header, and a
     shorter entry (5 words fewer) when lazy binding is disabled.  */
  if (htab->fdpic_p) {
    htab->plt_header_size = 0;
    if (info->flags & DF_BIND_NOW)
      htab->plt_entry_size = 4 * (ARRAY_SIZE(elf32_arm_fdpic_plt_entry) - 5);
    else
      htab->plt_entry_size = 4 * ARRAY_SIZE(elf32_arm_fdpic_plt_entry);
  }

  /* These sections must exist by now; a missing one indicates an
     internal inconsistency, not a user error.  */
  if (!htab->root.splt
      || !htab->root.srelplt
      || !htab->root.sdynbss
      || (!bfd_link_pic (info) && !htab->root.srelbss))
    abort ();

  return TRUE;
}
3982
3983 /* Copy the extra info we tack onto an elf_link_hash_entry. */
3984
/* Copy the extra info we tack onto an elf_link_hash_entry from the
   indirect symbol IND to the symbol DIR it points at, then hand off to
   the generic copier.  Called when symbols are merged (e.g. versioned
   symbols or --wrap).  */

static void
elf32_arm_copy_indirect_symbol (struct bfd_link_info *info,
				struct elf_link_hash_entry *dir,
				struct elf_link_hash_entry *ind)
{
  struct elf32_arm_link_hash_entry *edir, *eind;

  edir = (struct elf32_arm_link_hash_entry *) dir;
  eind = (struct elf32_arm_link_hash_entry *) ind;

  if (eind->dyn_relocs != NULL)
    {
      if (edir->dyn_relocs != NULL)
	{
	  struct elf_dyn_relocs **pp;
	  struct elf_dyn_relocs *p;

	  /* Add reloc counts against the indirect sym to the direct sym
	     list.  Merge any entries against the same section.  */
	  for (pp = &eind->dyn_relocs; (p = *pp) != NULL; )
	    {
	      struct elf_dyn_relocs *q;

	      /* Look for an existing record for the same section; if
		 found, fold P's counts into it and unlink P.  */
	      for (q = edir->dyn_relocs; q != NULL; q = q->next)
		if (q->sec == p->sec)
		  {
		    q->pc_count += p->pc_count;
		    q->count += p->count;
		    *pp = p->next;
		    break;
		  }
	      if (q == NULL)
		pp = &p->next;
	    }
	  /* Splice the (already merged) direct list onto the tail of
	     what remains of the indirect list.  */
	  *pp = edir->dyn_relocs;
	}

      /* The combined list now hangs off EIND; move it to EDIR.  */
      edir->dyn_relocs = eind->dyn_relocs;
      eind->dyn_relocs = NULL;
    }

  if (ind->root.type == bfd_link_hash_indirect)
    {
      /* Copy over PLT info.  */
      edir->plt.thumb_refcount += eind->plt.thumb_refcount;
      eind->plt.thumb_refcount = 0;
      edir->plt.maybe_thumb_refcount += eind->plt.maybe_thumb_refcount;
      eind->plt.maybe_thumb_refcount = 0;
      edir->plt.noncall_refcount += eind->plt.noncall_refcount;
      eind->plt.noncall_refcount = 0;

      /* Copy FDPIC counters.  */
      edir->fdpic_cnts.gotofffuncdesc_cnt += eind->fdpic_cnts.gotofffuncdesc_cnt;
      edir->fdpic_cnts.gotfuncdesc_cnt += eind->fdpic_cnts.gotfuncdesc_cnt;
      edir->fdpic_cnts.funcdesc_cnt += eind->fdpic_cnts.funcdesc_cnt;

      /* We should only allocate a function to .iplt once the final
	 symbol information is known.  */
      BFD_ASSERT (!eind->is_iplt);

      /* Only take the indirect symbol's TLS model if the direct symbol
	 has no GOT references of its own.  */
      if (dir->got.refcount <= 0)
	{
	  edir->tls_type = eind->tls_type;
	  eind->tls_type = GOT_UNKNOWN;
	}
    }

  _bfd_elf_link_hash_copy_indirect (info, dir, ind);
}
4054
4055 /* Destroy an ARM elf linker hash table. */
4056
4057 static void
4058 elf32_arm_link_hash_table_free (bfd *obfd)
4059 {
4060 struct elf32_arm_link_hash_table *ret
4061 = (struct elf32_arm_link_hash_table *) obfd->link.hash;
4062
4063 bfd_hash_table_free (&ret->stub_hash_table);
4064 _bfd_elf_link_hash_table_free (obfd);
4065 }
4066
4067 /* Create an ARM elf linker hash table. */
4068
4069 static struct bfd_link_hash_table *
4070 elf32_arm_link_hash_table_create (bfd *abfd)
4071 {
4072 struct elf32_arm_link_hash_table *ret;
4073 bfd_size_type amt = sizeof (struct elf32_arm_link_hash_table);
4074
4075 ret = (struct elf32_arm_link_hash_table *) bfd_zmalloc (amt);
4076 if (ret == NULL)
4077 return NULL;
4078
4079 if (!_bfd_elf_link_hash_table_init (& ret->root, abfd,
4080 elf32_arm_link_hash_newfunc,
4081 sizeof (struct elf32_arm_link_hash_entry),
4082 ARM_ELF_DATA))
4083 {
4084 free (ret);
4085 return NULL;
4086 }
4087
4088 ret->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
4089 ret->stm32l4xx_fix = BFD_ARM_STM32L4XX_FIX_NONE;
4090 #ifdef FOUR_WORD_PLT
4091 ret->plt_header_size = 16;
4092 ret->plt_entry_size = 16;
4093 #else
4094 ret->plt_header_size = 20;
4095 ret->plt_entry_size = elf32_arm_use_long_plt_entry ? 16 : 12;
4096 #endif
4097 ret->use_rel = TRUE;
4098 ret->obfd = abfd;
4099 ret->fdpic_p = 0;
4100
4101 if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc,
4102 sizeof (struct elf32_arm_stub_hash_entry)))
4103 {
4104 _bfd_elf_link_hash_table_free (abfd);
4105 return NULL;
4106 }
4107 ret->root.root.hash_table_free = elf32_arm_link_hash_table_free;
4108
4109 return &ret->root.root;
4110 }
4111
4112 /* Determine what kind of NOPs are available. */
4113
4114 static bfd_boolean
4115 arch_has_arm_nop (struct elf32_arm_link_hash_table *globals)
4116 {
4117 const int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
4118 Tag_CPU_arch);
4119
4120 /* Force return logic to be reviewed for each new architecture. */
4121 BFD_ASSERT (arch <= TAG_CPU_ARCH_V8_1M_MAIN);
4122
4123 return (arch == TAG_CPU_ARCH_V6T2
4124 || arch == TAG_CPU_ARCH_V6K
4125 || arch == TAG_CPU_ARCH_V7
4126 || arch == TAG_CPU_ARCH_V8
4127 || arch == TAG_CPU_ARCH_V8R);
4128 }
4129
4130 static bfd_boolean
4131 arm_stub_is_thumb (enum elf32_arm_stub_type stub_type)
4132 {
4133 switch (stub_type)
4134 {
4135 case arm_stub_long_branch_thumb_only:
4136 case arm_stub_long_branch_thumb2_only:
4137 case arm_stub_long_branch_thumb2_only_pure:
4138 case arm_stub_long_branch_v4t_thumb_arm:
4139 case arm_stub_short_branch_v4t_thumb_arm:
4140 case arm_stub_long_branch_v4t_thumb_arm_pic:
4141 case arm_stub_long_branch_v4t_thumb_tls_pic:
4142 case arm_stub_long_branch_thumb_only_pic:
4143 case arm_stub_cmse_branch_thumb_only:
4144 return TRUE;
4145 case arm_stub_none:
4146 BFD_FAIL ();
4147 return FALSE;
4148 break;
4149 default:
4150 return FALSE;
4151 }
4152 }
4153
4154 /* Determine the type of stub needed, if any, for a call. */
4155
/* Determine the type of stub needed, if any, for a call.

   INFO is the link; INPUT_SEC/REL locate the branch; ST_TYPE and
   *ACTUAL_BRANCH_TYPE describe the target symbol HASH (may be NULL for
   local symbols); DESTINATION is the target address; SYM_SEC is the
   target's section (may be NULL); NAME is used for diagnostics only.

   Returns arm_stub_none if the branch reaches directly, otherwise the
   stub type required.  On return with a stub, *ACTUAL_BRANCH_TYPE is
   updated to the state (ARM/Thumb) that will really be branched to.  */

static enum elf32_arm_stub_type
arm_type_of_stub (struct bfd_link_info *info,
		  asection *input_sec,
		  const Elf_Internal_Rela *rel,
		  unsigned char st_type,
		  enum arm_st_branch_type *actual_branch_type,
		  struct elf32_arm_link_hash_entry *hash,
		  bfd_vma destination,
		  asection *sym_sec,
		  bfd *input_bfd,
		  const char *name)
{
  bfd_vma location;
  bfd_signed_vma branch_offset;
  unsigned int r_type;
  struct elf32_arm_link_hash_table * globals;
  bfd_boolean thumb2, thumb2_bl, thumb_only;
  enum elf32_arm_stub_type stub_type = arm_stub_none;
  int use_plt = 0;
  enum arm_st_branch_type branch_type = *actual_branch_type;
  union gotplt_union *root_plt;
  struct arm_plt_info *arm_plt;
  int arch;
  int thumb2_movw;

  /* ST_BRANCH_LONG marks branches that never need a stub.  */
  if (branch_type == ST_BRANCH_LONG)
    return stub_type;

  globals = elf32_arm_hash_table (info);
  if (globals == NULL)
    return stub_type;

  thumb_only = using_thumb_only (globals);
  thumb2 = using_thumb2 (globals);
  thumb2_bl = using_thumb2_bl (globals);

  arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);

  /* True for architectures that implement the thumb2 movw instruction.  */
  thumb2_movw = thumb2 || (arch == TAG_CPU_ARCH_V8M_BASE);

  /* Determine where the call point is.  */
  location = (input_sec->output_offset
	      + input_sec->output_section->vma
	      + rel->r_offset);

  r_type = ELF32_R_TYPE (rel->r_info);

  /* ST_BRANCH_TO_ARM is nonsense to thumb-only targets when we
     are considering a function call relocation.  */
  if (thumb_only && (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24
		     || r_type == R_ARM_THM_JUMP19)
      && branch_type == ST_BRANCH_TO_ARM)
    branch_type = ST_BRANCH_TO_THUMB;

  /* For TLS call relocs, it is the caller's responsibility to provide
     the address of the appropriate trampoline.  */
  if (r_type != R_ARM_TLS_CALL
      && r_type != R_ARM_THM_TLS_CALL
      && elf32_arm_get_plt_info (input_bfd, globals, hash,
				 ELF32_R_SYM (rel->r_info), &root_plt,
				 &arm_plt)
      && root_plt->offset != (bfd_vma) -1)
    {
      asection *splt;

      /* The call is redirected to a PLT (or IPLT) entry; from here on,
	 DESTINATION is the stub/PLT address rather than the symbol.  */
      if (hash == NULL || hash->is_iplt)
	splt = globals->root.iplt;
      else
	splt = globals->root.splt;
      if (splt != NULL)
	{
	  use_plt = 1;

	  /* Note when dealing with PLT entries: the main PLT stub is in
	     ARM mode, so if the branch is in Thumb mode, another
	     Thumb->ARM stub will be inserted later just before the ARM
	     PLT stub.  If a long branch stub is needed, we'll add a
	     Thumb->Arm one and branch directly to the ARM PLT entry.
	     Here, we have to check if a pre-PLT Thumb->ARM stub
	     is needed and if it will be close enough.  */

	  destination = (splt->output_section->vma
			 + splt->output_offset
			 + root_plt->offset);
	  st_type = STT_FUNC;

	  /* Thumb branch/call to PLT: it can become a branch to ARM
	     or to Thumb. We must perform the same checks and
	     corrections as in elf32_arm_final_link_relocate.  */
	  if ((r_type == R_ARM_THM_CALL)
	      || (r_type == R_ARM_THM_JUMP24))
	    {
	      if (globals->use_blx
		  && r_type == R_ARM_THM_CALL
		  && !thumb_only)
		{
		  /* If the Thumb BLX instruction is available, convert
		     the BL to a BLX instruction to call the ARM-mode
		     PLT entry.  */
		  branch_type = ST_BRANCH_TO_ARM;
		}
	      else
		{
		  if (!thumb_only)
		    /* Target the Thumb stub before the ARM PLT entry.  */
		    destination -= PLT_THUMB_STUB_SIZE;
		  branch_type = ST_BRANCH_TO_THUMB;
		}
	    }
	  else
	    {
	      branch_type = ST_BRANCH_TO_ARM;
	    }
	}
    }
  /* Calls to STT_GNU_IFUNC symbols should go through a PLT.  */
  BFD_ASSERT (st_type != STT_GNU_IFUNC);

  branch_offset = (bfd_signed_vma)(destination - location);

  if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24
      || r_type == R_ARM_THM_TLS_CALL || r_type == R_ARM_THM_JUMP19)
    {
      /* Handle cases where:
	 - this call goes too far (different Thumb/Thumb2 max
	   distance)
	 - it's a Thumb->Arm call and blx is not available, or it's a
	   Thumb->Arm branch (not bl). A stub is needed in this case,
	   but only if this call is not through a PLT entry. Indeed,
	   PLT stubs handle mode switching already.  */
      if ((!thumb2_bl
	   && (branch_offset > THM_MAX_FWD_BRANCH_OFFSET
	       || (branch_offset < THM_MAX_BWD_BRANCH_OFFSET)))
	  || (thumb2_bl
	      && (branch_offset > THM2_MAX_FWD_BRANCH_OFFSET
		  || (branch_offset < THM2_MAX_BWD_BRANCH_OFFSET)))
	  || (thumb2
	      && (branch_offset > THM2_MAX_FWD_COND_BRANCH_OFFSET
		  || (branch_offset < THM2_MAX_BWD_COND_BRANCH_OFFSET))
	      && (r_type == R_ARM_THM_JUMP19))
	  || (branch_type == ST_BRANCH_TO_ARM
	      && (((r_type == R_ARM_THM_CALL
		    || r_type == R_ARM_THM_TLS_CALL) && !globals->use_blx)
		  || (r_type == R_ARM_THM_JUMP24)
		  || (r_type == R_ARM_THM_JUMP19))
	      && !use_plt))
	{
	  /* If we need to insert a Thumb-Thumb long branch stub to a
	     PLT, use one that branches directly to the ARM PLT
	     stub. If we pretended we'd use the pre-PLT Thumb->ARM
	     stub, undo this now.  */
	  if ((branch_type == ST_BRANCH_TO_THUMB) && use_plt && !thumb_only)
	    {
	      branch_type = ST_BRANCH_TO_ARM;
	      branch_offset += PLT_THUMB_STUB_SIZE;
	    }

	  if (branch_type == ST_BRANCH_TO_THUMB)
	    {
	      /* Thumb to thumb.  */
	      if (!thumb_only)
		{
		  if (input_sec->flags & SEC_ELF_PURECODE)
		    _bfd_error_handler
		      (_("%pB(%pA): warning: long branch veneers used in"
			 " section with SHF_ARM_PURECODE section"
			 " attribute is only supported for M-profile"
			 " targets that implement the movw instruction"),
		       input_bfd, input_sec);

		  stub_type = (bfd_link_pic (info) | globals->pic_veneer)
		    /* PIC stubs.  */
		    ? ((globals->use_blx
			&& (r_type == R_ARM_THM_CALL))
		       /* V5T and above. Stub starts with ARM code, so
			  we must be able to switch mode before
			  reaching it, which is only possible for 'bl'
			  (ie R_ARM_THM_CALL relocation).  */
		       ? arm_stub_long_branch_any_thumb_pic
		       /* On V4T, use Thumb code only.  */
		       : arm_stub_long_branch_v4t_thumb_thumb_pic)

		    /* non-PIC stubs.  */
		    : ((globals->use_blx
			&& (r_type == R_ARM_THM_CALL))
		       /* V5T and above.  */
		       ? arm_stub_long_branch_any_any
		       /* V4T.  */
		       : arm_stub_long_branch_v4t_thumb_thumb);
		}
	      else
		{
		  if (thumb2_movw && (input_sec->flags & SEC_ELF_PURECODE))
		    stub_type = arm_stub_long_branch_thumb2_only_pure;
		  else
		    {
		      if (input_sec->flags & SEC_ELF_PURECODE)
			_bfd_error_handler
			  (_("%pB(%pA): warning: long branch veneers used in"
			     " section with SHF_ARM_PURECODE section"
			     " attribute is only supported for M-profile"
			     " targets that implement the movw instruction"),
			   input_bfd, input_sec);

		      stub_type = (bfd_link_pic (info) | globals->pic_veneer)
			/* PIC stub.  */
			? arm_stub_long_branch_thumb_only_pic
			/* non-PIC stub.  */
			: (thumb2 ? arm_stub_long_branch_thumb2_only
				  : arm_stub_long_branch_thumb_only);
		    }
		}
	    }
	  else
	    {
	      if (input_sec->flags & SEC_ELF_PURECODE)
		_bfd_error_handler
		  (_("%pB(%pA): warning: long branch veneers used in"
		     " section with SHF_ARM_PURECODE section"
		     " attribute is only supported" " for M-profile"
		     " targets that implement the movw instruction"),
		   input_bfd, input_sec);

	      /* Thumb to arm.  */
	      if (sym_sec != NULL
		  && sym_sec->owner != NULL
		  && !INTERWORK_FLAG (sym_sec->owner))
		{
		  _bfd_error_handler
		    (_("%pB(%s): warning: interworking not enabled;"
		       " first occurrence: %pB: %s call to %s"),
		     sym_sec->owner, name, input_bfd, "Thumb", "ARM");
		}

	      stub_type =
		(bfd_link_pic (info) | globals->pic_veneer)
		/* PIC stubs.  */
		? (r_type == R_ARM_THM_TLS_CALL
		   /* TLS PIC stubs.  */
		   ? (globals->use_blx ? arm_stub_long_branch_any_tls_pic
		      : arm_stub_long_branch_v4t_thumb_tls_pic)
		   : ((globals->use_blx && r_type == R_ARM_THM_CALL)
		      /* V5T PIC and above.  */
		      ? arm_stub_long_branch_any_arm_pic
		      /* V4T PIC stub.  */
		      : arm_stub_long_branch_v4t_thumb_arm_pic))

		/* non-PIC stubs.  */
		: ((globals->use_blx && r_type == R_ARM_THM_CALL)
		   /* V5T and above.  */
		   ? arm_stub_long_branch_any_any
		   /* V4T.  */
		   : arm_stub_long_branch_v4t_thumb_arm);

	      /* Handle v4t short branches.  */
	      if ((stub_type == arm_stub_long_branch_v4t_thumb_arm)
		  && (branch_offset <= THM_MAX_FWD_BRANCH_OFFSET)
		  && (branch_offset >= THM_MAX_BWD_BRANCH_OFFSET))
		stub_type = arm_stub_short_branch_v4t_thumb_arm;
	    }
	}
    }
  else if (r_type == R_ARM_CALL
	   || r_type == R_ARM_JUMP24
	   || r_type == R_ARM_PLT32
	   || r_type == R_ARM_TLS_CALL)
    {
      if (input_sec->flags & SEC_ELF_PURECODE)
	_bfd_error_handler
	  (_("%pB(%pA): warning: long branch veneers used in"
	     " section with SHF_ARM_PURECODE section"
	     " attribute is only supported for M-profile"
	     " targets that implement the movw instruction"),
	   input_bfd, input_sec);
      if (branch_type == ST_BRANCH_TO_THUMB)
	{
	  /* Arm to thumb.  */

	  if (sym_sec != NULL
	      && sym_sec->owner != NULL
	      && !INTERWORK_FLAG (sym_sec->owner))
	    {
	      _bfd_error_handler
		(_("%pB(%s): warning: interworking not enabled;"
		   " first occurrence: %pB: %s call to %s"),
		 sym_sec->owner, name, input_bfd, "ARM", "Thumb");
	    }

	  /* We have an extra 2-bytes reach because of
	     the mode change (bit 24 (H) of BLX encoding).  */
	  if (branch_offset > (ARM_MAX_FWD_BRANCH_OFFSET + 2)
	      || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET)
	      || (r_type == R_ARM_CALL && !globals->use_blx)
	      || (r_type == R_ARM_JUMP24)
	      || (r_type == R_ARM_PLT32))
	    {
	      stub_type = (bfd_link_pic (info) | globals->pic_veneer)
		/* PIC stubs.  */
		? ((globals->use_blx)
		   /* V5T and above.  */
		   ? arm_stub_long_branch_any_thumb_pic
		   /* V4T stub.  */
		   : arm_stub_long_branch_v4t_arm_thumb_pic)

		/* non-PIC stubs.  */
		: ((globals->use_blx)
		   /* V5T and above.  */
		   ? arm_stub_long_branch_any_any
		   /* V4T.  */
		   : arm_stub_long_branch_v4t_arm_thumb);
	    }
	}
      else
	{
	  /* Arm to arm.  */
	  if (branch_offset > ARM_MAX_FWD_BRANCH_OFFSET
	      || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET))
	    {
	      stub_type =
		(bfd_link_pic (info) | globals->pic_veneer)
		/* PIC stubs.  */
		? (r_type == R_ARM_TLS_CALL
		   /* TLS PIC Stub.  */
		   ? arm_stub_long_branch_any_tls_pic
		   : (globals->nacl_p
		      ? arm_stub_long_branch_arm_nacl_pic
		      : arm_stub_long_branch_any_arm_pic))
		/* non-PIC stubs.  */
		: (globals->nacl_p
		   ? arm_stub_long_branch_arm_nacl
		   : arm_stub_long_branch_any_any);
	    }
	}
    }

  /* If a stub is needed, record the actual destination type.  */
  if (stub_type != arm_stub_none)
    *actual_branch_type = branch_type;

  return stub_type;
}
4498
4499 /* Build a name for an entry in the stub hash table. */
4500
4501 static char *
4502 elf32_arm_stub_name (const asection *input_section,
4503 const asection *sym_sec,
4504 const struct elf32_arm_link_hash_entry *hash,
4505 const Elf_Internal_Rela *rel,
4506 enum elf32_arm_stub_type stub_type)
4507 {
4508 char *stub_name;
4509 bfd_size_type len;
4510
4511 if (hash)
4512 {
4513 len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 8 + 1 + 2 + 1;
4514 stub_name = (char *) bfd_malloc (len);
4515 if (stub_name != NULL)
4516 sprintf (stub_name, "%08x_%s+%x_%d",
4517 input_section->id & 0xffffffff,
4518 hash->root.root.root.string,
4519 (int) rel->r_addend & 0xffffffff,
4520 (int) stub_type);
4521 }
4522 else
4523 {
4524 len = 8 + 1 + 8 + 1 + 8 + 1 + 8 + 1 + 2 + 1;
4525 stub_name = (char *) bfd_malloc (len);
4526 if (stub_name != NULL)
4527 sprintf (stub_name, "%08x_%x:%x+%x_%d",
4528 input_section->id & 0xffffffff,
4529 sym_sec->id & 0xffffffff,
4530 ELF32_R_TYPE (rel->r_info) == R_ARM_TLS_CALL
4531 || ELF32_R_TYPE (rel->r_info) == R_ARM_THM_TLS_CALL
4532 ? 0 : (int) ELF32_R_SYM (rel->r_info) & 0xffffffff,
4533 (int) rel->r_addend & 0xffffffff,
4534 (int) stub_type);
4535 }
4536
4537 return stub_name;
4538 }
4539
4540 /* Look up an entry in the stub hash. Stub entries are cached because
4541 creating the stub name takes a bit of time. */
4542
4543 static struct elf32_arm_stub_hash_entry *
4544 elf32_arm_get_stub_entry (const asection *input_section,
4545 const asection *sym_sec,
4546 struct elf_link_hash_entry *hash,
4547 const Elf_Internal_Rela *rel,
4548 struct elf32_arm_link_hash_table *htab,
4549 enum elf32_arm_stub_type stub_type)
4550 {
4551 struct elf32_arm_stub_hash_entry *stub_entry;
4552 struct elf32_arm_link_hash_entry *h = (struct elf32_arm_link_hash_entry *) hash;
4553 const asection *id_sec;
4554
4555 if ((input_section->flags & SEC_CODE) == 0)
4556 return NULL;
4557
4558 /* If this input section is part of a group of sections sharing one
4559 stub section, then use the id of the first section in the group.
4560 Stub names need to include a section id, as there may well be
4561 more than one stub used to reach say, printf, and we need to
4562 distinguish between them. */
4563 BFD_ASSERT (input_section->id <= htab->top_id);
4564 id_sec = htab->stub_group[input_section->id].link_sec;
4565
4566 if (h != NULL && h->stub_cache != NULL
4567 && h->stub_cache->h == h
4568 && h->stub_cache->id_sec == id_sec
4569 && h->stub_cache->stub_type == stub_type)
4570 {
4571 stub_entry = h->stub_cache;
4572 }
4573 else
4574 {
4575 char *stub_name;
4576
4577 stub_name = elf32_arm_stub_name (id_sec, sym_sec, h, rel, stub_type);
4578 if (stub_name == NULL)
4579 return NULL;
4580
4581 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table,
4582 stub_name, FALSE, FALSE);
4583 if (h != NULL)
4584 h->stub_cache = stub_entry;
4585
4586 free (stub_name);
4587 }
4588
4589 return stub_entry;
4590 }
4591
4592 /* Whether veneers of type STUB_TYPE require to be in a dedicated output
4593 section. */
4594
4595 static bfd_boolean
4596 arm_dedicated_stub_output_section_required (enum elf32_arm_stub_type stub_type)
4597 {
4598 if (stub_type >= max_stub_type)
4599 abort (); /* Should be unreachable. */
4600
4601 switch (stub_type)
4602 {
4603 case arm_stub_cmse_branch_thumb_only:
4604 return TRUE;
4605
4606 default:
4607 return FALSE;
4608 }
4609
4610 abort (); /* Should be unreachable. */
4611 }
4612
4613 /* Required alignment (as a power of 2) for the dedicated section holding
4614 veneers of type STUB_TYPE, or 0 if veneers of this type are interspersed
4615 with input sections. */
4616
4617 static int
4618 arm_dedicated_stub_output_section_required_alignment
4619 (enum elf32_arm_stub_type stub_type)
4620 {
4621 if (stub_type >= max_stub_type)
4622 abort (); /* Should be unreachable. */
4623
4624 switch (stub_type)
4625 {
4626 /* Vectors of Secure Gateway veneers must be aligned on 32byte
4627 boundary. */
4628 case arm_stub_cmse_branch_thumb_only:
4629 return 5;
4630
4631 default:
4632 BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
4633 return 0;
4634 }
4635
4636 abort (); /* Should be unreachable. */
4637 }
4638
4639 /* Name of the dedicated output section to put veneers of type STUB_TYPE, or
4640 NULL if veneers of this type are interspersed with input sections. */
4641
4642 static const char *
4643 arm_dedicated_stub_output_section_name (enum elf32_arm_stub_type stub_type)
4644 {
4645 if (stub_type >= max_stub_type)
4646 abort (); /* Should be unreachable. */
4647
4648 switch (stub_type)
4649 {
4650 case arm_stub_cmse_branch_thumb_only:
4651 return ".gnu.sgstubs";
4652
4653 default:
4654 BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
4655 return NULL;
4656 }
4657
4658 abort (); /* Should be unreachable. */
4659 }
4660
4661 /* If veneers of type STUB_TYPE should go in a dedicated output section,
4662 returns the address of the hash table field in HTAB holding a pointer to the
4663 corresponding input section. Otherwise, returns NULL. */
4664
4665 static asection **
4666 arm_dedicated_stub_input_section_ptr (struct elf32_arm_link_hash_table *htab,
4667 enum elf32_arm_stub_type stub_type)
4668 {
4669 if (stub_type >= max_stub_type)
4670 abort (); /* Should be unreachable. */
4671
4672 switch (stub_type)
4673 {
4674 case arm_stub_cmse_branch_thumb_only:
4675 return &htab->cmse_stub_sec;
4676
4677 default:
4678 BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
4679 return NULL;
4680 }
4681
4682 abort (); /* Should be unreachable. */
4683 }
4684
4685 /* Find or create a stub section to contain a stub of type STUB_TYPE. SECTION
4686 is the section that branch into veneer and can be NULL if stub should go in
4687 a dedicated output section. Returns a pointer to the stub section, and the
4688 section to which the stub section will be attached (in *LINK_SEC_P).
4689 LINK_SEC_P may be NULL. */
4690
/* Find or create a stub section to contain a stub of type STUB_TYPE.  SECTION
   is the section that branch into veneer and can be NULL if stub should go in
   a dedicated output section.  Returns a pointer to the stub section, and the
   section to which the stub section will be attached (in *LINK_SEC_P).
   LINK_SEC_P may be NULL.  Returns NULL (after reporting an error) on
   failure.  */

static asection *
elf32_arm_create_or_find_stub_sec (asection **link_sec_p, asection *section,
				   struct elf32_arm_link_hash_table *htab,
				   enum elf32_arm_stub_type stub_type)
{
  asection *link_sec, *out_sec, **stub_sec_p;
  const char *stub_sec_prefix;
  bfd_boolean dedicated_output_section =
    arm_dedicated_stub_output_section_required (stub_type);
  int align;

  if (dedicated_output_section)
    {
      /* Dedicated veneers (e.g. CMSE secure gateways) go in their own
	 named output section, which the linker script must place.  */
      bfd *output_bfd = htab->obfd;
      const char *out_sec_name =
	arm_dedicated_stub_output_section_name (stub_type);
      link_sec = NULL;
      stub_sec_p = arm_dedicated_stub_input_section_ptr (htab, stub_type);
      stub_sec_prefix = out_sec_name;
      align = arm_dedicated_stub_output_section_required_alignment (stub_type);
      out_sec = bfd_get_section_by_name (output_bfd, out_sec_name);
      if (out_sec == NULL)
	{
	  _bfd_error_handler (_("no address assigned to the veneers output "
				"section %s"), out_sec_name);
	  return NULL;
	}
    }
  else
    {
      /* Ordinary veneers go in the stub section of SECTION's stub
	 group; fall back to the group leader's stub section if this
	 section has none of its own yet.  */
      BFD_ASSERT (section->id <= htab->top_id);
      link_sec = htab->stub_group[section->id].link_sec;
      BFD_ASSERT (link_sec != NULL);
      stub_sec_p = &htab->stub_group[section->id].stub_sec;
      if (*stub_sec_p == NULL)
	stub_sec_p = &htab->stub_group[link_sec->id].stub_sec;
      stub_sec_prefix = link_sec->name;
      out_sec = link_sec->output_section;
      align = htab->nacl_p ? 4 : 3;
    }

  if (*stub_sec_p == NULL)
    {
      size_t namelen;
      bfd_size_type len;
      char *s_name;

      /* Stub section name is <prefix><STUB_SUFFIX>.  */
      namelen = strlen (stub_sec_prefix);
      len = namelen + sizeof (STUB_SUFFIX);
      s_name = (char *) bfd_alloc (htab->stub_bfd, len);
      if (s_name == NULL)
	return NULL;

      memcpy (s_name, stub_sec_prefix, namelen);
      memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
      *stub_sec_p = (*htab->add_stub_section) (s_name, out_sec, link_sec,
					       align);
      if (*stub_sec_p == NULL)
	return NULL;

      out_sec->flags |= SEC_ALLOC | SEC_LOAD | SEC_READONLY | SEC_CODE
			| SEC_HAS_CONTENTS | SEC_RELOC | SEC_IN_MEMORY
			| SEC_KEEP;
    }

  /* Record the stub section on SECTION's own group entry too, so the
     next lookup for this section hits directly.  */
  if (!dedicated_output_section)
    htab->stub_group[section->id].stub_sec = *stub_sec_p;

  if (link_sec_p)
    *link_sec_p = link_sec;

  return *stub_sec_p;
}
4764
4765 /* Add a new stub entry to the stub hash. Not all fields of the new
4766 stub entry are initialised. */
4767
4768 static struct elf32_arm_stub_hash_entry *
4769 elf32_arm_add_stub (const char *stub_name, asection *section,
4770 struct elf32_arm_link_hash_table *htab,
4771 enum elf32_arm_stub_type stub_type)
4772 {
4773 asection *link_sec;
4774 asection *stub_sec;
4775 struct elf32_arm_stub_hash_entry *stub_entry;
4776
4777 stub_sec = elf32_arm_create_or_find_stub_sec (&link_sec, section, htab,
4778 stub_type);
4779 if (stub_sec == NULL)
4780 return NULL;
4781
4782 /* Enter this entry into the linker stub hash table. */
4783 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
4784 TRUE, FALSE);
4785 if (stub_entry == NULL)
4786 {
4787 if (section == NULL)
4788 section = stub_sec;
4789 _bfd_error_handler (_("%pB: cannot create stub entry %s"),
4790 section->owner, stub_name);
4791 return NULL;
4792 }
4793
4794 stub_entry->stub_sec = stub_sec;
4795 stub_entry->stub_offset = (bfd_vma) -1;
4796 stub_entry->id_sec = link_sec;
4797
4798 return stub_entry;
4799 }
4800
4801 /* Store an Arm insn into an output section not processed by
4802 elf32_arm_write_section. */
4803
4804 static void
4805 put_arm_insn (struct elf32_arm_link_hash_table * htab,
4806 bfd * output_bfd, bfd_vma val, void * ptr)
4807 {
4808 if (htab->byteswap_code != bfd_little_endian (output_bfd))
4809 bfd_putl32 (val, ptr);
4810 else
4811 bfd_putb32 (val, ptr);
4812 }
4813
4814 /* Store a 16-bit Thumb insn into an output section not processed by
4815 elf32_arm_write_section. */
4816
4817 static void
4818 put_thumb_insn (struct elf32_arm_link_hash_table * htab,
4819 bfd * output_bfd, bfd_vma val, void * ptr)
4820 {
4821 if (htab->byteswap_code != bfd_little_endian (output_bfd))
4822 bfd_putl16 (val, ptr);
4823 else
4824 bfd_putb16 (val, ptr);
4825 }
4826
4827 /* Store a Thumb2 insn into an output section not processed by
4828 elf32_arm_write_section. */
4829
4830 static void
4831 put_thumb2_insn (struct elf32_arm_link_hash_table * htab,
4832 bfd * output_bfd, bfd_vma val, bfd_byte * ptr)
4833 {
4834 /* T2 instructions are 16-bit streamed. */
4835 if (htab->byteswap_code != bfd_little_endian (output_bfd))
4836 {
4837 bfd_putl16 ((val >> 16) & 0xffff, ptr);
4838 bfd_putl16 ((val & 0xffff), ptr + 2);
4839 }
4840 else
4841 {
4842 bfd_putb16 ((val >> 16) & 0xffff, ptr);
4843 bfd_putb16 ((val & 0xffff), ptr + 2);
4844 }
4845 }
4846
4847 /* If it's possible to change R_TYPE to a more efficient access
4848 model, return the new reloc type. */
4849
4850 static unsigned
4851 elf32_arm_tls_transition (struct bfd_link_info *info, int r_type,
4852 struct elf_link_hash_entry *h)
4853 {
4854 int is_local = (h == NULL);
4855
4856 if (bfd_link_pic (info)
4857 || (h && h->root.type == bfd_link_hash_undefweak))
4858 return r_type;
4859
4860 /* We do not support relaxations for Old TLS models. */
4861 switch (r_type)
4862 {
4863 case R_ARM_TLS_GOTDESC:
4864 case R_ARM_TLS_CALL:
4865 case R_ARM_THM_TLS_CALL:
4866 case R_ARM_TLS_DESCSEQ:
4867 case R_ARM_THM_TLS_DESCSEQ:
4868 return is_local ? R_ARM_TLS_LE32 : R_ARM_TLS_IE32;
4869 }
4870
4871 return r_type;
4872 }
4873
4874 static bfd_reloc_status_type elf32_arm_final_link_relocate
4875 (reloc_howto_type *, bfd *, bfd *, asection *, bfd_byte *,
4876 Elf_Internal_Rela *, bfd_vma, struct bfd_link_info *, asection *,
4877 const char *, unsigned char, enum arm_st_branch_type,
4878 struct elf_link_hash_entry *, bfd_boolean *, char **);
4879
/* Return the alignment required (in bytes: 2, 4 or 16) for a stub of
   type STUB_TYPE.  Aborts on an unknown stub type.  */

static unsigned int
arm_stub_required_alignment (enum elf32_arm_stub_type stub_type)
{
  switch (stub_type)
    {
    /* Cortex-A8 erratum veneers patching Thumb branches only need
       2-byte alignment (except the ARM-mode BLX veneer below).  */
    case arm_stub_a8_veneer_b_cond:
    case arm_stub_a8_veneer_b:
    case arm_stub_a8_veneer_bl:
      return 2;

    /* All ordinary long/short branch veneers are word-aligned.  */
    case arm_stub_long_branch_any_any:
    case arm_stub_long_branch_v4t_arm_thumb:
    case arm_stub_long_branch_thumb_only:
    case arm_stub_long_branch_thumb2_only:
    case arm_stub_long_branch_thumb2_only_pure:
    case arm_stub_long_branch_v4t_thumb_thumb:
    case arm_stub_long_branch_v4t_thumb_arm:
    case arm_stub_short_branch_v4t_thumb_arm:
    case arm_stub_long_branch_any_arm_pic:
    case arm_stub_long_branch_any_thumb_pic:
    case arm_stub_long_branch_v4t_thumb_thumb_pic:
    case arm_stub_long_branch_v4t_arm_thumb_pic:
    case arm_stub_long_branch_v4t_thumb_arm_pic:
    case arm_stub_long_branch_thumb_only_pic:
    case arm_stub_long_branch_any_tls_pic:
    case arm_stub_long_branch_v4t_thumb_tls_pic:
    case arm_stub_cmse_branch_thumb_only:
    case arm_stub_a8_veneer_blx:
      return 4;

    /* NaCl stubs use 16-byte bundles.  */
    case arm_stub_long_branch_arm_nacl:
    case arm_stub_long_branch_arm_nacl_pic:
      return 16;

    default:
      abort ();  /* Should be unreachable.  */
    }
}
4918
4919 /* Returns whether stubs of type STUB_TYPE take over the symbol they are
4920 veneering (TRUE) or have their own symbol (FALSE). */
4921
4922 static bfd_boolean
4923 arm_stub_sym_claimed (enum elf32_arm_stub_type stub_type)
4924 {
4925 if (stub_type >= max_stub_type)
4926 abort (); /* Should be unreachable. */
4927
4928 switch (stub_type)
4929 {
4930 case arm_stub_cmse_branch_thumb_only:
4931 return TRUE;
4932
4933 default:
4934 return FALSE;
4935 }
4936
4937 abort (); /* Should be unreachable. */
4938 }
4939
4940 /* Returns the padding needed for the dedicated section used stubs of type
4941 STUB_TYPE. */
4942
4943 static int
4944 arm_dedicated_stub_section_padding (enum elf32_arm_stub_type stub_type)
4945 {
4946 if (stub_type >= max_stub_type)
4947 abort (); /* Should be unreachable. */
4948
4949 switch (stub_type)
4950 {
4951 case arm_stub_cmse_branch_thumb_only:
4952 return 32;
4953
4954 default:
4955 return 0;
4956 }
4957
4958 abort (); /* Should be unreachable. */
4959 }
4960
4961 /* If veneers of type STUB_TYPE should go in a dedicated output section,
4962 returns the address of the hash table field in HTAB holding the offset at
4963 which new veneers should be layed out in the stub section. */
4964
4965 static bfd_vma*
4966 arm_new_stubs_start_offset_ptr (struct elf32_arm_link_hash_table *htab,
4967 enum elf32_arm_stub_type stub_type)
4968 {
4969 switch (stub_type)
4970 {
4971 case arm_stub_cmse_branch_thumb_only:
4972 return &htab->new_cmse_stub_offset;
4973
4974 default:
4975 BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
4976 return NULL;
4977 }
4978 }
4979
/* Build one stub.  Called via bfd_hash_traverse over the stub hash
   table; GEN_ENTRY is the stub hash entry and IN_ARG the bfd_link_info.
   Emits the stub's instruction template into its stub section and
   applies the template's relocations against the stub target.  Returns
   FALSE only if the ARM hash table is missing or the template contains
   an unknown entry type.  */

static bfd_boolean
arm_build_one_stub (struct bfd_hash_entry *gen_entry,
		    void * in_arg)
{
#define MAXRELOCS 3
  bfd_boolean removed_sg_veneer;
  struct elf32_arm_stub_hash_entry *stub_entry;
  struct elf32_arm_link_hash_table *globals;
  struct bfd_link_info *info;
  asection *stub_sec;
  bfd *stub_bfd;
  bfd_byte *loc;
  bfd_vma sym_value;
  int template_size;
  int size;
  const insn_sequence *template_sequence;
  int i;
  /* NOTE(review): per C aggregate-initialization rules the third
     element of each array below is zero-filled, not -1/0 as the two
     written initializers might suggest; unused entries are never read
     because NRELOCS bounds all accesses.  */
  int stub_reloc_idx[MAXRELOCS] = {-1, -1};
  int stub_reloc_offset[MAXRELOCS] = {0, 0};
  int nrelocs = 0;
  int just_allocated = 0;

  /* Massage our args to the form they really have.  */
  stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
  info = (struct bfd_link_info *) in_arg;

  globals = elf32_arm_hash_table (info);
  if (globals == NULL)
    return FALSE;

  stub_sec = stub_entry->stub_sec;

  /* The traversal runs twice; each pass handles only one alignment
     class (Cortex-A8 fixes vs. the rest).  */
  if ((globals->fix_cortex_a8 < 0)
      != (arm_stub_required_alignment (stub_entry->stub_type) == 2))
    /* We have to do less-strictly-aligned fixes last.  */
    return TRUE;

  /* Assign a slot at the end of section if none assigned yet.  */
  if (stub_entry->stub_offset == (bfd_vma) -1)
    {
      stub_entry->stub_offset = stub_sec->size;
      just_allocated = 1;
    }
  loc = stub_sec->contents + stub_entry->stub_offset;

  stub_bfd = stub_sec->owner;

  /* This is the address of the stub destination.  */
  sym_value = (stub_entry->target_value
	       + stub_entry->target_section->output_offset
	       + stub_entry->target_section->output_section->vma);

  template_sequence = stub_entry->stub_template;
  template_size = stub_entry->stub_template_size;

  /* Emit each template entry, recording which entries carry a
     relocation to apply afterwards.  */
  size = 0;
  for (i = 0; i < template_size; i++)
    {
      switch (template_sequence[i].type)
	{
	case THUMB16_TYPE:
	  {
	    bfd_vma data = (bfd_vma) template_sequence[i].data;
	    if (template_sequence[i].reloc_addend != 0)
	      {
		/* We've borrowed the reloc_addend field to mean we should
		   insert a condition code into this (Thumb-1 branch)
		   instruction.  See THUMB16_BCOND_INSN.  */
		BFD_ASSERT ((data & 0xff00) == 0xd000);
		data |= ((stub_entry->orig_insn >> 22) & 0xf) << 8;
	      }
	    bfd_put_16 (stub_bfd, data, loc + size);
	    size += 2;
	  }
	  break;

	case THUMB32_TYPE:
	  /* Thumb-2: two halfwords, most significant first.  */
	  bfd_put_16 (stub_bfd,
		      (template_sequence[i].data >> 16) & 0xffff,
		      loc + size);
	  bfd_put_16 (stub_bfd, template_sequence[i].data & 0xffff,
		      loc + size + 2);
	  if (template_sequence[i].r_type != R_ARM_NONE)
	    {
	      stub_reloc_idx[nrelocs] = i;
	      stub_reloc_offset[nrelocs++] = size;
	    }
	  size += 4;
	  break;

	case ARM_TYPE:
	  bfd_put_32 (stub_bfd, template_sequence[i].data,
		      loc + size);
	  /* Handle cases where the target is encoded within the
	     instruction.  */
	  if (template_sequence[i].r_type == R_ARM_JUMP24)
	    {
	      stub_reloc_idx[nrelocs] = i;
	      stub_reloc_offset[nrelocs++] = size;
	    }
	  size += 4;
	  break;

	case DATA_TYPE:
	  /* Literal pool entries always carry a relocation.  */
	  bfd_put_32 (stub_bfd, template_sequence[i].data, loc + size);
	  stub_reloc_idx[nrelocs] = i;
	  stub_reloc_offset[nrelocs++] = size;
	  size += 4;
	  break;

	default:
	  BFD_FAIL ();
	  return FALSE;
	}
    }

  if (just_allocated)
    stub_sec->size += size;

  /* Stub size has already been computed in arm_size_one_stub.  Check
     consistency.  */
  BFD_ASSERT (size == stub_entry->stub_size);

  /* Destination is Thumb.  Force bit 0 to 1 to reflect this.  */
  if (stub_entry->branch_type == ST_BRANCH_TO_THUMB)
    sym_value |= 1;

  /* Assume non empty slots have at least one and at most MAXRELOCS entries
     to relocate in each stub.  */
  removed_sg_veneer =
    (size == 0 && stub_entry->stub_type == arm_stub_cmse_branch_thumb_only);
  BFD_ASSERT (removed_sg_veneer || (nrelocs != 0 && nrelocs <= MAXRELOCS));

  /* Apply the recorded relocations against the stub target.  */
  for (i = 0; i < nrelocs; i++)
    {
      Elf_Internal_Rela rel;
      bfd_boolean unresolved_reloc;
      char *error_message;
      bfd_vma points_to =
	sym_value + template_sequence[stub_reloc_idx[i]].reloc_addend;

      rel.r_offset = stub_entry->stub_offset + stub_reloc_offset[i];
      rel.r_info = ELF32_R_INFO (0,
				 template_sequence[stub_reloc_idx[i]].r_type);
      rel.r_addend = 0;

      if (stub_entry->stub_type == arm_stub_a8_veneer_b_cond && i == 0)
	/* The first relocation in the elf32_arm_stub_a8_veneer_b_cond[]
	   template should refer back to the instruction after the original
	   branch.  We use target_section as Cortex-A8 erratum workaround stubs
	   are only generated when both source and target are in the same
	   section.  */
	points_to = stub_entry->target_section->output_section->vma
		    + stub_entry->target_section->output_offset
		    + stub_entry->source_value;

      elf32_arm_final_link_relocate (elf32_arm_howto_from_type
	  (template_sequence[stub_reloc_idx[i]].r_type),
	   stub_bfd, info->output_bfd, stub_sec, stub_sec->contents, &rel,
	   points_to, info, stub_entry->target_section, "", STT_FUNC,
	   stub_entry->branch_type,
	   (struct elf_link_hash_entry *) stub_entry->h, &unresolved_reloc,
	   &error_message);
    }

  return TRUE;
#undef MAXRELOCS
}
5148
5149 /* Calculate the template, template size and instruction size for a stub.
5150 Return value is the instruction size. */
5151
5152 static unsigned int
5153 find_stub_size_and_template (enum elf32_arm_stub_type stub_type,
5154 const insn_sequence **stub_template,
5155 int *stub_template_size)
5156 {
5157 const insn_sequence *template_sequence = NULL;
5158 int template_size = 0, i;
5159 unsigned int size;
5160
5161 template_sequence = stub_definitions[stub_type].template_sequence;
5162 if (stub_template)
5163 *stub_template = template_sequence;
5164
5165 template_size = stub_definitions[stub_type].template_size;
5166 if (stub_template_size)
5167 *stub_template_size = template_size;
5168
5169 size = 0;
5170 for (i = 0; i < template_size; i++)
5171 {
5172 switch (template_sequence[i].type)
5173 {
5174 case THUMB16_TYPE:
5175 size += 2;
5176 break;
5177
5178 case ARM_TYPE:
5179 case THUMB32_TYPE:
5180 case DATA_TYPE:
5181 size += 4;
5182 break;
5183
5184 default:
5185 BFD_FAIL ();
5186 return 0;
5187 }
5188 }
5189
5190 return size;
5191 }
5192
5193 /* As above, but don't actually build the stub. Just bump offset so
5194 we know stub section sizes. */
5195
5196 static bfd_boolean
5197 arm_size_one_stub (struct bfd_hash_entry *gen_entry,
5198 void *in_arg ATTRIBUTE_UNUSED)
5199 {
5200 struct elf32_arm_stub_hash_entry *stub_entry;
5201 const insn_sequence *template_sequence;
5202 int template_size, size;
5203
5204 /* Massage our args to the form they really have. */
5205 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
5206
5207 BFD_ASSERT((stub_entry->stub_type > arm_stub_none)
5208 && stub_entry->stub_type < ARRAY_SIZE(stub_definitions));
5209
5210 size = find_stub_size_and_template (stub_entry->stub_type, &template_sequence,
5211 &template_size);
5212
5213 /* Initialized to -1. Null size indicates an empty slot full of zeros. */
5214 if (stub_entry->stub_template_size)
5215 {
5216 stub_entry->stub_size = size;
5217 stub_entry->stub_template = template_sequence;
5218 stub_entry->stub_template_size = template_size;
5219 }
5220
5221 /* Already accounted for. */
5222 if (stub_entry->stub_offset != (bfd_vma) -1)
5223 return TRUE;
5224
5225 size = (size + 7) & ~7;
5226 stub_entry->stub_sec->size += size;
5227
5228 return TRUE;
5229 }
5230
5231 /* External entry points for sizing and building linker stubs. */
5232
/* Set up various things so that we can make a list of input sections
   for each output section included in the link.  Returns -1 on error,
   0 when no stubs will be needed, and 1 on success.

   On success this allocates HTAB->stub_group (indexed by input section
   id) and HTAB->input_list (indexed by output section index); entries
   of input_list for non-code output sections are set to
   bfd_abs_section_ptr so elf32_arm_next_input_section can skip them.  */

int
elf32_arm_setup_section_lists (bfd *output_bfd,
			       struct bfd_link_info *info)
{
  bfd *input_bfd;
  unsigned int bfd_count;
  unsigned int top_id, top_index;
  asection *section;
  asection **input_list, **list;
  bfd_size_type amt;
  struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);

  if (htab == NULL)
    return 0;
  if (! is_elf_hash_table (htab))
    return 0;

  /* Count the number of input BFDs and find the top input section id.  */
  for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0;
       input_bfd != NULL;
       input_bfd = input_bfd->link.next)
    {
      bfd_count += 1;
      for (section = input_bfd->sections;
	   section != NULL;
	   section = section->next)
	{
	  if (top_id < section->id)
	    top_id = section->id;
	}
    }
  htab->bfd_count = bfd_count;

  /* One stub_group entry per possible input section id.  */
  amt = sizeof (struct map_stub) * (top_id + 1);
  htab->stub_group = (struct map_stub *) bfd_zmalloc (amt);
  if (htab->stub_group == NULL)
    return -1;
  htab->top_id = top_id;

  /* We can't use output_bfd->section_count here to find the top output
     section index as some sections may have been removed, and
     _bfd_strip_section_from_output doesn't renumber the indices.  */
  for (section = output_bfd->sections, top_index = 0;
       section != NULL;
       section = section->next)
    {
      if (top_index < section->index)
	top_index = section->index;
    }

  htab->top_index = top_index;
  amt = sizeof (asection *) * (top_index + 1);
  input_list = (asection **) bfd_malloc (amt);
  htab->input_list = input_list;
  if (input_list == NULL)
    return -1;

  /* For sections we aren't interested in, mark their entries with a
     value we can check later.  */
  list = input_list + top_index;
  do
    *list = bfd_abs_section_ptr;
  while (list-- != input_list);

  /* Code output sections get a NULL marker: their lists are filled in
     by elf32_arm_next_input_section.  */
  for (section = output_bfd->sections;
       section != NULL;
       section = section->next)
    {
      if ((section->flags & SEC_CODE) != 0)
	input_list[section->index] = NULL;
    }

  return 1;
}
5311
/* The linker repeatedly calls this function for each input section,
   in the order that input sections are linked into output sections.
   Build lists of input sections to determine groupings between which
   we may insert linker stubs.

   Each code output section's input_list slot heads a singly-linked list
   of its input sections, threaded through the stub_group link_sec
   field (see the PREV_SEC macro below, consumed and #undef'd in
   group_sections).  */

void
elf32_arm_next_input_section (struct bfd_link_info *info,
			      asection *isec)
{
  struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);

  if (htab == NULL)
    return;

  if (isec->output_section->index <= htab->top_index)
    {
      asection **list = htab->input_list + isec->output_section->index;

      /* bfd_abs_section_ptr marks output sections we don't care about
	 (set in elf32_arm_setup_section_lists).  */
      if (*list != bfd_abs_section_ptr && (isec->flags & SEC_CODE) != 0)
	{
	  /* Steal the link_sec pointer for our list.  */
#define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
	  /* This happens to make the list in reverse order,
	     which we reverse later.  */
	  PREV_SEC (isec) = *list;
	  *list = isec;
	}
    }
}
5341
/* See whether we can group stub sections together.  Grouping stub
   sections may result in fewer stubs.  More importantly, we need to
   put all .init* and .fini* stubs at the end of the .init or
   .fini output sections respectively, because glibc splits the
   _init and _fini functions into multiple parts.  Putting a stub in
   the middle of a function is not a good idea.

   Walks the per-output-section input lists built by
   elf32_arm_next_input_section, assigning each input section's
   stub_group link_sec to the section that will host its stubs.
   STUB_GROUP_SIZE bounds the span a single stub section may serve;
   when STUBS_ALWAYS_AFTER_BRANCH is false, sections shortly after the
   stub section may also join the group.  Frees htab->input_list when
   done.  */

static void
group_sections (struct elf32_arm_link_hash_table *htab,
		bfd_size_type stub_group_size,
		bfd_boolean stubs_always_after_branch)
{
  asection **list = htab->input_list;

  do
    {
      asection *tail = *list;
      asection *head;

      /* Entry marked as "not interesting" by setup_section_lists.  */
      if (tail == bfd_abs_section_ptr)
	continue;

      /* Reverse the list: we must avoid placing stubs at the
	 beginning of the section because the beginning of the text
	 section may be required for an interrupt vector in bare metal
	 code.  */
#define NEXT_SEC PREV_SEC
      head = NULL;
      while (tail != NULL)
	{
	  /* Pop from tail.  */
	  asection *item = tail;
	  tail = PREV_SEC (item);

	  /* Push on head.  */
	  NEXT_SEC (item) = head;
	  head = item;
	}

      while (head != NULL)
	{
	  asection *curr;
	  asection *next;
	  bfd_vma stub_group_start = head->output_offset;
	  bfd_vma end_of_next;

	  /* Extend the group forward as far as stub_group_size
	     allows; CURR ends up as the last member and will host
	     the group's stubs.  */
	  curr = head;
	  while (NEXT_SEC (curr) != NULL)
	    {
	      next = NEXT_SEC (curr);
	      end_of_next = next->output_offset + next->size;
	      if (end_of_next - stub_group_start >= stub_group_size)
		/* End of NEXT is too far from start, so stop.  */
		break;
	      /* Add NEXT to the group.  */
	      curr = next;
	    }

	  /* OK, the size from the start to the start of CURR is less
	     than stub_group_size and thus can be handled by one stub
	     section.  (Or the head section is itself larger than
	     stub_group_size, in which case we may be toast.)
	     We should really be keeping track of the total size of
	     stubs added here, as stubs contribute to the final output
	     section size.  */
	  do
	    {
	      next = NEXT_SEC (head);
	      /* Set up this stub group.  */
	      htab->stub_group[head->id].link_sec = curr;
	    }
	  while (head != curr && (head = next) != NULL);

	  /* But wait, there's more!  Input sections up to stub_group_size
	     bytes after the stub section can be handled by it too.  */
	  if (!stubs_always_after_branch)
	    {
	      stub_group_start = curr->output_offset + curr->size;

	      while (next != NULL)
		{
		  end_of_next = next->output_offset + next->size;
		  if (end_of_next - stub_group_start >= stub_group_size)
		    /* End of NEXT is too far from stubs, so stop.  */
		    break;
		  /* Add NEXT to the stub group.  */
		  head = next;
		  next = NEXT_SEC (head);
		  htab->stub_group[head->id].link_sec = curr;
		}
	    }
	  head = next;
	}
    }
  while (list++ != htab->input_list + htab->top_index);

  free (htab->input_list);
#undef PREV_SEC
#undef NEXT_SEC
}
5442
5443 /* Comparison function for sorting/searching relocations relating to Cortex-A8
5444 erratum fix. */
5445
5446 static int
5447 a8_reloc_compare (const void *a, const void *b)
5448 {
5449 const struct a8_erratum_reloc *ra = (const struct a8_erratum_reloc *) a;
5450 const struct a8_erratum_reloc *rb = (const struct a8_erratum_reloc *) b;
5451
5452 if (ra->from < rb->from)
5453 return -1;
5454 else if (ra->from > rb->from)
5455 return 1;
5456 else
5457 return 0;
5458 }
5459
5460 static struct elf_link_hash_entry *find_thumb_glue (struct bfd_link_info *,
5461 const char *, char **);
5462
/* Helper function to scan code for sequences which might trigger the Cortex-A8
   branch/TLB erratum.  Fill in the table described by A8_FIXES_P,
   NUM_A8_FIXES_P, A8_FIX_TABLE_SIZE_P.  Returns true if an error occurs, false
   otherwise.

   A8_RELOCS (NUM_A8_RELOCS entries, sorted by source address for
   bsearch via a8_reloc_compare) describes branch relocations already
   seen in INPUT_BFD.  PREV_NUM_A8_FIXES is the fix count from the
   previous scan iteration, used to detect whether a stub can be
   reused; *STUB_CHANGED_P is set when it cannot.  */

static bfd_boolean
cortex_a8_erratum_scan (bfd *input_bfd,
			struct bfd_link_info *info,
			struct a8_erratum_fix **a8_fixes_p,
			unsigned int *num_a8_fixes_p,
			unsigned int *a8_fix_table_size_p,
			struct a8_erratum_reloc *a8_relocs,
			unsigned int num_a8_relocs,
			unsigned prev_num_a8_fixes,
			bfd_boolean *stub_changed_p)
{
  asection *section;
  struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
  struct a8_erratum_fix *a8_fixes = *a8_fixes_p;
  unsigned int num_a8_fixes = *num_a8_fixes_p;
  unsigned int a8_fix_table_size = *a8_fix_table_size_p;

  if (htab == NULL)
    return FALSE;

  for (section = input_bfd->sections;
       section != NULL;
       section = section->next)
    {
      bfd_byte *contents = NULL;
      struct _arm_elf_section_data *sec_data;
      unsigned int span;
      bfd_vma base_vma;

      /* Only scan executable code sections that make it into the
	 output.  */
      if (elf_section_type (section) != SHT_PROGBITS
	  || (elf_section_flags (section) & SHF_EXECINSTR) == 0
	  || (section->flags & SEC_EXCLUDE) != 0
	  || (section->sec_info_type == SEC_INFO_TYPE_JUST_SYMS)
	  || (section->output_section == bfd_abs_section_ptr))
	continue;

      base_vma = section->output_section->vma + section->output_offset;

      if (elf_section_data (section)->this_hdr.contents != NULL)
	contents = elf_section_data (section)->this_hdr.contents;
      else if (! bfd_malloc_and_get_section (input_bfd, section, &contents))
	return TRUE;

      sec_data = elf32_arm_section_data (section);

      /* Walk the mapping-symbol spans; only Thumb ('t') spans can
	 contain the offending 32-bit Thumb-2 branches.  */
      for (span = 0; span < sec_data->mapcount; span++)
	{
	  unsigned int span_start = sec_data->map[span].vma;
	  unsigned int span_end = (span == sec_data->mapcount - 1)
	    ? section->size : sec_data->map[span + 1].vma;
	  unsigned int i;
	  char span_type = sec_data->map[span].type;
	  bfd_boolean last_was_32bit = FALSE, last_was_branch = FALSE;

	  if (span_type != 't')
	    continue;

	  /* Span is entirely within a single 4KB region: skip scanning.  */
	  if (((base_vma + span_start) & ~0xfff)
	      == ((base_vma + span_end) & ~0xfff))
	    continue;

	  /* Scan for 32-bit Thumb-2 branches which span two 4K regions, where:

	     * The opcode is BLX.W, BL.W, B.W, Bcc.W
	     * The branch target is in the same 4KB region as the
	       first half of the branch.
	     * The instruction before the branch is a 32-bit
	       length non-branch instruction.  */
	  for (i = span_start; i < span_end;)
	    {
	      unsigned int insn = bfd_getl16 (&contents[i]);
	      bfd_boolean insn_32bit = FALSE, is_blx = FALSE, is_b = FALSE;
	      bfd_boolean is_bl = FALSE, is_bcc = FALSE, is_32bit_branch;

	      /* First halfword 0b111xx with xx != 00 marks a 32-bit
		 Thumb-2 encoding.  */
	      if ((insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000)
		insn_32bit = TRUE;

	      if (insn_32bit)
		{
		  /* Load the rest of the insn (in manual-friendly order).  */
		  insn = (insn << 16) | bfd_getl16 (&contents[i + 2]);

		  /* Encoding T4: B<c>.W.  */
		  is_b = (insn & 0xf800d000) == 0xf0009000;
		  /* Encoding T1: BL<c>.W.  */
		  is_bl = (insn & 0xf800d000) == 0xf000d000;
		  /* Encoding T2: BLX<c>.W.  */
		  is_blx = (insn & 0xf800d000) == 0xf000c000;
		  /* Encoding T3: B<c>.W (not permitted in IT block).  */
		  is_bcc = (insn & 0xf800d000) == 0xf0008000
			   && (insn & 0x07f00000) != 0x03800000;
		}

	      is_32bit_branch = is_b || is_bl || is_blx || is_bcc;

	      /* The erratum pattern: a 32-bit branch whose first
		 halfword sits at the last halfword of a 4KB page,
		 preceded by a 32-bit non-branch instruction.  */
	      if (((base_vma + i) & 0xfff) == 0xffe
		  && insn_32bit
		  && is_32bit_branch
		  && last_was_32bit
		  && ! last_was_branch)
		{
		  bfd_signed_vma offset = 0;
		  bfd_boolean force_target_arm = FALSE;
		  bfd_boolean force_target_thumb = FALSE;
		  bfd_vma target;
		  enum elf32_arm_stub_type stub_type = arm_stub_none;
		  struct a8_erratum_reloc key, *found;
		  bfd_boolean use_plt = FALSE;

		  key.from = base_vma + i;
		  found = (struct a8_erratum_reloc *)
		      bsearch (&key, a8_relocs, num_a8_relocs,
			       sizeof (struct a8_erratum_reloc),
			       &a8_reloc_compare);

		  if (found)
		    {
		      char *error_message = NULL;
		      struct elf_link_hash_entry *entry;

		      /* We don't care about the error returned from this
			 function, only if there is glue or not.  */
		      entry = find_thumb_glue (info, found->sym_name,
					       &error_message);

		      if (entry)
			found->non_a8_stub = TRUE;

		      /* Keep a simpler condition, for the sake of clarity.  */
		      if (htab->root.splt != NULL && found->hash != NULL
			  && found->hash->root.plt.offset != (bfd_vma) -1)
			use_plt = TRUE;

		      if (found->r_type == R_ARM_THM_CALL)
			{
			  if (found->branch_type == ST_BRANCH_TO_ARM
			      || use_plt)
			    force_target_arm = TRUE;
			  else
			    force_target_thumb = TRUE;
			}
		    }

		  /* Check if we have an offending branch instruction.  */

		  if (found && found->non_a8_stub)
		    /* We've already made a stub for this instruction, e.g.
		       it's a long branch or a Thumb->ARM stub.  Assume that
		       stub will suffice to work around the A8 erratum (see
		       setting of always_after_branch above).  */
		    ;
		  else if (is_bcc)
		    {
		      /* Decode the Bcc.W (encoding T3) branch offset.  */
		      offset = (insn & 0x7ff) << 1;
		      offset |= (insn & 0x3f0000) >> 4;
		      offset |= (insn & 0x2000) ? 0x40000 : 0;
		      offset |= (insn & 0x800) ? 0x80000 : 0;
		      offset |= (insn & 0x4000000) ? 0x100000 : 0;
		      if (offset & 0x100000)
			offset |= ~ ((bfd_signed_vma) 0xfffff);
		      stub_type = arm_stub_a8_veneer_b_cond;
		    }
		  else if (is_b || is_bl || is_blx)
		    {
		      /* Decode the B.W/BL/BLX (encodings T4/T1/T2)
			 branch offset; I1/I2 are derived from J1/J2
			 XORed with the sign bit S.  */
		      int s = (insn & 0x4000000) != 0;
		      int j1 = (insn & 0x2000) != 0;
		      int j2 = (insn & 0x800) != 0;
		      int i1 = !(j1 ^ s);
		      int i2 = !(j2 ^ s);

		      offset = (insn & 0x7ff) << 1;
		      offset |= (insn & 0x3ff0000) >> 4;
		      offset |= i2 << 22;
		      offset |= i1 << 23;
		      offset |= s << 24;
		      if (offset & 0x1000000)
			offset |= ~ ((bfd_signed_vma) 0xffffff);

		      if (is_blx)
			offset &= ~ ((bfd_signed_vma) 3);

		      stub_type = is_blx ? arm_stub_a8_veneer_blx :
			is_bl ? arm_stub_a8_veneer_bl : arm_stub_a8_veneer_b;
		    }

		  if (stub_type != arm_stub_none)
		    {
		      bfd_vma pc_for_insn = base_vma + i + 4;

		      /* The original instruction is a BL, but the target is
			 an ARM instruction.  If we were not making a stub,
			 the BL would have been converted to a BLX.  Use the
			 BLX stub instead in that case.  */
		      if (htab->use_blx && force_target_arm
			  && stub_type == arm_stub_a8_veneer_bl)
			{
			  stub_type = arm_stub_a8_veneer_blx;
			  is_blx = TRUE;
			  is_bl = FALSE;
			}
		      /* Conversely, if the original instruction was
			 BLX but the target is Thumb mode, use the BL
			 stub.  */
		      else if (force_target_thumb
			       && stub_type == arm_stub_a8_veneer_blx)
			{
			  stub_type = arm_stub_a8_veneer_bl;
			  is_blx = FALSE;
			  is_bl = TRUE;
			}

		      if (is_blx)
			pc_for_insn &= ~ ((bfd_vma) 3);

		      /* If we found a relocation, use the proper destination,
			 not the offset in the (unrelocated) instruction.
			 Note this is always done if we switched the stub type
			 above.  */
		      if (found)
			offset =
			  (bfd_signed_vma) (found->destination - pc_for_insn);

		      /* If the stub will use a Thumb-mode branch to a
			 PLT target, redirect it to the preceding Thumb
			 entry point.  */
		      if (stub_type != arm_stub_a8_veneer_blx && use_plt)
			offset -= PLT_THUMB_STUB_SIZE;

		      target = pc_for_insn + offset;

		      /* The BLX stub is ARM-mode code.  Adjust the offset to
			 take the different PC value (+8 instead of +4) into
			 account.  */
		      if (stub_type == arm_stub_a8_veneer_blx)
			offset += 4;

		      /* A fix is only needed when the target lies in the
			 same 4KB page as the branch's first halfword.  */
		      if (((base_vma + i) & ~0xfff) == (target & ~0xfff))
			{
			  char *stub_name = NULL;

			  if (num_a8_fixes == a8_fix_table_size)
			    {
			      a8_fix_table_size *= 2;
			      /* NOTE(review): bfd_realloc's result is
				 neither NULL-checked nor kept separate
				 from the old pointer; on allocation
				 failure the table is lost and the
				 writes below dereference NULL.  */
			      a8_fixes = (struct a8_erratum_fix *)
			          bfd_realloc (a8_fixes,
					       sizeof (struct a8_erratum_fix)
					       * a8_fix_table_size);
			    }

			  if (num_a8_fixes < prev_num_a8_fixes)
			    {
			      /* If we're doing a subsequent scan,
				 check if we've found the same fix as
				 before, and try and reuse the stub
				 name.  */
			      stub_name = a8_fixes[num_a8_fixes].stub_name;
			      if ((a8_fixes[num_a8_fixes].section != section)
				  || (a8_fixes[num_a8_fixes].offset != i))
				{
				  free (stub_name);
				  stub_name = NULL;
				  *stub_changed_p = TRUE;
				}
			    }

			  if (!stub_name)
			    {
			      /* "%x:%x" of two 32-bit values needs at
				 most 8+1+8 characters plus the NUL.  */
			      stub_name = (char *) bfd_malloc (8 + 1 + 8 + 1);
			      if (stub_name != NULL)
				sprintf (stub_name, "%x:%x", section->id, i);
			    }

			  a8_fixes[num_a8_fixes].input_bfd = input_bfd;
			  a8_fixes[num_a8_fixes].section = section;
			  a8_fixes[num_a8_fixes].offset = i;
			  a8_fixes[num_a8_fixes].target_offset =
			    target - base_vma;
			  a8_fixes[num_a8_fixes].orig_insn = insn;
			  a8_fixes[num_a8_fixes].stub_name = stub_name;
			  a8_fixes[num_a8_fixes].stub_type = stub_type;
			  a8_fixes[num_a8_fixes].branch_type =
			    is_blx ? ST_BRANCH_TO_ARM : ST_BRANCH_TO_THUMB;

			  num_a8_fixes++;
			}
		    }
		}

	      i += insn_32bit ? 4 : 2;
	      last_was_32bit = insn_32bit;
	      last_was_branch = is_32bit_branch;
	    }
	}

      /* Only free buffers we allocated ourselves.  */
      if (elf_section_data (section)->this_hdr.contents == NULL)
	free (contents);
    }

  *a8_fixes_p = a8_fixes;
  *num_a8_fixes_p = num_a8_fixes;
  *a8_fix_table_size_p = a8_fix_table_size;

  return FALSE;
}
5773
/* Create or update a stub entry depending on whether the stub can already be
   found in HTAB.  The stub is identified by:
   - its type STUB_TYPE
   - its source branch (note that several can share the same stub) whose
     section and relocation (if any) are given by SECTION and IRELA
     respectively
   - its target symbol whose input section, hash, name, value and branch type
     are given in SYM_SEC, HASH, SYM_NAME, SYM_VALUE and BRANCH_TYPE
     respectively

   If found, the value of the stub's target symbol is updated from SYM_VALUE
   and *NEW_STUB is set to FALSE.  Otherwise, *NEW_STUB is set to
   TRUE and the stub entry is initialized.

   Returns the stub that was created or updated, or NULL if an error
   occurred.

   Ownership note: for symbol-claiming stubs (CMSE) SYM_NAME is used
   directly as both the lookup key and the output name; otherwise a
   fresh name is allocated here and freed again on every path that does
   not store it in the hash table.  */

static struct elf32_arm_stub_hash_entry *
elf32_arm_create_stub (struct elf32_arm_link_hash_table *htab,
		       enum elf32_arm_stub_type stub_type, asection *section,
		       Elf_Internal_Rela *irela, asection *sym_sec,
		       struct elf32_arm_link_hash_entry *hash, char *sym_name,
		       bfd_vma sym_value, enum arm_st_branch_type branch_type,
		       bfd_boolean *new_stub)
{
  const asection *id_sec;
  char *stub_name;
  struct elf32_arm_stub_hash_entry *stub_entry;
  unsigned int r_type;
  bfd_boolean sym_claimed = arm_stub_sym_claimed (stub_type);

  BFD_ASSERT (stub_type != arm_stub_none);
  *new_stub = FALSE;

  if (sym_claimed)
    stub_name = sym_name;
  else
    {
      BFD_ASSERT (irela);
      BFD_ASSERT (section);
      BFD_ASSERT (section->id <= htab->top_id);

      /* Support for grouping stub sections.  */
      id_sec = htab->stub_group[section->id].link_sec;

      /* Get the name of this stub.  */
      stub_name = elf32_arm_stub_name (id_sec, sym_sec, hash, irela,
				       stub_type);
      if (!stub_name)
	return NULL;
    }

  stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name, FALSE,
				     FALSE);
  /* The proper stub has already been created, just update its value.  */
  if (stub_entry != NULL)
    {
      if (!sym_claimed)
	free (stub_name);
      stub_entry->target_value = sym_value;
      return stub_entry;
    }

  stub_entry = elf32_arm_add_stub (stub_name, section, htab, stub_type);
  if (stub_entry == NULL)
    {
      if (!sym_claimed)
	free (stub_name);
      return NULL;
    }

  stub_entry->target_value = sym_value;
  stub_entry->target_section = sym_sec;
  stub_entry->stub_type = stub_type;
  stub_entry->h = hash;
  stub_entry->branch_type = branch_type;

  if (sym_claimed)
    stub_entry->output_name = sym_name;
  else
    {
      if (sym_name == NULL)
	sym_name = "unnamed";
      /* THUMB2ARM_GLUE_ENTRY_NAME is the longest of the three name
	 templates below, so its size bounds all of them.  */
      stub_entry->output_name = (char *)
	  bfd_alloc (htab->stub_bfd, sizeof (THUMB2ARM_GLUE_ENTRY_NAME)
				     + strlen (sym_name));
      if (stub_entry->output_name == NULL)
	{
	  free (stub_name);
	  return NULL;
	}

      /* For historical reasons, use the existing names for ARM-to-Thumb and
	 Thumb-to-ARM stubs.  */
      r_type = ELF32_R_TYPE (irela->r_info);
      if ((r_type == (unsigned int) R_ARM_THM_CALL
	   || r_type == (unsigned int) R_ARM_THM_JUMP24
	   || r_type == (unsigned int) R_ARM_THM_JUMP19)
	  && branch_type == ST_BRANCH_TO_ARM)
	sprintf (stub_entry->output_name, THUMB2ARM_GLUE_ENTRY_NAME, sym_name);
      else if ((r_type == (unsigned int) R_ARM_CALL
		|| r_type == (unsigned int) R_ARM_JUMP24)
	       && branch_type == ST_BRANCH_TO_THUMB)
	sprintf (stub_entry->output_name, ARM2THUMB_GLUE_ENTRY_NAME, sym_name);
      else
	sprintf (stub_entry->output_name, STUB_ENTRY_NAME, sym_name);
    }

  *new_stub = TRUE;
  return stub_entry;
}
5885
5886 /* Scan symbols in INPUT_BFD to identify secure entry functions needing a
5887 gateway veneer to transition from non secure to secure state and create them
5888 accordingly.
5889
5890 "ARMv8-M Security Extensions: Requirements on Development Tools" document
5891 defines the conditions that govern Secure Gateway veneer creation for a
5892 given symbol <SYM> as follows:
5893 - it has function type
5894 - it has non local binding
5895 - a symbol named __acle_se_<SYM> (called special symbol) exists with the
5896 same type, binding and value as <SYM> (called normal symbol).
5897 An entry function can handle secure state transition itself in which case
5898 its special symbol would have a different value from the normal symbol.
5899
5900 OUT_ATTR gives the output attributes, SYM_HASHES the symbol index to hash
5901 entry mapping while HTAB gives the name to hash entry mapping.
   *CMSE_STUB_CREATED is increased by the number of secure gateway veneers
   created.
5904
5905 The return value gives whether a stub failed to be allocated. */
5906
static bfd_boolean
cmse_scan (bfd *input_bfd, struct elf32_arm_link_hash_table *htab,
	   obj_attribute *out_attr, struct elf_link_hash_entry **sym_hashes,
	   int *cmse_stub_created)
{
  const struct elf_backend_data *bed;
  Elf_Internal_Shdr *symtab_hdr;
  unsigned i, j, sym_count, ext_start;
  Elf_Internal_Sym *cmse_sym, *local_syms;
  struct elf32_arm_link_hash_entry *hash, *cmse_hash = NULL;
  enum arm_st_branch_type branch_type;
  char *sym_name, *lsym_name;
  bfd_vma sym_value;
  asection *section;
  struct elf32_arm_stub_hash_entry *stub_entry;
  bfd_boolean is_v8m, new_stub, cmse_invalid, ret = TRUE;

  bed = get_elf_backend_data (input_bfd);
  symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
  /* sh_size / sizeof_sym gives the total number of symbols; sh_info is the
     index of the first global symbol, so indices [0, ext_start) are the
     local symbols.  */
  sym_count = symtab_hdr->sh_size / bed->s->sizeof_sym;
  ext_start = symtab_hdr->sh_info;
  /* Secure gateway veneers are only valid for M profile, ARMv8-M or
     later (see the error reported below otherwise).  */
  is_v8m = (out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V8M_BASE
	    && out_attr[Tag_CPU_arch_profile].i == 'M');

  /* Reuse the cached symbol table when available; otherwise read the local
     symbols in.  Freshly read symbols are freed before returning (see end
     of function).  */
  local_syms = (Elf_Internal_Sym *) symtab_hdr->contents;
  if (local_syms == NULL)
    local_syms = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
				       symtab_hdr->sh_info, 0, NULL, NULL,
				       NULL);
  if (symtab_hdr->sh_info && local_syms == NULL)
    return FALSE;

  /* Scan symbols.  */
  for (i = 0; i < sym_count; i++)
    {
      cmse_invalid = FALSE;

      if (i < ext_start)
	{
	  cmse_sym = &local_syms[i];
	  /* Not a special symbol.  */
	  if (!ARM_GET_SYM_CMSE_SPCL (cmse_sym->st_target_internal))
	    continue;
	  sym_name = bfd_elf_string_from_elf_section (input_bfd,
						      symtab_hdr->sh_link,
						      cmse_sym->st_name);
	  /* Special symbol with local binding: always invalid, but keep
	     going so that the error is reported below with its name.  */
	  cmse_invalid = TRUE;
	}
      else
	{
	  cmse_hash = elf32_arm_hash_entry (sym_hashes[i - ext_start]);
	  sym_name = (char *) cmse_hash->root.root.root.string;

	  /* Not a special symbol.  */
	  if (!ARM_GET_SYM_CMSE_SPCL (cmse_hash->root.target_internal))
	    continue;

	  /* Special symbol has incorrect binding or type.  */
	  if ((cmse_hash->root.root.type != bfd_link_hash_defined
	       && cmse_hash->root.root.type != bfd_link_hash_defweak)
	      || cmse_hash->root.type != STT_FUNC)
	    cmse_invalid = TRUE;
	}

      if (!is_v8m)
	{
	  _bfd_error_handler (_("%pB: special symbol `%s' only allowed for "
				"ARMv8-M architecture or later"),
			      input_bfd, sym_name);
	  is_v8m = TRUE; /* Avoid multiple warning.  */
	  ret = FALSE;
	}

      if (cmse_invalid)
	{
	  _bfd_error_handler (_("%pB: invalid special symbol `%s'; it must be"
				" a global or weak function symbol"),
			      input_bfd, sym_name);
	  ret = FALSE;
	  if (i < ext_start)
	    continue;
	}

      /* Strip the special symbol prefix (__acle_se_) to get the name of
	 the associated normal symbol.  */
      sym_name += strlen (CMSE_PREFIX);
      hash = (struct elf32_arm_link_hash_entry *)
	elf_link_hash_lookup (&(htab)->root, sym_name, FALSE, FALSE, TRUE);

      /* No associated normal symbol or it is neither global nor weak.  */
      if (!hash
	  || (hash->root.root.type != bfd_link_hash_defined
	      && hash->root.root.type != bfd_link_hash_defweak)
	  || hash->root.type != STT_FUNC)
	{
	  /* Initialize here to avoid warning about use of possibly
	     uninitialized variable.  */
	  j = 0;

	  if (!hash)
	    {
	      /* Searching for a normal symbol with local binding.  */
	      for (; j < ext_start; j++)
		{
		  lsym_name =
		    bfd_elf_string_from_elf_section (input_bfd,
						     symtab_hdr->sh_link,
						     local_syms[j].st_name);
		  if (!strcmp (sym_name, lsym_name))
		    break;
		}
	    }

	  if (hash || j < ext_start)
	    {
	      _bfd_error_handler
		(_("%pB: invalid standard symbol `%s'; it must be "
		   "a global or weak function symbol"),
		 input_bfd, sym_name);
	    }
	  else
	    _bfd_error_handler
	      (_("%pB: absent standard symbol `%s'"), input_bfd, sym_name);
	  ret = FALSE;
	  if (!hash)
	    continue;
	}

      sym_value = hash->root.root.u.def.value;
      section = hash->root.root.u.def.section;

      /* Only global special symbols reach this point (locals continue in
	 the cmse_invalid check above), so cmse_hash is necessarily set.  */
      if (cmse_hash->root.root.u.def.section != section)
	{
	  _bfd_error_handler
	    (_("%pB: `%s' and its special symbol are in different sections"),
	     input_bfd, sym_name);
	  ret = FALSE;
	}
      if (cmse_hash->root.root.u.def.value != sym_value)
	continue; /* Ignore: could be an entry function starting with SG.  */

      /* If this section is a link-once section that will be discarded, then
	 don't create any stubs.  */
      if (section->output_section == NULL)
	{
	  _bfd_error_handler
	    (_("%pB: entry function `%s' not output"), input_bfd, sym_name);
	  continue;
	}

      if (hash->root.size == 0)
	{
	  _bfd_error_handler
	    (_("%pB: entry function `%s' is empty"), input_bfd, sym_name);
	  ret = FALSE;
	}

      /* Once an error has been seen, keep diagnosing remaining symbols but
	 do not create any further stubs.  */
      if (!ret)
	continue;
      branch_type = ARM_GET_SYM_BRANCH_TYPE (hash->root.target_internal);
      stub_entry
	= elf32_arm_create_stub (htab, arm_stub_cmse_branch_thumb_only,
				 NULL, NULL, section, hash, sym_name,
				 sym_value, branch_type, &new_stub);

      if (stub_entry == NULL)
	ret = FALSE;
      else
	{
	  BFD_ASSERT (new_stub);
	  (*cmse_stub_created)++;
	}
    }

  /* Only free the symbols if we read them in ourselves above.  */
  if (!symtab_hdr->contents)
    free (local_syms);
  return ret;
}
6084
6085 /* Return TRUE iff a symbol identified by its linker HASH entry is a secure
6086 code entry function, ie can be called from non secure code without using a
6087 veneer. */
6088
6089 static bfd_boolean
6090 cmse_entry_fct_p (struct elf32_arm_link_hash_entry *hash)
6091 {
6092 bfd_byte contents[4];
6093 uint32_t first_insn;
6094 asection *section;
6095 file_ptr offset;
6096 bfd *abfd;
6097
6098 /* Defined symbol of function type. */
6099 if (hash->root.root.type != bfd_link_hash_defined
6100 && hash->root.root.type != bfd_link_hash_defweak)
6101 return FALSE;
6102 if (hash->root.type != STT_FUNC)
6103 return FALSE;
6104
6105 /* Read first instruction. */
6106 section = hash->root.root.u.def.section;
6107 abfd = section->owner;
6108 offset = hash->root.root.u.def.value - section->vma;
6109 if (!bfd_get_section_contents (abfd, section, contents, offset,
6110 sizeof (contents)))
6111 return FALSE;
6112
6113 first_insn = bfd_get_32 (abfd, contents);
6114
6115 /* Starts by SG instruction. */
6116 return first_insn == 0xe97fe97f;
6117 }
6118
/* Output the name (in symbol table) of the veneer GEN_ENTRY if it is a new
   secure gateway veneer (i.e. the veneer was not in the input import library)
   and there is no output import library (GEN_INFO->out_implib_bfd is NULL).  */
6122
6123 static bfd_boolean
6124 arm_list_new_cmse_stub (struct bfd_hash_entry *gen_entry, void *gen_info)
6125 {
6126 struct elf32_arm_stub_hash_entry *stub_entry;
6127 struct bfd_link_info *info;
6128
6129 /* Massage our args to the form they really have. */
6130 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
6131 info = (struct bfd_link_info *) gen_info;
6132
6133 if (info->out_implib_bfd)
6134 return TRUE;
6135
6136 if (stub_entry->stub_type != arm_stub_cmse_branch_thumb_only)
6137 return TRUE;
6138
6139 if (stub_entry->stub_offset == (bfd_vma) -1)
6140 _bfd_error_handler (" %s", stub_entry->output_name);
6141
6142 return TRUE;
6143 }
6144
/* Set offset of each secure gateway veneer so that its address remains
   identical to the one in the input import library referred to by
   HTAB->in_implib_bfd.  A warning is issued for veneers that disappeared
   (present in the input import library but absent from the executable being
   linked) or if new veneers appeared and there is no output import library
   (INFO->out_implib_bfd is NULL and *CMSE_STUB_CREATED is bigger than the
   number of secure gateway veneers found in the input import library).

   The function returns whether an error occurred.  If no error occurred,
   *CMSE_STUB_CREATED gives the number of SG veneers created by both cmse_scan
   and this function and HTAB->new_cmse_stub_offset is set past the biggest
   veneer offset observed, for new veneers to be laid out after.  */
6157
6158 static bfd_boolean
6159 set_cmse_veneer_addr_from_implib (struct bfd_link_info *info,
6160 struct elf32_arm_link_hash_table *htab,
6161 int *cmse_stub_created)
6162 {
6163 long symsize;
6164 char *sym_name;
6165 flagword flags;
6166 long i, symcount;
6167 bfd *in_implib_bfd;
6168 asection *stub_out_sec;
6169 bfd_boolean ret = TRUE;
6170 Elf_Internal_Sym *intsym;
6171 const char *out_sec_name;
6172 bfd_size_type cmse_stub_size;
6173 asymbol **sympp = NULL, *sym;
6174 struct elf32_arm_link_hash_entry *hash;
6175 const insn_sequence *cmse_stub_template;
6176 struct elf32_arm_stub_hash_entry *stub_entry;
6177 int cmse_stub_template_size, new_cmse_stubs_created = *cmse_stub_created;
6178 bfd_vma veneer_value, stub_offset, next_cmse_stub_offset;
6179 bfd_vma cmse_stub_array_start = (bfd_vma) -1, cmse_stub_sec_vma = 0;
6180
6181 /* No input secure gateway import library. */
6182 if (!htab->in_implib_bfd)
6183 return TRUE;
6184
6185 in_implib_bfd = htab->in_implib_bfd;
6186 if (!htab->cmse_implib)
6187 {
6188 _bfd_error_handler (_("%pB: --in-implib only supported for Secure "
6189 "Gateway import libraries"), in_implib_bfd);
6190 return FALSE;
6191 }
6192
6193 /* Get symbol table size. */
6194 symsize = bfd_get_symtab_upper_bound (in_implib_bfd);
6195 if (symsize < 0)
6196 return FALSE;
6197
6198 /* Read in the input secure gateway import library's symbol table. */
6199 sympp = (asymbol **) xmalloc (symsize);
6200 symcount = bfd_canonicalize_symtab (in_implib_bfd, sympp);
6201 if (symcount < 0)
6202 {
6203 ret = FALSE;
6204 goto free_sym_buf;
6205 }
6206
6207 htab->new_cmse_stub_offset = 0;
6208 cmse_stub_size =
6209 find_stub_size_and_template (arm_stub_cmse_branch_thumb_only,
6210 &cmse_stub_template,
6211 &cmse_stub_template_size);
6212 out_sec_name =
6213 arm_dedicated_stub_output_section_name (arm_stub_cmse_branch_thumb_only);
6214 stub_out_sec =
6215 bfd_get_section_by_name (htab->obfd, out_sec_name);
6216 if (stub_out_sec != NULL)
6217 cmse_stub_sec_vma = stub_out_sec->vma;
6218
6219 /* Set addresses of veneers mentionned in input secure gateway import
6220 library's symbol table. */
6221 for (i = 0; i < symcount; i++)
6222 {
6223 sym = sympp[i];
6224 flags = sym->flags;
6225 sym_name = (char *) bfd_asymbol_name (sym);
6226 intsym = &((elf_symbol_type *) sym)->internal_elf_sym;
6227
6228 if (sym->section != bfd_abs_section_ptr
6229 || !(flags & (BSF_GLOBAL | BSF_WEAK))
6230 || (flags & BSF_FUNCTION) != BSF_FUNCTION
6231 || (ARM_GET_SYM_BRANCH_TYPE (intsym->st_target_internal)
6232 != ST_BRANCH_TO_THUMB))
6233 {
6234 _bfd_error_handler (_("%pB: invalid import library entry: `%s'; "
6235 "symbol should be absolute, global and "
6236 "refer to Thumb functions"),
6237 in_implib_bfd, sym_name);
6238 ret = FALSE;
6239 continue;
6240 }
6241
6242 veneer_value = bfd_asymbol_value (sym);
6243 stub_offset = veneer_value - cmse_stub_sec_vma;
6244 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, sym_name,
6245 FALSE, FALSE);
6246 hash = (struct elf32_arm_link_hash_entry *)
6247 elf_link_hash_lookup (&(htab)->root, sym_name, FALSE, FALSE, TRUE);
6248
6249 /* Stub entry should have been created by cmse_scan or the symbol be of
6250 a secure function callable from non secure code. */
6251 if (!stub_entry && !hash)
6252 {
6253 bfd_boolean new_stub;
6254
6255 _bfd_error_handler
6256 (_("entry function `%s' disappeared from secure code"), sym_name);
6257 hash = (struct elf32_arm_link_hash_entry *)
6258 elf_link_hash_lookup (&(htab)->root, sym_name, TRUE, TRUE, TRUE);
6259 stub_entry
6260 = elf32_arm_create_stub (htab, arm_stub_cmse_branch_thumb_only,
6261 NULL, NULL, bfd_abs_section_ptr, hash,
6262 sym_name, veneer_value,
6263 ST_BRANCH_TO_THUMB, &new_stub);
6264 if (stub_entry == NULL)
6265 ret = FALSE;
6266 else
6267 {
6268 BFD_ASSERT (new_stub);
6269 new_cmse_stubs_created++;
6270 (*cmse_stub_created)++;
6271 }
6272 stub_entry->stub_template_size = stub_entry->stub_size = 0;
6273 stub_entry->stub_offset = stub_offset;
6274 }
6275 /* Symbol found is not callable from non secure code. */
6276 else if (!stub_entry)
6277 {
6278 if (!cmse_entry_fct_p (hash))
6279 {
6280 _bfd_error_handler (_("`%s' refers to a non entry function"),
6281 sym_name);
6282 ret = FALSE;
6283 }
6284 continue;
6285 }
6286 else
6287 {
6288 /* Only stubs for SG veneers should have been created. */
6289 BFD_ASSERT (stub_entry->stub_type == arm_stub_cmse_branch_thumb_only);
6290
6291 /* Check visibility hasn't changed. */
6292 if (!!(flags & BSF_GLOBAL)
6293 != (hash->root.root.type == bfd_link_hash_defined))
6294 _bfd_error_handler
6295 (_("%pB: visibility of symbol `%s' has changed"), in_implib_bfd,
6296 sym_name);
6297
6298 stub_entry->stub_offset = stub_offset;
6299 }
6300
6301 /* Size should match that of a SG veneer. */
6302 if (intsym->st_size != cmse_stub_size)
6303 {
6304 _bfd_error_handler (_("%pB: incorrect size for symbol `%s'"),
6305 in_implib_bfd, sym_name);
6306 ret = FALSE;
6307 }
6308
6309 /* Previous veneer address is before current SG veneer section. */
6310 if (veneer_value < cmse_stub_sec_vma)
6311 {
6312 /* Avoid offset underflow. */
6313 if (stub_entry)
6314 stub_entry->stub_offset = 0;
6315 stub_offset = 0;
6316 ret = FALSE;
6317 }
6318
6319 /* Complain if stub offset not a multiple of stub size. */
6320 if (stub_offset % cmse_stub_size)
6321 {
6322 _bfd_error_handler
6323 (_("offset of veneer for entry function `%s' not a multiple of "
6324 "its size"), sym_name);
6325 ret = FALSE;
6326 }
6327
6328 if (!ret)
6329 continue;
6330
6331 new_cmse_stubs_created--;
6332 if (veneer_value < cmse_stub_array_start)
6333 cmse_stub_array_start = veneer_value;
6334 next_cmse_stub_offset = stub_offset + ((cmse_stub_size + 7) & ~7);
6335 if (next_cmse_stub_offset > htab->new_cmse_stub_offset)
6336 htab->new_cmse_stub_offset = next_cmse_stub_offset;
6337 }
6338
6339 if (!info->out_implib_bfd && new_cmse_stubs_created != 0)
6340 {
6341 BFD_ASSERT (new_cmse_stubs_created > 0);
6342 _bfd_error_handler
6343 (_("new entry function(s) introduced but no output import library "
6344 "specified:"));
6345 bfd_hash_traverse (&htab->stub_hash_table, arm_list_new_cmse_stub, info);
6346 }
6347
6348 if (cmse_stub_array_start != cmse_stub_sec_vma)
6349 {
6350 _bfd_error_handler
6351 (_("start address of `%s' is different from previous link"),
6352 out_sec_name);
6353 ret = FALSE;
6354 }
6355
6356 free_sym_buf:
6357 free (sympp);
6358 return ret;
6359 }
6360
6361 /* Determine and set the size of the stub section for a final link.
6362
6363 The basic idea here is to examine all the relocations looking for
6364 PC-relative calls to a target that is unreachable with a "bl"
6365 instruction. */
6366
6367 bfd_boolean
6368 elf32_arm_size_stubs (bfd *output_bfd,
6369 bfd *stub_bfd,
6370 struct bfd_link_info *info,
6371 bfd_signed_vma group_size,
6372 asection * (*add_stub_section) (const char *, asection *,
6373 asection *,
6374 unsigned int),
6375 void (*layout_sections_again) (void))
6376 {
6377 bfd_boolean ret = TRUE;
6378 obj_attribute *out_attr;
6379 int cmse_stub_created = 0;
6380 bfd_size_type stub_group_size;
6381 bfd_boolean m_profile, stubs_always_after_branch, first_veneer_scan = TRUE;
6382 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
6383 struct a8_erratum_fix *a8_fixes = NULL;
6384 unsigned int num_a8_fixes = 0, a8_fix_table_size = 10;
6385 struct a8_erratum_reloc *a8_relocs = NULL;
6386 unsigned int num_a8_relocs = 0, a8_reloc_table_size = 10, i;
6387
6388 if (htab == NULL)
6389 return FALSE;
6390
6391 if (htab->fix_cortex_a8)
6392 {
6393 a8_fixes = (struct a8_erratum_fix *)
6394 bfd_zmalloc (sizeof (struct a8_erratum_fix) * a8_fix_table_size);
6395 a8_relocs = (struct a8_erratum_reloc *)
6396 bfd_zmalloc (sizeof (struct a8_erratum_reloc) * a8_reloc_table_size);
6397 }
6398
6399 /* Propagate mach to stub bfd, because it may not have been
6400 finalized when we created stub_bfd. */
6401 bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
6402 bfd_get_mach (output_bfd));
6403
6404 /* Stash our params away. */
6405 htab->stub_bfd = stub_bfd;
6406 htab->add_stub_section = add_stub_section;
6407 htab->layout_sections_again = layout_sections_again;
6408 stubs_always_after_branch = group_size < 0;
6409
6410 out_attr = elf_known_obj_attributes_proc (output_bfd);
6411 m_profile = out_attr[Tag_CPU_arch_profile].i == 'M';
6412
6413 /* The Cortex-A8 erratum fix depends on stubs not being in the same 4K page
6414 as the first half of a 32-bit branch straddling two 4K pages. This is a
6415 crude way of enforcing that. */
6416 if (htab->fix_cortex_a8)
6417 stubs_always_after_branch = 1;
6418
6419 if (group_size < 0)
6420 stub_group_size = -group_size;
6421 else
6422 stub_group_size = group_size;
6423
6424 if (stub_group_size == 1)
6425 {
6426 /* Default values. */
6427 /* Thumb branch range is +-4MB has to be used as the default
6428 maximum size (a given section can contain both ARM and Thumb
6429 code, so the worst case has to be taken into account).
6430
6431 This value is 24K less than that, which allows for 2025
6432 12-byte stubs. If we exceed that, then we will fail to link.
6433 The user will have to relink with an explicit group size
6434 option. */
6435 stub_group_size = 4170000;
6436 }
6437
6438 group_sections (htab, stub_group_size, stubs_always_after_branch);
6439
6440 /* If we're applying the cortex A8 fix, we need to determine the
6441 program header size now, because we cannot change it later --
6442 that could alter section placements. Notice the A8 erratum fix
6443 ends up requiring the section addresses to remain unchanged
6444 modulo the page size. That's something we cannot represent
6445 inside BFD, and we don't want to force the section alignment to
6446 be the page size. */
6447 if (htab->fix_cortex_a8)
6448 (*htab->layout_sections_again) ();
6449
6450 while (1)
6451 {
6452 bfd *input_bfd;
6453 unsigned int bfd_indx;
6454 asection *stub_sec;
6455 enum elf32_arm_stub_type stub_type;
6456 bfd_boolean stub_changed = FALSE;
6457 unsigned prev_num_a8_fixes = num_a8_fixes;
6458
6459 num_a8_fixes = 0;
6460 for (input_bfd = info->input_bfds, bfd_indx = 0;
6461 input_bfd != NULL;
6462 input_bfd = input_bfd->link.next, bfd_indx++)
6463 {
6464 Elf_Internal_Shdr *symtab_hdr;
6465 asection *section;
6466 Elf_Internal_Sym *local_syms = NULL;
6467
6468 if (!is_arm_elf (input_bfd)
6469 || (elf_dyn_lib_class (input_bfd) & DYN_AS_NEEDED) != 0)
6470 continue;
6471
6472 num_a8_relocs = 0;
6473
6474 /* We'll need the symbol table in a second. */
6475 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
6476 if (symtab_hdr->sh_info == 0)
6477 continue;
6478
6479 /* Limit scan of symbols to object file whose profile is
6480 Microcontroller to not hinder performance in the general case. */
6481 if (m_profile && first_veneer_scan)
6482 {
6483 struct elf_link_hash_entry **sym_hashes;
6484
6485 sym_hashes = elf_sym_hashes (input_bfd);
6486 if (!cmse_scan (input_bfd, htab, out_attr, sym_hashes,
6487 &cmse_stub_created))
6488 goto error_ret_free_local;
6489
6490 if (cmse_stub_created != 0)
6491 stub_changed = TRUE;
6492 }
6493
6494 /* Walk over each section attached to the input bfd. */
6495 for (section = input_bfd->sections;
6496 section != NULL;
6497 section = section->next)
6498 {
6499 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
6500
6501 /* If there aren't any relocs, then there's nothing more
6502 to do. */
6503 if ((section->flags & SEC_RELOC) == 0
6504 || section->reloc_count == 0
6505 || (section->flags & SEC_CODE) == 0)
6506 continue;
6507
6508 /* If this section is a link-once section that will be
6509 discarded, then don't create any stubs. */
6510 if (section->output_section == NULL
6511 || section->output_section->owner != output_bfd)
6512 continue;
6513
6514 /* Get the relocs. */
6515 internal_relocs
6516 = _bfd_elf_link_read_relocs (input_bfd, section, NULL,
6517 NULL, info->keep_memory);
6518 if (internal_relocs == NULL)
6519 goto error_ret_free_local;
6520
6521 /* Now examine each relocation. */
6522 irela = internal_relocs;
6523 irelaend = irela + section->reloc_count;
6524 for (; irela < irelaend; irela++)
6525 {
6526 unsigned int r_type, r_indx;
6527 asection *sym_sec;
6528 bfd_vma sym_value;
6529 bfd_vma destination;
6530 struct elf32_arm_link_hash_entry *hash;
6531 const char *sym_name;
6532 unsigned char st_type;
6533 enum arm_st_branch_type branch_type;
6534 bfd_boolean created_stub = FALSE;
6535
6536 r_type = ELF32_R_TYPE (irela->r_info);
6537 r_indx = ELF32_R_SYM (irela->r_info);
6538
6539 if (r_type >= (unsigned int) R_ARM_max)
6540 {
6541 bfd_set_error (bfd_error_bad_value);
6542 error_ret_free_internal:
6543 if (elf_section_data (section)->relocs == NULL)
6544 free (internal_relocs);
6545 /* Fall through. */
6546 error_ret_free_local:
6547 if (local_syms != NULL
6548 && (symtab_hdr->contents
6549 != (unsigned char *) local_syms))
6550 free (local_syms);
6551 return FALSE;
6552 }
6553
6554 hash = NULL;
6555 if (r_indx >= symtab_hdr->sh_info)
6556 hash = elf32_arm_hash_entry
6557 (elf_sym_hashes (input_bfd)
6558 [r_indx - symtab_hdr->sh_info]);
6559
6560 /* Only look for stubs on branch instructions, or
6561 non-relaxed TLSCALL */
6562 if ((r_type != (unsigned int) R_ARM_CALL)
6563 && (r_type != (unsigned int) R_ARM_THM_CALL)
6564 && (r_type != (unsigned int) R_ARM_JUMP24)
6565 && (r_type != (unsigned int) R_ARM_THM_JUMP19)
6566 && (r_type != (unsigned int) R_ARM_THM_XPC22)
6567 && (r_type != (unsigned int) R_ARM_THM_JUMP24)
6568 && (r_type != (unsigned int) R_ARM_PLT32)
6569 && !((r_type == (unsigned int) R_ARM_TLS_CALL
6570 || r_type == (unsigned int) R_ARM_THM_TLS_CALL)
6571 && r_type == elf32_arm_tls_transition
6572 (info, r_type, &hash->root)
6573 && ((hash ? hash->tls_type
6574 : (elf32_arm_local_got_tls_type
6575 (input_bfd)[r_indx]))
6576 & GOT_TLS_GDESC) != 0))
6577 continue;
6578
6579 /* Now determine the call target, its name, value,
6580 section. */
6581 sym_sec = NULL;
6582 sym_value = 0;
6583 destination = 0;
6584 sym_name = NULL;
6585
6586 if (r_type == (unsigned int) R_ARM_TLS_CALL
6587 || r_type == (unsigned int) R_ARM_THM_TLS_CALL)
6588 {
6589 /* A non-relaxed TLS call. The target is the
6590 plt-resident trampoline and nothing to do
6591 with the symbol. */
6592 BFD_ASSERT (htab->tls_trampoline > 0);
6593 sym_sec = htab->root.splt;
6594 sym_value = htab->tls_trampoline;
6595 hash = 0;
6596 st_type = STT_FUNC;
6597 branch_type = ST_BRANCH_TO_ARM;
6598 }
6599 else if (!hash)
6600 {
6601 /* It's a local symbol. */
6602 Elf_Internal_Sym *sym;
6603
6604 if (local_syms == NULL)
6605 {
6606 local_syms
6607 = (Elf_Internal_Sym *) symtab_hdr->contents;
6608 if (local_syms == NULL)
6609 local_syms
6610 = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
6611 symtab_hdr->sh_info, 0,
6612 NULL, NULL, NULL);
6613 if (local_syms == NULL)
6614 goto error_ret_free_internal;
6615 }
6616
6617 sym = local_syms + r_indx;
6618 if (sym->st_shndx == SHN_UNDEF)
6619 sym_sec = bfd_und_section_ptr;
6620 else if (sym->st_shndx == SHN_ABS)
6621 sym_sec = bfd_abs_section_ptr;
6622 else if (sym->st_shndx == SHN_COMMON)
6623 sym_sec = bfd_com_section_ptr;
6624 else
6625 sym_sec =
6626 bfd_section_from_elf_index (input_bfd, sym->st_shndx);
6627
6628 if (!sym_sec)
6629 /* This is an undefined symbol. It can never
6630 be resolved. */
6631 continue;
6632
6633 if (ELF_ST_TYPE (sym->st_info) != STT_SECTION)
6634 sym_value = sym->st_value;
6635 destination = (sym_value + irela->r_addend
6636 + sym_sec->output_offset
6637 + sym_sec->output_section->vma);
6638 st_type = ELF_ST_TYPE (sym->st_info);
6639 branch_type =
6640 ARM_GET_SYM_BRANCH_TYPE (sym->st_target_internal);
6641 sym_name
6642 = bfd_elf_string_from_elf_section (input_bfd,
6643 symtab_hdr->sh_link,
6644 sym->st_name);
6645 }
6646 else
6647 {
6648 /* It's an external symbol. */
6649 while (hash->root.root.type == bfd_link_hash_indirect
6650 || hash->root.root.type == bfd_link_hash_warning)
6651 hash = ((struct elf32_arm_link_hash_entry *)
6652 hash->root.root.u.i.link);
6653
6654 if (hash->root.root.type == bfd_link_hash_defined
6655 || hash->root.root.type == bfd_link_hash_defweak)
6656 {
6657 sym_sec = hash->root.root.u.def.section;
6658 sym_value = hash->root.root.u.def.value;
6659
6660 struct elf32_arm_link_hash_table *globals =
6661 elf32_arm_hash_table (info);
6662
6663 /* For a destination in a shared library,
6664 use the PLT stub as target address to
6665 decide whether a branch stub is
6666 needed. */
6667 if (globals != NULL
6668 && globals->root.splt != NULL
6669 && hash != NULL
6670 && hash->root.plt.offset != (bfd_vma) -1)
6671 {
6672 sym_sec = globals->root.splt;
6673 sym_value = hash->root.plt.offset;
6674 if (sym_sec->output_section != NULL)
6675 destination = (sym_value
6676 + sym_sec->output_offset
6677 + sym_sec->output_section->vma);
6678 }
6679 else if (sym_sec->output_section != NULL)
6680 destination = (sym_value + irela->r_addend
6681 + sym_sec->output_offset
6682 + sym_sec->output_section->vma);
6683 }
6684 else if ((hash->root.root.type == bfd_link_hash_undefined)
6685 || (hash->root.root.type == bfd_link_hash_undefweak))
6686 {
6687 /* For a shared library, use the PLT stub as
6688 target address to decide whether a long
6689 branch stub is needed.
6690 For absolute code, they cannot be handled. */
6691 struct elf32_arm_link_hash_table *globals =
6692 elf32_arm_hash_table (info);
6693
6694 if (globals != NULL
6695 && globals->root.splt != NULL
6696 && hash != NULL
6697 && hash->root.plt.offset != (bfd_vma) -1)
6698 {
6699 sym_sec = globals->root.splt;
6700 sym_value = hash->root.plt.offset;
6701 if (sym_sec->output_section != NULL)
6702 destination = (sym_value
6703 + sym_sec->output_offset
6704 + sym_sec->output_section->vma);
6705 }
6706 else
6707 continue;
6708 }
6709 else
6710 {
6711 bfd_set_error (bfd_error_bad_value);
6712 goto error_ret_free_internal;
6713 }
6714 st_type = hash->root.type;
6715 branch_type =
6716 ARM_GET_SYM_BRANCH_TYPE (hash->root.target_internal);
6717 sym_name = hash->root.root.root.string;
6718 }
6719
6720 do
6721 {
6722 bfd_boolean new_stub;
6723 struct elf32_arm_stub_hash_entry *stub_entry;
6724
6725 /* Determine what (if any) linker stub is needed. */
6726 stub_type = arm_type_of_stub (info, section, irela,
6727 st_type, &branch_type,
6728 hash, destination, sym_sec,
6729 input_bfd, sym_name);
6730 if (stub_type == arm_stub_none)
6731 break;
6732
6733 /* We've either created a stub for this reloc already,
6734 or we are about to. */
6735 stub_entry =
6736 elf32_arm_create_stub (htab, stub_type, section, irela,
6737 sym_sec, hash,
6738 (char *) sym_name, sym_value,
6739 branch_type, &new_stub);
6740
6741 created_stub = stub_entry != NULL;
6742 if (!created_stub)
6743 goto error_ret_free_internal;
6744 else if (!new_stub)
6745 break;
6746 else
6747 stub_changed = TRUE;
6748 }
6749 while (0);
6750
6751 /* Look for relocations which might trigger Cortex-A8
6752 erratum. */
6753 if (htab->fix_cortex_a8
6754 && (r_type == (unsigned int) R_ARM_THM_JUMP24
6755 || r_type == (unsigned int) R_ARM_THM_JUMP19
6756 || r_type == (unsigned int) R_ARM_THM_CALL
6757 || r_type == (unsigned int) R_ARM_THM_XPC22))
6758 {
6759 bfd_vma from = section->output_section->vma
6760 + section->output_offset
6761 + irela->r_offset;
6762
6763 if ((from & 0xfff) == 0xffe)
6764 {
6765 /* Found a candidate. Note we haven't checked the
6766 destination is within 4K here: if we do so (and
6767 don't create an entry in a8_relocs) we can't tell
6768 that a branch should have been relocated when
6769 scanning later. */
6770 if (num_a8_relocs == a8_reloc_table_size)
6771 {
6772 a8_reloc_table_size *= 2;
6773 a8_relocs = (struct a8_erratum_reloc *)
6774 bfd_realloc (a8_relocs,
6775 sizeof (struct a8_erratum_reloc)
6776 * a8_reloc_table_size);
6777 }
6778
6779 a8_relocs[num_a8_relocs].from = from;
6780 a8_relocs[num_a8_relocs].destination = destination;
6781 a8_relocs[num_a8_relocs].r_type = r_type;
6782 a8_relocs[num_a8_relocs].branch_type = branch_type;
6783 a8_relocs[num_a8_relocs].sym_name = sym_name;
6784 a8_relocs[num_a8_relocs].non_a8_stub = created_stub;
6785 a8_relocs[num_a8_relocs].hash = hash;
6786
6787 num_a8_relocs++;
6788 }
6789 }
6790 }
6791
6792 /* We're done with the internal relocs, free them. */
6793 if (elf_section_data (section)->relocs == NULL)
6794 free (internal_relocs);
6795 }
6796
6797 if (htab->fix_cortex_a8)
6798 {
6799 /* Sort relocs which might apply to Cortex-A8 erratum. */
6800 qsort (a8_relocs, num_a8_relocs,
6801 sizeof (struct a8_erratum_reloc),
6802 &a8_reloc_compare);
6803
6804 /* Scan for branches which might trigger Cortex-A8 erratum. */
6805 if (cortex_a8_erratum_scan (input_bfd, info, &a8_fixes,
6806 &num_a8_fixes, &a8_fix_table_size,
6807 a8_relocs, num_a8_relocs,
6808 prev_num_a8_fixes, &stub_changed)
6809 != 0)
6810 goto error_ret_free_local;
6811 }
6812
6813 if (local_syms != NULL
6814 && symtab_hdr->contents != (unsigned char *) local_syms)
6815 {
6816 if (!info->keep_memory)
6817 free (local_syms);
6818 else
6819 symtab_hdr->contents = (unsigned char *) local_syms;
6820 }
6821 }
6822
6823 if (first_veneer_scan
6824 && !set_cmse_veneer_addr_from_implib (info, htab,
6825 &cmse_stub_created))
6826 ret = FALSE;
6827
6828 if (prev_num_a8_fixes != num_a8_fixes)
6829 stub_changed = TRUE;
6830
6831 if (!stub_changed)
6832 break;
6833
6834 /* OK, we've added some stubs. Find out the new size of the
6835 stub sections. */
6836 for (stub_sec = htab->stub_bfd->sections;
6837 stub_sec != NULL;
6838 stub_sec = stub_sec->next)
6839 {
6840 /* Ignore non-stub sections. */
6841 if (!strstr (stub_sec->name, STUB_SUFFIX))
6842 continue;
6843
6844 stub_sec->size = 0;
6845 }
6846
6847 /* Add new SG veneers after those already in the input import
6848 library. */
6849 for (stub_type = arm_stub_none + 1; stub_type < max_stub_type;
6850 stub_type++)
6851 {
6852 bfd_vma *start_offset_p;
6853 asection **stub_sec_p;
6854
6855 start_offset_p = arm_new_stubs_start_offset_ptr (htab, stub_type);
6856 stub_sec_p = arm_dedicated_stub_input_section_ptr (htab, stub_type);
6857 if (start_offset_p == NULL)
6858 continue;
6859
6860 BFD_ASSERT (stub_sec_p != NULL);
6861 if (*stub_sec_p != NULL)
6862 (*stub_sec_p)->size = *start_offset_p;
6863 }
6864
6865 /* Compute stub section size, considering padding. */
6866 bfd_hash_traverse (&htab->stub_hash_table, arm_size_one_stub, htab);
6867 for (stub_type = arm_stub_none + 1; stub_type < max_stub_type;
6868 stub_type++)
6869 {
6870 int size, padding;
6871 asection **stub_sec_p;
6872
6873 padding = arm_dedicated_stub_section_padding (stub_type);
6874 stub_sec_p = arm_dedicated_stub_input_section_ptr (htab, stub_type);
6875 /* Skip if no stub input section or no stub section padding
6876 required. */
6877 if ((stub_sec_p != NULL && *stub_sec_p == NULL) || padding == 0)
6878 continue;
6879 /* Stub section padding required but no dedicated section. */
6880 BFD_ASSERT (stub_sec_p);
6881
6882 size = (*stub_sec_p)->size;
6883 size = (size + padding - 1) & ~(padding - 1);
6884 (*stub_sec_p)->size = size;
6885 }
6886
6887 /* Add Cortex-A8 erratum veneers to stub section sizes too. */
6888 if (htab->fix_cortex_a8)
6889 for (i = 0; i < num_a8_fixes; i++)
6890 {
6891 stub_sec = elf32_arm_create_or_find_stub_sec (NULL,
6892 a8_fixes[i].section, htab, a8_fixes[i].stub_type);
6893
6894 if (stub_sec == NULL)
6895 return FALSE;
6896
6897 stub_sec->size
6898 += find_stub_size_and_template (a8_fixes[i].stub_type, NULL,
6899 NULL);
6900 }
6901
6902
6903 /* Ask the linker to do its stuff. */
6904 (*htab->layout_sections_again) ();
6905 first_veneer_scan = FALSE;
6906 }
6907
6908 /* Add stubs for Cortex-A8 erratum fixes now. */
6909 if (htab->fix_cortex_a8)
6910 {
6911 for (i = 0; i < num_a8_fixes; i++)
6912 {
6913 struct elf32_arm_stub_hash_entry *stub_entry;
6914 char *stub_name = a8_fixes[i].stub_name;
6915 asection *section = a8_fixes[i].section;
6916 unsigned int section_id = a8_fixes[i].section->id;
6917 asection *link_sec = htab->stub_group[section_id].link_sec;
6918 asection *stub_sec = htab->stub_group[section_id].stub_sec;
6919 const insn_sequence *template_sequence;
6920 int template_size, size = 0;
6921
6922 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
6923 TRUE, FALSE);
6924 if (stub_entry == NULL)
6925 {
6926 _bfd_error_handler (_("%pB: cannot create stub entry %s"),
6927 section->owner, stub_name);
6928 return FALSE;
6929 }
6930
6931 stub_entry->stub_sec = stub_sec;
6932 stub_entry->stub_offset = (bfd_vma) -1;
6933 stub_entry->id_sec = link_sec;
6934 stub_entry->stub_type = a8_fixes[i].stub_type;
6935 stub_entry->source_value = a8_fixes[i].offset;
6936 stub_entry->target_section = a8_fixes[i].section;
6937 stub_entry->target_value = a8_fixes[i].target_offset;
6938 stub_entry->orig_insn = a8_fixes[i].orig_insn;
6939 stub_entry->branch_type = a8_fixes[i].branch_type;
6940
6941 size = find_stub_size_and_template (a8_fixes[i].stub_type,
6942 &template_sequence,
6943 &template_size);
6944
6945 stub_entry->stub_size = size;
6946 stub_entry->stub_template = template_sequence;
6947 stub_entry->stub_template_size = template_size;
6948 }
6949
6950 /* Stash the Cortex-A8 erratum fix array for use later in
6951 elf32_arm_write_section(). */
6952 htab->a8_erratum_fixes = a8_fixes;
6953 htab->num_a8_erratum_fixes = num_a8_fixes;
6954 }
6955 else
6956 {
6957 htab->a8_erratum_fixes = NULL;
6958 htab->num_a8_erratum_fixes = 0;
6959 }
6960 return ret;
6961 }
6962
6963 /* Build all the stubs associated with the current output file. The
6964 stubs are kept in a hash table attached to the main linker hash
6965 table. We also set up the .plt entries for statically linked PIC
6966 functions here. This function is called via arm_elf_finish in the
6967 linker. */
6968
6969 bfd_boolean
6970 elf32_arm_build_stubs (struct bfd_link_info *info)
6971 {
6972 asection *stub_sec;
6973 struct bfd_hash_table *table;
6974 enum elf32_arm_stub_type stub_type;
6975 struct elf32_arm_link_hash_table *htab;
6976
6977 htab = elf32_arm_hash_table (info);
6978 if (htab == NULL)
6979 return FALSE;
6980
6981 for (stub_sec = htab->stub_bfd->sections;
6982 stub_sec != NULL;
6983 stub_sec = stub_sec->next)
6984 {
6985 bfd_size_type size;
6986
6987 /* Ignore non-stub sections. */
6988 if (!strstr (stub_sec->name, STUB_SUFFIX))
6989 continue;
6990
6991 /* Allocate memory to hold the linker stubs. Zeroing the stub sections
6992 must at least be done for stub section requiring padding and for SG
6993 veneers to ensure that a non secure code branching to a removed SG
6994 veneer causes an error. */
6995 size = stub_sec->size;
6996 stub_sec->contents = (unsigned char *) bfd_zalloc (htab->stub_bfd, size);
6997 if (stub_sec->contents == NULL && size != 0)
6998 return FALSE;
6999
7000 stub_sec->size = 0;
7001 }
7002
7003 /* Add new SG veneers after those already in the input import library. */
7004 for (stub_type = arm_stub_none + 1; stub_type < max_stub_type; stub_type++)
7005 {
7006 bfd_vma *start_offset_p;
7007 asection **stub_sec_p;
7008
7009 start_offset_p = arm_new_stubs_start_offset_ptr (htab, stub_type);
7010 stub_sec_p = arm_dedicated_stub_input_section_ptr (htab, stub_type);
7011 if (start_offset_p == NULL)
7012 continue;
7013
7014 BFD_ASSERT (stub_sec_p != NULL);
7015 if (*stub_sec_p != NULL)
7016 (*stub_sec_p)->size = *start_offset_p;
7017 }
7018
7019 /* Build the stubs as directed by the stub hash table. */
7020 table = &htab->stub_hash_table;
7021 bfd_hash_traverse (table, arm_build_one_stub, info);
7022 if (htab->fix_cortex_a8)
7023 {
7024 /* Place the cortex a8 stubs last. */
7025 htab->fix_cortex_a8 = -1;
7026 bfd_hash_traverse (table, arm_build_one_stub, info);
7027 }
7028
7029 return TRUE;
7030 }
7031
7032 /* Locate the Thumb encoded calling stub for NAME. */
7033
7034 static struct elf_link_hash_entry *
7035 find_thumb_glue (struct bfd_link_info *link_info,
7036 const char *name,
7037 char **error_message)
7038 {
7039 char *tmp_name;
7040 struct elf_link_hash_entry *hash;
7041 struct elf32_arm_link_hash_table *hash_table;
7042
7043 /* We need a pointer to the armelf specific hash table. */
7044 hash_table = elf32_arm_hash_table (link_info);
7045 if (hash_table == NULL)
7046 return NULL;
7047
7048 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
7049 + strlen (THUMB2ARM_GLUE_ENTRY_NAME) + 1);
7050
7051 BFD_ASSERT (tmp_name);
7052
7053 sprintf (tmp_name, THUMB2ARM_GLUE_ENTRY_NAME, name);
7054
7055 hash = elf_link_hash_lookup
7056 (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
7057
7058 if (hash == NULL
7059 && asprintf (error_message, _("unable to find %s glue '%s' for '%s'"),
7060 "Thumb", tmp_name, name) == -1)
7061 *error_message = (char *) bfd_errmsg (bfd_error_system_call);
7062
7063 free (tmp_name);
7064
7065 return hash;
7066 }
7067
7068 /* Locate the ARM encoded calling stub for NAME. */
7069
7070 static struct elf_link_hash_entry *
7071 find_arm_glue (struct bfd_link_info *link_info,
7072 const char *name,
7073 char **error_message)
7074 {
7075 char *tmp_name;
7076 struct elf_link_hash_entry *myh;
7077 struct elf32_arm_link_hash_table *hash_table;
7078
7079 /* We need a pointer to the elfarm specific hash table. */
7080 hash_table = elf32_arm_hash_table (link_info);
7081 if (hash_table == NULL)
7082 return NULL;
7083
7084 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
7085 + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
7086
7087 BFD_ASSERT (tmp_name);
7088
7089 sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);
7090
7091 myh = elf_link_hash_lookup
7092 (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
7093
7094 if (myh == NULL
7095 && asprintf (error_message, _("unable to find %s glue '%s' for '%s'"),
7096 "ARM", tmp_name, name) == -1)
7097 *error_message = (char *) bfd_errmsg (bfd_error_system_call);
7098
7099 free (tmp_name);
7100
7101 return myh;
7102 }
7103
/* ARM->Thumb glue (static images):

   .arm
   __func_from_arm:
	 ldr r12, __func_addr
	 bx  r12
   __func_addr:
	 .word func	@ behave as if you saw a ARM_32 reloc.

   (v5t static images)
   .arm
   __func_from_arm:
	 ldr pc, __func_addr
   __func_addr:
	 .word func	@ behave as if you saw a ARM_32 reloc.

   (relocatable images)
   .arm
   __func_from_arm:
	 ldr r12, __func_offset
	 add r12, r12, pc
	 bx  r12
   __func_offset:
	 .word func - .	 */

/* Encodings of the ARM->Thumb sequences shown above; the final word of
   each sequence is a placeholder patched when the glue is emitted.  */
#define ARM2THUMB_STATIC_GLUE_SIZE 12
static const insn32 a2t1_ldr_insn = 0xe59fc000;
static const insn32 a2t2_bx_r12_insn = 0xe12fff1c;
static const insn32 a2t3_func_addr_insn = 0x00000001;

#define ARM2THUMB_V5_STATIC_GLUE_SIZE 8
static const insn32 a2t1v5_ldr_insn = 0xe51ff004;
static const insn32 a2t2v5_func_addr_insn = 0x00000001;

#define ARM2THUMB_PIC_GLUE_SIZE 16
static const insn32 a2t1p_ldr_insn = 0xe59fc004;
static const insn32 a2t2p_add_pc_insn = 0xe08cc00f;
static const insn32 a2t3p_bx_r12_insn = 0xe12fff1c;

/* Thumb->ARM:  Thumb->(non-interworking aware) ARM

     .thumb				.thumb
     .align 2				.align 2
 __func_from_thumb:		    __func_from_thumb:
     bx pc				push  {r6, lr}
     nop				ldr   r6, __func_addr
     .arm				mov   lr, pc
     b func				bx    r6
					.arm
				    ;; back_to_thumb
					ldmia r13! {r6, lr}
					bx    lr
				   __func_addr:
					.word	     func  */

/* Encodings of the interworking-aware Thumb->ARM sequence above (left
   column): bx pc / nop / b func.  */
#define THUMB2ARM_GLUE_SIZE 8
static const insn16 t2a1_bx_pc_insn = 0x4778;
static const insn16 t2a2_noop_insn = 0x46c0;
static const insn32 t2a3_b_insn = 0xea000000;

/* Byte sizes of the veneers emitted for the VFP11 denorm and STM32L4XX
   erratum workarounds (see the record_*_erratum_veneer functions).  */
#define VFP11_ERRATUM_VENEER_SIZE 8
#define STM32L4XX_ERRATUM_LDM_VENEER_SIZE 16
#define STM32L4XX_ERRATUM_VLDM_VENEER_SIZE 24

/* ARMv4 "BX Rn" veneer; the constant names indicate a tst / moveq /
   bx sequence (reserved per-register by record_arm_bx_glue).  */
#define ARM_BX_VENEER_SIZE 12
static const insn32 armbx1_tst_insn = 0xe3100001;
static const insn32 armbx2_moveq_insn = 0x01a0f000;
static const insn32 armbx3_bx_insn = 0xe12fff10;
7172
7173 #ifndef ELFARM_NABI_C_INCLUDED
7174 static void
7175 arm_allocate_glue_section_space (bfd * abfd, bfd_size_type size, const char * name)
7176 {
7177 asection * s;
7178 bfd_byte * contents;
7179
7180 if (size == 0)
7181 {
7182 /* Do not include empty glue sections in the output. */
7183 if (abfd != NULL)
7184 {
7185 s = bfd_get_linker_section (abfd, name);
7186 if (s != NULL)
7187 s->flags |= SEC_EXCLUDE;
7188 }
7189 return;
7190 }
7191
7192 BFD_ASSERT (abfd != NULL);
7193
7194 s = bfd_get_linker_section (abfd, name);
7195 BFD_ASSERT (s != NULL);
7196
7197 contents = (bfd_byte *) bfd_alloc (abfd, size);
7198
7199 BFD_ASSERT (s->size == size);
7200 s->contents = contents;
7201 }
7202
7203 bfd_boolean
7204 bfd_elf32_arm_allocate_interworking_sections (struct bfd_link_info * info)
7205 {
7206 struct elf32_arm_link_hash_table * globals;
7207
7208 globals = elf32_arm_hash_table (info);
7209 BFD_ASSERT (globals != NULL);
7210
7211 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
7212 globals->arm_glue_size,
7213 ARM2THUMB_GLUE_SECTION_NAME);
7214
7215 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
7216 globals->thumb_glue_size,
7217 THUMB2ARM_GLUE_SECTION_NAME);
7218
7219 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
7220 globals->vfp11_erratum_glue_size,
7221 VFP11_ERRATUM_VENEER_SECTION_NAME);
7222
7223 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
7224 globals->stm32l4xx_erratum_glue_size,
7225 STM32L4XX_ERRATUM_VENEER_SECTION_NAME);
7226
7227 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
7228 globals->bx_glue_size,
7229 ARM_BX_GLUE_SECTION_NAME);
7230
7231 return TRUE;
7232 }
7233
7234 /* Allocate space and symbols for calling a Thumb function from Arm mode.
7235 returns the symbol identifying the stub. */
7236
7237 static struct elf_link_hash_entry *
7238 record_arm_to_thumb_glue (struct bfd_link_info * link_info,
7239 struct elf_link_hash_entry * h)
7240 {
7241 const char * name = h->root.root.string;
7242 asection * s;
7243 char * tmp_name;
7244 struct elf_link_hash_entry * myh;
7245 struct bfd_link_hash_entry * bh;
7246 struct elf32_arm_link_hash_table * globals;
7247 bfd_vma val;
7248 bfd_size_type size;
7249
7250 globals = elf32_arm_hash_table (link_info);
7251 BFD_ASSERT (globals != NULL);
7252 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
7253
7254 s = bfd_get_linker_section
7255 (globals->bfd_of_glue_owner, ARM2THUMB_GLUE_SECTION_NAME);
7256
7257 BFD_ASSERT (s != NULL);
7258
7259 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
7260 + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
7261
7262 BFD_ASSERT (tmp_name);
7263
7264 sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);
7265
7266 myh = elf_link_hash_lookup
7267 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
7268
7269 if (myh != NULL)
7270 {
7271 /* We've already seen this guy. */
7272 free (tmp_name);
7273 return myh;
7274 }
7275
7276 /* The only trick here is using hash_table->arm_glue_size as the value.
7277 Even though the section isn't allocated yet, this is where we will be
7278 putting it. The +1 on the value marks that the stub has not been
7279 output yet - not that it is a Thumb function. */
7280 bh = NULL;
7281 val = globals->arm_glue_size + 1;
7282 _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
7283 tmp_name, BSF_GLOBAL, s, val,
7284 NULL, TRUE, FALSE, &bh);
7285
7286 myh = (struct elf_link_hash_entry *) bh;
7287 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
7288 myh->forced_local = 1;
7289
7290 free (tmp_name);
7291
7292 if (bfd_link_pic (link_info)
7293 || globals->root.is_relocatable_executable
7294 || globals->pic_veneer)
7295 size = ARM2THUMB_PIC_GLUE_SIZE;
7296 else if (globals->use_blx)
7297 size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
7298 else
7299 size = ARM2THUMB_STATIC_GLUE_SIZE;
7300
7301 s->size += size;
7302 globals->arm_glue_size += size;
7303
7304 return myh;
7305 }
7306
7307 /* Allocate space for ARMv4 BX veneers. */
7308
7309 static void
7310 record_arm_bx_glue (struct bfd_link_info * link_info, int reg)
7311 {
7312 asection * s;
7313 struct elf32_arm_link_hash_table *globals;
7314 char *tmp_name;
7315 struct elf_link_hash_entry *myh;
7316 struct bfd_link_hash_entry *bh;
7317 bfd_vma val;
7318
7319 /* BX PC does not need a veneer. */
7320 if (reg == 15)
7321 return;
7322
7323 globals = elf32_arm_hash_table (link_info);
7324 BFD_ASSERT (globals != NULL);
7325 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
7326
7327 /* Check if this veneer has already been allocated. */
7328 if (globals->bx_glue_offset[reg])
7329 return;
7330
7331 s = bfd_get_linker_section
7332 (globals->bfd_of_glue_owner, ARM_BX_GLUE_SECTION_NAME);
7333
7334 BFD_ASSERT (s != NULL);
7335
7336 /* Add symbol for veneer. */
7337 tmp_name = (char *)
7338 bfd_malloc ((bfd_size_type) strlen (ARM_BX_GLUE_ENTRY_NAME) + 1);
7339
7340 BFD_ASSERT (tmp_name);
7341
7342 sprintf (tmp_name, ARM_BX_GLUE_ENTRY_NAME, reg);
7343
7344 myh = elf_link_hash_lookup
7345 (&(globals)->root, tmp_name, FALSE, FALSE, FALSE);
7346
7347 BFD_ASSERT (myh == NULL);
7348
7349 bh = NULL;
7350 val = globals->bx_glue_size;
7351 _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
7352 tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
7353 NULL, TRUE, FALSE, &bh);
7354
7355 myh = (struct elf_link_hash_entry *) bh;
7356 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
7357 myh->forced_local = 1;
7358
7359 s->size += ARM_BX_VENEER_SIZE;
7360 globals->bx_glue_offset[reg] = globals->bx_glue_size | 2;
7361 globals->bx_glue_size += ARM_BX_VENEER_SIZE;
7362 }
7363
7364
7365 /* Add an entry to the code/data map for section SEC. */
7366
7367 static void
7368 elf32_arm_section_map_add (asection *sec, char type, bfd_vma vma)
7369 {
7370 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
7371 unsigned int newidx;
7372
7373 if (sec_data->map == NULL)
7374 {
7375 sec_data->map = (elf32_arm_section_map *)
7376 bfd_malloc (sizeof (elf32_arm_section_map));
7377 sec_data->mapcount = 0;
7378 sec_data->mapsize = 1;
7379 }
7380
7381 newidx = sec_data->mapcount++;
7382
7383 if (sec_data->mapcount > sec_data->mapsize)
7384 {
7385 sec_data->mapsize *= 2;
7386 sec_data->map = (elf32_arm_section_map *)
7387 bfd_realloc_or_free (sec_data->map, sec_data->mapsize
7388 * sizeof (elf32_arm_section_map));
7389 }
7390
7391 if (sec_data->map)
7392 {
7393 sec_data->map[newidx].vma = vma;
7394 sec_data->map[newidx].type = type;
7395 }
7396 }
7397
7398
7399 /* Record information about a VFP11 denorm-erratum veneer. Only ARM-mode
7400 veneers are handled for now. */
7401
7402 static bfd_vma
7403 record_vfp11_erratum_veneer (struct bfd_link_info *link_info,
7404 elf32_vfp11_erratum_list *branch,
7405 bfd *branch_bfd,
7406 asection *branch_sec,
7407 unsigned int offset)
7408 {
7409 asection *s;
7410 struct elf32_arm_link_hash_table *hash_table;
7411 char *tmp_name;
7412 struct elf_link_hash_entry *myh;
7413 struct bfd_link_hash_entry *bh;
7414 bfd_vma val;
7415 struct _arm_elf_section_data *sec_data;
7416 elf32_vfp11_erratum_list *newerr;
7417
7418 hash_table = elf32_arm_hash_table (link_info);
7419 BFD_ASSERT (hash_table != NULL);
7420 BFD_ASSERT (hash_table->bfd_of_glue_owner != NULL);
7421
7422 s = bfd_get_linker_section
7423 (hash_table->bfd_of_glue_owner, VFP11_ERRATUM_VENEER_SECTION_NAME);
7424
7425 sec_data = elf32_arm_section_data (s);
7426
7427 BFD_ASSERT (s != NULL);
7428
7429 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
7430 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
7431
7432 BFD_ASSERT (tmp_name);
7433
7434 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
7435 hash_table->num_vfp11_fixes);
7436
7437 myh = elf_link_hash_lookup
7438 (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
7439
7440 BFD_ASSERT (myh == NULL);
7441
7442 bh = NULL;
7443 val = hash_table->vfp11_erratum_glue_size;
7444 _bfd_generic_link_add_one_symbol (link_info, hash_table->bfd_of_glue_owner,
7445 tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
7446 NULL, TRUE, FALSE, &bh);
7447
7448 myh = (struct elf_link_hash_entry *) bh;
7449 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
7450 myh->forced_local = 1;
7451
7452 /* Link veneer back to calling location. */
7453 sec_data->erratumcount += 1;
7454 newerr = (elf32_vfp11_erratum_list *)
7455 bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
7456
7457 newerr->type = VFP11_ERRATUM_ARM_VENEER;
7458 newerr->vma = -1;
7459 newerr->u.v.branch = branch;
7460 newerr->u.v.id = hash_table->num_vfp11_fixes;
7461 branch->u.b.veneer = newerr;
7462
7463 newerr->next = sec_data->erratumlist;
7464 sec_data->erratumlist = newerr;
7465
7466 /* A symbol for the return from the veneer. */
7467 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
7468 hash_table->num_vfp11_fixes);
7469
7470 myh = elf_link_hash_lookup
7471 (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
7472
7473 if (myh != NULL)
7474 abort ();
7475
7476 bh = NULL;
7477 val = offset + 4;
7478 _bfd_generic_link_add_one_symbol (link_info, branch_bfd, tmp_name, BSF_LOCAL,
7479 branch_sec, val, NULL, TRUE, FALSE, &bh);
7480
7481 myh = (struct elf_link_hash_entry *) bh;
7482 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
7483 myh->forced_local = 1;
7484
7485 free (tmp_name);
7486
7487 /* Generate a mapping symbol for the veneer section, and explicitly add an
7488 entry for that symbol to the code/data map for the section. */
7489 if (hash_table->vfp11_erratum_glue_size == 0)
7490 {
7491 bh = NULL;
7492 /* FIXME: Creates an ARM symbol. Thumb mode will need attention if it
7493 ever requires this erratum fix. */
7494 _bfd_generic_link_add_one_symbol (link_info,
7495 hash_table->bfd_of_glue_owner, "$a",
7496 BSF_LOCAL, s, 0, NULL,
7497 TRUE, FALSE, &bh);
7498
7499 myh = (struct elf_link_hash_entry *) bh;
7500 myh->type = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
7501 myh->forced_local = 1;
7502
7503 /* The elf32_arm_init_maps function only cares about symbols from input
7504 BFDs. We must make a note of this generated mapping symbol
7505 ourselves so that code byteswapping works properly in
7506 elf32_arm_write_section. */
7507 elf32_arm_section_map_add (s, 'a', 0);
7508 }
7509
7510 s->size += VFP11_ERRATUM_VENEER_SIZE;
7511 hash_table->vfp11_erratum_glue_size += VFP11_ERRATUM_VENEER_SIZE;
7512 hash_table->num_vfp11_fixes++;
7513
7514 /* The offset of the veneer. */
7515 return val;
7516 }
7517
/* Record information about a STM32L4XX STM erratum veneer.  Only THUMB-mode
   veneers need to be handled because used only in Cortex-M.  Reserves
   VENEER_SIZE bytes of glue, defines the veneer entry symbol and a return
   symbol at OFFSET + 4 in BRANCH_SEC of BRANCH_BFD, and links the veneer
   to the erratum record BRANCH.  */

static bfd_vma
record_stm32l4xx_erratum_veneer (struct bfd_link_info *link_info,
				 elf32_stm32l4xx_erratum_list *branch,
				 bfd *branch_bfd,
				 asection *branch_sec,
				 unsigned int offset,
				 bfd_size_type veneer_size)
{
  asection *s;
  struct elf32_arm_link_hash_table *hash_table;
  char *tmp_name;
  struct elf_link_hash_entry *myh;
  struct bfd_link_hash_entry *bh;
  bfd_vma val;
  struct _arm_elf_section_data *sec_data;
  elf32_stm32l4xx_erratum_list *newerr;

  hash_table = elf32_arm_hash_table (link_info);
  BFD_ASSERT (hash_table != NULL);
  BFD_ASSERT (hash_table->bfd_of_glue_owner != NULL);

  s = bfd_get_linker_section
    (hash_table->bfd_of_glue_owner, STM32L4XX_ERRATUM_VENEER_SECTION_NAME);

  BFD_ASSERT (s != NULL);

  sec_data = elf32_arm_section_data (s);

  /* 10 extra bytes leave room for the decimal veneer index.  */
  tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
				  (STM32L4XX_ERRATUM_VENEER_ENTRY_NAME) + 10);

  BFD_ASSERT (tmp_name);

  sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME,
	   hash_table->num_stm32l4xx_fixes);

  myh = elf_link_hash_lookup
    (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);

  /* Each veneer index is used only once, so the symbol must be new.  */
  BFD_ASSERT (myh == NULL);

  /* Define the veneer entry symbol at the current end of the glue.  */
  bh = NULL;
  val = hash_table->stm32l4xx_erratum_glue_size;
  _bfd_generic_link_add_one_symbol (link_info, hash_table->bfd_of_glue_owner,
				    tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
				    NULL, TRUE, FALSE, &bh);

  myh = (struct elf_link_hash_entry *) bh;
  myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
  myh->forced_local = 1;

  /* Link veneer back to calling location.  */
  sec_data->stm32l4xx_erratumcount += 1;
  newerr = (elf32_stm32l4xx_erratum_list *)
      bfd_zmalloc (sizeof (elf32_stm32l4xx_erratum_list));

  newerr->type = STM32L4XX_ERRATUM_VENEER;
  newerr->vma = -1;
  newerr->u.v.branch = branch;
  newerr->u.v.id = hash_table->num_stm32l4xx_fixes;
  branch->u.b.veneer = newerr;

  newerr->next = sec_data->stm32l4xx_erratumlist;
  sec_data->stm32l4xx_erratumlist = newerr;

  /* A symbol for the return from the veneer.  */
  sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME "_r",
	   hash_table->num_stm32l4xx_fixes);

  myh = elf_link_hash_lookup
    (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);

  if (myh != NULL)
    abort ();

  /* The return symbol is placed at OFFSET + 4 in the branch section.  */
  bh = NULL;
  val = offset + 4;
  _bfd_generic_link_add_one_symbol (link_info, branch_bfd, tmp_name, BSF_LOCAL,
				    branch_sec, val, NULL, TRUE, FALSE, &bh);

  myh = (struct elf_link_hash_entry *) bh;
  myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
  myh->forced_local = 1;

  free (tmp_name);

  /* Generate a mapping symbol for the veneer section, and explicitly add an
     entry for that symbol to the code/data map for the section.  */
  if (hash_table->stm32l4xx_erratum_glue_size == 0)
    {
      bh = NULL;
      /* Creates a THUMB symbol since there is no other choice.  */
      _bfd_generic_link_add_one_symbol (link_info,
					hash_table->bfd_of_glue_owner, "$t",
					BSF_LOCAL, s, 0, NULL,
					TRUE, FALSE, &bh);

      myh = (struct elf_link_hash_entry *) bh;
      myh->type = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
      myh->forced_local = 1;

      /* The elf32_arm_init_maps function only cares about symbols from input
	 BFDs.  We must make a note of this generated mapping symbol
	 ourselves so that code byteswapping works properly in
	 elf32_arm_write_section.  */
      elf32_arm_section_map_add (s, 't', 0);
    }

  s->size += veneer_size;
  hash_table->stm32l4xx_erratum_glue_size += veneer_size;
  hash_table->num_stm32l4xx_fixes++;

  /* The offset of the veneer.  */
  return val;
}
7636
7637 #define ARM_GLUE_SECTION_FLAGS \
7638 (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_CODE \
7639 | SEC_READONLY | SEC_LINKER_CREATED)
7640
7641 /* Create a fake section for use by the ARM backend of the linker. */
7642
7643 static bfd_boolean
7644 arm_make_glue_section (bfd * abfd, const char * name)
7645 {
7646 asection * sec;
7647
7648 sec = bfd_get_linker_section (abfd, name);
7649 if (sec != NULL)
7650 /* Already made. */
7651 return TRUE;
7652
7653 sec = bfd_make_section_anyway_with_flags (abfd, name, ARM_GLUE_SECTION_FLAGS);
7654
7655 if (sec == NULL
7656 || !bfd_set_section_alignment (abfd, sec, 2))
7657 return FALSE;
7658
7659 /* Set the gc mark to prevent the section from being removed by garbage
7660 collection, despite the fact that no relocs refer to this section. */
7661 sec->gc_mark = 1;
7662
7663 return TRUE;
7664 }
7665
/* Set size of .plt entries.  This function is called from the
   linker scripts in ld/emultempl/{armelf}.em.  */

void
bfd_elf32_arm_use_long_plt (void)
{
  /* Record that the long .plt entry format should be used.  */
  elf32_arm_use_long_plt_entry = TRUE;
}
7674
7675 /* Add the glue sections to ABFD. This function is called from the
7676 linker scripts in ld/emultempl/{armelf}.em. */
7677
7678 bfd_boolean
7679 bfd_elf32_arm_add_glue_sections_to_bfd (bfd *abfd,
7680 struct bfd_link_info *info)
7681 {
7682 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
7683 bfd_boolean dostm32l4xx = globals
7684 && globals->stm32l4xx_fix != BFD_ARM_STM32L4XX_FIX_NONE;
7685 bfd_boolean addglue;
7686
7687 /* If we are only performing a partial
7688 link do not bother adding the glue. */
7689 if (bfd_link_relocatable (info))
7690 return TRUE;
7691
7692 addglue = arm_make_glue_section (abfd, ARM2THUMB_GLUE_SECTION_NAME)
7693 && arm_make_glue_section (abfd, THUMB2ARM_GLUE_SECTION_NAME)
7694 && arm_make_glue_section (abfd, VFP11_ERRATUM_VENEER_SECTION_NAME)
7695 && arm_make_glue_section (abfd, ARM_BX_GLUE_SECTION_NAME);
7696
7697 if (!dostm32l4xx)
7698 return addglue;
7699
7700 return addglue
7701 && arm_make_glue_section (abfd, STM32L4XX_ERRATUM_VENEER_SECTION_NAME);
7702 }
7703
7704 /* Mark output sections of veneers needing a dedicated one with SEC_KEEP. This
7705 ensures they are not marked for deletion by
7706 strip_excluded_output_sections () when veneers are going to be created
7707 later. Not doing so would trigger assert on empty section size in
7708 lang_size_sections_1 (). */
7709
7710 void
7711 bfd_elf32_arm_keep_private_stub_output_sections (struct bfd_link_info *info)
7712 {
7713 enum elf32_arm_stub_type stub_type;
7714
7715 /* If we are only performing a partial
7716 link do not bother adding the glue. */
7717 if (bfd_link_relocatable (info))
7718 return;
7719
7720 for (stub_type = arm_stub_none + 1; stub_type < max_stub_type; stub_type++)
7721 {
7722 asection *out_sec;
7723 const char *out_sec_name;
7724
7725 if (!arm_dedicated_stub_output_section_required (stub_type))
7726 continue;
7727
7728 out_sec_name = arm_dedicated_stub_output_section_name (stub_type);
7729 out_sec = bfd_get_section_by_name (info->output_bfd, out_sec_name);
7730 if (out_sec != NULL)
7731 out_sec->flags |= SEC_KEEP;
7732 }
7733 }
7734
7735 /* Select a BFD to be used to hold the sections used by the glue code.
7736 This function is called from the linker scripts in ld/emultempl/
7737 {armelf/pe}.em. */
7738
7739 bfd_boolean
7740 bfd_elf32_arm_get_bfd_for_interworking (bfd *abfd, struct bfd_link_info *info)
7741 {
7742 struct elf32_arm_link_hash_table *globals;
7743
7744 /* If we are only performing a partial link
7745 do not bother getting a bfd to hold the glue. */
7746 if (bfd_link_relocatable (info))
7747 return TRUE;
7748
7749 /* Make sure we don't attach the glue sections to a dynamic object. */
7750 BFD_ASSERT (!(abfd->flags & DYNAMIC));
7751
7752 globals = elf32_arm_hash_table (info);
7753 BFD_ASSERT (globals != NULL);
7754
7755 if (globals->bfd_of_glue_owner != NULL)
7756 return TRUE;
7757
7758 /* Save the bfd for later use. */
7759 globals->bfd_of_glue_owner = abfd;
7760
7761 return TRUE;
7762 }
7763
7764 static void
7765 check_use_blx (struct elf32_arm_link_hash_table *globals)
7766 {
7767 int cpu_arch;
7768
7769 cpu_arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
7770 Tag_CPU_arch);
7771
7772 if (globals->fix_arm1176)
7773 {
7774 if (cpu_arch == TAG_CPU_ARCH_V6T2 || cpu_arch > TAG_CPU_ARCH_V6K)
7775 globals->use_blx = 1;
7776 }
7777 else
7778 {
7779 if (cpu_arch > TAG_CPU_ARCH_V4T)
7780 globals->use_blx = 1;
7781 }
7782 }
7783
/* Scan the relocations of every section of ABFD, before section sizes
   are nailed down, and record any ARM<->Thumb interworking glue or
   ARMv4 BX veneers that will be needed.  Returns FALSE on error.  */

bfd_boolean
bfd_elf32_arm_process_before_allocation (bfd *abfd,
					 struct bfd_link_info *link_info)
{
  Elf_Internal_Shdr *symtab_hdr;
  Elf_Internal_Rela *internal_relocs = NULL;
  Elf_Internal_Rela *irel, *irelend;
  bfd_byte *contents = NULL;

  asection *sec;
  struct elf32_arm_link_hash_table *globals;

  /* If we are only performing a partial link do not bother
     to construct any glue.  */
  if (bfd_link_relocatable (link_info))
    return TRUE;

  /* Here we have a bfd that is to be included on the link.  We have a
     hook to do reloc rummaging, before section sizes are nailed down.  */
  globals = elf32_arm_hash_table (link_info);
  BFD_ASSERT (globals != NULL);

  check_use_blx (globals);

  if (globals->byteswap_code && !bfd_big_endian (abfd))
    {
      _bfd_error_handler (_("%pB: BE8 images only valid in big-endian mode"),
			  abfd);
      return FALSE;
    }

  /* PR 5398: If we have not decided to include any loadable sections in
     the output then we will not have a glue owner bfd.  This is OK, it
     just means that there is nothing else for us to do here.  */
  if (globals->bfd_of_glue_owner == NULL)
    return TRUE;

  /* Rummage around all the relocs and map the glue vectors.  */
  sec = abfd->sections;

  if (sec == NULL)
    return TRUE;

  for (; sec != NULL; sec = sec->next)
    {
      if (sec->reloc_count == 0)
	continue;

      if ((sec->flags & SEC_EXCLUDE) != 0)
	continue;

      symtab_hdr = & elf_symtab_hdr (abfd);

      /* Load the relocs.  */
      internal_relocs
	= _bfd_elf_link_read_relocs (abfd, sec, NULL, NULL, FALSE);

      if (internal_relocs == NULL)
	goto error_return;

      irelend = internal_relocs + sec->reloc_count;
      for (irel = internal_relocs; irel < irelend; irel++)
	{
	  long r_type;
	  unsigned long r_index;

	  struct elf_link_hash_entry *h;

	  r_type = ELF32_R_TYPE (irel->r_info);
	  r_index = ELF32_R_SYM (irel->r_info);

	  /* These are the only relocation types we care about.  */
	  if ( r_type != R_ARM_PC24
	      && (r_type != R_ARM_V4BX || globals->fix_v4bx < 2))
	    continue;

	  /* Get the section contents if we haven't done so already.  */
	  if (contents == NULL)
	    {
	      /* Get cached copy if it exists.  */
	      if (elf_section_data (sec)->this_hdr.contents != NULL)
		contents = elf_section_data (sec)->this_hdr.contents;
	      else
		{
		  /* Go get them off disk.  */
		  if (! bfd_malloc_and_get_section (abfd, sec, &contents))
		    goto error_return;
		}
	    }

	  if (r_type == R_ARM_V4BX)
	    {
	      int reg;

	      /* Note which register the BX instruction uses so that a
		 veneer can be reserved for it.  */
	      reg = bfd_get_32 (abfd, contents + irel->r_offset) & 0xf;
	      record_arm_bx_glue (link_info, reg);
	      continue;
	    }

	  /* If the relocation is not against a symbol it cannot concern us.  */
	  h = NULL;

	  /* We don't care about local symbols.  */
	  if (r_index < symtab_hdr->sh_info)
	    continue;

	  /* This is an external symbol.  */
	  r_index -= symtab_hdr->sh_info;
	  h = (struct elf_link_hash_entry *)
	    elf_sym_hashes (abfd)[r_index];

	  /* If the relocation is against a static symbol it must be within
	     the current section and so cannot be a cross ARM/Thumb relocation.  */
	  if (h == NULL)
	    continue;

	  /* If the call will go through a PLT entry then we do not need
	     glue.  */
	  if (globals->root.splt != NULL && h->plt.offset != (bfd_vma) -1)
	    continue;

	  switch (r_type)
	    {
	    case R_ARM_PC24:
	      /* This one is a call from arm code.  We need to look up
		 the target of the call.  If it is a thumb target, we
		 insert glue.  */
	      if (ARM_GET_SYM_BRANCH_TYPE (h->target_internal)
		  == ST_BRANCH_TO_THUMB)
		record_arm_to_thumb_glue (link_info, h);
	      break;

	    default:
	      abort ();
	    }
	}

      /* Release per-section buffers unless they are owned by the bfd's
	 cached copies.  */
      if (contents != NULL
	  && elf_section_data (sec)->this_hdr.contents != contents)
	free (contents);
      contents = NULL;

      if (internal_relocs != NULL
	  && elf_section_data (sec)->relocs != internal_relocs)
	free (internal_relocs);
      internal_relocs = NULL;
    }

  return TRUE;

 error_return:
  if (contents != NULL
      && elf_section_data (sec)->this_hdr.contents != contents)
    free (contents);
  if (internal_relocs != NULL
      && elf_section_data (sec)->relocs != internal_relocs)
    free (internal_relocs);

  return FALSE;
}
7944 #endif
7945
7946
7947 /* Initialise maps of ARM/Thumb/data for input BFDs. */
7948
7949 void
7950 bfd_elf32_arm_init_maps (bfd *abfd)
7951 {
7952 Elf_Internal_Sym *isymbuf;
7953 Elf_Internal_Shdr *hdr;
7954 unsigned int i, localsyms;
7955
7956 /* PR 7093: Make sure that we are dealing with an arm elf binary. */
7957 if (! is_arm_elf (abfd))
7958 return;
7959
7960 if ((abfd->flags & DYNAMIC) != 0)
7961 return;
7962
7963 hdr = & elf_symtab_hdr (abfd);
7964 localsyms = hdr->sh_info;
7965
7966 /* Obtain a buffer full of symbols for this BFD. The hdr->sh_info field
7967 should contain the number of local symbols, which should come before any
7968 global symbols. Mapping symbols are always local. */
7969 isymbuf = bfd_elf_get_elf_syms (abfd, hdr, localsyms, 0, NULL, NULL,
7970 NULL);
7971
7972 /* No internal symbols read? Skip this BFD. */
7973 if (isymbuf == NULL)
7974 return;
7975
7976 for (i = 0; i < localsyms; i++)
7977 {
7978 Elf_Internal_Sym *isym = &isymbuf[i];
7979 asection *sec = bfd_section_from_elf_index (abfd, isym->st_shndx);
7980 const char *name;
7981
7982 if (sec != NULL
7983 && ELF_ST_BIND (isym->st_info) == STB_LOCAL)
7984 {
7985 name = bfd_elf_string_from_elf_section (abfd,
7986 hdr->sh_link, isym->st_name);
7987
7988 if (bfd_is_arm_special_symbol_name (name,
7989 BFD_ARM_SPECIAL_SYM_TYPE_MAP))
7990 elf32_arm_section_map_add (sec, name[1], isym->st_value);
7991 }
7992 }
7993 }
7994
7995
7996 /* Auto-select enabling of Cortex-A8 erratum fix if the user didn't explicitly
7997 say what they wanted. */
7998
7999 void
8000 bfd_elf32_arm_set_cortex_a8_fix (bfd *obfd, struct bfd_link_info *link_info)
8001 {
8002 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
8003 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
8004
8005 if (globals == NULL)
8006 return;
8007
8008 if (globals->fix_cortex_a8 == -1)
8009 {
8010 /* Turn on Cortex-A8 erratum workaround for ARMv7-A. */
8011 if (out_attr[Tag_CPU_arch].i == TAG_CPU_ARCH_V7
8012 && (out_attr[Tag_CPU_arch_profile].i == 'A'
8013 || out_attr[Tag_CPU_arch_profile].i == 0))
8014 globals->fix_cortex_a8 = 1;
8015 else
8016 globals->fix_cortex_a8 = 0;
8017 }
8018 }
8019
8020
8021 void
8022 bfd_elf32_arm_set_vfp11_fix (bfd *obfd, struct bfd_link_info *link_info)
8023 {
8024 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
8025 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
8026
8027 if (globals == NULL)
8028 return;
8029 /* We assume that ARMv7+ does not need the VFP11 denorm erratum fix. */
8030 if (out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V7)
8031 {
8032 switch (globals->vfp11_fix)
8033 {
8034 case BFD_ARM_VFP11_FIX_DEFAULT:
8035 case BFD_ARM_VFP11_FIX_NONE:
8036 globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
8037 break;
8038
8039 default:
8040 /* Give a warning, but do as the user requests anyway. */
8041 _bfd_error_handler (_("%pB: warning: selected VFP11 erratum "
8042 "workaround is not necessary for target architecture"), obfd);
8043 }
8044 }
8045 else if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_DEFAULT)
8046 /* For earlier architectures, we might need the workaround, but do not
8047 enable it by default. If users is running with broken hardware, they
8048 must enable the erratum fix explicitly. */
8049 globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
8050 }
8051
8052 void
8053 bfd_elf32_arm_set_stm32l4xx_fix (bfd *obfd, struct bfd_link_info *link_info)
8054 {
8055 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
8056 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
8057
8058 if (globals == NULL)
8059 return;
8060
8061 /* We assume only Cortex-M4 may require the fix. */
8062 if (out_attr[Tag_CPU_arch].i != TAG_CPU_ARCH_V7E_M
8063 || out_attr[Tag_CPU_arch_profile].i != 'M')
8064 {
8065 if (globals->stm32l4xx_fix != BFD_ARM_STM32L4XX_FIX_NONE)
8066 /* Give a warning, but do as the user requests anyway. */
8067 _bfd_error_handler
8068 (_("%pB: warning: selected STM32L4XX erratum "
8069 "workaround is not necessary for target architecture"), obfd);
8070 }
8071 }
8072
/* Classification of the VFP11 execution pipeline an instruction is issued
   to, as determined by bfd_arm_vfp11_insn_decode below.  */
enum bfd_arm_vfp11_pipe
{
  VFP11_FMAC,	/* Multiply-accumulate pipeline (also simple arithmetic).  */
  VFP11_LS,	/* Load/store and register-transfer pipeline.  */
  VFP11_DS,	/* Divide/sqrt pipeline.  */
  VFP11_BAD	/* Not a recognised VFP11 instruction.  */
};
8080
8081 /* Return a VFP register number. This is encoded as RX:X for single-precision
8082 registers, or X:RX for double-precision registers, where RX is the group of
8083 four bits in the instruction encoding and X is the single extension bit.
8084 RX and X fields are specified using their lowest (starting) bit. The return
8085 value is:
8086
8087 0...31: single-precision registers s0...s31
8088 32...63: double-precision registers d0...d31.
8089
8090 Although X should be zero for VFP11 (encoding d0...d15 only), we might
8091 encounter VFP3 instructions, so we allow the full range for DP registers. */
8092
8093 static unsigned int
8094 bfd_arm_vfp11_regno (unsigned int insn, bfd_boolean is_double, unsigned int rx,
8095 unsigned int x)
8096 {
8097 if (is_double)
8098 return (((insn >> rx) & 0xf) | (((insn >> x) & 1) << 4)) + 32;
8099 else
8100 return (((insn >> rx) & 0xf) << 1) | ((insn >> x) & 1);
8101 }
8102
8103 /* Set bits in *WMASK according to a register number REG as encoded by
8104 bfd_arm_vfp11_regno(). Ignore d16-d31. */
8105
static void
bfd_arm_vfp11_write_mask (unsigned int *wmask, unsigned int reg)
{
  if (reg < 32)
    /* Single-precision register: one bit per register.  Use an unsigned
       constant so the shift for s31 (1 << 31) does not overflow a signed
       int, which would be undefined behaviour.  */
    *wmask |= 1u << reg;
  else if (reg < 48)
    /* Double-precision register d0-d15: mark both component SP bits.
       Likewise, 3u keeps the shift for d15 (3 << 30) well defined.  */
    *wmask |= 3u << ((reg - 32) * 2);
}
8114
8115 /* Return TRUE if WMASK overwrites anything in REGS. */
8116
8117 static bfd_boolean
8118 bfd_arm_vfp11_antidependency (unsigned int wmask, int *regs, int numregs)
8119 {
8120 int i;
8121
8122 for (i = 0; i < numregs; i++)
8123 {
8124 unsigned int reg = regs[i];
8125
8126 if (reg < 32 && (wmask & (1 << reg)) != 0)
8127 return TRUE;
8128
8129 reg -= 32;
8130
8131 if (reg >= 16)
8132 continue;
8133
8134 if ((wmask & (3 << (reg * 2))) != 0)
8135 return TRUE;
8136 }
8137
8138 return FALSE;
8139 }
8140
/* In this function, we're interested in two things: finding input registers
   for VFP data-processing instructions, and finding the set of registers which
   arbitrary VFP instructions may write to.  We use a 32-bit unsigned int to
   hold the written set, so FLDM etc. are easy to deal with (we're only
   interested in 32 SP registers or 16 DP registers, due to the VFP version
   implemented by the chip in question).  DP registers are marked by setting
   both SP registers in the write mask.  */
8148
/* Decode the ARM-mode instruction INSN as a VFP11 instruction.  Returns the
   VFP11 pipeline it is issued to (or VFP11_BAD if it is not VFP11).  Sets
   bits in *DESTMASK for every register the instruction writes (encoded as by
   bfd_arm_vfp11_write_mask), and for data-processing instructions stores the
   input register numbers in REGS[0..*NUMREGS-1].  */

static enum bfd_arm_vfp11_pipe
bfd_arm_vfp11_insn_decode (unsigned int insn, unsigned int *destmask, int *regs,
			   int *numregs)
{
  enum bfd_arm_vfp11_pipe vpipe = VFP11_BAD;
  /* Bits 11:8 == 0xb selects double-precision operands.  */
  bfd_boolean is_double = ((insn & 0xf00) == 0xb00) ? 1 : 0;

  if ((insn & 0x0f000e10) == 0x0e000a00)  /* A data-processing insn.  */
    {
      unsigned int pqrs;
      unsigned int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
      unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);

      /* Gather the p, q, r, s opcode bits into a single selector.  */
      pqrs = ((insn & 0x00800000) >> 20)
	     | ((insn & 0x00300000) >> 19)
	     | ((insn & 0x00000040) >> 6);

      switch (pqrs)
	{
	case 0: /* fmac[sd].  */
	case 1: /* fnmac[sd].  */
	case 2: /* fmsc[sd].  */
	case 3: /* fnmsc[sd].  */
	  /* Multiply-accumulate: reads Fd, Fn and Fm; writes Fd.  */
	  vpipe = VFP11_FMAC;
	  bfd_arm_vfp11_write_mask (destmask, fd);
	  regs[0] = fd;
	  regs[1] = bfd_arm_vfp11_regno (insn, is_double, 16, 7);  /* Fn.  */
	  regs[2] = fm;
	  *numregs = 3;
	  break;

	case 4: /* fmul[sd].  */
	case 5: /* fnmul[sd].  */
	case 6: /* fadd[sd].  */
	case 7: /* fsub[sd].  */
	  vpipe = VFP11_FMAC;
	  goto vfp_binop;

	case 8: /* fdiv[sd].  */
	  vpipe = VFP11_DS;
	  vfp_binop:
	  /* Two-input arithmetic: reads Fn and Fm; writes Fd.  */
	  bfd_arm_vfp11_write_mask (destmask, fd);
	  regs[0] = bfd_arm_vfp11_regno (insn, is_double, 16, 7); /* Fn.  */
	  regs[1] = fm;
	  *numregs = 2;
	  break;

	case 15: /* extended opcode.  */
	  {
	    /* The extension opcode lives in bits 19:16 and 7.  */
	    unsigned int extn = ((insn >> 15) & 0x1e)
				| ((insn >> 7) & 1);

	    switch (extn)
	      {
	      case 0: /* fcpy[sd].  */
	      case 1: /* fabs[sd].  */
	      case 2: /* fneg[sd].  */
	      case 8: /* fcmp[sd].  */
	      case 9: /* fcmpe[sd].  */
	      case 10: /* fcmpz[sd].  */
	      case 11: /* fcmpez[sd].  */
	      case 16: /* fuito[sd].  */
	      case 17: /* fsito[sd].  */
	      case 24: /* ftoui[sd].  */
	      case 25: /* ftouiz[sd].  */
	      case 26: /* ftosi[sd].  */
	      case 27: /* ftosiz[sd].  */
		/* These instructions will not bounce due to underflow.  */
		*numregs = 0;
		vpipe = VFP11_FMAC;
		break;

	      case 3: /* fsqrt[sd].  */
		/* fsqrt cannot underflow, but it can (perhaps) overwrite
		   registers to cause the erratum in previous instructions.  */
		bfd_arm_vfp11_write_mask (destmask, fd);
		vpipe = VFP11_DS;
		break;

	      case 15: /* fcvt{ds,sd}.  */
		{
		  int rnum = 0;

		  bfd_arm_vfp11_write_mask (destmask, fd);

		  /* Only FCVTSD can underflow.  */
		  if ((insn & 0x100) != 0)
		    regs[rnum++] = fm;

		  *numregs = rnum;

		  vpipe = VFP11_FMAC;
		}
		break;

	      default:
		return VFP11_BAD;
	      }
	  }
	  break;

	default:
	  return VFP11_BAD;
	}
    }
  /* Two-register transfer.  */
  else if ((insn & 0x0fe00ed0) == 0x0c400a10)
    {
      unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);

      /* L bit clear: transfer is towards the VFP registers, so Fm (and for
	 SP, Fm+1) are written.  */
      if ((insn & 0x100000) == 0)
	{
	  if (is_double)
	    bfd_arm_vfp11_write_mask (destmask, fm);
	  else
	    {
	      bfd_arm_vfp11_write_mask (destmask, fm);
	      bfd_arm_vfp11_write_mask (destmask, fm + 1);
	    }
	}

      vpipe = VFP11_LS;
    }
  else if ((insn & 0x0e100e00) == 0x0c100a00)  /* A load insn.  */
    {
      int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
      /* Addressing mode bits P, U and W select the load variant.  */
      unsigned int puw = ((insn >> 21) & 0x1) | (((insn >> 23) & 3) << 1);

      switch (puw)
	{
	case 0: /* Two-reg transfer.  We should catch these above.  */
	  abort ();

	case 2: /* fldm[sdx].  */
	case 3:
	case 5:
	  {
	    /* Load-multiple: every register from Fd for the transfer
	       length is written.  */
	    unsigned int i, offset = insn & 0xff;

	    if (is_double)
	      offset >>= 1;

	    for (i = fd; i < fd + offset; i++)
	      bfd_arm_vfp11_write_mask (destmask, i);
	  }
	  break;

	case 4: /* fld[sd].  */
	case 6:
	  bfd_arm_vfp11_write_mask (destmask, fd);
	  break;

	default:
	  return VFP11_BAD;
	}

      vpipe = VFP11_LS;
    }
  /* Single-register transfer.  Note L==0.  */
  else if ((insn & 0x0f100e10) == 0x0e000a10)
    {
      unsigned int opcode = (insn >> 21) & 7;
      unsigned int fn = bfd_arm_vfp11_regno (insn, is_double, 16, 7);

      switch (opcode)
	{
	case 0: /* fmsr/fmdlr.  */
	case 1: /* fmdhr.  */
	  /* Mark fmdhr and fmdlr as writing to the whole of the DP
	     destination register.  I don't know if this is exactly right,
	     but it is the conservative choice.  */
	  bfd_arm_vfp11_write_mask (destmask, fn);
	  break;

	case 7: /* fmxr.  */
	  break;
	}

      vpipe = VFP11_LS;
    }

  return vpipe;
}
8332
8333
8334 static int elf32_arm_compare_mapping (const void * a, const void * b);
8335
8336
8337 /* Look for potentially-troublesome code sequences which might trigger the
8338 VFP11 denormal/antidependency erratum. See, e.g., the ARM1136 errata sheet
8339 (available from ARM) for details of the erratum. A short version is
8340 described in ld.texinfo. */
8341
8342 bfd_boolean
8343 bfd_elf32_arm_vfp11_erratum_scan (bfd *abfd, struct bfd_link_info *link_info)
8344 {
8345 asection *sec;
8346 bfd_byte *contents = NULL;
8347 int state = 0;
8348 int regs[3], numregs = 0;
8349 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
8350 int use_vector = (globals->vfp11_fix == BFD_ARM_VFP11_FIX_VECTOR);
8351
8352 if (globals == NULL)
8353 return FALSE;
8354
8355 /* We use a simple FSM to match troublesome VFP11 instruction sequences.
8356 The states transition as follows:
8357
8358 0 -> 1 (vector) or 0 -> 2 (scalar)
8359 A VFP FMAC-pipeline instruction has been seen. Fill
8360 regs[0]..regs[numregs-1] with its input operands. Remember this
8361 instruction in 'first_fmac'.
8362
8363 1 -> 2
8364 Any instruction, except for a VFP instruction which overwrites
8365 regs[*].
8366
8367 1 -> 3 [ -> 0 ] or
8368 2 -> 3 [ -> 0 ]
8369 A VFP instruction has been seen which overwrites any of regs[*].
8370 We must make a veneer! Reset state to 0 before examining next
8371 instruction.
8372
8373 2 -> 0
8374 If we fail to match anything in state 2, reset to state 0 and reset
8375 the instruction pointer to the instruction after 'first_fmac'.
8376
8377 If the VFP11 vector mode is in use, there must be at least two unrelated
8378 instructions between anti-dependent VFP11 instructions to properly avoid
8379 triggering the erratum, hence the use of the extra state 1. */
8380
8381 /* If we are only performing a partial link do not bother
8382 to construct any glue. */
8383 if (bfd_link_relocatable (link_info))
8384 return TRUE;
8385
8386 /* Skip if this bfd does not correspond to an ELF image. */
8387 if (! is_arm_elf (abfd))
8388 return TRUE;
8389
8390 /* We should have chosen a fix type by the time we get here. */
8391 BFD_ASSERT (globals->vfp11_fix != BFD_ARM_VFP11_FIX_DEFAULT);
8392
8393 if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_NONE)
8394 return TRUE;
8395
8396 /* Skip this BFD if it corresponds to an executable or dynamic object. */
8397 if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
8398 return TRUE;
8399
8400 for (sec = abfd->sections; sec != NULL; sec = sec->next)
8401 {
8402 unsigned int i, span, first_fmac = 0, veneer_of_insn = 0;
8403 struct _arm_elf_section_data *sec_data;
8404
8405 /* If we don't have executable progbits, we're not interested in this
8406 section. Also skip if section is to be excluded. */
8407 if (elf_section_type (sec) != SHT_PROGBITS
8408 || (elf_section_flags (sec) & SHF_EXECINSTR) == 0
8409 || (sec->flags & SEC_EXCLUDE) != 0
8410 || sec->sec_info_type == SEC_INFO_TYPE_JUST_SYMS
8411 || sec->output_section == bfd_abs_section_ptr
8412 || strcmp (sec->name, VFP11_ERRATUM_VENEER_SECTION_NAME) == 0)
8413 continue;
8414
8415 sec_data = elf32_arm_section_data (sec);
8416
8417 if (sec_data->mapcount == 0)
8418 continue;
8419
8420 if (elf_section_data (sec)->this_hdr.contents != NULL)
8421 contents = elf_section_data (sec)->this_hdr.contents;
8422 else if (! bfd_malloc_and_get_section (abfd, sec, &contents))
8423 goto error_return;
8424
8425 qsort (sec_data->map, sec_data->mapcount, sizeof (elf32_arm_section_map),
8426 elf32_arm_compare_mapping);
8427
8428 for (span = 0; span < sec_data->mapcount; span++)
8429 {
8430 unsigned int span_start = sec_data->map[span].vma;
8431 unsigned int span_end = (span == sec_data->mapcount - 1)
8432 ? sec->size : sec_data->map[span + 1].vma;
8433 char span_type = sec_data->map[span].type;
8434
8435 /* FIXME: Only ARM mode is supported at present. We may need to
8436 support Thumb-2 mode also at some point. */
8437 if (span_type != 'a')
8438 continue;
8439
8440 for (i = span_start; i < span_end;)
8441 {
8442 unsigned int next_i = i + 4;
8443 unsigned int insn = bfd_big_endian (abfd)
8444 ? (contents[i] << 24)
8445 | (contents[i + 1] << 16)
8446 | (contents[i + 2] << 8)
8447 | contents[i + 3]
8448 : (contents[i + 3] << 24)
8449 | (contents[i + 2] << 16)
8450 | (contents[i + 1] << 8)
8451 | contents[i];
8452 unsigned int writemask = 0;
8453 enum bfd_arm_vfp11_pipe vpipe;
8454
8455 switch (state)
8456 {
8457 case 0:
8458 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask, regs,
8459 &numregs);
8460 /* I'm assuming the VFP11 erratum can trigger with denorm
8461 operands on either the FMAC or the DS pipeline. This might
8462 lead to slightly overenthusiastic veneer insertion. */
8463 if (vpipe == VFP11_FMAC || vpipe == VFP11_DS)
8464 {
8465 state = use_vector ? 1 : 2;
8466 first_fmac = i;
8467 veneer_of_insn = insn;
8468 }
8469 break;
8470
8471 case 1:
8472 {
8473 int other_regs[3], other_numregs;
8474 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
8475 other_regs,
8476 &other_numregs);
8477 if (vpipe != VFP11_BAD
8478 && bfd_arm_vfp11_antidependency (writemask, regs,
8479 numregs))
8480 state = 3;
8481 else
8482 state = 2;
8483 }
8484 break;
8485
8486 case 2:
8487 {
8488 int other_regs[3], other_numregs;
8489 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
8490 other_regs,
8491 &other_numregs);
8492 if (vpipe != VFP11_BAD
8493 && bfd_arm_vfp11_antidependency (writemask, regs,
8494 numregs))
8495 state = 3;
8496 else
8497 {
8498 state = 0;
8499 next_i = first_fmac + 4;
8500 }
8501 }
8502 break;
8503
8504 case 3:
8505 abort (); /* Should be unreachable. */
8506 }
8507
8508 if (state == 3)
8509 {
8510 elf32_vfp11_erratum_list *newerr =(elf32_vfp11_erratum_list *)
8511 bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
8512
8513 elf32_arm_section_data (sec)->erratumcount += 1;
8514
8515 newerr->u.b.vfp_insn = veneer_of_insn;
8516
8517 switch (span_type)
8518 {
8519 case 'a':
8520 newerr->type = VFP11_ERRATUM_BRANCH_TO_ARM_VENEER;
8521 break;
8522
8523 default:
8524 abort ();
8525 }
8526
8527 record_vfp11_erratum_veneer (link_info, newerr, abfd, sec,
8528 first_fmac);
8529
8530 newerr->vma = -1;
8531
8532 newerr->next = sec_data->erratumlist;
8533 sec_data->erratumlist = newerr;
8534
8535 state = 0;
8536 }
8537
8538 i = next_i;
8539 }
8540 }
8541
8542 if (contents != NULL
8543 && elf_section_data (sec)->this_hdr.contents != contents)
8544 free (contents);
8545 contents = NULL;
8546 }
8547
8548 return TRUE;
8549
8550 error_return:
8551 if (contents != NULL
8552 && elf_section_data (sec)->this_hdr.contents != contents)
8553 free (contents);
8554
8555 return FALSE;
8556 }
8557
8558 /* Find virtual-memory addresses for VFP11 erratum veneers and return locations
8559 after sections have been laid out, using specially-named symbols. */
8560
8561 void
8562 bfd_elf32_arm_vfp11_fix_veneer_locations (bfd *abfd,
8563 struct bfd_link_info *link_info)
8564 {
8565 asection *sec;
8566 struct elf32_arm_link_hash_table *globals;
8567 char *tmp_name;
8568
8569 if (bfd_link_relocatable (link_info))
8570 return;
8571
8572 /* Skip if this bfd does not correspond to an ELF image. */
8573 if (! is_arm_elf (abfd))
8574 return;
8575
8576 globals = elf32_arm_hash_table (link_info);
8577 if (globals == NULL)
8578 return;
8579
8580 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
8581 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
8582
8583 for (sec = abfd->sections; sec != NULL; sec = sec->next)
8584 {
8585 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
8586 elf32_vfp11_erratum_list *errnode = sec_data->erratumlist;
8587
8588 for (; errnode != NULL; errnode = errnode->next)
8589 {
8590 struct elf_link_hash_entry *myh;
8591 bfd_vma vma;
8592
8593 switch (errnode->type)
8594 {
8595 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
8596 case VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER:
8597 /* Find veneer symbol. */
8598 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
8599 errnode->u.b.veneer->u.v.id);
8600
8601 myh = elf_link_hash_lookup
8602 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
8603
8604 if (myh == NULL)
8605 _bfd_error_handler (_("%pB: unable to find %s veneer `%s'"),
8606 abfd, "VFP11", tmp_name);
8607
8608 vma = myh->root.u.def.section->output_section->vma
8609 + myh->root.u.def.section->output_offset
8610 + myh->root.u.def.value;
8611
8612 errnode->u.b.veneer->vma = vma;
8613 break;
8614
8615 case VFP11_ERRATUM_ARM_VENEER:
8616 case VFP11_ERRATUM_THUMB_VENEER:
8617 /* Find return location. */
8618 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
8619 errnode->u.v.id);
8620
8621 myh = elf_link_hash_lookup
8622 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
8623
8624 if (myh == NULL)
8625 _bfd_error_handler (_("%pB: unable to find %s veneer `%s'"),
8626 abfd, "VFP11", tmp_name);
8627
8628 vma = myh->root.u.def.section->output_section->vma
8629 + myh->root.u.def.section->output_offset
8630 + myh->root.u.def.value;
8631
8632 errnode->u.v.branch->vma = vma;
8633 break;
8634
8635 default:
8636 abort ();
8637 }
8638 }
8639 }
8640
8641 free (tmp_name);
8642 }
8643
8644 /* Find virtual-memory addresses for STM32L4XX erratum veneers and
8645 return locations after sections have been laid out, using
8646 specially-named symbols. */
8647
8648 void
8649 bfd_elf32_arm_stm32l4xx_fix_veneer_locations (bfd *abfd,
8650 struct bfd_link_info *link_info)
8651 {
8652 asection *sec;
8653 struct elf32_arm_link_hash_table *globals;
8654 char *tmp_name;
8655
8656 if (bfd_link_relocatable (link_info))
8657 return;
8658
8659 /* Skip if this bfd does not correspond to an ELF image. */
8660 if (! is_arm_elf (abfd))
8661 return;
8662
8663 globals = elf32_arm_hash_table (link_info);
8664 if (globals == NULL)
8665 return;
8666
8667 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
8668 (STM32L4XX_ERRATUM_VENEER_ENTRY_NAME) + 10);
8669
8670 for (sec = abfd->sections; sec != NULL; sec = sec->next)
8671 {
8672 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
8673 elf32_stm32l4xx_erratum_list *errnode = sec_data->stm32l4xx_erratumlist;
8674
8675 for (; errnode != NULL; errnode = errnode->next)
8676 {
8677 struct elf_link_hash_entry *myh;
8678 bfd_vma vma;
8679
8680 switch (errnode->type)
8681 {
8682 case STM32L4XX_ERRATUM_BRANCH_TO_VENEER:
8683 /* Find veneer symbol. */
8684 sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME,
8685 errnode->u.b.veneer->u.v.id);
8686
8687 myh = elf_link_hash_lookup
8688 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
8689
8690 if (myh == NULL)
8691 _bfd_error_handler (_("%pB: unable to find %s veneer `%s'"),
8692 abfd, "STM32L4XX", tmp_name);
8693
8694 vma = myh->root.u.def.section->output_section->vma
8695 + myh->root.u.def.section->output_offset
8696 + myh->root.u.def.value;
8697
8698 errnode->u.b.veneer->vma = vma;
8699 break;
8700
8701 case STM32L4XX_ERRATUM_VENEER:
8702 /* Find return location. */
8703 sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME "_r",
8704 errnode->u.v.id);
8705
8706 myh = elf_link_hash_lookup
8707 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
8708
8709 if (myh == NULL)
8710 _bfd_error_handler (_("%pB: unable to find %s veneer `%s'"),
8711 abfd, "STM32L4XX", tmp_name);
8712
8713 vma = myh->root.u.def.section->output_section->vma
8714 + myh->root.u.def.section->output_offset
8715 + myh->root.u.def.value;
8716
8717 errnode->u.v.branch->vma = vma;
8718 break;
8719
8720 default:
8721 abort ();
8722 }
8723 }
8724 }
8725
8726 free (tmp_name);
8727 }
8728
8729 static inline bfd_boolean
8730 is_thumb2_ldmia (const insn32 insn)
8731 {
8732 /* Encoding T2: LDM<c>.W <Rn>{!},<registers>
8733 1110 - 1000 - 10W1 - rrrr - PM (0) l - llll - llll - llll. */
8734 return (insn & 0xffd02000) == 0xe8900000;
8735 }
8736
8737 static inline bfd_boolean
8738 is_thumb2_ldmdb (const insn32 insn)
8739 {
8740 /* Encoding T1: LDMDB<c> <Rn>{!},<registers>
8741 1110 - 1001 - 00W1 - rrrr - PM (0) l - llll - llll - llll. */
8742 return (insn & 0xffd02000) == 0xe9100000;
8743 }
8744
8745 static inline bfd_boolean
8746 is_thumb2_vldm (const insn32 insn)
8747 {
8748 /* A6.5 Extension register load or store instruction
8749 A7.7.229
8750 We look for SP 32-bit and DP 64-bit registers.
8751 Encoding T1 VLDM{mode}<c> <Rn>{!}, <list>
8752 <list> is consecutive 64-bit registers
8753 1110 - 110P - UDW1 - rrrr - vvvv - 1011 - iiii - iiii
8754 Encoding T2 VLDM{mode}<c> <Rn>{!}, <list>
8755 <list> is consecutive 32-bit registers
8756 1110 - 110P - UDW1 - rrrr - vvvv - 1010 - iiii - iiii
8757 if P==0 && U==1 && W==1 && Rn=1101 VPOP
8758 if PUW=010 || PUW=011 || PUW=101 VLDM. */
8759 return
8760 (((insn & 0xfe100f00) == 0xec100b00) ||
8761 ((insn & 0xfe100f00) == 0xec100a00))
8762 && /* (IA without !). */
8763 (((((insn << 7) >> 28) & 0xd) == 0x4)
8764 /* (IA with !), includes VPOP (when reg number is SP). */
8765 || ((((insn << 7) >> 28) & 0xd) == 0x5)
8766 /* (DB with !). */
8767 || ((((insn << 7) >> 28) & 0xd) == 0x9));
8768 }
8769
8770 /* STM STM32L4XX erratum : This function assumes that it receives an LDM or
8771 VLDM opcode and:
8772 - computes the number and the mode of memory accesses
8773 - decides if the replacement should be done:
8774 . replaces only if > 8-word accesses
8775 . or (testing purposes only) replaces all accesses. */
8776
8777 static bfd_boolean
8778 stm32l4xx_need_create_replacing_stub (const insn32 insn,
8779 bfd_arm_stm32l4xx_fix stm32l4xx_fix)
8780 {
8781 int nb_words = 0;
8782
8783 /* The field encoding the register list is the same for both LDMIA
8784 and LDMDB encodings. */
8785 if (is_thumb2_ldmia (insn) || is_thumb2_ldmdb (insn))
8786 nb_words = elf32_arm_popcount (insn & 0x0000ffff);
8787 else if (is_thumb2_vldm (insn))
8788 nb_words = (insn & 0xff);
8789
8790 /* DEFAULT mode accounts for the real bug condition situation,
8791 ALL mode inserts stubs for each LDM/VLDM instruction (testing). */
8792 return
8793 (stm32l4xx_fix == BFD_ARM_STM32L4XX_FIX_DEFAULT) ? nb_words > 8 :
8794 (stm32l4xx_fix == BFD_ARM_STM32L4XX_FIX_ALL) ? TRUE : FALSE;
8795 }
8796
8797 /* Look for potentially-troublesome code sequences which might trigger
8798 the STM STM32L4XX erratum. */
8799
8800 bfd_boolean
8801 bfd_elf32_arm_stm32l4xx_erratum_scan (bfd *abfd,
8802 struct bfd_link_info *link_info)
8803 {
8804 asection *sec;
8805 bfd_byte *contents = NULL;
8806 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
8807
8808 if (globals == NULL)
8809 return FALSE;
8810
8811 /* If we are only performing a partial link do not bother
8812 to construct any glue. */
8813 if (bfd_link_relocatable (link_info))
8814 return TRUE;
8815
8816 /* Skip if this bfd does not correspond to an ELF image. */
8817 if (! is_arm_elf (abfd))
8818 return TRUE;
8819
8820 if (globals->stm32l4xx_fix == BFD_ARM_STM32L4XX_FIX_NONE)
8821 return TRUE;
8822
8823 /* Skip this BFD if it corresponds to an executable or dynamic object. */
8824 if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
8825 return TRUE;
8826
8827 for (sec = abfd->sections; sec != NULL; sec = sec->next)
8828 {
8829 unsigned int i, span;
8830 struct _arm_elf_section_data *sec_data;
8831
8832 /* If we don't have executable progbits, we're not interested in this
8833 section. Also skip if section is to be excluded. */
8834 if (elf_section_type (sec) != SHT_PROGBITS
8835 || (elf_section_flags (sec) & SHF_EXECINSTR) == 0
8836 || (sec->flags & SEC_EXCLUDE) != 0
8837 || sec->sec_info_type == SEC_INFO_TYPE_JUST_SYMS
8838 || sec->output_section == bfd_abs_section_ptr
8839 || strcmp (sec->name, STM32L4XX_ERRATUM_VENEER_SECTION_NAME) == 0)
8840 continue;
8841
8842 sec_data = elf32_arm_section_data (sec);
8843
8844 if (sec_data->mapcount == 0)
8845 continue;
8846
8847 if (elf_section_data (sec)->this_hdr.contents != NULL)
8848 contents = elf_section_data (sec)->this_hdr.contents;
8849 else if (! bfd_malloc_and_get_section (abfd, sec, &contents))
8850 goto error_return;
8851
8852 qsort (sec_data->map, sec_data->mapcount, sizeof (elf32_arm_section_map),
8853 elf32_arm_compare_mapping);
8854
8855 for (span = 0; span < sec_data->mapcount; span++)
8856 {
8857 unsigned int span_start = sec_data->map[span].vma;
8858 unsigned int span_end = (span == sec_data->mapcount - 1)
8859 ? sec->size : sec_data->map[span + 1].vma;
8860 char span_type = sec_data->map[span].type;
8861 int itblock_current_pos = 0;
8862
8863 /* Only Thumb2 mode need be supported with this CM4 specific
8864 code, we should not encounter any arm mode eg span_type
8865 != 'a'. */
8866 if (span_type != 't')
8867 continue;
8868
8869 for (i = span_start; i < span_end;)
8870 {
8871 unsigned int insn = bfd_get_16 (abfd, &contents[i]);
8872 bfd_boolean insn_32bit = FALSE;
8873 bfd_boolean is_ldm = FALSE;
8874 bfd_boolean is_vldm = FALSE;
8875 bfd_boolean is_not_last_in_it_block = FALSE;
8876
8877 /* The first 16-bits of all 32-bit thumb2 instructions start
8878 with opcode[15..13]=0b111 and the encoded op1 can be anything
8879 except opcode[12..11]!=0b00.
8880 See 32-bit Thumb instruction encoding. */
8881 if ((insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000)
8882 insn_32bit = TRUE;
8883
8884 /* Compute the predicate that tells if the instruction
8885 is concerned by the IT block
8886 - Creates an error if there is a ldm that is not
8887 last in the IT block thus cannot be replaced
8888 - Otherwise we can create a branch at the end of the
8889 IT block, it will be controlled naturally by IT
8890 with the proper pseudo-predicate
8891 - So the only interesting predicate is the one that
8892 tells that we are not on the last item of an IT
8893 block. */
8894 if (itblock_current_pos != 0)
8895 is_not_last_in_it_block = !!--itblock_current_pos;
8896
8897 if (insn_32bit)
8898 {
8899 /* Load the rest of the insn (in manual-friendly order). */
8900 insn = (insn << 16) | bfd_get_16 (abfd, &contents[i + 2]);
8901 is_ldm = is_thumb2_ldmia (insn) || is_thumb2_ldmdb (insn);
8902 is_vldm = is_thumb2_vldm (insn);
8903
8904 /* Veneers are created for (v)ldm depending on
8905 option flags and memory accesses conditions; but
8906 if the instruction is not the last instruction of
8907 an IT block, we cannot create a jump there, so we
8908 bail out. */
8909 if ((is_ldm || is_vldm)
8910 && stm32l4xx_need_create_replacing_stub
8911 (insn, globals->stm32l4xx_fix))
8912 {
8913 if (is_not_last_in_it_block)
8914 {
8915 _bfd_error_handler
8916 /* xgettext:c-format */
8917 (_("%pB(%pA+%#x): error: multiple load detected"
8918 " in non-last IT block instruction:"
8919 " STM32L4XX veneer cannot be generated; "
8920 "use gcc option -mrestrict-it to generate"
8921 " only one instruction per IT block"),
8922 abfd, sec, i);
8923 }
8924 else
8925 {
8926 elf32_stm32l4xx_erratum_list *newerr =
8927 (elf32_stm32l4xx_erratum_list *)
8928 bfd_zmalloc
8929 (sizeof (elf32_stm32l4xx_erratum_list));
8930
8931 elf32_arm_section_data (sec)
8932 ->stm32l4xx_erratumcount += 1;
8933 newerr->u.b.insn = insn;
8934 /* We create only thumb branches. */
8935 newerr->type =
8936 STM32L4XX_ERRATUM_BRANCH_TO_VENEER;
8937 record_stm32l4xx_erratum_veneer
8938 (link_info, newerr, abfd, sec,
8939 i,
8940 is_ldm ?
8941 STM32L4XX_ERRATUM_LDM_VENEER_SIZE:
8942 STM32L4XX_ERRATUM_VLDM_VENEER_SIZE);
8943 newerr->vma = -1;
8944 newerr->next = sec_data->stm32l4xx_erratumlist;
8945 sec_data->stm32l4xx_erratumlist = newerr;
8946 }
8947 }
8948 }
8949 else
8950 {
8951 /* A7.7.37 IT p208
8952 IT blocks are only encoded in T1
8953 Encoding T1: IT{x{y{z}}} <firstcond>
8954 1 0 1 1 - 1 1 1 1 - firstcond - mask
8955 if mask = '0000' then see 'related encodings'
8956 We don't deal with UNPREDICTABLE, just ignore these.
8957 There can be no nested IT blocks so an IT block
8958 is naturally a new one for which it is worth
8959 computing its size. */
8960 bfd_boolean is_newitblock = ((insn & 0xff00) == 0xbf00)
8961 && ((insn & 0x000f) != 0x0000);
8962 /* If we have a new IT block we compute its size. */
8963 if (is_newitblock)
8964 {
8965 /* Compute the number of instructions controlled
8966 by the IT block, it will be used to decide
8967 whether we are inside an IT block or not. */
8968 unsigned int mask = insn & 0x000f;
8969 itblock_current_pos = 4 - ctz (mask);
8970 }
8971 }
8972
8973 i += insn_32bit ? 4 : 2;
8974 }
8975 }
8976
8977 if (contents != NULL
8978 && elf_section_data (sec)->this_hdr.contents != contents)
8979 free (contents);
8980 contents = NULL;
8981 }
8982
8983 return TRUE;
8984
8985 error_return:
8986 if (contents != NULL
8987 && elf_section_data (sec)->this_hdr.contents != contents)
8988 free (contents);
8989
8990 return FALSE;
8991 }
8992
8993 /* Set target relocation values needed during linking. */
8994
8995 void
8996 bfd_elf32_arm_set_target_params (struct bfd *output_bfd,
8997 struct bfd_link_info *link_info,
8998 struct elf32_arm_params *params)
8999 {
9000 struct elf32_arm_link_hash_table *globals;
9001
9002 globals = elf32_arm_hash_table (link_info);
9003 if (globals == NULL)
9004 return;
9005
9006 globals->target1_is_rel = params->target1_is_rel;
9007 if (globals->fdpic_p)
9008 globals->target2_reloc = R_ARM_GOT32;
9009 else if (strcmp (params->target2_type, "rel") == 0)
9010 globals->target2_reloc = R_ARM_REL32;
9011 else if (strcmp (params->target2_type, "abs") == 0)
9012 globals->target2_reloc = R_ARM_ABS32;
9013 else if (strcmp (params->target2_type, "got-rel") == 0)
9014 globals->target2_reloc = R_ARM_GOT_PREL;
9015 else
9016 {
9017 _bfd_error_handler (_("invalid TARGET2 relocation type '%s'"),
9018 params->target2_type);
9019 }
9020 globals->fix_v4bx = params->fix_v4bx;
9021 globals->use_blx |= params->use_blx;
9022 globals->vfp11_fix = params->vfp11_denorm_fix;
9023 globals->stm32l4xx_fix = params->stm32l4xx_fix;
9024 if (globals->fdpic_p)
9025 globals->pic_veneer = 1;
9026 else
9027 globals->pic_veneer = params->pic_veneer;
9028 globals->fix_cortex_a8 = params->fix_cortex_a8;
9029 globals->fix_arm1176 = params->fix_arm1176;
9030 globals->cmse_implib = params->cmse_implib;
9031 globals->in_implib_bfd = params->in_implib_bfd;
9032
9033 BFD_ASSERT (is_arm_elf (output_bfd));
9034 elf_arm_tdata (output_bfd)->no_enum_size_warning
9035 = params->no_enum_size_warning;
9036 elf_arm_tdata (output_bfd)->no_wchar_size_warning
9037 = params->no_wchar_size_warning;
9038 }
9039
9040 /* Replace the target offset of a Thumb bl or b.w instruction. */
9041
9042 static void
9043 insert_thumb_branch (bfd *abfd, long int offset, bfd_byte *insn)
9044 {
9045 bfd_vma upper;
9046 bfd_vma lower;
9047 int reloc_sign;
9048
9049 BFD_ASSERT ((offset & 1) == 0);
9050
9051 upper = bfd_get_16 (abfd, insn);
9052 lower = bfd_get_16 (abfd, insn + 2);
9053 reloc_sign = (offset < 0) ? 1 : 0;
9054 upper = (upper & ~(bfd_vma) 0x7ff)
9055 | ((offset >> 12) & 0x3ff)
9056 | (reloc_sign << 10);
9057 lower = (lower & ~(bfd_vma) 0x2fff)
9058 | (((!((offset >> 23) & 1)) ^ reloc_sign) << 13)
9059 | (((!((offset >> 22) & 1)) ^ reloc_sign) << 11)
9060 | ((offset >> 1) & 0x7ff);
9061 bfd_put_16 (abfd, upper, insn);
9062 bfd_put_16 (abfd, lower, insn + 2);
9063 }
9064
/* Thumb code calling an ARM function.  Ensure a Thumb->ARM
   interworking stub for symbol NAME exists in the Thumb-to-ARM glue
   section, writing it out on first use, and redirect the Thumb BL at
   HIT_DATA to it.

   INFO is the link in progress.  INPUT_BFD/INPUT_SECTION/OFFSET locate
   the BL being relocated, SYM_SEC is the section containing the target
   symbol, ADDEND is the relocation addend and VAL the resolved target
   address.  Returns TRUE on success; FALSE if the glue symbol is
   missing (with *ERROR_MESSAGE set by find_thumb_glue) or if SYM_SEC's
   owner was not compiled for interworking.  */

static int
elf32_thumb_to_arm_stub (struct bfd_link_info * info,
			 const char * name,
			 bfd * input_bfd,
			 bfd * output_bfd,
			 asection * input_section,
			 bfd_byte * hit_data,
			 asection * sym_sec,
			 bfd_vma offset,
			 bfd_signed_vma addend,
			 bfd_vma val,
			 char **error_message)
{
  asection * s = 0;
  bfd_vma my_offset;
  long int ret_offset;
  struct elf_link_hash_entry * myh;
  struct elf32_arm_link_hash_table * globals;

  myh = find_thumb_glue (info, name, error_message);
  if (myh == NULL)
    return FALSE;

  globals = elf32_arm_hash_table (info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  my_offset = myh->root.u.def.value;

  s = bfd_get_linker_section (globals->bfd_of_glue_owner,
			      THUMB2ARM_GLUE_SECTION_NAME);

  BFD_ASSERT (s != NULL);
  BFD_ASSERT (s->contents != NULL);
  BFD_ASSERT (s->output_section != NULL);

  /* The low bit of the glue symbol's value marks a stub that has not
     been written out yet.  */
  if ((my_offset & 0x01) == 0x01)
    {
      if (sym_sec != NULL
	  && sym_sec->owner != NULL
	  && !INTERWORK_FLAG (sym_sec->owner))
	{
	  _bfd_error_handler
	    (_("%pB(%s): warning: interworking not enabled;"
	       " first occurrence: %pB: %s call to %s"),
	     sym_sec->owner, name, input_bfd, "Thumb", "ARM");

	  return FALSE;
	}

      --my_offset;
      myh->root.u.def.value = my_offset;

      /* Stub layout: two Thumb halfwords (t2a1/t2a2) followed at
	 offset 4 by an ARM-mode branch to the real destination.  */
      put_thumb_insn (globals, output_bfd, (bfd_vma) t2a1_bx_pc_insn,
		      s->contents + my_offset);

      put_thumb_insn (globals, output_bfd, (bfd_vma) t2a2_noop_insn,
		      s->contents + my_offset + 2);

      ret_offset =
	/* Address of destination of the stub.  */
	((bfd_signed_vma) val)
	- ((bfd_signed_vma)
	   /* Offset from the start of the current section
	      to the start of the stubs.  */
	   (s->output_offset
	    /* Offset of the start of this stub from the start of the stubs.  */
	    + my_offset
	    /* Address of the start of the current section.  */
	    + s->output_section->vma)
	   /* The branch instruction is 4 bytes into the stub.  */
	   + 4
	   /* ARM branches work from the pc of the instruction + 8.  */
	   + 8);

      put_arm_insn (globals, output_bfd,
		    (bfd_vma) t2a3_b_insn | ((ret_offset >> 2) & 0x00FFFFFF),
		    s->contents + my_offset + 4);
    }

  BFD_ASSERT (my_offset <= globals->thumb_glue_size);

  /* Now go back and fix up the original BL insn to point to here.  */
  ret_offset =
    /* Address of where the stub is located.  */
    (s->output_section->vma + s->output_offset + my_offset)
    /* Address of where the BL is located.  */
    - (input_section->output_section->vma + input_section->output_offset
       + offset)
    /* Addend in the relocation.  */
    - addend
    /* Biassing for PC-relative addressing.  */
    - 8;

  insert_thumb_branch (input_bfd, ret_offset, hit_data - input_section->vma);

  return TRUE;
}
9165
/* Populate an Arm to Thumb stub.  Returns the stub symbol.

   NAME is the glue symbol's name (looked up via find_arm_glue); VAL is
   the resolved address of the Thumb target in SYM_SEC; S is the
   ARM2THUMB glue section whose contents are written.  Returns NULL if
   the glue symbol cannot be found, with *ERROR_MESSAGE describing the
   problem.  Three stub flavours are emitted: a PC-relative one for
   PIC links, a BLX-based one when BLX is available, and an absolute
   ldr/bx sequence otherwise.  */

static struct elf_link_hash_entry *
elf32_arm_create_thumb_stub (struct bfd_link_info * info,
			     const char * name,
			     bfd * input_bfd,
			     bfd * output_bfd,
			     asection * sym_sec,
			     bfd_vma val,
			     asection * s,
			     char ** error_message)
{
  bfd_vma my_offset;
  long int ret_offset;
  struct elf_link_hash_entry * myh;
  struct elf32_arm_link_hash_table * globals;

  myh = find_arm_glue (info, name, error_message);
  if (myh == NULL)
    return NULL;

  globals = elf32_arm_hash_table (info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  my_offset = myh->root.u.def.value;

  /* The low bit of the glue symbol's value marks a stub that has not
     been written out yet.  */
  if ((my_offset & 0x01) == 0x01)
    {
      if (sym_sec != NULL
	  && sym_sec->owner != NULL
	  && !INTERWORK_FLAG (sym_sec->owner))
	{
	  /* Warn, but still emit the stub below.  */
	  _bfd_error_handler
	    (_("%pB(%s): warning: interworking not enabled;"
	       " first occurrence: %pB: %s call to %s"),
	     sym_sec->owner, name, input_bfd, "ARM", "Thumb");
	}

      --my_offset;
      myh->root.u.def.value = my_offset;

      if (bfd_link_pic (info)
	  || globals->root.is_relocatable_executable
	  || globals->pic_veneer)
	{
	  /* For relocatable objects we can't use absolute addresses,
	     so construct the address from a relative offset.  */
	  /* TODO: If the offset is small it's probably worth
	     constructing the address with adds.  */
	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t1p_ldr_insn,
			s->contents + my_offset);
	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t2p_add_pc_insn,
			s->contents + my_offset + 4);
	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t3p_bx_r12_insn,
			s->contents + my_offset + 8);
	  /* Adjust the offset by 4 for the position of the add,
	     and 8 for the pipeline offset.  */
	  ret_offset = (val - (s->output_offset
			       + s->output_section->vma
			       + my_offset + 12))
		       | 1;
	  bfd_put_32 (output_bfd, ret_offset,
		      s->contents + my_offset + 12);
	}
      else if (globals->use_blx)
	{
	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t1v5_ldr_insn,
			s->contents + my_offset);

	  /* It's a thumb address.  Add the low order bit.  */
	  bfd_put_32 (output_bfd, val | a2t2v5_func_addr_insn,
		      s->contents + my_offset + 4);
	}
      else
	{
	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t1_ldr_insn,
			s->contents + my_offset);

	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t2_bx_r12_insn,
			s->contents + my_offset + 4);

	  /* It's a thumb address.  Add the low order bit.  */
	  bfd_put_32 (output_bfd, val | a2t3_func_addr_insn,
		      s->contents + my_offset + 8);

	  my_offset += 12;
	}
    }

  BFD_ASSERT (my_offset <= globals->arm_glue_size);

  return myh;
}
9260
/* Arm code calling a Thumb function.  Ensure an ARM->Thumb stub for
   NAME exists (creating/populating it via elf32_arm_create_thumb_stub)
   and patch the ARM branch at HIT_DATA so that it targets the stub
   rather than the Thumb destination directly.

   INPUT_SECTION/OFFSET/ADDEND locate and describe the relocated
   branch; VAL is the resolved Thumb target address.  Returns TRUE on
   success, FALSE (with *ERROR_MESSAGE set) on failure.  */

static int
elf32_arm_to_thumb_stub (struct bfd_link_info * info,
			 const char * name,
			 bfd * input_bfd,
			 bfd * output_bfd,
			 asection * input_section,
			 bfd_byte * hit_data,
			 asection * sym_sec,
			 bfd_vma offset,
			 bfd_signed_vma addend,
			 bfd_vma val,
			 char **error_message)
{
  unsigned long int tmp;
  bfd_vma my_offset;
  asection * s;
  long int ret_offset;
  struct elf_link_hash_entry * myh;
  struct elf32_arm_link_hash_table * globals;

  globals = elf32_arm_hash_table (info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  s = bfd_get_linker_section (globals->bfd_of_glue_owner,
			      ARM2THUMB_GLUE_SECTION_NAME);
  BFD_ASSERT (s != NULL);
  BFD_ASSERT (s->contents != NULL);
  BFD_ASSERT (s->output_section != NULL);

  myh = elf32_arm_create_thumb_stub (info, name, input_bfd, output_bfd,
				     sym_sec, val, s, error_message);
  if (!myh)
    return FALSE;

  my_offset = myh->root.u.def.value;
  tmp = bfd_get_32 (input_bfd, hit_data);
  /* Keep only the top byte (condition/opcode) of the original branch;
     the 24-bit offset field is replaced below.  */
  tmp = tmp & 0xFF000000;

  /* Somehow these are both 4 too far, so subtract 8.  */
  ret_offset = (s->output_offset
		+ my_offset
		+ s->output_section->vma
		- (input_section->output_offset
		   + input_section->output_section->vma
		   + offset + addend)
		- 8);

  tmp = tmp | ((ret_offset >> 2) & 0x00FFFFFF);

  bfd_put_32 (output_bfd, (bfd_vma) tmp, hit_data - input_section->vma);

  return TRUE;
}
9317
/* Populate Arm stub for an exported Thumb function.  Traversal
   callback for elf_link_hash_traverse; INF is the link's
   struct bfd_link_info.  Only symbols for which an export glue entry
   was previously allocated (eh->export_glue != NULL) are processed.
   Always returns TRUE so the traversal continues.  */

static bfd_boolean
elf32_arm_to_thumb_export_stub (struct elf_link_hash_entry *h, void * inf)
{
  struct bfd_link_info * info = (struct bfd_link_info *) inf;
  asection * s;
  struct elf_link_hash_entry * myh;
  struct elf32_arm_link_hash_entry *eh;
  struct elf32_arm_link_hash_table * globals;
  asection *sec;
  bfd_vma val;
  char *error_message;

  eh = elf32_arm_hash_entry (h);
  /* Allocate stubs for exported Thumb functions on v4t.  */
  if (eh->export_glue == NULL)
    return TRUE;

  globals = elf32_arm_hash_table (info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  s = bfd_get_linker_section (globals->bfd_of_glue_owner,
			      ARM2THUMB_GLUE_SECTION_NAME);
  BFD_ASSERT (s != NULL);
  BFD_ASSERT (s->contents != NULL);
  BFD_ASSERT (s->output_section != NULL);

  sec = eh->export_glue->root.u.def.section;

  BFD_ASSERT (sec->output_section != NULL);

  /* Run-time address of the glue symbol's definition.  */
  val = eh->export_glue->root.u.def.value + sec->output_offset
	+ sec->output_section->vma;

  myh = elf32_arm_create_thumb_stub (info, h->root.root.string,
				     h->root.u.def.section->owner,
				     globals->obfd, sec, val, s,
				     &error_message);
  BFD_ASSERT (myh);
  return TRUE;
}
9361
9362 /* Populate ARMv4 BX veneers. Returns the absolute adress of the veneer. */
9363
9364 static bfd_vma
9365 elf32_arm_bx_glue (struct bfd_link_info * info, int reg)
9366 {
9367 bfd_byte *p;
9368 bfd_vma glue_addr;
9369 asection *s;
9370 struct elf32_arm_link_hash_table *globals;
9371
9372 globals = elf32_arm_hash_table (info);
9373 BFD_ASSERT (globals != NULL);
9374 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
9375
9376 s = bfd_get_linker_section (globals->bfd_of_glue_owner,
9377 ARM_BX_GLUE_SECTION_NAME);
9378 BFD_ASSERT (s != NULL);
9379 BFD_ASSERT (s->contents != NULL);
9380 BFD_ASSERT (s->output_section != NULL);
9381
9382 BFD_ASSERT (globals->bx_glue_offset[reg] & 2);
9383
9384 glue_addr = globals->bx_glue_offset[reg] & ~(bfd_vma)3;
9385
9386 if ((globals->bx_glue_offset[reg] & 1) == 0)
9387 {
9388 p = s->contents + glue_addr;
9389 bfd_put_32 (globals->obfd, armbx1_tst_insn + (reg << 16), p);
9390 bfd_put_32 (globals->obfd, armbx2_moveq_insn + reg, p + 4);
9391 bfd_put_32 (globals->obfd, armbx3_bx_insn + reg, p + 8);
9392 globals->bx_glue_offset[reg] |= 1;
9393 }
9394
9395 return glue_addr + s->output_section->vma + s->output_offset;
9396 }
9397
9398 /* Generate Arm stubs for exported Thumb symbols. */
9399 static void
9400 elf32_arm_begin_write_processing (bfd *abfd ATTRIBUTE_UNUSED,
9401 struct bfd_link_info *link_info)
9402 {
9403 struct elf32_arm_link_hash_table * globals;
9404
9405 if (link_info == NULL)
9406 /* Ignore this if we are not called by the ELF backend linker. */
9407 return;
9408
9409 globals = elf32_arm_hash_table (link_info);
9410 if (globals == NULL)
9411 return;
9412
9413 /* If blx is available then exported Thumb symbols are OK and there is
9414 nothing to do. */
9415 if (globals->use_blx)
9416 return;
9417
9418 elf_link_hash_traverse (&globals->root, elf32_arm_to_thumb_export_stub,
9419 link_info);
9420 }
9421
9422 /* Reserve space for COUNT dynamic relocations in relocation selection
9423 SRELOC. */
9424
9425 static void
9426 elf32_arm_allocate_dynrelocs (struct bfd_link_info *info, asection *sreloc,
9427 bfd_size_type count)
9428 {
9429 struct elf32_arm_link_hash_table *htab;
9430
9431 htab = elf32_arm_hash_table (info);
9432 BFD_ASSERT (htab->root.dynamic_sections_created);
9433 if (sreloc == NULL)
9434 abort ();
9435 sreloc->size += RELOC_SIZE (htab) * count;
9436 }
9437
9438 /* Reserve space for COUNT R_ARM_IRELATIVE relocations. If the link is
9439 dynamic, the relocations should go in SRELOC, otherwise they should
9440 go in the special .rel.iplt section. */
9441
9442 static void
9443 elf32_arm_allocate_irelocs (struct bfd_link_info *info, asection *sreloc,
9444 bfd_size_type count)
9445 {
9446 struct elf32_arm_link_hash_table *htab;
9447
9448 htab = elf32_arm_hash_table (info);
9449 if (!htab->root.dynamic_sections_created)
9450 htab->root.irelplt->size += RELOC_SIZE (htab) * count;
9451 else
9452 {
9453 BFD_ASSERT (sreloc != NULL);
9454 sreloc->size += RELOC_SIZE (htab) * count;
9455 }
9456 }
9457
9458 /* Add relocation REL to the end of relocation section SRELOC. */
9459
9460 static void
9461 elf32_arm_add_dynreloc (bfd *output_bfd, struct bfd_link_info *info,
9462 asection *sreloc, Elf_Internal_Rela *rel)
9463 {
9464 bfd_byte *loc;
9465 struct elf32_arm_link_hash_table *htab;
9466
9467 htab = elf32_arm_hash_table (info);
9468 if (!htab->root.dynamic_sections_created
9469 && ELF32_R_TYPE (rel->r_info) == R_ARM_IRELATIVE)
9470 sreloc = htab->root.irelplt;
9471 if (sreloc == NULL)
9472 abort ();
9473 loc = sreloc->contents;
9474 loc += sreloc->reloc_count++ * RELOC_SIZE (htab);
9475 if (sreloc->reloc_count * RELOC_SIZE (htab) > sreloc->size)
9476 abort ();
9477 SWAP_RELOC_OUT (htab) (output_bfd, rel, loc);
9478 }
9479
/* Allocate room for a PLT entry described by ROOT_PLT and ARM_PLT.
   IS_IPLT_ENTRY says whether the entry belongs to .iplt rather than
   to .plt.  Reserves the PLT slot (recording its offset in
   ROOT_PLT->offset), the matching dynamic relocation, and (except on
   Symbian) the .got.plt slot (recorded in ARM_PLT->got_offset).  */

static void
elf32_arm_allocate_plt_entry (struct bfd_link_info *info,
			      bfd_boolean is_iplt_entry,
			      union gotplt_union *root_plt,
			      struct arm_plt_info *arm_plt)
{
  struct elf32_arm_link_hash_table *htab;
  asection *splt;
  asection *sgotplt;

  htab = elf32_arm_hash_table (info);

  if (is_iplt_entry)
    {
      splt = htab->root.iplt;
      sgotplt = htab->root.igotplt;

      /* NaCl uses a special first entry in .iplt too.  */
      if (htab->nacl_p && splt->size == 0)
	splt->size += htab->plt_header_size;

      /* Allocate room for an R_ARM_IRELATIVE relocation in .rel.iplt.  */
      elf32_arm_allocate_irelocs (info, htab->root.irelplt, 1);
    }
  else
    {
      splt = htab->root.splt;
      sgotplt = htab->root.sgotplt;

      if (htab->fdpic_p)
	{
	  /* Allocate room for R_ARM_FUNCDESC_VALUE.  */
	  /* For lazy binding, relocations will be put into .rel.plt, in
	     .rel.got otherwise.  */
	  /* FIXME: today we don't support lazy binding so put it in .rel.got */
	  if (info->flags & DF_BIND_NOW)
	    elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
	  else
	    elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1);
	}
      else
	{
	  /* Allocate room for an R_JUMP_SLOT relocation in .rel.plt.  */
	  elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1);
	}

      /* If this is the first .plt entry, make room for the special
	 first entry.  */
      if (splt->size == 0)
	splt->size += htab->plt_header_size;

      htab->next_tls_desc_index++;
    }

  /* Allocate the PLT entry itself, including any leading Thumb stub.
     The recorded offset is that of the entry proper, i.e. after the
     optional stub.  */
  if (elf32_arm_plt_needs_thumb_stub_p (info, arm_plt))
    splt->size += PLT_THUMB_STUB_SIZE;
  root_plt->offset = splt->size;
  splt->size += htab->plt_entry_size;

  if (!htab->symbian_p)
    {
      /* We also need to make an entry in the .got.plt section, which
	 will be placed in the .got section by the linker script.  */
      if (is_iplt_entry)
	arm_plt->got_offset = sgotplt->size;
      else
	arm_plt->got_offset = sgotplt->size - 8 * htab->num_tls_desc;
      if (htab->fdpic_p)
	/* Function descriptor takes 64 bits in GOT.  */
	sgotplt->size += 8;
      else
	sgotplt->size += 4;
    }
}
9559
9560 static bfd_vma
9561 arm_movw_immediate (bfd_vma value)
9562 {
9563 return (value & 0x00000fff) | ((value & 0x0000f000) << 4);
9564 }
9565
9566 static bfd_vma
9567 arm_movt_immediate (bfd_vma value)
9568 {
9569 return ((value & 0x0fff0000) >> 16) | ((value & 0xf0000000) >> 12);
9570 }
9571
/* Fill in a PLT entry and its associated GOT slot.  If DYNINDX == -1,
   the entry lives in .iplt and resolves to (*SYM_VALUE)().
   Otherwise, DYNINDX is the index of the symbol in the dynamic
   symbol table and SYM_VALUE is undefined.

   ROOT_PLT points to the offset of the PLT entry from the start of its
   section (.iplt or .plt).  ARM_PLT points to the symbol's ARM-specific
   bookkeeping information.

   The entry's byte layout depends on the target flavour (Symbian,
   VxWorks shared/exec, NaCl, FDPIC, Thumb2-only, or classic ARM
   short/long), and the matching dynamic relocation is emitted into
   the appropriate .rel(a) section.

   Returns FALSE if there was a problem.  */

static bfd_boolean
elf32_arm_populate_plt_entry (bfd *output_bfd, struct bfd_link_info *info,
			      union gotplt_union *root_plt,
			      struct arm_plt_info *arm_plt,
			      int dynindx, bfd_vma sym_value)
{
  struct elf32_arm_link_hash_table *htab;
  asection *sgot;
  asection *splt;
  asection *srel;
  bfd_byte *loc;
  bfd_vma plt_index;
  Elf_Internal_Rela rel;
  bfd_vma plt_header_size;
  bfd_vma got_header_size;

  htab = elf32_arm_hash_table (info);

  /* Pick the appropriate sections and sizes.  */
  if (dynindx == -1)
    {
      splt = htab->root.iplt;
      sgot = htab->root.igotplt;
      srel = htab->root.irelplt;

      /* There are no reserved entries in .igot.plt, and no special
	 first entry in .iplt.  */
      got_header_size = 0;
      plt_header_size = 0;
    }
  else
    {
      splt = htab->root.splt;
      sgot = htab->root.sgotplt;
      srel = htab->root.srelplt;

      got_header_size = get_elf_backend_data (output_bfd)->got_header_size;
      plt_header_size = htab->plt_header_size;
    }
  BFD_ASSERT (splt != NULL && srel != NULL);

  /* Fill in the entry in the procedure linkage table.  */
  if (htab->symbian_p)
    {
      BFD_ASSERT (dynindx >= 0);
      put_arm_insn (htab, output_bfd,
		    elf32_arm_symbian_plt_entry[0],
		    splt->contents + root_plt->offset);
      bfd_put_32 (output_bfd,
		  elf32_arm_symbian_plt_entry[1],
		  splt->contents + root_plt->offset + 4);

      /* Fill in the entry in the .rel.plt section.  */
      rel.r_offset = (splt->output_section->vma
		      + splt->output_offset
		      + root_plt->offset + 4);
      rel.r_info = ELF32_R_INFO (dynindx, R_ARM_GLOB_DAT);

      /* Get the index in the procedure linkage table which
	 corresponds to this symbol.  This is the index of this symbol
	 in all the symbols for which we are making plt entries.  The
	 first entry in the procedure linkage table is reserved.  */
      plt_index = ((root_plt->offset - plt_header_size)
		   / htab->plt_entry_size);
    }
  else
    {
      bfd_vma got_offset, got_address, plt_address;
      bfd_vma got_displacement, initial_got_entry;
      bfd_byte * ptr;

      BFD_ASSERT (sgot != NULL);

      /* Get the offset into the .(i)got.plt table of the entry that
	 corresponds to this function.  (The low bit of got_offset is
	 used as a flag elsewhere, hence the masking.)  */
      got_offset = (arm_plt->got_offset & -2);

      /* Get the index in the procedure linkage table which
	 corresponds to this symbol.  This is the index of this symbol
	 in all the symbols for which we are making plt entries.
	 After the reserved .got.plt entries, all symbols appear in
	 the same order as in .plt.  */
      if (htab->fdpic_p)
	/* Function descriptor takes 8 bytes.  */
	plt_index = (got_offset - got_header_size) / 8;
      else
	plt_index = (got_offset - got_header_size) / 4;

      /* Calculate the address of the GOT entry.  */
      got_address = (sgot->output_section->vma
		     + sgot->output_offset
		     + got_offset);

      /* ...and the address of the PLT entry.  */
      plt_address = (splt->output_section->vma
		     + splt->output_offset
		     + root_plt->offset);

      ptr = splt->contents + root_plt->offset;
      if (htab->vxworks_p && bfd_link_pic (info))
	{
	  unsigned int i;
	  bfd_vma val;

	  /* Words 2 and 5 of the template are data (GOT offset and
	     reloc-table offset), the rest are instructions.  */
	  for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
	    {
	      val = elf32_arm_vxworks_shared_plt_entry[i];
	      if (i == 2)
		val |= got_address - sgot->output_section->vma;
	      if (i == 5)
		val |= plt_index * RELOC_SIZE (htab);
	      if (i == 2 || i == 5)
		bfd_put_32 (output_bfd, val, ptr);
	      else
		put_arm_insn (htab, output_bfd, val, ptr);
	    }
	}
      else if (htab->vxworks_p)
	{
	  unsigned int i;
	  bfd_vma val;

	  /* Word 4 is a backwards branch to the start of .plt.  */
	  for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
	    {
	      val = elf32_arm_vxworks_exec_plt_entry[i];
	      if (i == 2)
		val |= got_address;
	      if (i == 4)
		val |= 0xffffff & -((root_plt->offset + i * 4 + 8) >> 2);
	      if (i == 5)
		val |= plt_index * RELOC_SIZE (htab);
	      if (i == 2 || i == 5)
		bfd_put_32 (output_bfd, val, ptr);
	      else
		put_arm_insn (htab, output_bfd, val, ptr);
	    }

	  loc = (htab->srelplt2->contents
		 + (plt_index * 2 + 1) * RELOC_SIZE (htab));

	  /* Create the .rela.plt.unloaded R_ARM_ABS32 relocation
	     referencing the GOT for this PLT entry.  */
	  rel.r_offset = plt_address + 8;
	  rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
	  rel.r_addend = got_offset;
	  SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
	  loc += RELOC_SIZE (htab);

	  /* Create the R_ARM_ABS32 relocation referencing the
	     beginning of the PLT for this GOT entry.  */
	  rel.r_offset = got_address;
	  rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
	  rel.r_addend = 0;
	  SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
	}
      else if (htab->nacl_p)
	{
	  /* Calculate the displacement between the PLT slot and the
	     common tail that's part of the special initial PLT slot.  */
	  int32_t tail_displacement
	    = ((splt->output_section->vma + splt->output_offset
		+ ARM_NACL_PLT_TAIL_OFFSET)
	       - (plt_address + htab->plt_entry_size + 4));
	  BFD_ASSERT ((tail_displacement & 3) == 0);
	  tail_displacement >>= 2;

	  /* The displacement must fit in a 24-bit branch field.  */
	  BFD_ASSERT ((tail_displacement & 0xff000000) == 0
		      || (-tail_displacement & 0xff000000) == 0);

	  /* Calculate the displacement between the PLT slot and the entry
	     in the GOT.  The offset accounts for the value produced by
	     adding to pc in the penultimate instruction of the PLT stub.  */
	  got_displacement = (got_address
			      - (plt_address + htab->plt_entry_size));

	  /* NaCl does not support interworking at all.  */
	  BFD_ASSERT (!elf32_arm_plt_needs_thumb_stub_p (info, arm_plt));

	  put_arm_insn (htab, output_bfd,
			elf32_arm_nacl_plt_entry[0]
			| arm_movw_immediate (got_displacement),
			ptr + 0);
	  put_arm_insn (htab, output_bfd,
			elf32_arm_nacl_plt_entry[1]
			| arm_movt_immediate (got_displacement),
			ptr + 4);
	  put_arm_insn (htab, output_bfd,
			elf32_arm_nacl_plt_entry[2],
			ptr + 8);
	  put_arm_insn (htab, output_bfd,
			elf32_arm_nacl_plt_entry[3]
			| (tail_displacement & 0x00ffffff),
			ptr + 12);
	}
      else if (htab->fdpic_p)
	{
	  const bfd_vma *plt_entry = using_thumb_only(htab)
	    ? elf32_arm_fdpic_thumb_plt_entry
	    : elf32_arm_fdpic_plt_entry;

	  /* Fill-up Thumb stub if needed.  */
	  if (elf32_arm_plt_needs_thumb_stub_p (info, arm_plt))
	    {
	      put_thumb_insn (htab, output_bfd,
			      elf32_arm_plt_thumb_stub[0], ptr - 4);
	      put_thumb_insn (htab, output_bfd,
			      elf32_arm_plt_thumb_stub[1], ptr - 2);
	    }
	  /* As we are using 32 bit instructions even for the Thumb
	     version, we have to use 'put_arm_insn' instead of
	     'put_thumb_insn'.  */
	  put_arm_insn(htab, output_bfd, plt_entry[0], ptr + 0);
	  put_arm_insn(htab, output_bfd, plt_entry[1], ptr + 4);
	  put_arm_insn(htab, output_bfd, plt_entry[2], ptr + 8);
	  put_arm_insn(htab, output_bfd, plt_entry[3], ptr + 12);
	  bfd_put_32 (output_bfd, got_offset, ptr + 16);

	  if (!(info->flags & DF_BIND_NOW))
	    {
	      /* funcdesc_value_reloc_offset.  */
	      bfd_put_32 (output_bfd,
			  htab->root.srelplt->reloc_count * RELOC_SIZE (htab),
			  ptr + 20);
	      put_arm_insn(htab, output_bfd, plt_entry[6], ptr + 24);
	      put_arm_insn(htab, output_bfd, plt_entry[7], ptr + 28);
	      put_arm_insn(htab, output_bfd, plt_entry[8], ptr + 32);
	      put_arm_insn(htab, output_bfd, plt_entry[9], ptr + 36);
	    }
	}
      else if (using_thumb_only (htab))
	{
	  /* PR ld/16017: Generate thumb only PLT entries.  */
	  if (!using_thumb2 (htab))
	    {
	      /* FIXME: We ought to be able to generate thumb-1 PLT
		 instructions...  */
	      _bfd_error_handler (_("%pB: warning: thumb-1 mode PLT generation not currently supported"),
				  output_bfd);
	      return FALSE;
	    }

	  /* Calculate the displacement between the PLT slot and the entry in
	     the GOT.  The 12-byte offset accounts for the value produced by
	     adding to pc in the 3rd instruction of the PLT stub.  */
	  got_displacement = got_address - (plt_address + 12);

	  /* As we are using 32 bit instructions we have to use 'put_arm_insn'
	     instead of 'put_thumb_insn'.  The shifts scatter the
	     displacement bits into the immediate fields of the two
	     32-bit Thumb instructions of the template.  */
	  put_arm_insn (htab, output_bfd,
			elf32_thumb2_plt_entry[0]
			| ((got_displacement & 0x000000ff) << 16)
			| ((got_displacement & 0x00000700) << 20)
			| ((got_displacement & 0x00000800) >> 1)
			| ((got_displacement & 0x0000f000) >> 12),
			ptr + 0);
	  put_arm_insn (htab, output_bfd,
			elf32_thumb2_plt_entry[1]
			| ((got_displacement & 0x00ff0000) )
			| ((got_displacement & 0x07000000) << 4)
			| ((got_displacement & 0x08000000) >> 17)
			| ((got_displacement & 0xf0000000) >> 28),
			ptr + 4);
	  put_arm_insn (htab, output_bfd,
			elf32_thumb2_plt_entry[2],
			ptr + 8);
	  put_arm_insn (htab, output_bfd,
			elf32_thumb2_plt_entry[3],
			ptr + 12);
	}
      else
	{
	  /* Calculate the displacement between the PLT slot and the
	     entry in the GOT.  The eight-byte offset accounts for the
	     value produced by adding to pc in the first instruction
	     of the PLT stub.  */
	  got_displacement = got_address - (plt_address + 8);

	  if (elf32_arm_plt_needs_thumb_stub_p (info, arm_plt))
	    {
	      put_thumb_insn (htab, output_bfd,
			      elf32_arm_plt_thumb_stub[0], ptr - 4);
	      put_thumb_insn (htab, output_bfd,
			      elf32_arm_plt_thumb_stub[1], ptr - 2);
	    }

	  if (!elf32_arm_use_long_plt_entry)
	    {
	      /* The short entry splits a 28-bit displacement across
		 three 8/8/12-bit immediate fields.  */
	      BFD_ASSERT ((got_displacement & 0xf0000000) == 0);

	      put_arm_insn (htab, output_bfd,
			    elf32_arm_plt_entry_short[0]
			    | ((got_displacement & 0x0ff00000) >> 20),
			    ptr + 0);
	      put_arm_insn (htab, output_bfd,
			    elf32_arm_plt_entry_short[1]
			    | ((got_displacement & 0x000ff000) >> 12),
			    ptr+ 4);
	      put_arm_insn (htab, output_bfd,
			    elf32_arm_plt_entry_short[2]
			    | (got_displacement & 0x00000fff),
			    ptr + 8);
#ifdef FOUR_WORD_PLT
	      bfd_put_32 (output_bfd, elf32_arm_plt_entry_short[3], ptr + 12);
#endif
	    }
	  else
	    {
	      /* The long entry handles a full 32-bit displacement in
		 four 4/8/8/12-bit pieces.  */
	      put_arm_insn (htab, output_bfd,
			    elf32_arm_plt_entry_long[0]
			    | ((got_displacement & 0xf0000000) >> 28),
			    ptr + 0);
	      put_arm_insn (htab, output_bfd,
			    elf32_arm_plt_entry_long[1]
			    | ((got_displacement & 0x0ff00000) >> 20),
			    ptr + 4);
	      put_arm_insn (htab, output_bfd,
			    elf32_arm_plt_entry_long[2]
			    | ((got_displacement & 0x000ff000) >> 12),
			    ptr+ 8);
	      put_arm_insn (htab, output_bfd,
			    elf32_arm_plt_entry_long[3]
			    | (got_displacement & 0x00000fff),
			    ptr + 12);
	    }
	}

      /* Fill in the entry in the .rel(a).(i)plt section.  */
      rel.r_offset = got_address;
      rel.r_addend = 0;
      if (dynindx == -1)
	{
	  /* .igot.plt entries use IRELATIVE relocations against SYM_VALUE.
	     The dynamic linker or static executable then calls SYM_VALUE
	     to determine the correct run-time value of the .igot.plt entry.  */
	  rel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
	  initial_got_entry = sym_value;
	}
      else
	{
	  /* For FDPIC we will have to resolve a R_ARM_FUNCDESC_VALUE
	     used by PLT entry.  */
	  if (htab->fdpic_p)
	    {
	      rel.r_info = ELF32_R_INFO (dynindx, R_ARM_FUNCDESC_VALUE);
	      initial_got_entry = 0;
	    }
	  else
	    {
	      rel.r_info = ELF32_R_INFO (dynindx, R_ARM_JUMP_SLOT);
	      initial_got_entry = (splt->output_section->vma
				   + splt->output_offset);
	    }
	}

      /* Fill in the entry in the global offset table.  */
      bfd_put_32 (output_bfd, initial_got_entry,
		  sgot->contents + got_offset);

      if (htab->fdpic_p && !(info->flags & DF_BIND_NOW))
	{
	  /* Setup initial funcdesc value.  */
	  /* FIXME: we don't support lazy binding because there is a
	     race condition between both words getting written and
	     some other thread attempting to read them.  The ARM
	     architecture does not have an atomic 64 bit load/store
	     instruction that could be used to prevent it; it is
	     recommended that threaded FDPIC applications run with the
	     LD_BIND_NOW environment variable set.  */
	  bfd_put_32(output_bfd, plt_address + 0x18,
		     sgot->contents + got_offset);
	  bfd_put_32(output_bfd, -1 /*TODO*/,
		     sgot->contents + got_offset + 4);
	}
    }

  if (dynindx == -1)
    elf32_arm_add_dynreloc (output_bfd, info, srel, &rel);
  else
    {
      if (htab->fdpic_p)
	{
	  /* For FDPIC we put PLT relocations into .rel.got when not
	     lazy binding otherwise we put them in .rel.plt.  For now,
	     we don't support lazy binding so put it in .rel.got.  */
	  if (info->flags & DF_BIND_NOW)
	    elf32_arm_add_dynreloc(output_bfd, info, htab->root.srelgot, &rel);
	  else
	    elf32_arm_add_dynreloc(output_bfd, info, htab->root.srelplt, &rel);
	}
      else
	{
	  loc = srel->contents + plt_index * RELOC_SIZE (htab);
	  SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
	}
    }

  return TRUE;
}
9981
9982 /* Some relocations map to different relocations depending on the
9983 target. Return the real relocation. */
9984
9985 static int
9986 arm_real_reloc_type (struct elf32_arm_link_hash_table * globals,
9987 int r_type)
9988 {
9989 switch (r_type)
9990 {
9991 case R_ARM_TARGET1:
9992 if (globals->target1_is_rel)
9993 return R_ARM_REL32;
9994 else
9995 return R_ARM_ABS32;
9996
9997 case R_ARM_TARGET2:
9998 return globals->target2_reloc;
9999
10000 default:
10001 return r_type;
10002 }
10003 }
10004
10005 /* Return the base VMA address which should be subtracted from real addresses
10006 when resolving @dtpoff relocation.
10007 This is PT_TLS segment p_vaddr. */
10008
10009 static bfd_vma
10010 dtpoff_base (struct bfd_link_info *info)
10011 {
10012 /* If tls_sec is NULL, we should have signalled an error already. */
10013 if (elf_hash_table (info)->tls_sec == NULL)
10014 return 0;
10015 return elf_hash_table (info)->tls_sec->vma;
10016 }
10017
10018 /* Return the relocation value for @tpoff relocation
10019 if STT_TLS virtual address is ADDRESS. */
10020
10021 static bfd_vma
10022 tpoff (struct bfd_link_info *info, bfd_vma address)
10023 {
10024 struct elf_link_hash_table *htab = elf_hash_table (info);
10025 bfd_vma base;
10026
10027 /* If tls_sec is NULL, we should have signalled an error already. */
10028 if (htab->tls_sec == NULL)
10029 return 0;
10030 base = align_power ((bfd_vma) TCB_SIZE, htab->tls_sec->alignment_power);
10031 return address - htab->tls_sec->vma + base;
10032 }
10033
10034 /* Perform an R_ARM_ABS12 relocation on the field pointed to by DATA.
10035 VALUE is the relocation value. */
10036
10037 static bfd_reloc_status_type
10038 elf32_arm_abs12_reloc (bfd *abfd, void *data, bfd_vma value)
10039 {
10040 if (value > 0xfff)
10041 return bfd_reloc_overflow;
10042
10043 value |= bfd_get_32 (abfd, data) & 0xfffff000;
10044 bfd_put_32 (abfd, value, data);
10045 return bfd_reloc_ok;
10046 }
10047
/* Handle TLS relaxations.  Relaxing is possible for symbols that use
   R_ARM_GOTDESC, R_ARM_{,THM_}TLS_CALL or
   R_ARM_{,THM_}TLS_DESCSEQ relocations, during a static link.

   IS_LOCAL is non-zero when the symbol binds locally; the sequences
   are rewritten into simpler (mostly nop) forms in that case.

   Return bfd_reloc_ok if we're done, bfd_reloc_continue if the caller
   is to then call final_link_relocate.  Return other values in the
   case of error.

   FIXME: When --emit-relocs is in effect, we'll emit relocs describing
   the pre-relaxed code.  It would be nice if the relocs were updated
   to match the optimization.   */

static bfd_reloc_status_type
elf32_arm_tls_relax (struct elf32_arm_link_hash_table *globals,
		     bfd *input_bfd, asection *input_sec, bfd_byte *contents,
		     Elf_Internal_Rela *rel, unsigned long is_local)
{
  unsigned long insn;

  switch (ELF32_R_TYPE (rel->r_info))
    {
    default:
      return bfd_reloc_notsupported;

    case R_ARM_TLS_GOTDESC:
      /* Rewrite the in-place addend: zero for a local symbol,
	 otherwise biased down by 5 (low bit set: Thumb consumer) or
	 by 8 (ARM consumer).
	 NOTE(review): the 5/8 values look like compensation for the
	 consumer's PC-read offset — confirm against the ARM TLS
	 descriptor ABI.  */
      if (is_local)
	insn = 0;
      else
	{
	  insn = bfd_get_32 (input_bfd, contents + rel->r_offset);
	  if (insn & 1)
	    insn -= 5; /* THUMB */
	  else
	    insn -= 8; /* ARM */
	}
      bfd_put_32 (input_bfd, insn, contents + rel->r_offset);
      return bfd_reloc_continue;

    case R_ARM_THM_TLS_DESCSEQ:
      /* Thumb insn.  Recognise each instruction of the descriptor
	 sequence and patch it in place; 0x46c0 is the Thumb nop
	 encoding used to blank instructions out.  */
      insn = bfd_get_16 (input_bfd, contents + rel->r_offset);
      if ((insn & 0xff78) == 0x4478)	  /* add rx, pc */
	{
	  if (is_local)
	    /* nop */
	    bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
	}
      else if ((insn & 0xffc0) == 0x6840)  /* ldr rx,[ry,#4] */
	{
	  if (is_local)
	    /* nop */
	    bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
	  else
	    /* ldr rx,[ry] */
	    bfd_put_16 (input_bfd, insn & 0xf83f, contents + rel->r_offset);
	}
      else if ((insn & 0xff87) == 0x4780)  /* blx rx */
	{
	  if (is_local)
	    /* nop */
	    bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
	  else
	    /* mov r0, rx */
	    bfd_put_16 (input_bfd, 0x4600 | (insn & 0x78),
			contents + rel->r_offset);
	}
      else
	{
	  if ((insn & 0xf000) == 0xf000 || (insn & 0xf800) == 0xe800)
	    /* It's a 32 bit instruction, fetch the rest of it for
	       error generation.  */
	    insn = (insn << 16)
	      | bfd_get_16 (input_bfd, contents + rel->r_offset + 2);
	  _bfd_error_handler
	    /* xgettext:c-format */
	    (_("%pB(%pA+%#" PRIx64 "): "
	       "unexpected %s instruction '%#lx' in TLS trampoline"),
	     input_bfd, input_sec, (uint64_t) rel->r_offset,
	     "Thumb", insn);
	  return bfd_reloc_notsupported;
	}
      break;

    case R_ARM_TLS_DESCSEQ:
      /* arm insn.  Same scheme as the Thumb case above; 0xe1a00000
	 (mov r0, r0) serves as the ARM nop.  */
      insn = bfd_get_32 (input_bfd, contents + rel->r_offset);
      if ((insn & 0xffff0ff0) == 0xe08f0000) /* add rx,pc,ry */
	{
	  if (is_local)
	    /* mov rx, ry */
	    bfd_put_32 (input_bfd, 0xe1a00000 | (insn & 0xffff),
			contents + rel->r_offset);
	}
      else if ((insn & 0xfff00fff) == 0xe5900004) /* ldr rx,[ry,#4]*/
	{
	  if (is_local)
	    /* nop */
	    bfd_put_32 (input_bfd, 0xe1a00000, contents + rel->r_offset);
	  else
	    /* ldr rx,[ry] */
	    bfd_put_32 (input_bfd, insn & 0xfffff000,
			contents + rel->r_offset);
	}
      else if ((insn & 0xfffffff0) == 0xe12fff30) /* blx rx */
	{
	  if (is_local)
	    /* nop */
	    bfd_put_32 (input_bfd, 0xe1a00000, contents + rel->r_offset);
	  else
	    /* mov r0, rx */
	    bfd_put_32 (input_bfd, 0xe1a00000 | (insn & 0xf),
			contents + rel->r_offset);
	}
      else
	{
	  _bfd_error_handler
	    /* xgettext:c-format */
	    (_("%pB(%pA+%#" PRIx64 "): "
	       "unexpected %s instruction '%#lx' in TLS trampoline"),
	     input_bfd, input_sec, (uint64_t) rel->r_offset,
	     "ARM", insn);
	  return bfd_reloc_notsupported;
	}
      break;

    case R_ARM_TLS_CALL:
      /* GD->IE relaxation, turn the instruction into 'nop' or
	 'ldr r0, [pc,r0]'  */
      insn = is_local ? 0xe1a00000 : 0xe79f0000;
      bfd_put_32 (input_bfd, insn, contents + rel->r_offset);
      break;

    case R_ARM_THM_TLS_CALL:
      /* GD->IE relaxation.  */
      if (!is_local)
	/* add r0,pc; ldr r0, [r0]  */
	insn = 0x44786800;
      else if (using_thumb2 (globals))
	/* nop.w */
	insn = 0xf3af8000;
      else
	/* nop; nop */
	insn = 0xbf00bf00;

      /* Store the two Thumb halfwords, high halfword first.  */
      bfd_put_16 (input_bfd, insn >> 16, contents + rel->r_offset);
      bfd_put_16 (input_bfd, insn & 0xffff, contents + rel->r_offset + 2);
      break;
    }
  return bfd_reloc_ok;
}
10198
10199 /* For a given value of n, calculate the value of G_n as required to
10200 deal with group relocations. We return it in the form of an
10201 encoded constant-and-rotation, together with the final residual. If n is
10202 specified as less than zero, then final_residual is filled with the
10203 input value and no further action is performed. */
10204
10205 static bfd_vma
10206 calculate_group_reloc_mask (bfd_vma value, int n, bfd_vma *final_residual)
10207 {
10208 int current_n;
10209 bfd_vma g_n;
10210 bfd_vma encoded_g_n = 0;
10211 bfd_vma residual = value; /* Also known as Y_n. */
10212
10213 for (current_n = 0; current_n <= n; current_n++)
10214 {
10215 int shift;
10216
10217 /* Calculate which part of the value to mask. */
10218 if (residual == 0)
10219 shift = 0;
10220 else
10221 {
10222 int msb;
10223
10224 /* Determine the most significant bit in the residual and
10225 align the resulting value to a 2-bit boundary. */
10226 for (msb = 30; msb >= 0; msb -= 2)
10227 if (residual & (3 << msb))
10228 break;
10229
10230 /* The desired shift is now (msb - 6), or zero, whichever
10231 is the greater. */
10232 shift = msb - 6;
10233 if (shift < 0)
10234 shift = 0;
10235 }
10236
10237 /* Calculate g_n in 32-bit as well as encoded constant+rotation form. */
10238 g_n = residual & (0xff << shift);
10239 encoded_g_n = (g_n >> shift)
10240 | ((g_n <= 0xff ? 0 : (32 - shift) / 2) << 8);
10241
10242 /* Calculate the residual for the next time around. */
10243 residual &= ~g_n;
10244 }
10245
10246 *final_residual = residual;
10247
10248 return encoded_g_n;
10249 }
10250
10251 /* Given an ARM instruction, determine whether it is an ADD or a SUB.
10252 Returns 1 if it is an ADD, -1 if it is a SUB, and 0 otherwise. */
10253
10254 static int
10255 identify_add_or_sub (bfd_vma insn)
10256 {
10257 int opcode = insn & 0x1e00000;
10258
10259 if (opcode == 1 << 23) /* ADD */
10260 return 1;
10261
10262 if (opcode == 1 << 22) /* SUB */
10263 return -1;
10264
10265 return 0;
10266 }
10267
/* Helper function to compute the Addend for Armv8.1-M Mainline relocations.
   Resolve the branch target address: if PLT_OFFSET is valid the value
   starts at the symbol's PLT entry (backed up by PLT_THUMB_STUB_SIZE),
   and it is redirected to a long-branch stub when arm_type_of_stub
   decides one is required.  *UNRESOLVED_RELOC_P is cleared when a PLT
   entry resolves the relocation here.  */
static bfd_vma
get_value_helper (bfd_vma plt_offset,
		  asection *splt,
		  asection *input_section,
		  asection *sym_sec,
		  struct elf_link_hash_entry * h,
		  struct bfd_link_info *info,
		  bfd *input_bfd,
		  Elf_Internal_Rela *rel,
		  const char *sym_name,
		  unsigned char st_type,
		  struct elf32_arm_link_hash_table *globals,
		  bfd_boolean *unresolved_reloc_p)
{
  bfd_vma value = 0;
  enum arm_st_branch_type branch_type;
  enum elf32_arm_stub_type stub_type = arm_stub_none;
  struct elf32_arm_stub_hash_entry *stub_entry;
  struct elf32_arm_link_hash_entry *hash
    = (struct elf32_arm_link_hash_entry *)h;


  if (plt_offset != (bfd_vma) -1)
    {
      /* The symbol has a PLT entry; resolve to it.
	 NOTE(review): PLT_OFFSET appears to point past a leading
	 Thumb stub of PLT_THUMB_STUB_SIZE bytes — confirm.  */
      value = (splt->output_section->vma
	       + splt->output_offset
	       + plt_offset);
      value -= PLT_THUMB_STUB_SIZE;
      *unresolved_reloc_p = FALSE;
    }

  /* Ask whether the (possibly tentative) target address needs a
     long-branch stub.  This call also writes BRANCH_TYPE, which is
     otherwise unused here.  */
  stub_type = arm_type_of_stub (info, input_section, rel,
				st_type, &branch_type,
				hash, value, sym_sec,
				input_bfd, sym_name);

  if (stub_type != arm_stub_none)
    {
      /* Redirect the value to the stub's address when one exists.  */
      stub_entry = elf32_arm_get_stub_entry (input_section,
					     sym_sec, h,
					     rel, globals,
					     stub_type);
      if (stub_entry != NULL)
	{
	  value = (stub_entry->stub_offset
		   + stub_entry->stub_sec->output_offset
		   + stub_entry->stub_sec->output_section->vma);
	}
    }
  return value;
}
10320
10321 /* Perform a relocation as part of a final link. */
10322
10323 static bfd_reloc_status_type
10324 elf32_arm_final_link_relocate (reloc_howto_type * howto,
10325 bfd * input_bfd,
10326 bfd * output_bfd,
10327 asection * input_section,
10328 bfd_byte * contents,
10329 Elf_Internal_Rela * rel,
10330 bfd_vma value,
10331 struct bfd_link_info * info,
10332 asection * sym_sec,
10333 const char * sym_name,
10334 unsigned char st_type,
10335 enum arm_st_branch_type branch_type,
10336 struct elf_link_hash_entry * h,
10337 bfd_boolean * unresolved_reloc_p,
10338 char ** error_message)
10339 {
10340 unsigned long r_type = howto->type;
10341 unsigned long r_symndx;
10342 bfd_byte * hit_data = contents + rel->r_offset;
10343 bfd_vma * local_got_offsets;
10344 bfd_vma * local_tlsdesc_gotents;
10345 asection * sgot;
10346 asection * splt;
10347 asection * sreloc = NULL;
10348 asection * srelgot;
10349 bfd_vma addend;
10350 bfd_signed_vma signed_addend;
10351 unsigned char dynreloc_st_type;
10352 bfd_vma dynreloc_value;
10353 struct elf32_arm_link_hash_table * globals;
10354 struct elf32_arm_link_hash_entry *eh;
10355 union gotplt_union *root_plt;
10356 struct arm_plt_info *arm_plt;
10357 bfd_vma plt_offset;
10358 bfd_vma gotplt_offset;
10359 bfd_boolean has_iplt_entry;
10360 bfd_boolean resolved_to_zero;
10361
10362 globals = elf32_arm_hash_table (info);
10363 if (globals == NULL)
10364 return bfd_reloc_notsupported;
10365
10366 BFD_ASSERT (is_arm_elf (input_bfd));
10367 BFD_ASSERT (howto != NULL);
10368
10369 /* Some relocation types map to different relocations depending on the
10370 target. We pick the right one here. */
10371 r_type = arm_real_reloc_type (globals, r_type);
10372
10373 /* It is possible to have linker relaxations on some TLS access
10374 models. Update our information here. */
10375 r_type = elf32_arm_tls_transition (info, r_type, h);
10376
10377 if (r_type != howto->type)
10378 howto = elf32_arm_howto_from_type (r_type);
10379
10380 eh = (struct elf32_arm_link_hash_entry *) h;
10381 sgot = globals->root.sgot;
10382 local_got_offsets = elf_local_got_offsets (input_bfd);
10383 local_tlsdesc_gotents = elf32_arm_local_tlsdesc_gotent (input_bfd);
10384
10385 if (globals->root.dynamic_sections_created)
10386 srelgot = globals->root.srelgot;
10387 else
10388 srelgot = NULL;
10389
10390 r_symndx = ELF32_R_SYM (rel->r_info);
10391
10392 if (globals->use_rel)
10393 {
10394 addend = bfd_get_32 (input_bfd, hit_data) & howto->src_mask;
10395
10396 if (addend & ((howto->src_mask + 1) >> 1))
10397 {
10398 signed_addend = -1;
10399 signed_addend &= ~ howto->src_mask;
10400 signed_addend |= addend;
10401 }
10402 else
10403 signed_addend = addend;
10404 }
10405 else
10406 addend = signed_addend = rel->r_addend;
10407
10408 /* ST_BRANCH_TO_ARM is nonsense to thumb-only targets when we
10409 are resolving a function call relocation. */
10410 if (using_thumb_only (globals)
10411 && (r_type == R_ARM_THM_CALL
10412 || r_type == R_ARM_THM_JUMP24)
10413 && branch_type == ST_BRANCH_TO_ARM)
10414 branch_type = ST_BRANCH_TO_THUMB;
10415
10416 /* Record the symbol information that should be used in dynamic
10417 relocations. */
10418 dynreloc_st_type = st_type;
10419 dynreloc_value = value;
10420 if (branch_type == ST_BRANCH_TO_THUMB)
10421 dynreloc_value |= 1;
10422
10423 /* Find out whether the symbol has a PLT. Set ST_VALUE, BRANCH_TYPE and
10424 VALUE appropriately for relocations that we resolve at link time. */
10425 has_iplt_entry = FALSE;
10426 if (elf32_arm_get_plt_info (input_bfd, globals, eh, r_symndx, &root_plt,
10427 &arm_plt)
10428 && root_plt->offset != (bfd_vma) -1)
10429 {
10430 plt_offset = root_plt->offset;
10431 gotplt_offset = arm_plt->got_offset;
10432
10433 if (h == NULL || eh->is_iplt)
10434 {
10435 has_iplt_entry = TRUE;
10436 splt = globals->root.iplt;
10437
10438 /* Populate .iplt entries here, because not all of them will
10439 be seen by finish_dynamic_symbol. The lower bit is set if
10440 we have already populated the entry. */
10441 if (plt_offset & 1)
10442 plt_offset--;
10443 else
10444 {
10445 if (elf32_arm_populate_plt_entry (output_bfd, info, root_plt, arm_plt,
10446 -1, dynreloc_value))
10447 root_plt->offset |= 1;
10448 else
10449 return bfd_reloc_notsupported;
10450 }
10451
10452 /* Static relocations always resolve to the .iplt entry. */
10453 st_type = STT_FUNC;
10454 value = (splt->output_section->vma
10455 + splt->output_offset
10456 + plt_offset);
10457 branch_type = ST_BRANCH_TO_ARM;
10458
10459 /* If there are non-call relocations that resolve to the .iplt
10460 entry, then all dynamic ones must too. */
10461 if (arm_plt->noncall_refcount != 0)
10462 {
10463 dynreloc_st_type = st_type;
10464 dynreloc_value = value;
10465 }
10466 }
10467 else
10468 /* We populate the .plt entry in finish_dynamic_symbol. */
10469 splt = globals->root.splt;
10470 }
10471 else
10472 {
10473 splt = NULL;
10474 plt_offset = (bfd_vma) -1;
10475 gotplt_offset = (bfd_vma) -1;
10476 }
10477
10478 resolved_to_zero = (h != NULL
10479 && UNDEFWEAK_NO_DYNAMIC_RELOC (info, h));
10480
10481 switch (r_type)
10482 {
10483 case R_ARM_NONE:
10484 /* We don't need to find a value for this symbol. It's just a
10485 marker. */
10486 *unresolved_reloc_p = FALSE;
10487 return bfd_reloc_ok;
10488
10489 case R_ARM_ABS12:
10490 if (!globals->vxworks_p)
10491 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
10492 /* Fall through. */
10493
10494 case R_ARM_PC24:
10495 case R_ARM_ABS32:
10496 case R_ARM_ABS32_NOI:
10497 case R_ARM_REL32:
10498 case R_ARM_REL32_NOI:
10499 case R_ARM_CALL:
10500 case R_ARM_JUMP24:
10501 case R_ARM_XPC25:
10502 case R_ARM_PREL31:
10503 case R_ARM_PLT32:
10504 /* Handle relocations which should use the PLT entry. ABS32/REL32
10505 will use the symbol's value, which may point to a PLT entry, but we
10506 don't need to handle that here. If we created a PLT entry, all
10507 branches in this object should go to it, except if the PLT is too
10508 far away, in which case a long branch stub should be inserted. */
10509 if ((r_type != R_ARM_ABS32 && r_type != R_ARM_REL32
10510 && r_type != R_ARM_ABS32_NOI && r_type != R_ARM_REL32_NOI
10511 && r_type != R_ARM_CALL
10512 && r_type != R_ARM_JUMP24
10513 && r_type != R_ARM_PLT32)
10514 && plt_offset != (bfd_vma) -1)
10515 {
10516 /* If we've created a .plt section, and assigned a PLT entry
10517 to this function, it must either be a STT_GNU_IFUNC reference
10518 or not be known to bind locally. In other cases, we should
10519 have cleared the PLT entry by now. */
10520 BFD_ASSERT (has_iplt_entry || !SYMBOL_CALLS_LOCAL (info, h));
10521
10522 value = (splt->output_section->vma
10523 + splt->output_offset
10524 + plt_offset);
10525 *unresolved_reloc_p = FALSE;
10526 return _bfd_final_link_relocate (howto, input_bfd, input_section,
10527 contents, rel->r_offset, value,
10528 rel->r_addend);
10529 }
10530
10531 /* When generating a shared object or relocatable executable, these
10532 relocations are copied into the output file to be resolved at
10533 run time. */
10534 if ((bfd_link_pic (info)
10535 || globals->root.is_relocatable_executable
10536 || globals->fdpic_p)
10537 && (input_section->flags & SEC_ALLOC)
10538 && !(globals->vxworks_p
10539 && strcmp (input_section->output_section->name,
10540 ".tls_vars") == 0)
10541 && ((r_type != R_ARM_REL32 && r_type != R_ARM_REL32_NOI)
10542 || !SYMBOL_CALLS_LOCAL (info, h))
10543 && !(input_bfd == globals->stub_bfd
10544 && strstr (input_section->name, STUB_SUFFIX))
10545 && (h == NULL
10546 || (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
10547 && !resolved_to_zero)
10548 || h->root.type != bfd_link_hash_undefweak)
10549 && r_type != R_ARM_PC24
10550 && r_type != R_ARM_CALL
10551 && r_type != R_ARM_JUMP24
10552 && r_type != R_ARM_PREL31
10553 && r_type != R_ARM_PLT32)
10554 {
10555 Elf_Internal_Rela outrel;
10556 bfd_boolean skip, relocate;
10557 int isrofixup = 0;
10558
10559 if ((r_type == R_ARM_REL32 || r_type == R_ARM_REL32_NOI)
10560 && !h->def_regular)
10561 {
10562 char *v = _("shared object");
10563
10564 if (bfd_link_executable (info))
10565 v = _("PIE executable");
10566
10567 _bfd_error_handler
10568 (_("%pB: relocation %s against external or undefined symbol `%s'"
10569 " can not be used when making a %s; recompile with -fPIC"), input_bfd,
10570 elf32_arm_howto_table_1[r_type].name, h->root.root.string, v);
10571 return bfd_reloc_notsupported;
10572 }
10573
10574 *unresolved_reloc_p = FALSE;
10575
10576 if (sreloc == NULL && globals->root.dynamic_sections_created)
10577 {
10578 sreloc = _bfd_elf_get_dynamic_reloc_section (input_bfd, input_section,
10579 ! globals->use_rel);
10580
10581 if (sreloc == NULL)
10582 return bfd_reloc_notsupported;
10583 }
10584
10585 skip = FALSE;
10586 relocate = FALSE;
10587
10588 outrel.r_addend = addend;
10589 outrel.r_offset =
10590 _bfd_elf_section_offset (output_bfd, info, input_section,
10591 rel->r_offset);
10592 if (outrel.r_offset == (bfd_vma) -1)
10593 skip = TRUE;
10594 else if (outrel.r_offset == (bfd_vma) -2)
10595 skip = TRUE, relocate = TRUE;
10596 outrel.r_offset += (input_section->output_section->vma
10597 + input_section->output_offset);
10598
10599 if (skip)
10600 memset (&outrel, 0, sizeof outrel);
10601 else if (h != NULL
10602 && h->dynindx != -1
10603 && (!bfd_link_pic (info)
10604 || !(bfd_link_pie (info)
10605 || SYMBOLIC_BIND (info, h))
10606 || !h->def_regular))
10607 outrel.r_info = ELF32_R_INFO (h->dynindx, r_type);
10608 else
10609 {
10610 int symbol;
10611
10612 /* This symbol is local, or marked to become local. */
10613 BFD_ASSERT (r_type == R_ARM_ABS32 || r_type == R_ARM_ABS32_NOI
10614 || (globals->fdpic_p && !bfd_link_pic(info)));
10615 if (globals->symbian_p)
10616 {
10617 asection *osec;
10618
	      /* On Symbian OS, the data segment and text segment
10620 can be relocated independently. Therefore, we
10621 must indicate the segment to which this
10622 relocation is relative. The BPABI allows us to
10623 use any symbol in the right segment; we just use
10624 the section symbol as it is convenient. (We
10625 cannot use the symbol given by "h" directly as it
10626 will not appear in the dynamic symbol table.)
10627
10628 Note that the dynamic linker ignores the section
10629 symbol value, so we don't subtract osec->vma
10630 from the emitted reloc addend. */
10631 if (sym_sec)
10632 osec = sym_sec->output_section;
10633 else
10634 osec = input_section->output_section;
10635 symbol = elf_section_data (osec)->dynindx;
10636 if (symbol == 0)
10637 {
10638 struct elf_link_hash_table *htab = elf_hash_table (info);
10639
10640 if ((osec->flags & SEC_READONLY) == 0
10641 && htab->data_index_section != NULL)
10642 osec = htab->data_index_section;
10643 else
10644 osec = htab->text_index_section;
10645 symbol = elf_section_data (osec)->dynindx;
10646 }
10647 BFD_ASSERT (symbol != 0);
10648 }
10649 else
10650 /* On SVR4-ish systems, the dynamic loader cannot
10651 relocate the text and data segments independently,
10652 so the symbol does not matter. */
10653 symbol = 0;
10654 if (dynreloc_st_type == STT_GNU_IFUNC)
10655 /* We have an STT_GNU_IFUNC symbol that doesn't resolve
10656 to the .iplt entry. Instead, every non-call reference
10657 must use an R_ARM_IRELATIVE relocation to obtain the
10658 correct run-time address. */
10659 outrel.r_info = ELF32_R_INFO (symbol, R_ARM_IRELATIVE);
10660 else if (globals->fdpic_p && !bfd_link_pic(info))
10661 isrofixup = 1;
10662 else
10663 outrel.r_info = ELF32_R_INFO (symbol, R_ARM_RELATIVE);
10664 if (globals->use_rel)
10665 relocate = TRUE;
10666 else
10667 outrel.r_addend += dynreloc_value;
10668 }
10669
10670 if (isrofixup)
10671 arm_elf_add_rofixup(output_bfd, globals->srofixup, outrel.r_offset);
10672 else
10673 elf32_arm_add_dynreloc (output_bfd, info, sreloc, &outrel);
10674
10675 /* If this reloc is against an external symbol, we do not want to
10676 fiddle with the addend. Otherwise, we need to include the symbol
10677 value so that it becomes an addend for the dynamic reloc. */
10678 if (! relocate)
10679 return bfd_reloc_ok;
10680
10681 return _bfd_final_link_relocate (howto, input_bfd, input_section,
10682 contents, rel->r_offset,
10683 dynreloc_value, (bfd_vma) 0);
10684 }
10685 else switch (r_type)
10686 {
10687 case R_ARM_ABS12:
10688 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
10689
10690 case R_ARM_XPC25: /* Arm BLX instruction. */
10691 case R_ARM_CALL:
10692 case R_ARM_JUMP24:
10693 case R_ARM_PC24: /* Arm B/BL instruction. */
10694 case R_ARM_PLT32:
10695 {
10696 struct elf32_arm_stub_hash_entry *stub_entry = NULL;
10697
10698 if (r_type == R_ARM_XPC25)
10699 {
10700 /* Check for Arm calling Arm function. */
10701 /* FIXME: Should we translate the instruction into a BL
10702 instruction instead ? */
10703 if (branch_type != ST_BRANCH_TO_THUMB)
10704 _bfd_error_handler
10705 (_("\%pB: warning: %s BLX instruction targets"
10706 " %s function '%s'"),
10707 input_bfd, "ARM",
10708 "ARM", h ? h->root.root.string : "(local)");
10709 }
10710 else if (r_type == R_ARM_PC24)
10711 {
10712 /* Check for Arm calling Thumb function. */
10713 if (branch_type == ST_BRANCH_TO_THUMB)
10714 {
10715 if (elf32_arm_to_thumb_stub (info, sym_name, input_bfd,
10716 output_bfd, input_section,
10717 hit_data, sym_sec, rel->r_offset,
10718 signed_addend, value,
10719 error_message))
10720 return bfd_reloc_ok;
10721 else
10722 return bfd_reloc_dangerous;
10723 }
10724 }
10725
10726 /* Check if a stub has to be inserted because the
10727 destination is too far or we are changing mode. */
10728 if ( r_type == R_ARM_CALL
10729 || r_type == R_ARM_JUMP24
10730 || r_type == R_ARM_PLT32)
10731 {
10732 enum elf32_arm_stub_type stub_type = arm_stub_none;
10733 struct elf32_arm_link_hash_entry *hash;
10734
10735 hash = (struct elf32_arm_link_hash_entry *) h;
10736 stub_type = arm_type_of_stub (info, input_section, rel,
10737 st_type, &branch_type,
10738 hash, value, sym_sec,
10739 input_bfd, sym_name);
10740
10741 if (stub_type != arm_stub_none)
10742 {
10743 /* The target is out of reach, so redirect the
10744 branch to the local stub for this function. */
10745 stub_entry = elf32_arm_get_stub_entry (input_section,
10746 sym_sec, h,
10747 rel, globals,
10748 stub_type);
10749 {
10750 if (stub_entry != NULL)
10751 value = (stub_entry->stub_offset
10752 + stub_entry->stub_sec->output_offset
10753 + stub_entry->stub_sec->output_section->vma);
10754
10755 if (plt_offset != (bfd_vma) -1)
10756 *unresolved_reloc_p = FALSE;
10757 }
10758 }
10759 else
10760 {
10761 /* If the call goes through a PLT entry, make sure to
10762 check distance to the right destination address. */
10763 if (plt_offset != (bfd_vma) -1)
10764 {
10765 value = (splt->output_section->vma
10766 + splt->output_offset
10767 + plt_offset);
10768 *unresolved_reloc_p = FALSE;
10769 /* The PLT entry is in ARM mode, regardless of the
10770 target function. */
10771 branch_type = ST_BRANCH_TO_ARM;
10772 }
10773 }
10774 }
10775
10776 /* The ARM ELF ABI says that this reloc is computed as: S - P + A
10777 where:
10778 S is the address of the symbol in the relocation.
10779 P is address of the instruction being relocated.
10780 A is the addend (extracted from the instruction) in bytes.
10781
10782 S is held in 'value'.
10783 P is the base address of the section containing the
10784 instruction plus the offset of the reloc into that
10785 section, ie:
10786 (input_section->output_section->vma +
10787 input_section->output_offset +
10788 rel->r_offset).
10789 A is the addend, converted into bytes, ie:
10790 (signed_addend * 4)
10791
10792 Note: None of these operations have knowledge of the pipeline
10793 size of the processor, thus it is up to the assembler to
10794 encode this information into the addend. */
10795 value -= (input_section->output_section->vma
10796 + input_section->output_offset);
10797 value -= rel->r_offset;
10798 if (globals->use_rel)
10799 value += (signed_addend << howto->size);
10800 else
10801 /* RELA addends do not have to be adjusted by howto->size. */
10802 value += signed_addend;
10803
10804 signed_addend = value;
10805 signed_addend >>= howto->rightshift;
10806
10807 /* A branch to an undefined weak symbol is turned into a jump to
10808 the next instruction unless a PLT entry will be created.
10809 Do the same for local undefined symbols (but not for STN_UNDEF).
10810 The jump to the next instruction is optimized as a NOP depending
10811 on the architecture. */
10812 if (h ? (h->root.type == bfd_link_hash_undefweak
10813 && plt_offset == (bfd_vma) -1)
10814 : r_symndx != STN_UNDEF && bfd_is_und_section (sym_sec))
10815 {
10816 value = (bfd_get_32 (input_bfd, hit_data) & 0xf0000000);
10817
10818 if (arch_has_arm_nop (globals))
10819 value |= 0x0320f000;
10820 else
10821 value |= 0x01a00000; /* Using pre-UAL nop: mov r0, r0. */
10822 }
10823 else
10824 {
10825 /* Perform a signed range check. */
10826 if ( signed_addend > ((bfd_signed_vma) (howto->dst_mask >> 1))
10827 || signed_addend < - ((bfd_signed_vma) ((howto->dst_mask + 1) >> 1)))
10828 return bfd_reloc_overflow;
10829
10830 addend = (value & 2);
10831
10832 value = (signed_addend & howto->dst_mask)
10833 | (bfd_get_32 (input_bfd, hit_data) & (~ howto->dst_mask));
10834
10835 if (r_type == R_ARM_CALL)
10836 {
10837 /* Set the H bit in the BLX instruction. */
10838 if (branch_type == ST_BRANCH_TO_THUMB)
10839 {
10840 if (addend)
10841 value |= (1 << 24);
10842 else
10843 value &= ~(bfd_vma)(1 << 24);
10844 }
10845
10846 /* Select the correct instruction (BL or BLX). */
10847 /* Only if we are not handling a BL to a stub. In this
10848 case, mode switching is performed by the stub. */
10849 if (branch_type == ST_BRANCH_TO_THUMB && !stub_entry)
10850 value |= (1 << 28);
10851 else if (stub_entry || branch_type != ST_BRANCH_UNKNOWN)
10852 {
10853 value &= ~(bfd_vma)(1 << 28);
10854 value |= (1 << 24);
10855 }
10856 }
10857 }
10858 }
10859 break;
10860
10861 case R_ARM_ABS32:
10862 value += addend;
10863 if (branch_type == ST_BRANCH_TO_THUMB)
10864 value |= 1;
10865 break;
10866
10867 case R_ARM_ABS32_NOI:
10868 value += addend;
10869 break;
10870
10871 case R_ARM_REL32:
10872 value += addend;
10873 if (branch_type == ST_BRANCH_TO_THUMB)
10874 value |= 1;
10875 value -= (input_section->output_section->vma
10876 + input_section->output_offset + rel->r_offset);
10877 break;
10878
10879 case R_ARM_REL32_NOI:
10880 value += addend;
10881 value -= (input_section->output_section->vma
10882 + input_section->output_offset + rel->r_offset);
10883 break;
10884
10885 case R_ARM_PREL31:
10886 value -= (input_section->output_section->vma
10887 + input_section->output_offset + rel->r_offset);
10888 value += signed_addend;
10889 if (! h || h->root.type != bfd_link_hash_undefweak)
10890 {
10891 /* Check for overflow. */
10892 if ((value ^ (value >> 1)) & (1 << 30))
10893 return bfd_reloc_overflow;
10894 }
10895 value &= 0x7fffffff;
10896 value |= (bfd_get_32 (input_bfd, hit_data) & 0x80000000);
10897 if (branch_type == ST_BRANCH_TO_THUMB)
10898 value |= 1;
10899 break;
10900 }
10901
10902 bfd_put_32 (input_bfd, value, hit_data);
10903 return bfd_reloc_ok;
10904
10905 case R_ARM_ABS8:
      /* PR 16202: Refetch the addend using the correct size.  */
10907 if (globals->use_rel)
10908 addend = bfd_get_8 (input_bfd, hit_data);
10909 value += addend;
10910
10911 /* There is no way to tell whether the user intended to use a signed or
10912 unsigned addend. When checking for overflow we accept either,
10913 as specified by the AAELF. */
10914 if ((long) value > 0xff || (long) value < -0x80)
10915 return bfd_reloc_overflow;
10916
10917 bfd_put_8 (input_bfd, value, hit_data);
10918 return bfd_reloc_ok;
10919
10920 case R_ARM_ABS16:
      /* PR 16202: Refetch the addend using the correct size.  */
10922 if (globals->use_rel)
10923 addend = bfd_get_16 (input_bfd, hit_data);
10924 value += addend;
10925
10926 /* See comment for R_ARM_ABS8. */
10927 if ((long) value > 0xffff || (long) value < -0x8000)
10928 return bfd_reloc_overflow;
10929
10930 bfd_put_16 (input_bfd, value, hit_data);
10931 return bfd_reloc_ok;
10932
10933 case R_ARM_THM_ABS5:
10934 /* Support ldr and str instructions for the thumb. */
10935 if (globals->use_rel)
10936 {
10937 /* Need to refetch addend. */
10938 addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
10939 /* ??? Need to determine shift amount from operand size. */
10940 addend >>= howto->rightshift;
10941 }
10942 value += addend;
10943
10944 /* ??? Isn't value unsigned? */
10945 if ((long) value > 0x1f || (long) value < -0x10)
10946 return bfd_reloc_overflow;
10947
10948 /* ??? Value needs to be properly shifted into place first. */
10949 value |= bfd_get_16 (input_bfd, hit_data) & 0xf83f;
10950 bfd_put_16 (input_bfd, value, hit_data);
10951 return bfd_reloc_ok;
10952
10953 case R_ARM_THM_ALU_PREL_11_0:
10954 /* Corresponds to: addw.w reg, pc, #offset (and similarly for subw). */
10955 {
10956 bfd_vma insn;
10957 bfd_signed_vma relocation;
10958
10959 insn = (bfd_get_16 (input_bfd, hit_data) << 16)
10960 | bfd_get_16 (input_bfd, hit_data + 2);
10961
10962 if (globals->use_rel)
10963 {
10964 signed_addend = (insn & 0xff) | ((insn & 0x7000) >> 4)
10965 | ((insn & (1 << 26)) >> 15);
10966 if (insn & 0xf00000)
10967 signed_addend = -signed_addend;
10968 }
10969
10970 relocation = value + signed_addend;
10971 relocation -= Pa (input_section->output_section->vma
10972 + input_section->output_offset
10973 + rel->r_offset);
10974
10975 /* PR 21523: Use an absolute value. The user of this reloc will
10976 have already selected an ADD or SUB insn appropriately. */
10977 value = llabs (relocation);
10978
10979 if (value >= 0x1000)
10980 return bfd_reloc_overflow;
10981
10982 /* Destination is Thumb. Force bit 0 to 1 to reflect this. */
10983 if (branch_type == ST_BRANCH_TO_THUMB)
10984 value |= 1;
10985
10986 insn = (insn & 0xfb0f8f00) | (value & 0xff)
10987 | ((value & 0x700) << 4)
10988 | ((value & 0x800) << 15);
10989 if (relocation < 0)
10990 insn |= 0xa00000;
10991
10992 bfd_put_16 (input_bfd, insn >> 16, hit_data);
10993 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
10994
10995 return bfd_reloc_ok;
10996 }
10997
    case R_ARM_THM_PC8:
      /* PR 10073: This reloc is not generated by the GNU toolchain,
	 but it is supported for compatibility with third party libraries
	 generated by other compilers, specifically the ARM/IAR.  */
      {
	bfd_vma insn;
	bfd_signed_vma relocation;

	insn = bfd_get_16 (input_bfd, hit_data);

	if (globals->use_rel)
	  /* REL format: the stored 8-bit immediate is a word offset; scale
	     it to bytes and undo the encoding's implicit +4 PC bias.  */
	  addend = ((((insn & 0x00ff) << 2) + 4) & 0x3ff) -4;

	relocation = value + addend;
	relocation -= Pa (input_section->output_section->vma
			  + input_section->output_offset
			  + rel->r_offset);

	value = relocation;

	/* We do not check for overflow of this reloc.  Although strictly
	   speaking this is incorrect, it appears to be necessary in order
	   to work with IAR generated relocs.  Since GCC and GAS do not
	   generate R_ARM_THM_PC8 relocs, the lack of a check should not be
	   a problem for them.  */
	value &= 0x3fc;

	/* Re-insert the offset as a word count in the low byte.  */
	insn = (insn & 0xff00) | (value >> 2);

	bfd_put_16 (input_bfd, insn, hit_data);

	return bfd_reloc_ok;
      }
11031
    case R_ARM_THM_PC12:
      /* Corresponds to: ldr.w reg, [pc, #offset].  */
      {
	bfd_vma insn;
	bfd_signed_vma relocation;

	insn = (bfd_get_16 (input_bfd, hit_data) << 16)
	     | bfd_get_16 (input_bfd, hit_data + 2);

	if (globals->use_rel)
	  {
	    /* REL format: bits 0-11 hold the magnitude of the addend and
	       bit 23 (the U bit) its sign -- clear means subtract.  */
	    signed_addend = insn & 0xfff;
	    if (!(insn & (1 << 23)))
	      signed_addend = -signed_addend;
	  }

	relocation = value + signed_addend;
	relocation -= Pa (input_section->output_section->vma
			  + input_section->output_offset
			  + rel->r_offset);

	value = relocation;

	/* NOTE(review): VALUE is unsigned, so a negative RELOCATION always
	   fails this check even though the encoding's U bit could express
	   it; confirm that rejecting negative offsets is intentional.  */
	if (value >= 0x1000)
	  return bfd_reloc_overflow;

	/* Insert the 12-bit offset and set the U (add) bit for
	   non-negative offsets.  */
	insn = (insn & 0xff7ff000) | value;
	if (relocation >= 0)
	  insn |= (1 << 23);

	bfd_put_16 (input_bfd, insn >> 16, hit_data);
	bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);

	return bfd_reloc_ok;
      }
11067
    case R_ARM_THM_XPC22:
    case R_ARM_THM_CALL:
    case R_ARM_THM_JUMP24:
      /* Thumb BL (branch long instruction).  */
      {
	bfd_vma relocation;
	bfd_vma reloc_sign;
	bfd_boolean overflow = FALSE;
	bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
	bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
	bfd_signed_vma reloc_signed_max;
	bfd_signed_vma reloc_signed_min;
	bfd_vma check;
	bfd_signed_vma signed_check;
	int bitsize;
	const int thumb2 = using_thumb2 (globals);
	const int thumb2_bl = using_thumb2_bl (globals);

	/* A branch to an undefined weak symbol is turned into a jump to
	   the next instruction unless a PLT entry will be created.
	   The jump to the next instruction is optimized as a NOP.W for
	   Thumb-2 enabled architectures.  */
	if (h && h->root.type == bfd_link_hash_undefweak
	    && plt_offset == (bfd_vma) -1)
	  {
	    if (thumb2)
	      {
		/* NOP.W (0xf3af 0x8000).  */
		bfd_put_16 (input_bfd, 0xf3af, hit_data);
		bfd_put_16 (input_bfd, 0x8000, hit_data + 2);
	      }
	    else
	      {
		/* B.N to the next insn, followed by a NOP.  */
		bfd_put_16 (input_bfd, 0xe000, hit_data);
		bfd_put_16 (input_bfd, 0xbf00, hit_data + 2);
	      }
	    return bfd_reloc_ok;
	  }

	/* Fetch the addend.  We use the Thumb-2 encoding (backwards compatible
	   with Thumb-1) involving the J1 and J2 bits.  */
	if (globals->use_rel)
	  {
	    bfd_vma s = (upper_insn & (1 << 10)) >> 10;
	    bfd_vma upper = upper_insn & 0x3ff;
	    bfd_vma lower = lower_insn & 0x7ff;
	    bfd_vma j1 = (lower_insn & (1 << 13)) >> 13;
	    bfd_vma j2 = (lower_insn & (1 << 11)) >> 11;
	    /* I1 = NOT(J1 XOR S), I2 = NOT(J2 XOR S), per the Thumb-2
	       branch encoding.  */
	    bfd_vma i1 = j1 ^ s ? 0 : 1;
	    bfd_vma i2 = j2 ^ s ? 0 : 1;

	    addend = (i1 << 23) | (i2 << 22) | (upper << 12) | (lower << 1);
	    /* Sign extend.  */
	    addend = (addend | ((s ? 0 : 1) << 24)) - (1 << 24);

	    signed_addend = addend;
	  }

	if (r_type == R_ARM_THM_XPC22)
	  {
	    /* Check for Thumb to Thumb call.  */
	    /* FIXME: Should we translate the instruction into a BL
	       instruction instead ?  */
	    if (branch_type == ST_BRANCH_TO_THUMB)
	      _bfd_error_handler
		(_("%pB: warning: %s BLX instruction targets"
		   " %s function '%s'"),
		 input_bfd, "Thumb",
		 "Thumb", h ? h->root.root.string : "(local)");
	  }
	else
	  {
	    /* If it is not a call to Thumb, assume call to Arm.
	       If it is a call relative to a section name, then it is not a
	       function call at all, but rather a long jump.  Calls through
	       the PLT do not require stubs.  */
	    if (branch_type == ST_BRANCH_TO_ARM && plt_offset == (bfd_vma) -1)
	      {
		if (globals->use_blx && r_type == R_ARM_THM_CALL)
		  {
		    /* Convert BL to BLX.  */
		    lower_insn = (lower_insn & ~0x1000) | 0x0800;
		  }
		else if (( r_type != R_ARM_THM_CALL)
			 && (r_type != R_ARM_THM_JUMP24))
		  {
		    if (elf32_thumb_to_arm_stub
			(info, sym_name, input_bfd, output_bfd, input_section,
			 hit_data, sym_sec, rel->r_offset, signed_addend, value,
			 error_message))
		      return bfd_reloc_ok;
		    else
		      return bfd_reloc_dangerous;
		  }
	      }
	    else if (branch_type == ST_BRANCH_TO_THUMB
		     && globals->use_blx
		     && r_type == R_ARM_THM_CALL)
	      {
		/* Make sure this is a BL.  */
		lower_insn |= 0x1800;
	      }
	  }

	enum elf32_arm_stub_type stub_type = arm_stub_none;
	if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24)
	  {
	    /* Check if a stub has to be inserted because the destination
	       is too far.  */
	    struct elf32_arm_stub_hash_entry *stub_entry;
	    struct elf32_arm_link_hash_entry *hash;

	    hash = (struct elf32_arm_link_hash_entry *) h;

	    stub_type = arm_type_of_stub (info, input_section, rel,
					  st_type, &branch_type,
					  hash, value, sym_sec,
					  input_bfd, sym_name);

	    if (stub_type != arm_stub_none)
	      {
		/* The target is out of reach or we are changing modes, so
		   redirect the branch to the local stub for this
		   function.  */
		stub_entry = elf32_arm_get_stub_entry (input_section,
						       sym_sec, h,
						       rel, globals,
						       stub_type);
		if (stub_entry != NULL)
		  {
		    value = (stub_entry->stub_offset
			     + stub_entry->stub_sec->output_offset
			     + stub_entry->stub_sec->output_section->vma);

		    if (plt_offset != (bfd_vma) -1)
		      *unresolved_reloc_p = FALSE;
		  }

		/* If this call becomes a call to Arm, force BLX.  */
		if (globals->use_blx && (r_type == R_ARM_THM_CALL))
		  {
		    if ((stub_entry
			 && !arm_stub_is_thumb (stub_entry->stub_type))
			|| branch_type != ST_BRANCH_TO_THUMB)
		      lower_insn = (lower_insn & ~0x1000) | 0x0800;
		  }
	      }
	  }

	/* Handle calls via the PLT.  */
	if (stub_type == arm_stub_none && plt_offset != (bfd_vma) -1)
	  {
	    value = (splt->output_section->vma
		     + splt->output_offset
		     + plt_offset);

	    if (globals->use_blx
		&& r_type == R_ARM_THM_CALL
		&& ! using_thumb_only (globals))
	      {
		/* If the Thumb BLX instruction is available, convert
		   the BL to a BLX instruction to call the ARM-mode
		   PLT entry.  */
		lower_insn = (lower_insn & ~0x1000) | 0x0800;
		branch_type = ST_BRANCH_TO_ARM;
	      }
	    else
	      {
		if (! using_thumb_only (globals))
		  /* Target the Thumb stub before the ARM PLT entry.  */
		  value -= PLT_THUMB_STUB_SIZE;
		branch_type = ST_BRANCH_TO_THUMB;
	      }
	    *unresolved_reloc_p = FALSE;
	  }

	relocation = value + signed_addend;

	relocation -= (input_section->output_section->vma
		       + input_section->output_offset
		       + rel->r_offset);

	check = relocation >> howto->rightshift;

	/* If this is a signed value, the rightshift just dropped
	   leading 1 bits (assuming twos complement).  */
	if ((bfd_signed_vma) relocation >= 0)
	  signed_check = check;
	else
	  signed_check = check | ~((bfd_vma) -1 >> howto->rightshift);

	/* Calculate the permissible maximum and minimum values for
	   this relocation according to whether we're relocating for
	   Thumb-2 or not.  */
	bitsize = howto->bitsize;
	if (!thumb2_bl)
	  bitsize -= 2;
	reloc_signed_max = (1 << (bitsize - 1)) - 1;
	reloc_signed_min = ~reloc_signed_max;

	/* Assumes two's complement.  */
	if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
	  overflow = TRUE;

	if ((lower_insn & 0x5000) == 0x4000)
	  /* For a BLX instruction, make sure that the relocation is rounded up
	     to a word boundary.  This follows the semantics of the instruction
	     which specifies that bit 1 of the target address will come from bit
	     1 of the base address.  */
	  relocation = (relocation + 2) & ~ 3;

	/* Put RELOCATION back into the insn.  Assumes two's complement.
	   We use the Thumb-2 encoding, which is safe even if dealing with
	   a Thumb-1 instruction by virtue of our overflow check above.  */
	reloc_sign = (signed_check < 0) ? 1 : 0;
	upper_insn = (upper_insn & ~(bfd_vma) 0x7ff)
		     | ((relocation >> 12) & 0x3ff)
		     | (reloc_sign << 10);
	lower_insn = (lower_insn & ~(bfd_vma) 0x2fff)
		     | (((!((relocation >> 23) & 1)) ^ reloc_sign) << 13)
		     | (((!((relocation >> 22) & 1)) ^ reloc_sign) << 11)
		     | ((relocation >> 1) & 0x7ff);

	/* Put the relocated value back in the object file:  */
	bfd_put_16 (input_bfd, upper_insn, hit_data);
	bfd_put_16 (input_bfd, lower_insn, hit_data + 2);

	return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
      }
      break;
11297
    case R_ARM_THM_JUMP19:
      /* Thumb32 conditional branch instruction.  */
      {
	bfd_vma relocation;
	bfd_boolean overflow = FALSE;
	bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
	bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
	/* Conditional branches have a 21-bit signed byte offset.  */
	bfd_signed_vma reloc_signed_max = 0xffffe;
	bfd_signed_vma reloc_signed_min = -0x100000;
	bfd_signed_vma signed_check;
	enum elf32_arm_stub_type stub_type = arm_stub_none;
	struct elf32_arm_stub_hash_entry *stub_entry;
	struct elf32_arm_link_hash_entry *hash;

	/* Need to refetch the addend, reconstruct the top three bits,
	   and squish the two 11 bit pieces together.  */
	if (globals->use_rel)
	  {
	    bfd_vma S = (upper_insn & 0x0400) >> 10;
	    bfd_vma upper = (upper_insn & 0x003f);
	    bfd_vma J1 = (lower_insn & 0x2000) >> 13;
	    bfd_vma J2 = (lower_insn & 0x0800) >> 11;
	    bfd_vma lower = (lower_insn & 0x07ff);

	    upper |= J1 << 6;
	    upper |= J2 << 7;
	    upper |= (!S) << 8;
	    upper -= 0x0100; /* Sign extend.  */

	    addend = (upper << 12) | (lower << 1);
	    signed_addend = addend;
	  }

	/* Handle calls via the PLT.  */
	if (plt_offset != (bfd_vma) -1)
	  {
	    value = (splt->output_section->vma
		     + splt->output_offset
		     + plt_offset);
	    /* Target the Thumb stub before the ARM PLT entry.  */
	    value -= PLT_THUMB_STUB_SIZE;
	    *unresolved_reloc_p = FALSE;
	  }

	hash = (struct elf32_arm_link_hash_entry *)h;

	/* If the destination is out of range or needs a mode change,
	   branch to a stub instead.  */
	stub_type = arm_type_of_stub (info, input_section, rel,
				      st_type, &branch_type,
				      hash, value, sym_sec,
				      input_bfd, sym_name);
	if (stub_type != arm_stub_none)
	  {
	    stub_entry = elf32_arm_get_stub_entry (input_section,
						   sym_sec, h,
						   rel, globals,
						   stub_type);
	    if (stub_entry != NULL)
	      {
		value = (stub_entry->stub_offset
			 + stub_entry->stub_sec->output_offset
			 + stub_entry->stub_sec->output_section->vma);
	      }
	  }

	relocation = value + signed_addend;
	relocation -= (input_section->output_section->vma
		       + input_section->output_offset
		       + rel->r_offset);
	signed_check = (bfd_signed_vma) relocation;

	if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
	  overflow = TRUE;

	/* Put RELOCATION back into the insn.  */
	{
	  bfd_vma S = (relocation & 0x00100000) >> 20;
	  bfd_vma J2 = (relocation & 0x00080000) >> 19;
	  bfd_vma J1 = (relocation & 0x00040000) >> 18;
	  bfd_vma hi = (relocation & 0x0003f000) >> 12;
	  bfd_vma lo = (relocation & 0x00000ffe) >> 1;

	  upper_insn = (upper_insn & 0xfbc0) | (S << 10) | hi;
	  lower_insn = (lower_insn & 0xd000) | (J1 << 13) | (J2 << 11) | lo;
	}

	/* Put the relocated value back in the object file:  */
	bfd_put_16 (input_bfd, upper_insn, hit_data);
	bfd_put_16 (input_bfd, lower_insn, hit_data + 2);

	return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
      }
11389
    case R_ARM_THM_JUMP11:
    case R_ARM_THM_JUMP8:
    case R_ARM_THM_JUMP6:
      /* Thumb B (branch) instruction.  */
      {
	bfd_signed_vma relocation;
	bfd_signed_vma reloc_signed_max = (1 << (howto->bitsize - 1)) - 1;
	bfd_signed_vma reloc_signed_min = ~ reloc_signed_max;
	bfd_signed_vma signed_check;

	/* CZB cannot jump backward.  */
	if (r_type == R_ARM_THM_JUMP6)
	  reloc_signed_min = 0;

	if (globals->use_rel)
	  {
	    /* Need to refetch addend.  */
	    addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
	    /* Sign-extend if the top bit of the stored field is set.  */
	    if (addend & ((howto->src_mask + 1) >> 1))
	      {
		signed_addend = -1;
		signed_addend &= ~ howto->src_mask;
		signed_addend |= addend;
	      }
	    else
	      signed_addend = addend;
	    /* The value in the insn has been right shifted.  We need to
	       undo this, so that we can perform the address calculation
	       in terms of bytes.  */
	    signed_addend <<= howto->rightshift;
	  }
	relocation = value + signed_addend;

	relocation -= (input_section->output_section->vma
		       + input_section->output_offset
		       + rel->r_offset);

	relocation >>= howto->rightshift;
	signed_check = relocation;

	/* CBZ/CBNZ (JUMP6) splits its offset across the i and imm5
	   fields; the other branches use a contiguous field.  */
	if (r_type == R_ARM_THM_JUMP6)
	  relocation = ((relocation & 0x0020) << 4) | ((relocation & 0x001f) << 3);
	else
	  relocation &= howto->dst_mask;
	relocation |= (bfd_get_16 (input_bfd, hit_data) & (~ howto->dst_mask));

	bfd_put_16 (input_bfd, relocation, hit_data);

	/* Assumes two's complement.  */
	if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
	  return bfd_reloc_overflow;

	return bfd_reloc_ok;
      }
11444
11445 case R_ARM_ALU_PCREL7_0:
11446 case R_ARM_ALU_PCREL15_8:
11447 case R_ARM_ALU_PCREL23_15:
11448 {
11449 bfd_vma insn;
11450 bfd_vma relocation;
11451
11452 insn = bfd_get_32 (input_bfd, hit_data);
11453 if (globals->use_rel)
11454 {
11455 /* Extract the addend. */
11456 addend = (insn & 0xff) << ((insn & 0xf00) >> 7);
11457 signed_addend = addend;
11458 }
11459 relocation = value + signed_addend;
11460
11461 relocation -= (input_section->output_section->vma
11462 + input_section->output_offset
11463 + rel->r_offset);
11464 insn = (insn & ~0xfff)
11465 | ((howto->bitpos << 7) & 0xf00)
11466 | ((relocation >> howto->bitpos) & 0xff);
11467 bfd_put_32 (input_bfd, value, hit_data);
11468 }
11469 return bfd_reloc_ok;
11470
    case R_ARM_GNU_VTINHERIT:
    case R_ARM_GNU_VTENTRY:
      /* These are markers for garbage collection of unused vtable
	 entries; they modify nothing in the output image, so there is
	 nothing to relocate here.  */
      return bfd_reloc_ok;
11474
    case R_ARM_GOTOFF32:
      /* Relocation is relative to the start of the
	 global offset table.  */

      BFD_ASSERT (sgot != NULL);
      if (sgot == NULL)
	return bfd_reloc_notsupported;

      /* If we are addressing a Thumb function, we need to adjust the
	 address by one, so that attempts to call the function pointer will
	 correctly interpret it as Thumb code.  */
      if (branch_type == ST_BRANCH_TO_THUMB)
	value += 1;

      /* Note that sgot->output_offset is not involved in this
	 calculation.  We always want the start of .got.  If we
	 define _GLOBAL_OFFSET_TABLE in a different way, as is
	 permitted by the ABI, we might have to change this
	 calculation.  */
      value -= sgot->output_section->vma;
      return _bfd_final_link_relocate (howto, input_bfd, input_section,
				       contents, rel->r_offset, value,
				       rel->r_addend);
11498
    case R_ARM_GOTPC:
      /* Use global offset table as symbol value.  */
      BFD_ASSERT (sgot != NULL);

      if (sgot == NULL)
	return bfd_reloc_notsupported;

      /* The address of .got is known at final link time, so this reloc
	 can always be fully resolved here.  */
      *unresolved_reloc_p = FALSE;
      value = sgot->output_section->vma;
      return _bfd_final_link_relocate (howto, input_bfd, input_section,
				       contents, rel->r_offset, value,
				       rel->r_addend);
11511
    case R_ARM_GOT32:
    case R_ARM_GOT_PREL:
      /* Relocation is to the entry for this symbol in the
	 global offset table.  GOT32 resolves to the entry's offset from
	 the start of .got; GOT_PREL additionally adds the section's VMA
	 (see the tail of this case).  */
      if (sgot == NULL)
	return bfd_reloc_notsupported;

      if (dynreloc_st_type == STT_GNU_IFUNC
	  && plt_offset != (bfd_vma) -1
	  && (h == NULL || SYMBOL_REFERENCES_LOCAL (info, h)))
	{
	  /* We have a relocation against a locally-binding STT_GNU_IFUNC
	     symbol, and the relocation resolves directly to the runtime
	     target rather than to the .iplt entry.  This means that any
	     .got entry would be the same value as the .igot.plt entry,
	     so there's no point creating both.  */
	  sgot = globals->root.igotplt;
	  value = sgot->output_offset + gotplt_offset;
	}
      else if (h != NULL)
	{
	  bfd_vma off;

	  off = h->got.offset;
	  BFD_ASSERT (off != (bfd_vma) -1);
	  /* Bit 0 of the cached offset records whether the GOT entry has
	     already been filled in and its dynamic reloc emitted.  */
	  if ((off & 1) != 0)
	    {
	      /* We have already processed one GOT relocation against
		 this symbol.  */
	      off &= ~1;
	      if (globals->root.dynamic_sections_created
		  && !SYMBOL_REFERENCES_LOCAL (info, h))
		*unresolved_reloc_p = FALSE;
	    }
	  else
	    {
	      Elf_Internal_Rela outrel;
	      int isrofixup = 0;

	      if (((h->dynindx != -1) || globals->fdpic_p)
		  && !SYMBOL_REFERENCES_LOCAL (info, h))
		{
		  /* If the symbol doesn't resolve locally in a static
		     object, we have an undefined reference.  If the
		     symbol doesn't resolve locally in a dynamic object,
		     it should be resolved by the dynamic linker.  */
		  if (globals->root.dynamic_sections_created)
		    {
		      outrel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_GLOB_DAT);
		      *unresolved_reloc_p = FALSE;
		    }
		  else
		    outrel.r_info = 0;
		  outrel.r_addend = 0;
		}
	      else
		{
		  if (dynreloc_st_type == STT_GNU_IFUNC)
		    outrel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
		  else if (bfd_link_pic (info)
			   && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
			       || h->root.type != bfd_link_hash_undefweak))
		    outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
		  else
		    {
		      /* No dynamic reloc needed; on FDPIC targets emit a
			 read-only fixup instead.  */
		      outrel.r_info = 0;
		      if (globals->fdpic_p)
			isrofixup = 1;
		    }
		  outrel.r_addend = dynreloc_value;
		}

	      /* The GOT entry is initialized to zero by default.
		 See if we should install a different value.  */
	      if (outrel.r_addend != 0
		  && (globals->use_rel || outrel.r_info == 0))
		{
		  bfd_put_32 (output_bfd, outrel.r_addend,
			      sgot->contents + off);
		  outrel.r_addend = 0;
		}

	      if (isrofixup)
		arm_elf_add_rofixup (output_bfd,
				     elf32_arm_hash_table(info)->srofixup,
				     sgot->output_section->vma
				     + sgot->output_offset + off);

	      else if (outrel.r_info != 0)
		{
		  outrel.r_offset = (sgot->output_section->vma
				     + sgot->output_offset
				     + off);
		  elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
		}

	      /* Remember that this entry has been dealt with.  */
	      h->got.offset |= 1;
	    }
	  value = sgot->output_offset + off;
	}
      else
	{
	  /* Local symbol: use the per-input-bfd GOT offset table.  */
	  bfd_vma off;

	  BFD_ASSERT (local_got_offsets != NULL
		      && local_got_offsets[r_symndx] != (bfd_vma) -1);

	  off = local_got_offsets[r_symndx];

	  /* The offset must always be a multiple of 4.  We use the
	     least significant bit to record whether we have already
	     generated the necessary reloc.  */
	  if ((off & 1) != 0)
	    off &= ~1;
	  else
	    {
	      Elf_Internal_Rela outrel;
	      int isrofixup = 0;

	      if (dynreloc_st_type == STT_GNU_IFUNC)
		outrel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
	      else if (bfd_link_pic (info))
		outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
	      else
		{
		  outrel.r_info = 0;
		  if (globals->fdpic_p)
		    isrofixup = 1;
		}

	      /* The GOT entry is initialized to zero by default.
		 See if we should install a different value.  */
	      if (globals->use_rel || outrel.r_info == 0)
		bfd_put_32 (output_bfd, dynreloc_value, sgot->contents + off);

	      if (isrofixup)
		arm_elf_add_rofixup (output_bfd,
				     globals->srofixup,
				     sgot->output_section->vma
				     + sgot->output_offset + off);

	      else if (outrel.r_info != 0)
		{
		  outrel.r_addend = addend + dynreloc_value;
		  outrel.r_offset = (sgot->output_section->vma
				     + sgot->output_offset
				     + off);
		  elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
		}

	      local_got_offsets[r_symndx] |= 1;
	    }

	  value = sgot->output_offset + off;
	}
      if (r_type != R_ARM_GOT32)
	value += sgot->output_section->vma;

      return _bfd_final_link_relocate (howto, input_bfd, input_section,
				       contents, rel->r_offset, value,
				       rel->r_addend);
11673
    case R_ARM_TLS_LDO32:
      /* Local-dynamic TLS: resolve to the symbol's offset from the start
	 of the module's TLS block.  */
      value = value - dtpoff_base (info);

      return _bfd_final_link_relocate (howto, input_bfd, input_section,
				       contents, rel->r_offset, value,
				       rel->r_addend);
11680
    case R_ARM_TLS_LDM32:
    case R_ARM_TLS_LDM32_FDPIC:
      /* Local-dynamic TLS: resolve to the module's shared DTPMOD GOT
	 entry.  */
      {
	bfd_vma off;

	if (sgot == NULL)
	  abort ();

	off = globals->tls_ldm_got.offset;

	/* Bit 0 of the cached offset records whether the GOT entry has
	   already been emitted.  */
	if ((off & 1) != 0)
	  off &= ~1;
	else
	  {
	    /* If we don't know the module number, create a relocation
	       for it.  */
	    if (bfd_link_pic (info))
	      {
		Elf_Internal_Rela outrel;

		if (srelgot == NULL)
		  abort ();

		outrel.r_addend = 0;
		outrel.r_offset = (sgot->output_section->vma
				   + sgot->output_offset + off);
		outrel.r_info = ELF32_R_INFO (0, R_ARM_TLS_DTPMOD32);

		if (globals->use_rel)
		  bfd_put_32 (output_bfd, outrel.r_addend,
			      sgot->contents + off);

		elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
	      }
	    else
	      /* Static link: the executable is always module 1.  */
	      bfd_put_32 (output_bfd, 1, sgot->contents + off);

	    globals->tls_ldm_got.offset |= 1;
	  }

	if (r_type == R_ARM_TLS_LDM32_FDPIC)
	  {
	    /* FDPIC: store the GOT-relative offset of the entry directly
	       in the section contents.  */
	    bfd_put_32(output_bfd,
		       globals->root.sgot->output_offset + off,
		       contents + rel->r_offset);

	    return bfd_reloc_ok;
	  }
	else
	  {
	    /* Otherwise resolve to the PC-relative address of the GOT
	       entry.  */
	    value = sgot->output_section->vma + sgot->output_offset + off
	      - (input_section->output_section->vma
		 + input_section->output_offset + rel->r_offset);

	    return _bfd_final_link_relocate (howto, input_bfd, input_section,
					     contents, rel->r_offset, value,
					     rel->r_addend);
	  }
      }
11740
11741 case R_ARM_TLS_CALL:
11742 case R_ARM_THM_TLS_CALL:
11743 case R_ARM_TLS_GD32:
11744 case R_ARM_TLS_GD32_FDPIC:
11745 case R_ARM_TLS_IE32:
11746 case R_ARM_TLS_IE32_FDPIC:
11747 case R_ARM_TLS_GOTDESC:
11748 case R_ARM_TLS_DESCSEQ:
11749 case R_ARM_THM_TLS_DESCSEQ:
11750 {
11751 bfd_vma off, offplt;
11752 int indx = 0;
11753 char tls_type;
11754
11755 BFD_ASSERT (sgot != NULL);
11756
11757 if (h != NULL)
11758 {
11759 bfd_boolean dyn;
11760 dyn = globals->root.dynamic_sections_created;
11761 if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn,
11762 bfd_link_pic (info),
11763 h)
11764 && (!bfd_link_pic (info)
11765 || !SYMBOL_REFERENCES_LOCAL (info, h)))
11766 {
11767 *unresolved_reloc_p = FALSE;
11768 indx = h->dynindx;
11769 }
11770 off = h->got.offset;
11771 offplt = elf32_arm_hash_entry (h)->tlsdesc_got;
11772 tls_type = ((struct elf32_arm_link_hash_entry *) h)->tls_type;
11773 }
11774 else
11775 {
11776 BFD_ASSERT (local_got_offsets != NULL);
11777 off = local_got_offsets[r_symndx];
11778 offplt = local_tlsdesc_gotents[r_symndx];
11779 tls_type = elf32_arm_local_got_tls_type (input_bfd)[r_symndx];
11780 }
11781
11782 /* Linker relaxations happens from one of the
11783 R_ARM_{GOTDESC,CALL,DESCSEQ} relocations to IE or LE. */
11784 if (ELF32_R_TYPE(rel->r_info) != r_type)
11785 tls_type = GOT_TLS_IE;
11786
11787 BFD_ASSERT (tls_type != GOT_UNKNOWN);
11788
11789 if ((off & 1) != 0)
11790 off &= ~1;
11791 else
11792 {
11793 bfd_boolean need_relocs = FALSE;
11794 Elf_Internal_Rela outrel;
11795 int cur_off = off;
11796
11797 /* The GOT entries have not been initialized yet. Do it
11798 now, and emit any relocations. If both an IE GOT and a
11799 GD GOT are necessary, we emit the GD first. */
11800
11801 if ((bfd_link_pic (info) || indx != 0)
11802 && (h == NULL
11803 || (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
11804 && !resolved_to_zero)
11805 || h->root.type != bfd_link_hash_undefweak))
11806 {
11807 need_relocs = TRUE;
11808 BFD_ASSERT (srelgot != NULL);
11809 }
11810
11811 if (tls_type & GOT_TLS_GDESC)
11812 {
11813 bfd_byte *loc;
11814
11815 /* We should have relaxed, unless this is an undefined
11816 weak symbol. */
11817 BFD_ASSERT ((h && (h->root.type == bfd_link_hash_undefweak))
11818 || bfd_link_pic (info));
11819 BFD_ASSERT (globals->sgotplt_jump_table_size + offplt + 8
11820 <= globals->root.sgotplt->size);
11821
11822 outrel.r_addend = 0;
11823 outrel.r_offset = (globals->root.sgotplt->output_section->vma
11824 + globals->root.sgotplt->output_offset
11825 + offplt
11826 + globals->sgotplt_jump_table_size);
11827
11828 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_DESC);
11829 sreloc = globals->root.srelplt;
11830 loc = sreloc->contents;
11831 loc += globals->next_tls_desc_index++ * RELOC_SIZE (globals);
11832 BFD_ASSERT (loc + RELOC_SIZE (globals)
11833 <= sreloc->contents + sreloc->size);
11834
11835 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
11836
11837 /* For globals, the first word in the relocation gets
11838 the relocation index and the top bit set, or zero,
11839 if we're binding now. For locals, it gets the
11840 symbol's offset in the tls section. */
11841 bfd_put_32 (output_bfd,
11842 !h ? value - elf_hash_table (info)->tls_sec->vma
11843 : info->flags & DF_BIND_NOW ? 0
11844 : 0x80000000 | ELF32_R_SYM (outrel.r_info),
11845 globals->root.sgotplt->contents + offplt
11846 + globals->sgotplt_jump_table_size);
11847
11848 /* Second word in the relocation is always zero. */
11849 bfd_put_32 (output_bfd, 0,
11850 globals->root.sgotplt->contents + offplt
11851 + globals->sgotplt_jump_table_size + 4);
11852 }
11853 if (tls_type & GOT_TLS_GD)
11854 {
11855 if (need_relocs)
11856 {
11857 outrel.r_addend = 0;
11858 outrel.r_offset = (sgot->output_section->vma
11859 + sgot->output_offset
11860 + cur_off);
11861 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_DTPMOD32);
11862
11863 if (globals->use_rel)
11864 bfd_put_32 (output_bfd, outrel.r_addend,
11865 sgot->contents + cur_off);
11866
11867 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
11868
11869 if (indx == 0)
11870 bfd_put_32 (output_bfd, value - dtpoff_base (info),
11871 sgot->contents + cur_off + 4);
11872 else
11873 {
11874 outrel.r_addend = 0;
11875 outrel.r_info = ELF32_R_INFO (indx,
11876 R_ARM_TLS_DTPOFF32);
11877 outrel.r_offset += 4;
11878
11879 if (globals->use_rel)
11880 bfd_put_32 (output_bfd, outrel.r_addend,
11881 sgot->contents + cur_off + 4);
11882
11883 elf32_arm_add_dynreloc (output_bfd, info,
11884 srelgot, &outrel);
11885 }
11886 }
11887 else
11888 {
11889 /* If we are not emitting relocations for a
11890 general dynamic reference, then we must be in a
11891 static link or an executable link with the
11892 symbol binding locally. Mark it as belonging
11893 to module 1, the executable. */
11894 bfd_put_32 (output_bfd, 1,
11895 sgot->contents + cur_off);
11896 bfd_put_32 (output_bfd, value - dtpoff_base (info),
11897 sgot->contents + cur_off + 4);
11898 }
11899
11900 cur_off += 8;
11901 }
11902
11903 if (tls_type & GOT_TLS_IE)
11904 {
11905 if (need_relocs)
11906 {
11907 if (indx == 0)
11908 outrel.r_addend = value - dtpoff_base (info);
11909 else
11910 outrel.r_addend = 0;
11911 outrel.r_offset = (sgot->output_section->vma
11912 + sgot->output_offset
11913 + cur_off);
11914 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_TPOFF32);
11915
11916 if (globals->use_rel)
11917 bfd_put_32 (output_bfd, outrel.r_addend,
11918 sgot->contents + cur_off);
11919
11920 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
11921 }
11922 else
11923 bfd_put_32 (output_bfd, tpoff (info, value),
11924 sgot->contents + cur_off);
11925 cur_off += 4;
11926 }
11927
11928 if (h != NULL)
11929 h->got.offset |= 1;
11930 else
11931 local_got_offsets[r_symndx] |= 1;
11932 }
11933
11934 if ((tls_type & GOT_TLS_GD) && r_type != R_ARM_TLS_GD32 && r_type != R_ARM_TLS_GD32_FDPIC)
11935 off += 8;
11936 else if (tls_type & GOT_TLS_GDESC)
11937 off = offplt;
11938
11939 if (ELF32_R_TYPE(rel->r_info) == R_ARM_TLS_CALL
11940 || ELF32_R_TYPE(rel->r_info) == R_ARM_THM_TLS_CALL)
11941 {
11942 bfd_signed_vma offset;
11943 /* TLS stubs are arm mode. The original symbol is a
11944 data object, so branch_type is bogus. */
11945 branch_type = ST_BRANCH_TO_ARM;
11946 enum elf32_arm_stub_type stub_type
11947 = arm_type_of_stub (info, input_section, rel,
11948 st_type, &branch_type,
11949 (struct elf32_arm_link_hash_entry *)h,
11950 globals->tls_trampoline, globals->root.splt,
11951 input_bfd, sym_name);
11952
11953 if (stub_type != arm_stub_none)
11954 {
11955 struct elf32_arm_stub_hash_entry *stub_entry
11956 = elf32_arm_get_stub_entry
11957 (input_section, globals->root.splt, 0, rel,
11958 globals, stub_type);
11959 offset = (stub_entry->stub_offset
11960 + stub_entry->stub_sec->output_offset
11961 + stub_entry->stub_sec->output_section->vma);
11962 }
11963 else
11964 offset = (globals->root.splt->output_section->vma
11965 + globals->root.splt->output_offset
11966 + globals->tls_trampoline);
11967
11968 if (ELF32_R_TYPE(rel->r_info) == R_ARM_TLS_CALL)
11969 {
11970 unsigned long inst;
11971
11972 offset -= (input_section->output_section->vma
11973 + input_section->output_offset
11974 + rel->r_offset + 8);
11975
11976 inst = offset >> 2;
11977 inst &= 0x00ffffff;
11978 value = inst | (globals->use_blx ? 0xfa000000 : 0xeb000000);
11979 }
11980 else
11981 {
11982 /* Thumb blx encodes the offset in a complicated
11983 fashion. */
11984 unsigned upper_insn, lower_insn;
11985 unsigned neg;
11986
11987 offset -= (input_section->output_section->vma
11988 + input_section->output_offset
11989 + rel->r_offset + 4);
11990
11991 if (stub_type != arm_stub_none
11992 && arm_stub_is_thumb (stub_type))
11993 {
11994 lower_insn = 0xd000;
11995 }
11996 else
11997 {
11998 lower_insn = 0xc000;
11999 /* Round up the offset to a word boundary. */
12000 offset = (offset + 2) & ~2;
12001 }
12002
12003 neg = offset < 0;
12004 upper_insn = (0xf000
12005 | ((offset >> 12) & 0x3ff)
12006 | (neg << 10));
12007 lower_insn |= (((!((offset >> 23) & 1)) ^ neg) << 13)
12008 | (((!((offset >> 22) & 1)) ^ neg) << 11)
12009 | ((offset >> 1) & 0x7ff);
12010 bfd_put_16 (input_bfd, upper_insn, hit_data);
12011 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
12012 return bfd_reloc_ok;
12013 }
12014 }
12015 /* These relocations needs special care, as besides the fact
12016 they point somewhere in .gotplt, the addend must be
12017 adjusted accordingly depending on the type of instruction
12018 we refer to. */
12019 else if ((r_type == R_ARM_TLS_GOTDESC) && (tls_type & GOT_TLS_GDESC))
12020 {
12021 unsigned long data, insn;
12022 unsigned thumb;
12023
12024 data = bfd_get_32 (input_bfd, hit_data);
12025 thumb = data & 1;
12026 data &= ~1u;
12027
12028 if (thumb)
12029 {
12030 insn = bfd_get_16 (input_bfd, contents + rel->r_offset - data);
12031 if ((insn & 0xf000) == 0xf000 || (insn & 0xf800) == 0xe800)
12032 insn = (insn << 16)
12033 | bfd_get_16 (input_bfd,
12034 contents + rel->r_offset - data + 2);
12035 if ((insn & 0xf800c000) == 0xf000c000)
12036 /* bl/blx */
12037 value = -6;
12038 else if ((insn & 0xffffff00) == 0x4400)
12039 /* add */
12040 value = -5;
12041 else
12042 {
12043 _bfd_error_handler
12044 /* xgettext:c-format */
12045 (_("%pB(%pA+%#" PRIx64 "): "
12046 "unexpected %s instruction '%#lx' "
12047 "referenced by TLS_GOTDESC"),
12048 input_bfd, input_section, (uint64_t) rel->r_offset,
12049 "Thumb", insn);
12050 return bfd_reloc_notsupported;
12051 }
12052 }
12053 else
12054 {
12055 insn = bfd_get_32 (input_bfd, contents + rel->r_offset - data);
12056
12057 switch (insn >> 24)
12058 {
12059 case 0xeb: /* bl */
12060 case 0xfa: /* blx */
12061 value = -4;
12062 break;
12063
12064 case 0xe0: /* add */
12065 value = -8;
12066 break;
12067
12068 default:
12069 _bfd_error_handler
12070 /* xgettext:c-format */
12071 (_("%pB(%pA+%#" PRIx64 "): "
12072 "unexpected %s instruction '%#lx' "
12073 "referenced by TLS_GOTDESC"),
12074 input_bfd, input_section, (uint64_t) rel->r_offset,
12075 "ARM", insn);
12076 return bfd_reloc_notsupported;
12077 }
12078 }
12079
12080 value += ((globals->root.sgotplt->output_section->vma
12081 + globals->root.sgotplt->output_offset + off)
12082 - (input_section->output_section->vma
12083 + input_section->output_offset
12084 + rel->r_offset)
12085 + globals->sgotplt_jump_table_size);
12086 }
12087 else
12088 value = ((globals->root.sgot->output_section->vma
12089 + globals->root.sgot->output_offset + off)
12090 - (input_section->output_section->vma
12091 + input_section->output_offset + rel->r_offset));
12092
12093 if (globals->fdpic_p && (r_type == R_ARM_TLS_GD32_FDPIC ||
12094 r_type == R_ARM_TLS_IE32_FDPIC))
12095 {
12096 /* For FDPIC relocations, resolve to the offset of the GOT
12097 entry from the start of GOT. */
12098 bfd_put_32(output_bfd,
12099 globals->root.sgot->output_offset + off,
12100 contents + rel->r_offset);
12101
12102 return bfd_reloc_ok;
12103 }
12104 else
12105 {
12106 return _bfd_final_link_relocate (howto, input_bfd, input_section,
12107 contents, rel->r_offset, value,
12108 rel->r_addend);
12109 }
12110 }
12111
12112 case R_ARM_TLS_LE32:
12113 if (bfd_link_dll (info))
12114 {
12115 _bfd_error_handler
12116 /* xgettext:c-format */
12117 (_("%pB(%pA+%#" PRIx64 "): %s relocation not permitted "
12118 "in shared object"),
12119 input_bfd, input_section, (uint64_t) rel->r_offset, howto->name);
12120 return bfd_reloc_notsupported;
12121 }
12122 else
12123 value = tpoff (info, value);
12124
12125 return _bfd_final_link_relocate (howto, input_bfd, input_section,
12126 contents, rel->r_offset, value,
12127 rel->r_addend);
12128
12129 case R_ARM_V4BX:
12130 if (globals->fix_v4bx)
12131 {
12132 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
12133
12134 /* Ensure that we have a BX instruction. */
12135 BFD_ASSERT ((insn & 0x0ffffff0) == 0x012fff10);
12136
12137 if (globals->fix_v4bx == 2 && (insn & 0xf) != 0xf)
12138 {
12139 /* Branch to veneer. */
12140 bfd_vma glue_addr;
12141 glue_addr = elf32_arm_bx_glue (info, insn & 0xf);
12142 glue_addr -= input_section->output_section->vma
12143 + input_section->output_offset
12144 + rel->r_offset + 8;
12145 insn = (insn & 0xf0000000) | 0x0a000000
12146 | ((glue_addr >> 2) & 0x00ffffff);
12147 }
12148 else
12149 {
12150 /* Preserve Rm (lowest four bits) and the condition code
12151 (highest four bits). Other bits encode MOV PC,Rm. */
12152 insn = (insn & 0xf000000f) | 0x01a0f000;
12153 }
12154
12155 bfd_put_32 (input_bfd, insn, hit_data);
12156 }
12157 return bfd_reloc_ok;
12158
12159 case R_ARM_MOVW_ABS_NC:
12160 case R_ARM_MOVT_ABS:
12161 case R_ARM_MOVW_PREL_NC:
12162 case R_ARM_MOVT_PREL:
12163 /* Until we properly support segment-base-relative addressing then
12164 we assume the segment base to be zero, as for the group relocations.
12165 Thus R_ARM_MOVW_BREL_NC has the same semantics as R_ARM_MOVW_ABS_NC
12166 and R_ARM_MOVT_BREL has the same semantics as R_ARM_MOVT_ABS. */
12167 case R_ARM_MOVW_BREL_NC:
12168 case R_ARM_MOVW_BREL:
12169 case R_ARM_MOVT_BREL:
12170 {
12171 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
12172
12173 if (globals->use_rel)
12174 {
12175 addend = ((insn >> 4) & 0xf000) | (insn & 0xfff);
12176 signed_addend = (addend ^ 0x8000) - 0x8000;
12177 }
12178
12179 value += signed_addend;
12180
12181 if (r_type == R_ARM_MOVW_PREL_NC || r_type == R_ARM_MOVT_PREL)
12182 value -= (input_section->output_section->vma
12183 + input_section->output_offset + rel->r_offset);
12184
12185 if (r_type == R_ARM_MOVW_BREL && value >= 0x10000)
12186 return bfd_reloc_overflow;
12187
12188 if (branch_type == ST_BRANCH_TO_THUMB)
12189 value |= 1;
12190
12191 if (r_type == R_ARM_MOVT_ABS || r_type == R_ARM_MOVT_PREL
12192 || r_type == R_ARM_MOVT_BREL)
12193 value >>= 16;
12194
12195 insn &= 0xfff0f000;
12196 insn |= value & 0xfff;
12197 insn |= (value & 0xf000) << 4;
12198 bfd_put_32 (input_bfd, insn, hit_data);
12199 }
12200 return bfd_reloc_ok;
12201
12202 case R_ARM_THM_MOVW_ABS_NC:
12203 case R_ARM_THM_MOVT_ABS:
12204 case R_ARM_THM_MOVW_PREL_NC:
12205 case R_ARM_THM_MOVT_PREL:
12206 /* Until we properly support segment-base-relative addressing then
12207 we assume the segment base to be zero, as for the above relocations.
12208 Thus R_ARM_THM_MOVW_BREL_NC has the same semantics as
12209 R_ARM_THM_MOVW_ABS_NC and R_ARM_THM_MOVT_BREL has the same semantics
12210 as R_ARM_THM_MOVT_ABS. */
12211 case R_ARM_THM_MOVW_BREL_NC:
12212 case R_ARM_THM_MOVW_BREL:
12213 case R_ARM_THM_MOVT_BREL:
12214 {
12215 bfd_vma insn;
12216
12217 insn = bfd_get_16 (input_bfd, hit_data) << 16;
12218 insn |= bfd_get_16 (input_bfd, hit_data + 2);
12219
12220 if (globals->use_rel)
12221 {
12222 addend = ((insn >> 4) & 0xf000)
12223 | ((insn >> 15) & 0x0800)
12224 | ((insn >> 4) & 0x0700)
12225 | (insn & 0x00ff);
12226 signed_addend = (addend ^ 0x8000) - 0x8000;
12227 }
12228
12229 value += signed_addend;
12230
12231 if (r_type == R_ARM_THM_MOVW_PREL_NC || r_type == R_ARM_THM_MOVT_PREL)
12232 value -= (input_section->output_section->vma
12233 + input_section->output_offset + rel->r_offset);
12234
12235 if (r_type == R_ARM_THM_MOVW_BREL && value >= 0x10000)
12236 return bfd_reloc_overflow;
12237
12238 if (branch_type == ST_BRANCH_TO_THUMB)
12239 value |= 1;
12240
12241 if (r_type == R_ARM_THM_MOVT_ABS || r_type == R_ARM_THM_MOVT_PREL
12242 || r_type == R_ARM_THM_MOVT_BREL)
12243 value >>= 16;
12244
12245 insn &= 0xfbf08f00;
12246 insn |= (value & 0xf000) << 4;
12247 insn |= (value & 0x0800) << 15;
12248 insn |= (value & 0x0700) << 4;
12249 insn |= (value & 0x00ff);
12250
12251 bfd_put_16 (input_bfd, insn >> 16, hit_data);
12252 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
12253 }
12254 return bfd_reloc_ok;
12255
12256 case R_ARM_ALU_PC_G0_NC:
12257 case R_ARM_ALU_PC_G1_NC:
12258 case R_ARM_ALU_PC_G0:
12259 case R_ARM_ALU_PC_G1:
12260 case R_ARM_ALU_PC_G2:
12261 case R_ARM_ALU_SB_G0_NC:
12262 case R_ARM_ALU_SB_G1_NC:
12263 case R_ARM_ALU_SB_G0:
12264 case R_ARM_ALU_SB_G1:
12265 case R_ARM_ALU_SB_G2:
12266 {
12267 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
12268 bfd_vma pc = input_section->output_section->vma
12269 + input_section->output_offset + rel->r_offset;
12270 /* sb is the origin of the *segment* containing the symbol. */
12271 bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
12272 bfd_vma residual;
12273 bfd_vma g_n;
12274 bfd_signed_vma signed_value;
12275 int group = 0;
12276
12277 /* Determine which group of bits to select. */
12278 switch (r_type)
12279 {
12280 case R_ARM_ALU_PC_G0_NC:
12281 case R_ARM_ALU_PC_G0:
12282 case R_ARM_ALU_SB_G0_NC:
12283 case R_ARM_ALU_SB_G0:
12284 group = 0;
12285 break;
12286
12287 case R_ARM_ALU_PC_G1_NC:
12288 case R_ARM_ALU_PC_G1:
12289 case R_ARM_ALU_SB_G1_NC:
12290 case R_ARM_ALU_SB_G1:
12291 group = 1;
12292 break;
12293
12294 case R_ARM_ALU_PC_G2:
12295 case R_ARM_ALU_SB_G2:
12296 group = 2;
12297 break;
12298
12299 default:
12300 abort ();
12301 }
12302
12303 /* If REL, extract the addend from the insn. If RELA, it will
12304 have already been fetched for us. */
12305 if (globals->use_rel)
12306 {
12307 int negative;
12308 bfd_vma constant = insn & 0xff;
12309 bfd_vma rotation = (insn & 0xf00) >> 8;
12310
12311 if (rotation == 0)
12312 signed_addend = constant;
12313 else
12314 {
12315 /* Compensate for the fact that in the instruction, the
12316 rotation is stored in multiples of 2 bits. */
12317 rotation *= 2;
12318
12319 /* Rotate "constant" right by "rotation" bits. */
12320 signed_addend = (constant >> rotation) |
12321 (constant << (8 * sizeof (bfd_vma) - rotation));
12322 }
12323
12324 /* Determine if the instruction is an ADD or a SUB.
12325 (For REL, this determines the sign of the addend.) */
12326 negative = identify_add_or_sub (insn);
12327 if (negative == 0)
12328 {
12329 _bfd_error_handler
12330 /* xgettext:c-format */
12331 (_("%pB(%pA+%#" PRIx64 "): only ADD or SUB instructions "
12332 "are allowed for ALU group relocations"),
12333 input_bfd, input_section, (uint64_t) rel->r_offset);
12334 return bfd_reloc_overflow;
12335 }
12336
12337 signed_addend *= negative;
12338 }
12339
12340 /* Compute the value (X) to go in the place. */
12341 if (r_type == R_ARM_ALU_PC_G0_NC
12342 || r_type == R_ARM_ALU_PC_G1_NC
12343 || r_type == R_ARM_ALU_PC_G0
12344 || r_type == R_ARM_ALU_PC_G1
12345 || r_type == R_ARM_ALU_PC_G2)
12346 /* PC relative. */
12347 signed_value = value - pc + signed_addend;
12348 else
12349 /* Section base relative. */
12350 signed_value = value - sb + signed_addend;
12351
12352 /* If the target symbol is a Thumb function, then set the
12353 Thumb bit in the address. */
12354 if (branch_type == ST_BRANCH_TO_THUMB)
12355 signed_value |= 1;
12356
12357 /* Calculate the value of the relevant G_n, in encoded
12358 constant-with-rotation format. */
12359 g_n = calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
12360 group, &residual);
12361
12362 /* Check for overflow if required. */
12363 if ((r_type == R_ARM_ALU_PC_G0
12364 || r_type == R_ARM_ALU_PC_G1
12365 || r_type == R_ARM_ALU_PC_G2
12366 || r_type == R_ARM_ALU_SB_G0
12367 || r_type == R_ARM_ALU_SB_G1
12368 || r_type == R_ARM_ALU_SB_G2) && residual != 0)
12369 {
12370 _bfd_error_handler
12371 /* xgettext:c-format */
12372 (_("%pB(%pA+%#" PRIx64 "): overflow whilst "
12373 "splitting %#" PRIx64 " for group relocation %s"),
12374 input_bfd, input_section, (uint64_t) rel->r_offset,
12375 (uint64_t) (signed_value < 0 ? -signed_value : signed_value),
12376 howto->name);
12377 return bfd_reloc_overflow;
12378 }
12379
12380 /* Mask out the value and the ADD/SUB part of the opcode; take care
12381 not to destroy the S bit. */
12382 insn &= 0xff1ff000;
12383
12384 /* Set the opcode according to whether the value to go in the
12385 place is negative. */
12386 if (signed_value < 0)
12387 insn |= 1 << 22;
12388 else
12389 insn |= 1 << 23;
12390
12391 /* Encode the offset. */
12392 insn |= g_n;
12393
12394 bfd_put_32 (input_bfd, insn, hit_data);
12395 }
12396 return bfd_reloc_ok;
12397
12398 case R_ARM_LDR_PC_G0:
12399 case R_ARM_LDR_PC_G1:
12400 case R_ARM_LDR_PC_G2:
12401 case R_ARM_LDR_SB_G0:
12402 case R_ARM_LDR_SB_G1:
12403 case R_ARM_LDR_SB_G2:
12404 {
12405 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
12406 bfd_vma pc = input_section->output_section->vma
12407 + input_section->output_offset + rel->r_offset;
12408 /* sb is the origin of the *segment* containing the symbol. */
12409 bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
12410 bfd_vma residual;
12411 bfd_signed_vma signed_value;
12412 int group = 0;
12413
12414 /* Determine which groups of bits to calculate. */
12415 switch (r_type)
12416 {
12417 case R_ARM_LDR_PC_G0:
12418 case R_ARM_LDR_SB_G0:
12419 group = 0;
12420 break;
12421
12422 case R_ARM_LDR_PC_G1:
12423 case R_ARM_LDR_SB_G1:
12424 group = 1;
12425 break;
12426
12427 case R_ARM_LDR_PC_G2:
12428 case R_ARM_LDR_SB_G2:
12429 group = 2;
12430 break;
12431
12432 default:
12433 abort ();
12434 }
12435
12436 /* If REL, extract the addend from the insn. If RELA, it will
12437 have already been fetched for us. */
12438 if (globals->use_rel)
12439 {
12440 int negative = (insn & (1 << 23)) ? 1 : -1;
12441 signed_addend = negative * (insn & 0xfff);
12442 }
12443
12444 /* Compute the value (X) to go in the place. */
12445 if (r_type == R_ARM_LDR_PC_G0
12446 || r_type == R_ARM_LDR_PC_G1
12447 || r_type == R_ARM_LDR_PC_G2)
12448 /* PC relative. */
12449 signed_value = value - pc + signed_addend;
12450 else
12451 /* Section base relative. */
12452 signed_value = value - sb + signed_addend;
12453
12454 /* Calculate the value of the relevant G_{n-1} to obtain
12455 the residual at that stage. */
12456 calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
12457 group - 1, &residual);
12458
12459 /* Check for overflow. */
12460 if (residual >= 0x1000)
12461 {
12462 _bfd_error_handler
12463 /* xgettext:c-format */
12464 (_("%pB(%pA+%#" PRIx64 "): overflow whilst "
12465 "splitting %#" PRIx64 " for group relocation %s"),
12466 input_bfd, input_section, (uint64_t) rel->r_offset,
12467 (uint64_t) (signed_value < 0 ? -signed_value : signed_value),
12468 howto->name);
12469 return bfd_reloc_overflow;
12470 }
12471
12472 /* Mask out the value and U bit. */
12473 insn &= 0xff7ff000;
12474
12475 /* Set the U bit if the value to go in the place is non-negative. */
12476 if (signed_value >= 0)
12477 insn |= 1 << 23;
12478
12479 /* Encode the offset. */
12480 insn |= residual;
12481
12482 bfd_put_32 (input_bfd, insn, hit_data);
12483 }
12484 return bfd_reloc_ok;
12485
12486 case R_ARM_LDRS_PC_G0:
12487 case R_ARM_LDRS_PC_G1:
12488 case R_ARM_LDRS_PC_G2:
12489 case R_ARM_LDRS_SB_G0:
12490 case R_ARM_LDRS_SB_G1:
12491 case R_ARM_LDRS_SB_G2:
12492 {
12493 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
12494 bfd_vma pc = input_section->output_section->vma
12495 + input_section->output_offset + rel->r_offset;
12496 /* sb is the origin of the *segment* containing the symbol. */
12497 bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
12498 bfd_vma residual;
12499 bfd_signed_vma signed_value;
12500 int group = 0;
12501
12502 /* Determine which groups of bits to calculate. */
12503 switch (r_type)
12504 {
12505 case R_ARM_LDRS_PC_G0:
12506 case R_ARM_LDRS_SB_G0:
12507 group = 0;
12508 break;
12509
12510 case R_ARM_LDRS_PC_G1:
12511 case R_ARM_LDRS_SB_G1:
12512 group = 1;
12513 break;
12514
12515 case R_ARM_LDRS_PC_G2:
12516 case R_ARM_LDRS_SB_G2:
12517 group = 2;
12518 break;
12519
12520 default:
12521 abort ();
12522 }
12523
12524 /* If REL, extract the addend from the insn. If RELA, it will
12525 have already been fetched for us. */
12526 if (globals->use_rel)
12527 {
12528 int negative = (insn & (1 << 23)) ? 1 : -1;
12529 signed_addend = negative * (((insn & 0xf00) >> 4) + (insn & 0xf));
12530 }
12531
12532 /* Compute the value (X) to go in the place. */
12533 if (r_type == R_ARM_LDRS_PC_G0
12534 || r_type == R_ARM_LDRS_PC_G1
12535 || r_type == R_ARM_LDRS_PC_G2)
12536 /* PC relative. */
12537 signed_value = value - pc + signed_addend;
12538 else
12539 /* Section base relative. */
12540 signed_value = value - sb + signed_addend;
12541
12542 /* Calculate the value of the relevant G_{n-1} to obtain
12543 the residual at that stage. */
12544 calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
12545 group - 1, &residual);
12546
12547 /* Check for overflow. */
12548 if (residual >= 0x100)
12549 {
12550 _bfd_error_handler
12551 /* xgettext:c-format */
12552 (_("%pB(%pA+%#" PRIx64 "): overflow whilst "
12553 "splitting %#" PRIx64 " for group relocation %s"),
12554 input_bfd, input_section, (uint64_t) rel->r_offset,
12555 (uint64_t) (signed_value < 0 ? -signed_value : signed_value),
12556 howto->name);
12557 return bfd_reloc_overflow;
12558 }
12559
12560 /* Mask out the value and U bit. */
12561 insn &= 0xff7ff0f0;
12562
12563 /* Set the U bit if the value to go in the place is non-negative. */
12564 if (signed_value >= 0)
12565 insn |= 1 << 23;
12566
12567 /* Encode the offset. */
12568 insn |= ((residual & 0xf0) << 4) | (residual & 0xf);
12569
12570 bfd_put_32 (input_bfd, insn, hit_data);
12571 }
12572 return bfd_reloc_ok;
12573
12574 case R_ARM_LDC_PC_G0:
12575 case R_ARM_LDC_PC_G1:
12576 case R_ARM_LDC_PC_G2:
12577 case R_ARM_LDC_SB_G0:
12578 case R_ARM_LDC_SB_G1:
12579 case R_ARM_LDC_SB_G2:
12580 {
12581 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
12582 bfd_vma pc = input_section->output_section->vma
12583 + input_section->output_offset + rel->r_offset;
12584 /* sb is the origin of the *segment* containing the symbol. */
12585 bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
12586 bfd_vma residual;
12587 bfd_signed_vma signed_value;
12588 int group = 0;
12589
12590 /* Determine which groups of bits to calculate. */
12591 switch (r_type)
12592 {
12593 case R_ARM_LDC_PC_G0:
12594 case R_ARM_LDC_SB_G0:
12595 group = 0;
12596 break;
12597
12598 case R_ARM_LDC_PC_G1:
12599 case R_ARM_LDC_SB_G1:
12600 group = 1;
12601 break;
12602
12603 case R_ARM_LDC_PC_G2:
12604 case R_ARM_LDC_SB_G2:
12605 group = 2;
12606 break;
12607
12608 default:
12609 abort ();
12610 }
12611
12612 /* If REL, extract the addend from the insn. If RELA, it will
12613 have already been fetched for us. */
12614 if (globals->use_rel)
12615 {
12616 int negative = (insn & (1 << 23)) ? 1 : -1;
12617 signed_addend = negative * ((insn & 0xff) << 2);
12618 }
12619
12620 /* Compute the value (X) to go in the place. */
12621 if (r_type == R_ARM_LDC_PC_G0
12622 || r_type == R_ARM_LDC_PC_G1
12623 || r_type == R_ARM_LDC_PC_G2)
12624 /* PC relative. */
12625 signed_value = value - pc + signed_addend;
12626 else
12627 /* Section base relative. */
12628 signed_value = value - sb + signed_addend;
12629
12630 /* Calculate the value of the relevant G_{n-1} to obtain
12631 the residual at that stage. */
12632 calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
12633 group - 1, &residual);
12634
12635 /* Check for overflow. (The absolute value to go in the place must be
12636 divisible by four and, after having been divided by four, must
12637 fit in eight bits.) */
12638 if ((residual & 0x3) != 0 || residual >= 0x400)
12639 {
12640 _bfd_error_handler
12641 /* xgettext:c-format */
12642 (_("%pB(%pA+%#" PRIx64 "): overflow whilst "
12643 "splitting %#" PRIx64 " for group relocation %s"),
12644 input_bfd, input_section, (uint64_t) rel->r_offset,
12645 (uint64_t) (signed_value < 0 ? -signed_value : signed_value),
12646 howto->name);
12647 return bfd_reloc_overflow;
12648 }
12649
12650 /* Mask out the value and U bit. */
12651 insn &= 0xff7fff00;
12652
12653 /* Set the U bit if the value to go in the place is non-negative. */
12654 if (signed_value >= 0)
12655 insn |= 1 << 23;
12656
12657 /* Encode the offset. */
12658 insn |= residual >> 2;
12659
12660 bfd_put_32 (input_bfd, insn, hit_data);
12661 }
12662 return bfd_reloc_ok;
12663
12664 case R_ARM_THM_ALU_ABS_G0_NC:
12665 case R_ARM_THM_ALU_ABS_G1_NC:
12666 case R_ARM_THM_ALU_ABS_G2_NC:
12667 case R_ARM_THM_ALU_ABS_G3_NC:
12668 {
12669 const int shift_array[4] = {0, 8, 16, 24};
12670 bfd_vma insn = bfd_get_16 (input_bfd, hit_data);
12671 bfd_vma addr = value;
12672 int shift = shift_array[r_type - R_ARM_THM_ALU_ABS_G0_NC];
12673
12674 /* Compute address. */
12675 if (globals->use_rel)
12676 signed_addend = insn & 0xff;
12677 addr += signed_addend;
12678 if (branch_type == ST_BRANCH_TO_THUMB)
12679 addr |= 1;
12680 /* Clean imm8 insn. */
12681 insn &= 0xff00;
12682 /* And update with correct part of address. */
12683 insn |= (addr >> shift) & 0xff;
12684 /* Update insn. */
12685 bfd_put_16 (input_bfd, insn, hit_data);
12686 }
12687
12688 *unresolved_reloc_p = FALSE;
12689 return bfd_reloc_ok;
12690
12691 case R_ARM_GOTOFFFUNCDESC:
12692 {
12693 if (h == NULL)
12694 {
12695 struct fdpic_local *local_fdpic_cnts = elf32_arm_local_fdpic_cnts(input_bfd);
12696 int dynindx = elf_section_data (sym_sec->output_section)->dynindx;
12697 int offset = local_fdpic_cnts[r_symndx].funcdesc_offset & ~1;
12698 bfd_vma addr = dynreloc_value - sym_sec->output_section->vma;
12699 bfd_vma seg = -1;
12700
12701 if (bfd_link_pic(info) && dynindx == 0)
12702 abort();
12703
12704 /* Resolve relocation. */
12705 bfd_put_32(output_bfd, (offset + sgot->output_offset)
12706 , contents + rel->r_offset);
12707 /* Emit R_ARM_FUNCDESC_VALUE or two fixups on funcdesc if
12708 not done yet. */
12709 arm_elf_fill_funcdesc(output_bfd, info,
12710 &local_fdpic_cnts[r_symndx].funcdesc_offset,
12711 dynindx, offset, addr, dynreloc_value, seg);
12712 }
12713 else
12714 {
12715 int dynindx;
12716 int offset = eh->fdpic_cnts.funcdesc_offset & ~1;
12717 bfd_vma addr;
12718 bfd_vma seg = -1;
12719
12720 /* For static binaries, sym_sec can be null. */
12721 if (sym_sec)
12722 {
12723 dynindx = elf_section_data (sym_sec->output_section)->dynindx;
12724 addr = dynreloc_value - sym_sec->output_section->vma;
12725 }
12726 else
12727 {
12728 dynindx = 0;
12729 addr = 0;
12730 }
12731
12732 if (bfd_link_pic(info) && dynindx == 0)
12733 abort();
12734
12735 /* This case cannot occur since funcdesc is allocated by
12736 the dynamic loader so we cannot resolve the relocation. */
12737 if (h->dynindx != -1)
12738 abort();
12739
12740 /* Resolve relocation. */
12741 bfd_put_32(output_bfd, (offset + sgot->output_offset),
12742 contents + rel->r_offset);
12743 /* Emit R_ARM_FUNCDESC_VALUE on funcdesc if not done yet. */
12744 arm_elf_fill_funcdesc(output_bfd, info,
12745 &eh->fdpic_cnts.funcdesc_offset,
12746 dynindx, offset, addr, dynreloc_value, seg);
12747 }
12748 }
12749 *unresolved_reloc_p = FALSE;
12750 return bfd_reloc_ok;
12751
12752 case R_ARM_GOTFUNCDESC:
12753 {
12754 if (h != NULL)
12755 {
12756 Elf_Internal_Rela outrel;
12757
12758 /* Resolve relocation. */
12759 bfd_put_32(output_bfd, ((eh->fdpic_cnts.gotfuncdesc_offset & ~1)
12760 + sgot->output_offset),
12761 contents + rel->r_offset);
12762 /* Add funcdesc and associated R_ARM_FUNCDESC_VALUE. */
12763 if(h->dynindx == -1)
12764 {
12765 int dynindx;
12766 int offset = eh->fdpic_cnts.funcdesc_offset & ~1;
12767 bfd_vma addr;
12768 bfd_vma seg = -1;
12769
12770 /* For static binaries sym_sec can be null. */
12771 if (sym_sec)
12772 {
12773 dynindx = elf_section_data (sym_sec->output_section)->dynindx;
12774 addr = dynreloc_value - sym_sec->output_section->vma;
12775 }
12776 else
12777 {
12778 dynindx = 0;
12779 addr = 0;
12780 }
12781
12782 /* Emit R_ARM_FUNCDESC_VALUE on funcdesc if not done yet. */
12783 arm_elf_fill_funcdesc(output_bfd, info,
12784 &eh->fdpic_cnts.funcdesc_offset,
12785 dynindx, offset, addr, dynreloc_value, seg);
12786 }
12787
12788 /* Add a dynamic relocation on GOT entry if not already done. */
12789 if ((eh->fdpic_cnts.gotfuncdesc_offset & 1) == 0)
12790 {
12791 if (h->dynindx == -1)
12792 {
12793 outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
12794 if (h->root.type == bfd_link_hash_undefweak)
12795 bfd_put_32(output_bfd, 0, sgot->contents
12796 + (eh->fdpic_cnts.gotfuncdesc_offset & ~1));
12797 else
12798 bfd_put_32(output_bfd, sgot->output_section->vma
12799 + sgot->output_offset
12800 + (eh->fdpic_cnts.funcdesc_offset & ~1),
12801 sgot->contents
12802 + (eh->fdpic_cnts.gotfuncdesc_offset & ~1));
12803 }
12804 else
12805 {
12806 outrel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_FUNCDESC);
12807 }
12808 outrel.r_offset = sgot->output_section->vma
12809 + sgot->output_offset
12810 + (eh->fdpic_cnts.gotfuncdesc_offset & ~1);
12811 outrel.r_addend = 0;
12812 if (h->dynindx == -1 && !bfd_link_pic(info))
12813 if (h->root.type == bfd_link_hash_undefweak)
12814 arm_elf_add_rofixup(output_bfd, globals->srofixup, -1);
12815 else
12816 arm_elf_add_rofixup(output_bfd, globals->srofixup,
12817 outrel.r_offset);
12818 else
12819 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
12820 eh->fdpic_cnts.gotfuncdesc_offset |= 1;
12821 }
12822 }
12823 else
12824 {
12825 /* Such relocation on static function should not have been
12826 emitted by the compiler. */
12827 abort();
12828 }
12829 }
12830 *unresolved_reloc_p = FALSE;
12831 return bfd_reloc_ok;
12832
12833 case R_ARM_FUNCDESC:
12834 {
12835 if (h == NULL)
12836 {
12837 struct fdpic_local *local_fdpic_cnts = elf32_arm_local_fdpic_cnts(input_bfd);
12838 Elf_Internal_Rela outrel;
12839 int dynindx = elf_section_data (sym_sec->output_section)->dynindx;
12840 int offset = local_fdpic_cnts[r_symndx].funcdesc_offset & ~1;
12841 bfd_vma addr = dynreloc_value - sym_sec->output_section->vma;
12842 bfd_vma seg = -1;
12843
12844 if (bfd_link_pic(info) && dynindx == 0)
12845 abort();
12846
12847 /* Replace static FUNCDESC relocation with a
12848 R_ARM_RELATIVE dynamic relocation or with a rofixup for
12849 executable. */
12850 outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
12851 outrel.r_offset = input_section->output_section->vma
12852 + input_section->output_offset + rel->r_offset;
12853 outrel.r_addend = 0;
12854 if (bfd_link_pic(info))
12855 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
12856 else
12857 arm_elf_add_rofixup(output_bfd, globals->srofixup, outrel.r_offset);
12858
12859 bfd_put_32 (input_bfd, sgot->output_section->vma
12860 + sgot->output_offset + offset, hit_data);
12861
12862 /* Emit R_ARM_FUNCDESC_VALUE on funcdesc if not done yet. */
12863 arm_elf_fill_funcdesc(output_bfd, info,
12864 &local_fdpic_cnts[r_symndx].funcdesc_offset,
12865 dynindx, offset, addr, dynreloc_value, seg);
12866 }
12867 else
12868 {
12869 if (h->dynindx == -1)
12870 {
12871 int dynindx;
12872 int offset = eh->fdpic_cnts.funcdesc_offset & ~1;
12873 bfd_vma addr;
12874 bfd_vma seg = -1;
12875 Elf_Internal_Rela outrel;
12876
12877 /* For static binaries sym_sec can be null. */
12878 if (sym_sec)
12879 {
12880 dynindx = elf_section_data (sym_sec->output_section)->dynindx;
12881 addr = dynreloc_value - sym_sec->output_section->vma;
12882 }
12883 else
12884 {
12885 dynindx = 0;
12886 addr = 0;
12887 }
12888
12889 if (bfd_link_pic(info) && dynindx == 0)
12890 abort();
12891
12892 /* Replace static FUNCDESC relocation with a
12893 R_ARM_RELATIVE dynamic relocation. */
12894 outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
12895 outrel.r_offset = input_section->output_section->vma
12896 + input_section->output_offset + rel->r_offset;
12897 outrel.r_addend = 0;
12898 if (bfd_link_pic(info))
12899 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
12900 else
12901 arm_elf_add_rofixup(output_bfd, globals->srofixup, outrel.r_offset);
12902
12903 bfd_put_32 (input_bfd, sgot->output_section->vma
12904 + sgot->output_offset + offset, hit_data);
12905
12906 /* Emit R_ARM_FUNCDESC_VALUE on funcdesc if not done yet. */
12907 arm_elf_fill_funcdesc(output_bfd, info,
12908 &eh->fdpic_cnts.funcdesc_offset,
12909 dynindx, offset, addr, dynreloc_value, seg);
12910 }
12911 else
12912 {
12913 Elf_Internal_Rela outrel;
12914
12915 /* Add a dynamic relocation. */
12916 outrel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_FUNCDESC);
12917 outrel.r_offset = input_section->output_section->vma
12918 + input_section->output_offset + rel->r_offset;
12919 outrel.r_addend = 0;
12920 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
12921 }
12922 }
12923 }
12924 *unresolved_reloc_p = FALSE;
12925 return bfd_reloc_ok;
12926
12927 case R_ARM_THM_BF16:
12928 {
12929 bfd_vma relocation;
12930 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
12931 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
12932
12933 if (globals->use_rel)
12934 {
12935 bfd_vma immA = (upper_insn & 0x001f);
12936 bfd_vma immB = (lower_insn & 0x07fe) >> 1;
12937 bfd_vma immC = (lower_insn & 0x0800) >> 11;
12938 addend = (immA << 12);
12939 addend |= (immB << 2);
12940 addend |= (immC << 1);
12941 addend |= 1;
12942 /* Sign extend. */
12943 addend = (addend & 0x10000) ? addend - (1 << 17) : addend;
12944 }
12945
12946 value = get_value_helper (plt_offset, splt, input_section, sym_sec, h,
12947 info, input_bfd, rel, sym_name, st_type,
12948 globals, unresolved_reloc_p);
12949
12950 relocation = value + addend;
12951 relocation -= (input_section->output_section->vma
12952 + input_section->output_offset
12953 + rel->r_offset);
12954
12955 /* Put RELOCATION back into the insn. */
12956 {
12957 bfd_vma immA = (relocation & 0x0001f000) >> 12;
12958 bfd_vma immB = (relocation & 0x00000ffc) >> 2;
12959 bfd_vma immC = (relocation & 0x00000002) >> 1;
12960
12961 upper_insn = (upper_insn & 0xffe0) | immA;
12962 lower_insn = (lower_insn & 0xf001) | (immC << 11) | (immB << 1);
12963 }
12964
12965 /* Put the relocated value back in the object file: */
12966 bfd_put_16 (input_bfd, upper_insn, hit_data);
12967 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
12968
12969 return bfd_reloc_ok;
12970 }
12971
12972 default:
12973 return bfd_reloc_notsupported;
12974 }
12975 }
12976
12977 /* Add INCREMENT to the reloc (of type HOWTO) at ADDRESS. */
12978 static void
12979 arm_add_to_rel (bfd * abfd,
12980 bfd_byte * address,
12981 reloc_howto_type * howto,
12982 bfd_signed_vma increment)
12983 {
12984 bfd_signed_vma addend;
12985
12986 if (howto->type == R_ARM_THM_CALL
12987 || howto->type == R_ARM_THM_JUMP24)
12988 {
12989 int upper_insn, lower_insn;
12990 int upper, lower;
12991
12992 upper_insn = bfd_get_16 (abfd, address);
12993 lower_insn = bfd_get_16 (abfd, address + 2);
12994 upper = upper_insn & 0x7ff;
12995 lower = lower_insn & 0x7ff;
12996
12997 addend = (upper << 12) | (lower << 1);
12998 addend += increment;
12999 addend >>= 1;
13000
13001 upper_insn = (upper_insn & 0xf800) | ((addend >> 11) & 0x7ff);
13002 lower_insn = (lower_insn & 0xf800) | (addend & 0x7ff);
13003
13004 bfd_put_16 (abfd, (bfd_vma) upper_insn, address);
13005 bfd_put_16 (abfd, (bfd_vma) lower_insn, address + 2);
13006 }
13007 else
13008 {
13009 bfd_vma contents;
13010
13011 contents = bfd_get_32 (abfd, address);
13012
13013 /* Get the (signed) value from the instruction. */
13014 addend = contents & howto->src_mask;
13015 if (addend & ((howto->src_mask + 1) >> 1))
13016 {
13017 bfd_signed_vma mask;
13018
13019 mask = -1;
13020 mask &= ~ howto->src_mask;
13021 addend |= mask;
13022 }
13023
13024 /* Add in the increment, (which is a byte value). */
13025 switch (howto->type)
13026 {
13027 default:
13028 addend += increment;
13029 break;
13030
13031 case R_ARM_PC24:
13032 case R_ARM_PLT32:
13033 case R_ARM_CALL:
13034 case R_ARM_JUMP24:
13035 addend <<= howto->size;
13036 addend += increment;
13037
13038 /* Should we check for overflow here ? */
13039
13040 /* Drop any undesired bits. */
13041 addend >>= howto->rightshift;
13042 break;
13043 }
13044
13045 contents = (contents & ~ howto->dst_mask) | (addend & howto->dst_mask);
13046
13047 bfd_put_32 (abfd, contents, address);
13048 }
13049 }
13050
/* Return non-zero if R_TYPE is any of the ARM TLS relocations: the
   traditional GD/LD/IE/LE model relocations (and their FDPIC
   variants) as well as the GNU descriptor-based dialect below.  */
#define IS_ARM_TLS_RELOC(R_TYPE)	\
  ((R_TYPE) == R_ARM_TLS_GD32		\
   || (R_TYPE) == R_ARM_TLS_GD32_FDPIC	\
   || (R_TYPE) == R_ARM_TLS_LDO32	\
   || (R_TYPE) == R_ARM_TLS_LDM32	\
   || (R_TYPE) == R_ARM_TLS_LDM32_FDPIC	\
   || (R_TYPE) == R_ARM_TLS_DTPOFF32	\
   || (R_TYPE) == R_ARM_TLS_DTPMOD32	\
   || (R_TYPE) == R_ARM_TLS_TPOFF32	\
   || (R_TYPE) == R_ARM_TLS_LE32	\
   || (R_TYPE) == R_ARM_TLS_IE32	\
   || (R_TYPE) == R_ARM_TLS_IE32_FDPIC	\
   || IS_ARM_TLS_GNU_RELOC (R_TYPE))

/* Specific set of relocations for the gnu tls dialect (TLS
   descriptor sequences and their call/marker relocations).  */
#define IS_ARM_TLS_GNU_RELOC(R_TYPE)	\
  ((R_TYPE) == R_ARM_TLS_GOTDESC	\
   || (R_TYPE) == R_ARM_TLS_CALL	\
   || (R_TYPE) == R_ARM_THM_TLS_CALL	\
   || (R_TYPE) == R_ARM_TLS_DESCSEQ	\
   || (R_TYPE) == R_ARM_THM_TLS_DESCSEQ)
13072
/* Relocate an ARM ELF section.

   Apply all relocations in RELOCS for INPUT_SECTION of INPUT_BFD to the
   section bytes in CONTENTS, for the link described by INFO with output
   OUTPUT_BFD.  LOCAL_SYMS and LOCAL_SECTIONS describe INPUT_BFD's local
   symbols.  Returns FALSE on a hard error (unrecognized reloc type,
   unsupported SEC_MERGE combination, unresolvable reloc); other problems
   are reported through the linker callbacks and TRUE is returned.  */

static bfd_boolean
elf32_arm_relocate_section (bfd *output_bfd,
			    struct bfd_link_info *info,
			    bfd *input_bfd,
			    asection *input_section,
			    bfd_byte *contents,
			    Elf_Internal_Rela *relocs,
			    Elf_Internal_Sym *local_syms,
			    asection **local_sections)
{
  Elf_Internal_Shdr *symtab_hdr;
  struct elf_link_hash_entry **sym_hashes;
  Elf_Internal_Rela *rel;
  Elf_Internal_Rela *relend;
  const char *name;
  struct elf32_arm_link_hash_table * globals;

  globals = elf32_arm_hash_table (info);
  if (globals == NULL)
    return FALSE;

  symtab_hdr = & elf_symtab_hdr (input_bfd);
  sym_hashes = elf_sym_hashes (input_bfd);

  rel = relocs;
  relend = relocs + input_section->reloc_count;
  for (; rel < relend; rel++)
    {
      int r_type;
      reloc_howto_type * howto;
      unsigned long r_symndx;
      Elf_Internal_Sym * sym;
      asection * sec;
      struct elf_link_hash_entry * h;
      bfd_vma relocation;
      bfd_reloc_status_type r;
      arelent bfd_reloc;
      char sym_type;
      bfd_boolean unresolved_reloc = FALSE;
      char *error_message = NULL;

      r_symndx = ELF32_R_SYM (rel->r_info);
      r_type = ELF32_R_TYPE (rel->r_info);
      /* Map old/vendor reloc numbers onto the types used internally.  */
      r_type = arm_real_reloc_type (globals, r_type);

      /* VTABLE relocations are only consumed by garbage collection;
	 there is nothing to apply here.  */
      if (   r_type == R_ARM_GNU_VTENTRY
	  || r_type == R_ARM_GNU_VTINHERIT)
	continue;

      howto = bfd_reloc.howto = elf32_arm_howto_from_type (r_type);

      if (howto == NULL)
	return _bfd_unrecognized_reloc (input_bfd, input_section, r_type);

      h = NULL;
      sym = NULL;
      sec = NULL;

      if (r_symndx < symtab_hdr->sh_info)
	{
	  /* Relocation against a local symbol.  */
	  sym = local_syms + r_symndx;
	  sym_type = ELF32_ST_TYPE (sym->st_info);
	  sec = local_sections[r_symndx];

	  /* An object file might have a reference to a local
	     undefined symbol.  This is a daft object file, but we
	     should at least do something about it.  V4BX & NONE
	     relocations do not use the symbol and are explicitly
	     allowed to use the undefined symbol, so allow those.
	     Likewise for relocations against STN_UNDEF.  */
	  if (r_type != R_ARM_V4BX
	      && r_type != R_ARM_NONE
	      && r_symndx != STN_UNDEF
	      && bfd_is_und_section (sec)
	      && ELF_ST_BIND (sym->st_info) != STB_WEAK)
	    (*info->callbacks->undefined_symbol)
	      (info, bfd_elf_string_from_elf_section
	       (input_bfd, symtab_hdr->sh_link, sym->st_name),
	       input_bfd, input_section,
	       rel->r_offset, TRUE);

	  if (globals->use_rel)
	    {
	      /* REL format: the addend is stored in the section
		 contents rather than in the relocation entry.  */
	      relocation = (sec->output_section->vma
			    + sec->output_offset
			    + sym->st_value);
	      if (!bfd_link_relocatable (info)
		  && (sec->flags & SEC_MERGE)
		  && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
		{
		  /* The target section may have been merged: extract the
		     in-place addend, map it through the merge machinery,
		     and rewrite the instruction with the new addend.  */
		  asection *msec;
		  bfd_vma addend, value;

		  switch (r_type)
		    {
		    case R_ARM_MOVW_ABS_NC:
		    case R_ARM_MOVT_ABS:
		      /* ARM MOVW/MOVT: the 16-bit immediate is encoded
			 as a 4-bit field at bit 16 plus a 12-bit field
			 at bit 0.  */
		      value = bfd_get_32 (input_bfd, contents + rel->r_offset);
		      addend = ((value & 0xf0000) >> 4) | (value & 0xfff);
		      /* Sign-extend from 16 bits.  */
		      addend = (addend ^ 0x8000) - 0x8000;
		      break;

		    case R_ARM_THM_MOVW_ABS_NC:
		    case R_ARM_THM_MOVT_ABS:
		      /* Thumb MOVW/MOVT: the immediate is scattered
			 across two halfwords (imm4:i:imm3:imm8).  */
		      value = bfd_get_16 (input_bfd, contents + rel->r_offset)
			<< 16;
		      value |= bfd_get_16 (input_bfd,
					   contents + rel->r_offset + 2);
		      addend = ((value & 0xf7000) >> 4) | (value & 0xff)
			| ((value & 0x04000000) >> 15);
		      /* Sign-extend from 16 bits.  */
		      addend = (addend ^ 0x8000) - 0x8000;
		      break;

		    default:
		      /* The generic extraction below can only cope with
			 a contiguous, unshifted addend field.  */
		      if (howto->rightshift
			  || (howto->src_mask & (howto->src_mask + 1)))
			{
			  _bfd_error_handler
			    /* xgettext:c-format */
			    (_("%pB(%pA+%#" PRIx64 "): "
			       "%s relocation against SEC_MERGE section"),
			     input_bfd, input_section,
			     (uint64_t) rel->r_offset, howto->name);
			  return FALSE;
			}

		      value = bfd_get_32 (input_bfd, contents + rel->r_offset);

		      /* Get the (signed) value from the instruction.  */
		      addend = value & howto->src_mask;
		      if (addend & ((howto->src_mask + 1) >> 1))
			{
			  bfd_signed_vma mask;

			  mask = -1;
			  mask &= ~ howto->src_mask;
			  addend |= mask;
			}
		      break;
		    }

		  msec = sec;
		  addend =
		    _bfd_elf_rel_local_sym (output_bfd, sym, &msec, addend)
		    - relocation;
		  addend += msec->output_section->vma + msec->output_offset;

		  /* Cases here must match those in the preceding
		     switch statement.  */
		  switch (r_type)
		    {
		    case R_ARM_MOVW_ABS_NC:
		    case R_ARM_MOVT_ABS:
		      value = (value & 0xfff0f000) | ((addend & 0xf000) << 4)
			| (addend & 0xfff);
		      bfd_put_32 (input_bfd, value, contents + rel->r_offset);
		      break;

		    case R_ARM_THM_MOVW_ABS_NC:
		    case R_ARM_THM_MOVT_ABS:
		      value = (value & 0xfbf08f00) | ((addend & 0xf700) << 4)
			| (addend & 0xff) | ((addend & 0x0800) << 15);
		      bfd_put_16 (input_bfd, value >> 16,
				  contents + rel->r_offset);
		      bfd_put_16 (input_bfd, value,
				  contents + rel->r_offset + 2);
		      break;

		    default:
		      value = (value & ~ howto->dst_mask)
			| (addend & howto->dst_mask);
		      bfd_put_32 (input_bfd, value, contents + rel->r_offset);
		      break;
		    }
		}
	    }
	  else
	    relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
	}
      else
	{
	  /* Relocation against a global symbol.  */
	  bfd_boolean warned, ignored;

	  RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
				   r_symndx, symtab_hdr, sym_hashes,
				   h, sec, relocation,
				   unresolved_reloc, warned, ignored);

	  sym_type = h->type;
	}

      if (sec != NULL && discarded_section (sec))
	RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
					 rel, 1, relend, howto, 0, contents);

      if (bfd_link_relocatable (info))
	{
	  /* This is a relocatable link.  We don't have to change
	     anything, unless the reloc is against a section symbol,
	     in which case we have to adjust according to where the
	     section symbol winds up in the output section.  */
	  if (sym != NULL && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
	    {
	      if (globals->use_rel)
		arm_add_to_rel (input_bfd, contents + rel->r_offset,
				howto, (bfd_signed_vma) sec->output_offset);
	      else
		rel->r_addend += sec->output_offset;
	    }
	  continue;
	}

      /* Pick a symbol (or section) name for diagnostics.  */
      if (h != NULL)
	name = h->root.root.string;
      else
	{
	  name = (bfd_elf_string_from_elf_section
		  (input_bfd, symtab_hdr->sh_link, sym->st_name));
	  if (name == NULL || *name == '\0')
	    name = bfd_section_name (input_bfd, sec);
	}

      /* Diagnose a TLS relocation applied to a non-TLS symbol, and
	 vice versa.  */
      if (r_symndx != STN_UNDEF
	  && r_type != R_ARM_NONE
	  && (h == NULL
	      || h->root.type == bfd_link_hash_defined
	      || h->root.type == bfd_link_hash_defweak)
	  && IS_ARM_TLS_RELOC (r_type) != (sym_type == STT_TLS))
	{
	  _bfd_error_handler
	    ((sym_type == STT_TLS
	      /* xgettext:c-format */
	      ? _("%pB(%pA+%#" PRIx64 "): %s used with TLS symbol %s")
	      /* xgettext:c-format */
	      : _("%pB(%pA+%#" PRIx64 "): %s used with non-TLS symbol %s")),
	     input_bfd,
	     input_section,
	     (uint64_t) rel->r_offset,
	     howto->name,
	     name);
	}

      /* We call elf32_arm_final_link_relocate unless we're completely
	 done, i.e., the relaxation produced the final output we want,
	 and we won't let anybody mess with it.  Also, we have to do
	 addend adjustments in case of a R_ARM_TLS_GOTDESC relocation
	 both in relaxed and non-relaxed cases.  */
      if ((elf32_arm_tls_transition (info, r_type, h) != (unsigned)r_type)
	  || (IS_ARM_TLS_GNU_RELOC (r_type)
	      && !((h ? elf32_arm_hash_entry (h)->tls_type :
		    elf32_arm_local_got_tls_type (input_bfd)[r_symndx])
		   & GOT_TLS_GDESC)))
	{
	  r = elf32_arm_tls_relax (globals, input_bfd, input_section,
				   contents, rel, h == NULL);
	  /* This may have been marked unresolved because it came from
	     a shared library.  But we've just dealt with that.  */
	  unresolved_reloc = 0;
	}
      else
	r = bfd_reloc_continue;

      if (r == bfd_reloc_continue)
	{
	  unsigned char branch_type =
	    h ? ARM_GET_SYM_BRANCH_TYPE (h->target_internal)
	      : ARM_GET_SYM_BRANCH_TYPE (sym->st_target_internal);

	  r = elf32_arm_final_link_relocate (howto, input_bfd, output_bfd,
					     input_section, contents, rel,
					     relocation, info, sec, name,
					     sym_type, branch_type, h,
					     &unresolved_reloc,
					     &error_message);
	}

      /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
	 because such sections are not SEC_ALLOC and thus ld.so will
	 not process them.  */
      if (unresolved_reloc
	  && !((input_section->flags & SEC_DEBUGGING) != 0
	       && h->def_dynamic)
	  && _bfd_elf_section_offset (output_bfd, info, input_section,
				      rel->r_offset) != (bfd_vma) -1)
	{
	  _bfd_error_handler
	    /* xgettext:c-format */
	    (_("%pB(%pA+%#" PRIx64 "): "
	       "unresolvable %s relocation against symbol `%s'"),
	     input_bfd,
	     input_section,
	     (uint64_t) rel->r_offset,
	     howto->name,
	     h->root.root.string);
	  return FALSE;
	}

      if (r != bfd_reloc_ok)
	{
	  switch (r)
	    {
	    case bfd_reloc_overflow:
	      /* If the overflowing reloc was to an undefined symbol,
		 we have already printed one error message and there
		 is no point complaining again.  */
	      if (!h || h->root.type != bfd_link_hash_undefined)
		(*info->callbacks->reloc_overflow)
		  (info, (h ? &h->root : NULL), name, howto->name,
		   (bfd_vma) 0, input_bfd, input_section, rel->r_offset);
	      break;

	    case bfd_reloc_undefined:
	      (*info->callbacks->undefined_symbol)
		(info, name, input_bfd, input_section, rel->r_offset, TRUE);
	      break;

	    case bfd_reloc_outofrange:
	      error_message = _("out of range");
	      goto common_error;

	    case bfd_reloc_notsupported:
	      error_message = _("unsupported relocation");
	      goto common_error;

	    case bfd_reloc_dangerous:
	      /* error_message should already be set.  */
	      goto common_error;

	    default:
	      error_message = _("unknown error");
	      /* Fall through.  */

	    common_error:
	      BFD_ASSERT (error_message != NULL);
	      (*info->callbacks->reloc_dangerous)
		(info, error_message, input_bfd, input_section, rel->r_offset);
	      break;
	    }
	}
    }

  return TRUE;
}
13418
13419 /* Add a new unwind edit to the list described by HEAD, TAIL. If TINDEX is zero,
13420 adds the edit to the start of the list. (The list must be built in order of
13421 ascending TINDEX: the function's callers are primarily responsible for
13422 maintaining that condition). */
13423
13424 static void
13425 add_unwind_table_edit (arm_unwind_table_edit **head,
13426 arm_unwind_table_edit **tail,
13427 arm_unwind_edit_type type,
13428 asection *linked_section,
13429 unsigned int tindex)
13430 {
13431 arm_unwind_table_edit *new_edit = (arm_unwind_table_edit *)
13432 xmalloc (sizeof (arm_unwind_table_edit));
13433
13434 new_edit->type = type;
13435 new_edit->linked_section = linked_section;
13436 new_edit->index = tindex;
13437
13438 if (tindex > 0)
13439 {
13440 new_edit->next = NULL;
13441
13442 if (*tail)
13443 (*tail)->next = new_edit;
13444
13445 (*tail) = new_edit;
13446
13447 if (!*head)
13448 (*head) = new_edit;
13449 }
13450 else
13451 {
13452 new_edit->next = *head;
13453
13454 if (!*tail)
13455 *tail = new_edit;
13456
13457 *head = new_edit;
13458 }
13459 }
13460
13461 static _arm_elf_section_data *get_arm_elf_section_data (asection *);
13462
13463 /* Increase the size of EXIDX_SEC by ADJUST bytes. ADJUST mau be negative. */
13464 static void
13465 adjust_exidx_size(asection *exidx_sec, int adjust)
13466 {
13467 asection *out_sec;
13468
13469 if (!exidx_sec->rawsize)
13470 exidx_sec->rawsize = exidx_sec->size;
13471
13472 bfd_set_section_size (exidx_sec->owner, exidx_sec, exidx_sec->size + adjust);
13473 out_sec = exidx_sec->output_section;
13474 /* Adjust size of output section. */
13475 bfd_set_section_size (out_sec->owner, out_sec, out_sec->size +adjust);
13476 }
13477
13478 /* Insert an EXIDX_CANTUNWIND marker at the end of a section. */
13479 static void
13480 insert_cantunwind_after(asection *text_sec, asection *exidx_sec)
13481 {
13482 struct _arm_elf_section_data *exidx_arm_data;
13483
13484 exidx_arm_data = get_arm_elf_section_data (exidx_sec);
13485 add_unwind_table_edit (
13486 &exidx_arm_data->u.exidx.unwind_edit_list,
13487 &exidx_arm_data->u.exidx.unwind_edit_tail,
13488 INSERT_EXIDX_CANTUNWIND_AT_END, text_sec, UINT_MAX);
13489
13490 exidx_arm_data->additional_reloc_count++;
13491
13492 adjust_exidx_size(exidx_sec, 8);
13493 }
13494
/* Scan .ARM.exidx tables, and create a list describing edits which should be
   made to those tables, such that:

   1. Regions without unwind data are marked with EXIDX_CANTUNWIND entries.
   2. Duplicate entries are merged together (EXIDX_CANTUNWIND, or unwind
      codes which have been inlined into the index).

   If MERGE_EXIDX_ENTRIES is false, duplicate entries are not merged.

   The edits are applied when the tables are written
   (in elf32_arm_write_section).  */

bfd_boolean
elf32_arm_fix_exidx_coverage (asection **text_section_order,
			      unsigned int num_text_sections,
			      struct bfd_link_info *info,
			      bfd_boolean merge_exidx_entries)
{
  bfd *inp;
  unsigned int last_second_word = 0, i;
  asection *last_exidx_sec = NULL;
  asection *last_text_sec = NULL;
  int last_unwind_type = -1;

  /* Walk over all EXIDX sections, and create backlinks from the corresponding
     text sections.  */
  for (inp = info->input_bfds; inp != NULL; inp = inp->link.next)
    {
      asection *sec;

      for (sec = inp->sections; sec != NULL; sec = sec->next)
	{
	  struct bfd_elf_section_data *elf_sec = elf_section_data (sec);
	  Elf_Internal_Shdr *hdr = &elf_sec->this_hdr;

	  if (!hdr || hdr->sh_type != SHT_ARM_EXIDX)
	    continue;

	  if (elf_sec->linked_to)
	    {
	      Elf_Internal_Shdr *linked_hdr
		= &elf_section_data (elf_sec->linked_to)->this_hdr;
	      struct _arm_elf_section_data *linked_sec_arm_data
		= get_arm_elf_section_data (linked_hdr->bfd_section);

	      if (linked_sec_arm_data == NULL)
		continue;

	      /* Link this .ARM.exidx section back from the text section it
		 describes.  */
	      linked_sec_arm_data->u.text.arm_exidx_sec = sec;
	    }
	}
    }

  /* Walk all text sections in order of increasing VMA.  Eliminate duplicate
     index table entries (EXIDX_CANTUNWIND and inlined unwind opcodes),
     and add EXIDX_CANTUNWIND entries for sections with no unwind table
     data.  */

  for (i = 0; i < num_text_sections; i++)
    {
      asection *sec = text_section_order[i];
      asection *exidx_sec;
      struct _arm_elf_section_data *arm_data = get_arm_elf_section_data (sec);
      struct _arm_elf_section_data *exidx_arm_data;
      bfd_byte *contents = NULL;
      int deleted_exidx_bytes = 0;
      bfd_vma j;
      arm_unwind_table_edit *unwind_edit_head = NULL;
      arm_unwind_table_edit *unwind_edit_tail = NULL;
      Elf_Internal_Shdr *hdr;
      bfd *ibfd;

      if (arm_data == NULL)
	continue;

      exidx_sec = arm_data->u.text.arm_exidx_sec;
      if (exidx_sec == NULL)
	{
	  /* Section has no unwind data.  */
	  if (last_unwind_type == 0 || !last_exidx_sec)
	    continue;

	  /* Ignore zero sized sections.  */
	  if (sec->size == 0)
	    continue;

	  insert_cantunwind_after(last_text_sec, last_exidx_sec);
	  last_unwind_type = 0;
	  continue;
	}

      /* Skip /DISCARD/ sections.  */
      if (bfd_is_abs_section (exidx_sec->output_section))
	continue;

      hdr = &elf_section_data (exidx_sec)->this_hdr;
      if (hdr->sh_type != SHT_ARM_EXIDX)
	continue;

      exidx_arm_data = get_arm_elf_section_data (exidx_sec);
      if (exidx_arm_data == NULL)
	continue;

      ibfd = exidx_sec->owner;

      if (hdr->contents != NULL)
	contents = hdr->contents;
      else if (! bfd_malloc_and_get_section (ibfd, exidx_sec, &contents))
	/* An error?  */
	continue;

      if (last_unwind_type > 0)
	{
	  unsigned int first_word = bfd_get_32 (ibfd, contents);
	  /* Add cantunwind if first unwind item does not match section
	     start.  */
	  if (first_word != sec->vma)
	    {
	      insert_cantunwind_after (last_text_sec, last_exidx_sec);
	      last_unwind_type = 0;
	    }
	}

      /* Each EXIDX entry is two 32-bit words: a text offset and a data
	 word.  The data word is examined to classify the entry.  */
      for (j = 0; j < hdr->sh_size; j += 8)
	{
	  unsigned int second_word = bfd_get_32 (ibfd, contents + j + 4);
	  int unwind_type;
	  int elide = 0;

	  /* An EXIDX_CANTUNWIND entry.  */
	  if (second_word == 1)
	    {
	      /* Elide it if the previous entry was also CANTUNWIND.  */
	      if (last_unwind_type == 0)
		elide = 1;
	      unwind_type = 0;
	    }
	  /* Inlined unwinding data.  Merge if equal to previous.  */
	  else if ((second_word & 0x80000000) != 0)
	    {
	      if (merge_exidx_entries
		  && last_second_word == second_word && last_unwind_type == 1)
		elide = 1;
	      unwind_type = 1;
	      last_second_word = second_word;
	    }
	  /* Normal table entry.  In theory we could merge these too,
	     but duplicate entries are likely to be much less common.  */
	  else
	    unwind_type = 2;

	  if (elide && !bfd_link_relocatable (info))
	    {
	      add_unwind_table_edit (&unwind_edit_head, &unwind_edit_tail,
				     DELETE_EXIDX_ENTRY, NULL, j / 8);

	      deleted_exidx_bytes += 8;
	    }

	  last_unwind_type = unwind_type;
	}

      /* Free contents if we allocated it ourselves.  */
      if (contents != hdr->contents)
	free (contents);

      /* Record edits to be applied later (in elf32_arm_write_section).  */
      exidx_arm_data->u.exidx.unwind_edit_list = unwind_edit_head;
      exidx_arm_data->u.exidx.unwind_edit_tail = unwind_edit_tail;

      if (deleted_exidx_bytes > 0)
	adjust_exidx_size(exidx_sec, -deleted_exidx_bytes);

      last_exidx_sec = exidx_sec;
      last_text_sec = sec;
    }

  /* Add terminating CANTUNWIND entry.  */
  if (!bfd_link_relocatable (info) && last_exidx_sec
      && last_unwind_type != 0)
    insert_cantunwind_after(last_text_sec, last_exidx_sec);

  return TRUE;
}
13679
13680 static bfd_boolean
13681 elf32_arm_output_glue_section (struct bfd_link_info *info, bfd *obfd,
13682 bfd *ibfd, const char *name)
13683 {
13684 asection *sec, *osec;
13685
13686 sec = bfd_get_linker_section (ibfd, name);
13687 if (sec == NULL || (sec->flags & SEC_EXCLUDE) != 0)
13688 return TRUE;
13689
13690 osec = sec->output_section;
13691 if (elf32_arm_write_section (obfd, info, sec, sec->contents))
13692 return TRUE;
13693
13694 if (! bfd_set_section_contents (obfd, osec, sec->contents,
13695 sec->output_offset, sec->size))
13696 return FALSE;
13697
13698 return TRUE;
13699 }
13700
13701 static bfd_boolean
13702 elf32_arm_final_link (bfd *abfd, struct bfd_link_info *info)
13703 {
13704 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
13705 asection *sec, *osec;
13706
13707 if (globals == NULL)
13708 return FALSE;
13709
13710 /* Invoke the regular ELF backend linker to do all the work. */
13711 if (!bfd_elf_final_link (abfd, info))
13712 return FALSE;
13713
13714 /* Process stub sections (eg BE8 encoding, ...). */
13715 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
13716 unsigned int i;
13717 for (i=0; i<htab->top_id; i++)
13718 {
13719 sec = htab->stub_group[i].stub_sec;
13720 /* Only process it once, in its link_sec slot. */
13721 if (sec && i == htab->stub_group[i].link_sec->id)
13722 {
13723 osec = sec->output_section;
13724 elf32_arm_write_section (abfd, info, sec, sec->contents);
13725 if (! bfd_set_section_contents (abfd, osec, sec->contents,
13726 sec->output_offset, sec->size))
13727 return FALSE;
13728 }
13729 }
13730
13731 /* Write out any glue sections now that we have created all the
13732 stubs. */
13733 if (globals->bfd_of_glue_owner != NULL)
13734 {
13735 if (! elf32_arm_output_glue_section (info, abfd,
13736 globals->bfd_of_glue_owner,
13737 ARM2THUMB_GLUE_SECTION_NAME))
13738 return FALSE;
13739
13740 if (! elf32_arm_output_glue_section (info, abfd,
13741 globals->bfd_of_glue_owner,
13742 THUMB2ARM_GLUE_SECTION_NAME))
13743 return FALSE;
13744
13745 if (! elf32_arm_output_glue_section (info, abfd,
13746 globals->bfd_of_glue_owner,
13747 VFP11_ERRATUM_VENEER_SECTION_NAME))
13748 return FALSE;
13749
13750 if (! elf32_arm_output_glue_section (info, abfd,
13751 globals->bfd_of_glue_owner,
13752 STM32L4XX_ERRATUM_VENEER_SECTION_NAME))
13753 return FALSE;
13754
13755 if (! elf32_arm_output_glue_section (info, abfd,
13756 globals->bfd_of_glue_owner,
13757 ARM_BX_GLUE_SECTION_NAME))
13758 return FALSE;
13759 }
13760
13761 return TRUE;
13762 }
13763
/* Return a best guess for the machine number based on the attributes.
   Reads the Tag_CPU_arch build attribute and maps it to a bfd_mach_arm_*
   value, disambiguating the v5TE family via the Tag_CPU_name and
   Tag_WMMX_arch attributes.  Returns bfd_mach_arm_unknown for values of
   Tag_CPU_arch this code does not know about.  */

static unsigned int
bfd_arm_get_mach_from_attributes (bfd * abfd)
{
  int arch = bfd_elf_get_obj_attr_int (abfd, OBJ_ATTR_PROC, Tag_CPU_arch);

  switch (arch)
    {
    case TAG_CPU_ARCH_PRE_V4: return bfd_mach_arm_3M;
    case TAG_CPU_ARCH_V4: return bfd_mach_arm_4;
    case TAG_CPU_ARCH_V4T: return bfd_mach_arm_4T;
    case TAG_CPU_ARCH_V5T: return bfd_mach_arm_5T;

    case TAG_CPU_ARCH_V5TE:
      {
	char * name;

	/* v5TE covers several distinct machines (XScale, iWMMXt,
	   iWMMXt2); use the CPU name attribute, if any, to pick one.  */
	BFD_ASSERT (Tag_CPU_name < NUM_KNOWN_OBJ_ATTRIBUTES);
	name = elf_known_obj_attributes (abfd) [OBJ_ATTR_PROC][Tag_CPU_name].s;

	if (name)
	  {
	    if (strcmp (name, "IWMMXT2") == 0)
	      return bfd_mach_arm_iWMMXt2;

	    if (strcmp (name, "IWMMXT") == 0)
	      return bfd_mach_arm_iWMMXt;

	    if (strcmp (name, "XSCALE") == 0)
	      {
		int wmmx;

		/* An XScale part may also implement iWMMXt; consult
		   the WMMX architecture attribute.  */
		BFD_ASSERT (Tag_WMMX_arch < NUM_KNOWN_OBJ_ATTRIBUTES);
		wmmx = elf_known_obj_attributes (abfd) [OBJ_ATTR_PROC][Tag_WMMX_arch].i;
		switch (wmmx)
		  {
		  case 1: return bfd_mach_arm_iWMMXt;
		  case 2: return bfd_mach_arm_iWMMXt2;
		  default: return bfd_mach_arm_XScale;
		  }
	      }
	  }

	return bfd_mach_arm_5TE;
      }

    case TAG_CPU_ARCH_V5TEJ:
	return bfd_mach_arm_5TEJ;
    case TAG_CPU_ARCH_V6:
	return bfd_mach_arm_6;
    case TAG_CPU_ARCH_V6KZ:
	return bfd_mach_arm_6KZ;
    case TAG_CPU_ARCH_V6T2:
	return bfd_mach_arm_6T2;
    case TAG_CPU_ARCH_V6K:
	return bfd_mach_arm_6K;
    case TAG_CPU_ARCH_V7:
	return bfd_mach_arm_7;
    case TAG_CPU_ARCH_V6_M:
	return bfd_mach_arm_6M;
    case TAG_CPU_ARCH_V6S_M:
	return bfd_mach_arm_6SM;
    case TAG_CPU_ARCH_V7E_M:
	return bfd_mach_arm_7EM;
    case TAG_CPU_ARCH_V8:
	return bfd_mach_arm_8;
    case TAG_CPU_ARCH_V8R:
	return bfd_mach_arm_8R;
    case TAG_CPU_ARCH_V8M_BASE:
	return bfd_mach_arm_8M_BASE;
    case TAG_CPU_ARCH_V8M_MAIN:
	return bfd_mach_arm_8M_MAIN;
    case TAG_CPU_ARCH_V8_1M_MAIN:
	return bfd_mach_arm_8_1M_MAIN;

    default:
      /* Force entry to be added for any new known Tag_CPU_arch value.  */
      BFD_ASSERT (arch > MAX_TAG_CPU_ARCH);

      /* Unknown Tag_CPU_arch value.  */
      return bfd_mach_arm_unknown;
    }
}
13848
13849 /* Set the right machine number. */
13850
13851 static bfd_boolean
13852 elf32_arm_object_p (bfd *abfd)
13853 {
13854 unsigned int mach;
13855
13856 mach = bfd_arm_get_mach_from_notes (abfd, ARM_NOTE_SECTION);
13857
13858 if (mach == bfd_mach_arm_unknown)
13859 {
13860 if (elf_elfheader (abfd)->e_flags & EF_ARM_MAVERICK_FLOAT)
13861 mach = bfd_mach_arm_ep9312;
13862 else
13863 mach = bfd_arm_get_mach_from_attributes (abfd);
13864 }
13865
13866 bfd_default_set_arch_mach (abfd, bfd_arch_arm, mach);
13867 return TRUE;
13868 }
13869
13870 /* Function to keep ARM specific flags in the ELF header. */
13871
13872 static bfd_boolean
13873 elf32_arm_set_private_flags (bfd *abfd, flagword flags)
13874 {
13875 if (elf_flags_init (abfd)
13876 && elf_elfheader (abfd)->e_flags != flags)
13877 {
13878 if (EF_ARM_EABI_VERSION (flags) == EF_ARM_EABI_UNKNOWN)
13879 {
13880 if (flags & EF_ARM_INTERWORK)
13881 _bfd_error_handler
13882 (_("warning: not setting interworking flag of %pB since it has already been specified as non-interworking"),
13883 abfd);
13884 else
13885 _bfd_error_handler
13886 (_("warning: clearing the interworking flag of %pB due to outside request"),
13887 abfd);
13888 }
13889 }
13890 else
13891 {
13892 elf_elfheader (abfd)->e_flags = flags;
13893 elf_flags_init (abfd) = TRUE;
13894 }
13895
13896 return TRUE;
13897 }
13898
13899 /* Copy backend specific data from one object module to another. */
13900
13901 static bfd_boolean
13902 elf32_arm_copy_private_bfd_data (bfd *ibfd, bfd *obfd)
13903 {
13904 flagword in_flags;
13905 flagword out_flags;
13906
13907 if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
13908 return TRUE;
13909
13910 in_flags = elf_elfheader (ibfd)->e_flags;
13911 out_flags = elf_elfheader (obfd)->e_flags;
13912
13913 if (elf_flags_init (obfd)
13914 && EF_ARM_EABI_VERSION (out_flags) == EF_ARM_EABI_UNKNOWN
13915 && in_flags != out_flags)
13916 {
13917 /* Cannot mix APCS26 and APCS32 code. */
13918 if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
13919 return FALSE;
13920
13921 /* Cannot mix float APCS and non-float APCS code. */
13922 if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
13923 return FALSE;
13924
13925 /* If the src and dest have different interworking flags
13926 then turn off the interworking bit. */
13927 if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
13928 {
13929 if (out_flags & EF_ARM_INTERWORK)
13930 _bfd_error_handler
13931 (_("warning: clearing the interworking flag of %pB because non-interworking code in %pB has been linked with it"),
13932 obfd, ibfd);
13933
13934 in_flags &= ~EF_ARM_INTERWORK;
13935 }
13936
13937 /* Likewise for PIC, though don't warn for this case. */
13938 if ((in_flags & EF_ARM_PIC) != (out_flags & EF_ARM_PIC))
13939 in_flags &= ~EF_ARM_PIC;
13940 }
13941
13942 elf_elfheader (obfd)->e_flags = in_flags;
13943 elf_flags_init (obfd) = TRUE;
13944
13945 return _bfd_elf_copy_private_bfd_data (ibfd, obfd);
13946 }
13947
/* Values for Tag_ABI_PCS_R9_use (see the ARM EABI addenda).  */
enum
{
  AEABI_R9_V6,		/* R9 used as an ordinary variable register (v6).  */
  AEABI_R9_SB,		/* R9 used as the static base (SB).  */
  AEABI_R9_TLS,		/* R9 used as the thread-local storage pointer.  */
  AEABI_R9_unused	/* R9 not used by code at all.  */
};

/* Values for Tag_ABI_PCS_RW_data.  */
enum
{
  AEABI_PCS_RW_data_absolute,	/* RW data addressed absolutely.  */
  AEABI_PCS_RW_data_PCrel,	/* RW data addressed PC-relative.  */
  AEABI_PCS_RW_data_SBrel,	/* RW data addressed SB-relative.  */
  AEABI_PCS_RW_data_unused	/* No RW data used.  */
};

/* Values for Tag_ABI_enum_size.  */
enum
{
  AEABI_enum_unused,		/* Enums not used.  */
  AEABI_enum_short,		/* Enums are smallest container.  */
  AEABI_enum_wide,		/* Enums are at least 32 bits.  */
  AEABI_enum_forced_wide	/* Enums forced to exactly 32 bits.  */
};
13974
13975 /* Determine whether an object attribute tag takes an integer, a
13976 string or both. */
13977
13978 static int
13979 elf32_arm_obj_attrs_arg_type (int tag)
13980 {
13981 if (tag == Tag_compatibility)
13982 return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_STR_VAL;
13983 else if (tag == Tag_nodefaults)
13984 return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_NO_DEFAULT;
13985 else if (tag == Tag_CPU_raw_name || tag == Tag_CPU_name)
13986 return ATTR_TYPE_FLAG_STR_VAL;
13987 else if (tag < 32)
13988 return ATTR_TYPE_FLAG_INT_VAL;
13989 else
13990 return (tag & 1) != 0 ? ATTR_TYPE_FLAG_STR_VAL : ATTR_TYPE_FLAG_INT_VAL;
13991 }
13992
13993 /* The ABI defines that Tag_conformance should be emitted first, and that
13994 Tag_nodefaults should be second (if either is defined). This sets those
13995 two positions, and bumps up the position of all the remaining tags to
13996 compensate. */
13997 static int
13998 elf32_arm_obj_attrs_order (int num)
13999 {
14000 if (num == LEAST_KNOWN_OBJ_ATTRIBUTE)
14001 return Tag_conformance;
14002 if (num == LEAST_KNOWN_OBJ_ATTRIBUTE + 1)
14003 return Tag_nodefaults;
14004 if ((num - 2) < Tag_nodefaults)
14005 return num - 2;
14006 if ((num - 1) < Tag_conformance)
14007 return num - 1;
14008 return num;
14009 }
14010
14011 /* Attribute numbers >=64 (mod 128) can be safely ignored. */
14012 static bfd_boolean
14013 elf32_arm_obj_attrs_handle_unknown (bfd *abfd, int tag)
14014 {
14015 if ((tag & 127) < 64)
14016 {
14017 _bfd_error_handler
14018 (_("%pB: unknown mandatory EABI object attribute %d"),
14019 abfd, tag);
14020 bfd_set_error (bfd_error_bad_value);
14021 return FALSE;
14022 }
14023 else
14024 {
14025 _bfd_error_handler
14026 (_("warning: %pB: unknown EABI object attribute %d"),
14027 abfd, tag);
14028 return TRUE;
14029 }
14030 }
14031
14032 /* Read the architecture from the Tag_also_compatible_with attribute, if any.
14033 Returns -1 if no architecture could be read. */
14034
14035 static int
14036 get_secondary_compatible_arch (bfd *abfd)
14037 {
14038 obj_attribute *attr =
14039 &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
14040
14041 /* Note: the tag and its argument below are uleb128 values, though
14042 currently-defined values fit in one byte for each. */
14043 if (attr->s
14044 && attr->s[0] == Tag_CPU_arch
14045 && (attr->s[1] & 128) != 128
14046 && attr->s[2] == 0)
14047 return attr->s[1];
14048
14049 /* This tag is "safely ignorable", so don't complain if it looks funny. */
14050 return -1;
14051 }
14052
14053 /* Set, or unset, the architecture of the Tag_also_compatible_with attribute.
14054 The tag is removed if ARCH is -1. */
14055
14056 static void
14057 set_secondary_compatible_arch (bfd *abfd, int arch)
14058 {
14059 obj_attribute *attr =
14060 &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
14061
14062 if (arch == -1)
14063 {
14064 attr->s = NULL;
14065 return;
14066 }
14067
14068 /* Note: the tag and its argument below are uleb128 values, though
14069 currently-defined values fit in one byte for each. */
14070 if (!attr->s)
14071 attr->s = (char *) bfd_alloc (abfd, 3);
14072 attr->s[0] = Tag_CPU_arch;
14073 attr->s[1] = arch;
14074 attr->s[2] = '\0';
14075 }
14076
14077 /* Combine two values for Tag_CPU_arch, taking secondary compatibility tags
14078 into account. */
14079
static int
tag_cpu_arch_combine (bfd *ibfd, int oldtag, int *secondary_compat_out,
		      int newtag, int secondary_compat)
{
#define T(X) TAG_CPU_ARCH_##X
  int tagl, tagh, result;
  /* Each vN[] table below describes merging against architecture N: it is
     indexed by the LOWER of the two Tag_CPU_arch values, and yields the
     combined architecture tag, or -1 when the two are incompatible.  */
  const int v6t2[] =
    {
      T(V6T2),   /* PRE_V4.  */
      T(V6T2),   /* V4.  */
      T(V6T2),   /* V4T.  */
      T(V6T2),   /* V5T.  */
      T(V6T2),   /* V5TE.  */
      T(V6T2),   /* V5TEJ.  */
      T(V6T2),   /* V6.  */
      T(V7),     /* V6KZ.  */
      T(V6T2)    /* V6T2.  */
    };
  const int v6k[] =
    {
      T(V6K),    /* PRE_V4.  */
      T(V6K),    /* V4.  */
      T(V6K),    /* V4T.  */
      T(V6K),    /* V5T.  */
      T(V6K),    /* V5TE.  */
      T(V6K),    /* V5TEJ.  */
      T(V6K),    /* V6.  */
      T(V6KZ),   /* V6KZ.  */
      T(V7),     /* V6T2.  */
      T(V6K)     /* V6K.  */
    };
  const int v7[] =
    {
      T(V7),     /* PRE_V4.  */
      T(V7),     /* V4.  */
      T(V7),     /* V4T.  */
      T(V7),     /* V5T.  */
      T(V7),     /* V5TE.  */
      T(V7),     /* V5TEJ.  */
      T(V7),     /* V6.  */
      T(V7),     /* V6KZ.  */
      T(V7),     /* V6T2.  */
      T(V7),     /* V6K.  */
      T(V7)      /* V7.  */
    };
  const int v6_m[] =
    {
      -1,        /* PRE_V4.  */
      -1,        /* V4.  */
      T(V6K),    /* V4T.  */
      T(V6K),    /* V5T.  */
      T(V6K),    /* V5TE.  */
      T(V6K),    /* V5TEJ.  */
      T(V6K),    /* V6.  */
      T(V6KZ),   /* V6KZ.  */
      T(V7),     /* V6T2.  */
      T(V6K),    /* V6K.  */
      T(V7),     /* V7.  */
      T(V6_M)    /* V6_M.  */
    };
  const int v6s_m[] =
    {
      -1,        /* PRE_V4.  */
      -1,        /* V4.  */
      T(V6K),    /* V4T.  */
      T(V6K),    /* V5T.  */
      T(V6K),    /* V5TE.  */
      T(V6K),    /* V5TEJ.  */
      T(V6K),    /* V6.  */
      T(V6KZ),   /* V6KZ.  */
      T(V7),     /* V6T2.  */
      T(V6K),    /* V6K.  */
      T(V7),     /* V7.  */
      T(V6S_M),  /* V6_M.  */
      T(V6S_M)   /* V6S_M.  */
    };
  const int v7e_m[] =
    {
      -1,        /* PRE_V4.  */
      -1,        /* V4.  */
      T(V7E_M),  /* V4T.  */
      T(V7E_M),  /* V5T.  */
      T(V7E_M),  /* V5TE.  */
      T(V7E_M),  /* V5TEJ.  */
      T(V7E_M),  /* V6.  */
      T(V7E_M),  /* V6KZ.  */
      T(V7E_M),  /* V6T2.  */
      T(V7E_M),  /* V6K.  */
      T(V7E_M),  /* V7.  */
      T(V7E_M),  /* V6_M.  */
      T(V7E_M),  /* V6S_M.  */
      T(V7E_M)   /* V7E_M.  */
    };
  const int v8[] =
    {
      T(V8),     /* PRE_V4.  */
      T(V8),     /* V4.  */
      T(V8),     /* V4T.  */
      T(V8),     /* V5T.  */
      T(V8),     /* V5TE.  */
      T(V8),     /* V5TEJ.  */
      T(V8),     /* V6.  */
      T(V8),     /* V6KZ.  */
      T(V8),     /* V6T2.  */
      T(V8),     /* V6K.  */
      T(V8),     /* V7.  */
      T(V8),     /* V6_M.  */
      T(V8),     /* V6S_M.  */
      T(V8),     /* V7E_M.  */
      T(V8)      /* V8.  */
    };
  const int v8r[] =
    {
      T(V8R),    /* PRE_V4.  */
      T(V8R),    /* V4.  */
      T(V8R),    /* V4T.  */
      T(V8R),    /* V5T.  */
      T(V8R),    /* V5TE.  */
      T(V8R),    /* V5TEJ.  */
      T(V8R),    /* V6.  */
      T(V8R),    /* V6KZ.  */
      T(V8R),    /* V6T2.  */
      T(V8R),    /* V6K.  */
      T(V8R),    /* V7.  */
      T(V8R),    /* V6_M.  */
      T(V8R),    /* V6S_M.  */
      T(V8R),    /* V7E_M.  */
      /* NOTE(review): V8R + V8 deliberately combines to plain V8 here;
	 confirm against the intended A/R-profile merge semantics.  */
      T(V8),     /* V8.  */
      T(V8R),    /* V8R.  */
    };
  const int v8m_baseline[] =
    {
      -1,        /* PRE_V4.  */
      -1,        /* V4.  */
      -1,        /* V4T.  */
      -1,        /* V5T.  */
      -1,        /* V5TE.  */
      -1,        /* V5TEJ.  */
      -1,        /* V6.  */
      -1,        /* V6KZ.  */
      -1,        /* V6T2.  */
      -1,        /* V6K.  */
      -1,        /* V7.  */
      T(V8M_BASE), /* V6_M.  */
      T(V8M_BASE), /* V6S_M.  */
      -1,        /* V7E_M.  */
      -1,        /* V8.  */
      -1,        /* V8R.  */
      T(V8M_BASE) /* V8-M BASELINE.  */
    };
  const int v8m_mainline[] =
    {
      -1,        /* PRE_V4.  */
      -1,        /* V4.  */
      -1,        /* V4T.  */
      -1,        /* V5T.  */
      -1,        /* V5TE.  */
      -1,        /* V5TEJ.  */
      -1,        /* V6.  */
      -1,        /* V6KZ.  */
      -1,        /* V6T2.  */
      -1,        /* V6K.  */
      T(V8M_MAIN), /* V7.  */
      T(V8M_MAIN), /* V6_M.  */
      T(V8M_MAIN), /* V6S_M.  */
      T(V8M_MAIN), /* V7E_M.  */
      -1,        /* V8.  */
      -1,        /* V8R.  */
      T(V8M_MAIN), /* V8-M BASELINE.  */
      T(V8M_MAIN) /* V8-M MAINLINE.  */
    };
  const int v8_1m_mainline[] =
    {
      -1,        /* PRE_V4.  */
      -1,        /* V4.  */
      -1,        /* V4T.  */
      -1,        /* V5T.  */
      -1,        /* V5TE.  */
      -1,        /* V5TEJ.  */
      -1,        /* V6.  */
      -1,        /* V6KZ.  */
      -1,        /* V6T2.  */
      -1,        /* V6K.  */
      T(V8_1M_MAIN), /* V7.  */
      T(V8_1M_MAIN), /* V6_M.  */
      T(V8_1M_MAIN), /* V6S_M.  */
      T(V8_1M_MAIN), /* V7E_M.  */
      -1,        /* V8.  */
      -1,        /* V8R.  */
      T(V8_1M_MAIN), /* V8-M BASELINE.  */
      T(V8_1M_MAIN), /* V8-M MAINLINE.  */
      -1,        /* Unused (18).  */
      -1,        /* Unused (19).  */
      -1,        /* Unused (20).  */
      T(V8_1M_MAIN) /* V8.1-M MAINLINE.  */
    };
  /* Pseudo-architecture for "V4T plus Tag_also_compatible_with V6_M":
     merging it with X behaves like merging both V4T and V6_M with X.  */
  const int v4t_plus_v6_m[] =
    {
      -1,        /* PRE_V4.  */
      -1,        /* V4.  */
      T(V4T),    /* V4T.  */
      T(V5T),    /* V5T.  */
      T(V5TE),   /* V5TE.  */
      T(V5TEJ),  /* V5TEJ.  */
      T(V6),     /* V6.  */
      T(V6KZ),   /* V6KZ.  */
      T(V6T2),   /* V6T2.  */
      T(V6K),    /* V6K.  */
      T(V7),     /* V7.  */
      T(V6_M),   /* V6_M.  */
      T(V6S_M),  /* V6S_M.  */
      T(V7E_M),  /* V7E_M.  */
      T(V8),     /* V8.  */
      -1,        /* V8R.  */
      T(V8M_BASE), /* V8-M BASELINE.  */
      T(V8M_MAIN), /* V8-M MAINLINE.  */
      -1,        /* Unused (18).  */
      -1,        /* Unused (19).  */
      -1,        /* Unused (20).  */
      T(V8_1M_MAIN), /* V8.1-M MAINLINE.  */
      T(V4T_PLUS_V6_M) /* V4T plus V6_M.  */
    };
  /* Dispatch table, indexed by (higher tag - T(V6T2)); NULL entries
     correspond to tag values with no merge table (unused tag numbers).  */
  const int *comb[] =
    {
      v6t2,
      v6k,
      v7,
      v6_m,
      v6s_m,
      v7e_m,
      v8,
      v8r,
      v8m_baseline,
      v8m_mainline,
      NULL,
      NULL,
      NULL,
      v8_1m_mainline,
      /* Pseudo-architecture.  */
      v4t_plus_v6_m
    };

  /* Check we've not got a higher architecture than we know about.  */

  if (oldtag > MAX_TAG_CPU_ARCH || newtag > MAX_TAG_CPU_ARCH)
    {
      _bfd_error_handler (_("error: %pB: unknown CPU architecture"), ibfd);
      return -1;
    }

  /* Override old tag if we have a Tag_also_compatible_with on the output.  */

  if ((oldtag == T(V6_M) && *secondary_compat_out == T(V4T))
      || (oldtag == T(V4T) && *secondary_compat_out == T(V6_M)))
    oldtag = T(V4T_PLUS_V6_M);

  /* And override the new tag if we have a Tag_also_compatible_with on the
     input.  */

  if ((newtag == T(V6_M) && secondary_compat == T(V4T))
      || (newtag == T(V4T) && secondary_compat == T(V6_M)))
    newtag = T(V4T_PLUS_V6_M);

  tagl = (oldtag < newtag) ? oldtag : newtag;
  result = tagh = (oldtag > newtag) ? oldtag : newtag;

  /* Architectures before V6KZ add features monotonically.  */
  if (tagh <= TAG_CPU_ARCH_V6KZ)
    return result;

  /* The higher tag selects the merge table; the lower tag indexes it.  */
  result = comb[tagh - T(V6T2)] ? comb[tagh - T(V6T2)][tagl] : -1;

  /* Use Tag_CPU_arch == V4T and Tag_also_compatible_with (Tag_CPU_arch V6_M)
     as the canonical version.  */
  if (result == T(V4T_PLUS_V6_M))
    {
      result = T(V4T);
      *secondary_compat_out = T(V6_M);
    }
  else
    *secondary_compat_out = -1;

  if (result == -1)
    {
      _bfd_error_handler (_("error: %pB: conflicting CPU architectures %d/%d"),
			  ibfd, oldtag, newtag);
      return -1;
    }

  return result;
#undef T
}
14372
14373 /* Query attributes object to see if integer divide instructions may be
14374 present in an object. */
14375 static bfd_boolean
14376 elf32_arm_attributes_accept_div (const obj_attribute *attr)
14377 {
14378 int arch = attr[Tag_CPU_arch].i;
14379 int profile = attr[Tag_CPU_arch_profile].i;
14380
14381 switch (attr[Tag_DIV_use].i)
14382 {
14383 case 0:
14384 /* Integer divide allowed if instruction contained in archetecture. */
14385 if (arch == TAG_CPU_ARCH_V7 && (profile == 'R' || profile == 'M'))
14386 return TRUE;
14387 else if (arch >= TAG_CPU_ARCH_V7E_M)
14388 return TRUE;
14389 else
14390 return FALSE;
14391
14392 case 1:
14393 /* Integer divide explicitly prohibited. */
14394 return FALSE;
14395
14396 default:
14397 /* Unrecognised case - treat as allowing divide everywhere. */
14398 case 2:
14399 /* Integer divide allowed in ARM state. */
14400 return TRUE;
14401 }
14402 }
14403
14404 /* Query attributes object to see if integer divide instructions are
14405 forbidden to be in the object. This is not the inverse of
14406 elf32_arm_attributes_accept_div. */
14407 static bfd_boolean
14408 elf32_arm_attributes_forbid_div (const obj_attribute *attr)
14409 {
14410 return attr[Tag_DIV_use].i == 1;
14411 }
14412
14413 /* Merge EABI object attributes from IBFD into OBFD. Raise an error if there
14414 are conflicting attributes. */
14415
14416 static bfd_boolean
14417 elf32_arm_merge_eabi_attributes (bfd *ibfd, struct bfd_link_info *info)
14418 {
14419 bfd *obfd = info->output_bfd;
14420 obj_attribute *in_attr;
14421 obj_attribute *out_attr;
14422 /* Some tags have 0 = don't care, 1 = strong requirement,
14423 2 = weak requirement. */
14424 static const int order_021[3] = {0, 2, 1};
14425 int i;
14426 bfd_boolean result = TRUE;
14427 const char *sec_name = get_elf_backend_data (ibfd)->obj_attrs_section;
14428
14429 /* Skip the linker stubs file. This preserves previous behavior
14430 of accepting unknown attributes in the first input file - but
14431 is that a bug? */
14432 if (ibfd->flags & BFD_LINKER_CREATED)
14433 return TRUE;
14434
14435 /* Skip any input that hasn't attribute section.
14436 This enables to link object files without attribute section with
14437 any others. */
14438 if (bfd_get_section_by_name (ibfd, sec_name) == NULL)
14439 return TRUE;
14440
14441 if (!elf_known_obj_attributes_proc (obfd)[0].i)
14442 {
14443 /* This is the first object. Copy the attributes. */
14444 _bfd_elf_copy_obj_attributes (ibfd, obfd);
14445
14446 out_attr = elf_known_obj_attributes_proc (obfd);
14447
14448 /* Use the Tag_null value to indicate the attributes have been
14449 initialized. */
14450 out_attr[0].i = 1;
14451
14452 /* We do not output objects with Tag_MPextension_use_legacy - we move
14453 the attribute's value to Tag_MPextension_use. */
14454 if (out_attr[Tag_MPextension_use_legacy].i != 0)
14455 {
14456 if (out_attr[Tag_MPextension_use].i != 0
14457 && out_attr[Tag_MPextension_use_legacy].i
14458 != out_attr[Tag_MPextension_use].i)
14459 {
14460 _bfd_error_handler
14461 (_("Error: %pB has both the current and legacy "
14462 "Tag_MPextension_use attributes"), ibfd);
14463 result = FALSE;
14464 }
14465
14466 out_attr[Tag_MPextension_use] =
14467 out_attr[Tag_MPextension_use_legacy];
14468 out_attr[Tag_MPextension_use_legacy].type = 0;
14469 out_attr[Tag_MPextension_use_legacy].i = 0;
14470 }
14471
14472 return result;
14473 }
14474
14475 in_attr = elf_known_obj_attributes_proc (ibfd);
14476 out_attr = elf_known_obj_attributes_proc (obfd);
14477 /* This needs to happen before Tag_ABI_FP_number_model is merged. */
14478 if (in_attr[Tag_ABI_VFP_args].i != out_attr[Tag_ABI_VFP_args].i)
14479 {
14480 /* Ignore mismatches if the object doesn't use floating point or is
14481 floating point ABI independent. */
14482 if (out_attr[Tag_ABI_FP_number_model].i == AEABI_FP_number_model_none
14483 || (in_attr[Tag_ABI_FP_number_model].i != AEABI_FP_number_model_none
14484 && out_attr[Tag_ABI_VFP_args].i == AEABI_VFP_args_compatible))
14485 out_attr[Tag_ABI_VFP_args].i = in_attr[Tag_ABI_VFP_args].i;
14486 else if (in_attr[Tag_ABI_FP_number_model].i != AEABI_FP_number_model_none
14487 && in_attr[Tag_ABI_VFP_args].i != AEABI_VFP_args_compatible)
14488 {
14489 _bfd_error_handler
14490 (_("error: %pB uses VFP register arguments, %pB does not"),
14491 in_attr[Tag_ABI_VFP_args].i ? ibfd : obfd,
14492 in_attr[Tag_ABI_VFP_args].i ? obfd : ibfd);
14493 result = FALSE;
14494 }
14495 }
14496
14497 for (i = LEAST_KNOWN_OBJ_ATTRIBUTE; i < NUM_KNOWN_OBJ_ATTRIBUTES; i++)
14498 {
14499 /* Merge this attribute with existing attributes. */
14500 switch (i)
14501 {
14502 case Tag_CPU_raw_name:
14503 case Tag_CPU_name:
14504 /* These are merged after Tag_CPU_arch. */
14505 break;
14506
14507 case Tag_ABI_optimization_goals:
14508 case Tag_ABI_FP_optimization_goals:
14509 /* Use the first value seen. */
14510 break;
14511
14512 case Tag_CPU_arch:
14513 {
14514 int secondary_compat = -1, secondary_compat_out = -1;
14515 unsigned int saved_out_attr = out_attr[i].i;
14516 int arch_attr;
14517 static const char *name_table[] =
14518 {
14519 /* These aren't real CPU names, but we can't guess
14520 that from the architecture version alone. */
14521 "Pre v4",
14522 "ARM v4",
14523 "ARM v4T",
14524 "ARM v5T",
14525 "ARM v5TE",
14526 "ARM v5TEJ",
14527 "ARM v6",
14528 "ARM v6KZ",
14529 "ARM v6T2",
14530 "ARM v6K",
14531 "ARM v7",
14532 "ARM v6-M",
14533 "ARM v6S-M",
14534 "ARM v8",
14535 "",
14536 "ARM v8-M.baseline",
14537 "ARM v8-M.mainline",
14538 };
14539
14540 /* Merge Tag_CPU_arch and Tag_also_compatible_with. */
14541 secondary_compat = get_secondary_compatible_arch (ibfd);
14542 secondary_compat_out = get_secondary_compatible_arch (obfd);
14543 arch_attr = tag_cpu_arch_combine (ibfd, out_attr[i].i,
14544 &secondary_compat_out,
14545 in_attr[i].i,
14546 secondary_compat);
14547
14548 /* Return with error if failed to merge. */
14549 if (arch_attr == -1)
14550 return FALSE;
14551
14552 out_attr[i].i = arch_attr;
14553
14554 set_secondary_compatible_arch (obfd, secondary_compat_out);
14555
14556 /* Merge Tag_CPU_name and Tag_CPU_raw_name. */
14557 if (out_attr[i].i == saved_out_attr)
14558 ; /* Leave the names alone. */
14559 else if (out_attr[i].i == in_attr[i].i)
14560 {
14561 /* The output architecture has been changed to match the
14562 input architecture. Use the input names. */
14563 out_attr[Tag_CPU_name].s = in_attr[Tag_CPU_name].s
14564 ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_name].s)
14565 : NULL;
14566 out_attr[Tag_CPU_raw_name].s = in_attr[Tag_CPU_raw_name].s
14567 ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_raw_name].s)
14568 : NULL;
14569 }
14570 else
14571 {
14572 out_attr[Tag_CPU_name].s = NULL;
14573 out_attr[Tag_CPU_raw_name].s = NULL;
14574 }
14575
14576 /* If we still don't have a value for Tag_CPU_name,
14577 make one up now. Tag_CPU_raw_name remains blank. */
14578 if (out_attr[Tag_CPU_name].s == NULL
14579 && out_attr[i].i < ARRAY_SIZE (name_table))
14580 out_attr[Tag_CPU_name].s =
14581 _bfd_elf_attr_strdup (obfd, name_table[out_attr[i].i]);
14582 }
14583 break;
14584
14585 case Tag_ARM_ISA_use:
14586 case Tag_THUMB_ISA_use:
14587 case Tag_WMMX_arch:
14588 case Tag_Advanced_SIMD_arch:
14589 /* ??? Do Advanced_SIMD (NEON) and WMMX conflict? */
14590 case Tag_ABI_FP_rounding:
14591 case Tag_ABI_FP_exceptions:
14592 case Tag_ABI_FP_user_exceptions:
14593 case Tag_ABI_FP_number_model:
14594 case Tag_FP_HP_extension:
14595 case Tag_CPU_unaligned_access:
14596 case Tag_T2EE_use:
14597 case Tag_MPextension_use:
14598 /* Use the largest value specified. */
14599 if (in_attr[i].i > out_attr[i].i)
14600 out_attr[i].i = in_attr[i].i;
14601 break;
14602
14603 case Tag_ABI_align_preserved:
14604 case Tag_ABI_PCS_RO_data:
14605 /* Use the smallest value specified. */
14606 if (in_attr[i].i < out_attr[i].i)
14607 out_attr[i].i = in_attr[i].i;
14608 break;
14609
14610 case Tag_ABI_align_needed:
14611 if ((in_attr[i].i > 0 || out_attr[i].i > 0)
14612 && (in_attr[Tag_ABI_align_preserved].i == 0
14613 || out_attr[Tag_ABI_align_preserved].i == 0))
14614 {
14615 /* This error message should be enabled once all non-conformant
14616 binaries in the toolchain have had the attributes set
14617 properly.
14618 _bfd_error_handler
14619 (_("error: %pB: 8-byte data alignment conflicts with %pB"),
14620 obfd, ibfd);
14621 result = FALSE; */
14622 }
14623 /* Fall through. */
14624 case Tag_ABI_FP_denormal:
14625 case Tag_ABI_PCS_GOT_use:
14626 /* Use the "greatest" from the sequence 0, 2, 1, or the largest
14627 value if greater than 2 (for future-proofing). */
14628 if ((in_attr[i].i > 2 && in_attr[i].i > out_attr[i].i)
14629 || (in_attr[i].i <= 2 && out_attr[i].i <= 2
14630 && order_021[in_attr[i].i] > order_021[out_attr[i].i]))
14631 out_attr[i].i = in_attr[i].i;
14632 break;
14633
14634 case Tag_Virtualization_use:
14635 /* The virtualization tag effectively stores two bits of
14636 information: the intended use of TrustZone (in bit 0), and the
14637 intended use of Virtualization (in bit 1). */
14638 if (out_attr[i].i == 0)
14639 out_attr[i].i = in_attr[i].i;
14640 else if (in_attr[i].i != 0
14641 && in_attr[i].i != out_attr[i].i)
14642 {
14643 if (in_attr[i].i <= 3 && out_attr[i].i <= 3)
14644 out_attr[i].i = 3;
14645 else
14646 {
14647 _bfd_error_handler
14648 (_("error: %pB: unable to merge virtualization attributes "
14649 "with %pB"),
14650 obfd, ibfd);
14651 result = FALSE;
14652 }
14653 }
14654 break;
14655
14656 case Tag_CPU_arch_profile:
14657 if (out_attr[i].i != in_attr[i].i)
14658 {
14659 /* 0 will merge with anything.
14660 'A' and 'S' merge to 'A'.
14661 'R' and 'S' merge to 'R'.
14662 'M' and 'A|R|S' is an error. */
14663 if (out_attr[i].i == 0
14664 || (out_attr[i].i == 'S'
14665 && (in_attr[i].i == 'A' || in_attr[i].i == 'R')))
14666 out_attr[i].i = in_attr[i].i;
14667 else if (in_attr[i].i == 0
14668 || (in_attr[i].i == 'S'
14669 && (out_attr[i].i == 'A' || out_attr[i].i == 'R')))
14670 ; /* Do nothing. */
14671 else
14672 {
14673 _bfd_error_handler
14674 (_("error: %pB: conflicting architecture profiles %c/%c"),
14675 ibfd,
14676 in_attr[i].i ? in_attr[i].i : '0',
14677 out_attr[i].i ? out_attr[i].i : '0');
14678 result = FALSE;
14679 }
14680 }
14681 break;
14682
14683 case Tag_DSP_extension:
14684 /* No need to change output value if any of:
14685 - pre (<=) ARMv5T input architecture (do not have DSP)
14686 - M input profile not ARMv7E-M and do not have DSP. */
14687 if (in_attr[Tag_CPU_arch].i <= 3
14688 || (in_attr[Tag_CPU_arch_profile].i == 'M'
14689 && in_attr[Tag_CPU_arch].i != 13
14690 && in_attr[i].i == 0))
14691 ; /* Do nothing. */
14692 /* Output value should be 0 if DSP part of architecture, ie.
14693 - post (>=) ARMv5te architecture output
14694 - A, R or S profile output or ARMv7E-M output architecture. */
14695 else if (out_attr[Tag_CPU_arch].i >= 4
14696 && (out_attr[Tag_CPU_arch_profile].i == 'A'
14697 || out_attr[Tag_CPU_arch_profile].i == 'R'
14698 || out_attr[Tag_CPU_arch_profile].i == 'S'
14699 || out_attr[Tag_CPU_arch].i == 13))
14700 out_attr[i].i = 0;
14701 /* Otherwise, DSP instructions are added and not part of output
14702 architecture. */
14703 else
14704 out_attr[i].i = 1;
14705 break;
14706
14707 case Tag_FP_arch:
14708 {
14709 /* Tag_ABI_HardFP_use is handled along with Tag_FP_arch since
14710 the meaning of Tag_ABI_HardFP_use depends on Tag_FP_arch
14711 when it's 0. It might mean absence of FP hardware if
14712 Tag_FP_arch is zero. */
14713
14714 #define VFP_VERSION_COUNT 9
14715 static const struct
14716 {
14717 int ver;
14718 int regs;
14719 } vfp_versions[VFP_VERSION_COUNT] =
14720 {
14721 {0, 0},
14722 {1, 16},
14723 {2, 16},
14724 {3, 32},
14725 {3, 16},
14726 {4, 32},
14727 {4, 16},
14728 {8, 32},
14729 {8, 16}
14730 };
14731 int ver;
14732 int regs;
14733 int newval;
14734
14735 /* If the output has no requirement about FP hardware,
14736 follow the requirement of the input. */
14737 if (out_attr[i].i == 0)
14738 {
14739 /* This assert is still reasonable, we shouldn't
14740 produce the suspicious build attribute
14741 combination (See below for in_attr). */
14742 BFD_ASSERT (out_attr[Tag_ABI_HardFP_use].i == 0);
14743 out_attr[i].i = in_attr[i].i;
14744 out_attr[Tag_ABI_HardFP_use].i
14745 = in_attr[Tag_ABI_HardFP_use].i;
14746 break;
14747 }
14748 /* If the input has no requirement about FP hardware, do
14749 nothing. */
14750 else if (in_attr[i].i == 0)
14751 {
14752 /* We used to assert that Tag_ABI_HardFP_use was
14753 zero here, but we should never assert when
14754 consuming an object file that has suspicious
14755 build attributes. The single precision variant
14756 of 'no FP architecture' is still 'no FP
14757 architecture', so we just ignore the tag in this
14758 case. */
14759 break;
14760 }
14761
14762 /* Both the input and the output have nonzero Tag_FP_arch.
14763 So Tag_ABI_HardFP_use is implied by Tag_FP_arch when it's zero. */
14764
14765 /* If both the input and the output have zero Tag_ABI_HardFP_use,
14766 do nothing. */
14767 if (in_attr[Tag_ABI_HardFP_use].i == 0
14768 && out_attr[Tag_ABI_HardFP_use].i == 0)
14769 ;
14770 /* If the input and the output have different Tag_ABI_HardFP_use,
14771 the combination of them is 0 (implied by Tag_FP_arch). */
14772 else if (in_attr[Tag_ABI_HardFP_use].i
14773 != out_attr[Tag_ABI_HardFP_use].i)
14774 out_attr[Tag_ABI_HardFP_use].i = 0;
14775
14776 /* Now we can handle Tag_FP_arch. */
14777
14778 /* Values of VFP_VERSION_COUNT or more aren't defined, so just
14779 pick the biggest. */
14780 if (in_attr[i].i >= VFP_VERSION_COUNT
14781 && in_attr[i].i > out_attr[i].i)
14782 {
14783 out_attr[i] = in_attr[i];
14784 break;
14785 }
14786 /* The output uses the superset of input features
14787 (ISA version) and registers. */
14788 ver = vfp_versions[in_attr[i].i].ver;
14789 if (ver < vfp_versions[out_attr[i].i].ver)
14790 ver = vfp_versions[out_attr[i].i].ver;
14791 regs = vfp_versions[in_attr[i].i].regs;
14792 if (regs < vfp_versions[out_attr[i].i].regs)
14793 regs = vfp_versions[out_attr[i].i].regs;
14794 /* This assumes all possible supersets are also a valid
14795 options. */
14796 for (newval = VFP_VERSION_COUNT - 1; newval > 0; newval--)
14797 {
14798 if (regs == vfp_versions[newval].regs
14799 && ver == vfp_versions[newval].ver)
14800 break;
14801 }
14802 out_attr[i].i = newval;
14803 }
14804 break;
14805 case Tag_PCS_config:
14806 if (out_attr[i].i == 0)
14807 out_attr[i].i = in_attr[i].i;
14808 else if (in_attr[i].i != 0 && out_attr[i].i != in_attr[i].i)
14809 {
14810 /* It's sometimes ok to mix different configs, so this is only
14811 a warning. */
14812 _bfd_error_handler
14813 (_("warning: %pB: conflicting platform configuration"), ibfd);
14814 }
14815 break;
14816 case Tag_ABI_PCS_R9_use:
14817 if (in_attr[i].i != out_attr[i].i
14818 && out_attr[i].i != AEABI_R9_unused
14819 && in_attr[i].i != AEABI_R9_unused)
14820 {
14821 _bfd_error_handler
14822 (_("error: %pB: conflicting use of R9"), ibfd);
14823 result = FALSE;
14824 }
14825 if (out_attr[i].i == AEABI_R9_unused)
14826 out_attr[i].i = in_attr[i].i;
14827 break;
14828 case Tag_ABI_PCS_RW_data:
14829 if (in_attr[i].i == AEABI_PCS_RW_data_SBrel
14830 && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_SB
14831 && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_unused)
14832 {
14833 _bfd_error_handler
14834 (_("error: %pB: SB relative addressing conflicts with use of R9"),
14835 ibfd);
14836 result = FALSE;
14837 }
14838 /* Use the smallest value specified. */
14839 if (in_attr[i].i < out_attr[i].i)
14840 out_attr[i].i = in_attr[i].i;
14841 break;
14842 case Tag_ABI_PCS_wchar_t:
14843 if (out_attr[i].i && in_attr[i].i && out_attr[i].i != in_attr[i].i
14844 && !elf_arm_tdata (obfd)->no_wchar_size_warning)
14845 {
14846 _bfd_error_handler
14847 (_("warning: %pB uses %u-byte wchar_t yet the output is to use %u-byte wchar_t; use of wchar_t values across objects may fail"),
14848 ibfd, in_attr[i].i, out_attr[i].i);
14849 }
14850 else if (in_attr[i].i && !out_attr[i].i)
14851 out_attr[i].i = in_attr[i].i;
14852 break;
14853 case Tag_ABI_enum_size:
14854 if (in_attr[i].i != AEABI_enum_unused)
14855 {
14856 if (out_attr[i].i == AEABI_enum_unused
14857 || out_attr[i].i == AEABI_enum_forced_wide)
14858 {
14859 /* The existing object is compatible with anything.
14860 Use whatever requirements the new object has. */
14861 out_attr[i].i = in_attr[i].i;
14862 }
14863 else if (in_attr[i].i != AEABI_enum_forced_wide
14864 && out_attr[i].i != in_attr[i].i
14865 && !elf_arm_tdata (obfd)->no_enum_size_warning)
14866 {
14867 static const char *aeabi_enum_names[] =
14868 { "", "variable-size", "32-bit", "" };
14869 const char *in_name =
14870 in_attr[i].i < ARRAY_SIZE(aeabi_enum_names)
14871 ? aeabi_enum_names[in_attr[i].i]
14872 : "<unknown>";
14873 const char *out_name =
14874 out_attr[i].i < ARRAY_SIZE(aeabi_enum_names)
14875 ? aeabi_enum_names[out_attr[i].i]
14876 : "<unknown>";
14877 _bfd_error_handler
14878 (_("warning: %pB uses %s enums yet the output is to use %s enums; use of enum values across objects may fail"),
14879 ibfd, in_name, out_name);
14880 }
14881 }
14882 break;
14883 case Tag_ABI_VFP_args:
14884 /* Aready done. */
14885 break;
14886 case Tag_ABI_WMMX_args:
14887 if (in_attr[i].i != out_attr[i].i)
14888 {
14889 _bfd_error_handler
14890 (_("error: %pB uses iWMMXt register arguments, %pB does not"),
14891 ibfd, obfd);
14892 result = FALSE;
14893 }
14894 break;
14895 case Tag_compatibility:
14896 /* Merged in target-independent code. */
14897 break;
14898 case Tag_ABI_HardFP_use:
14899 /* This is handled along with Tag_FP_arch. */
14900 break;
14901 case Tag_ABI_FP_16bit_format:
14902 if (in_attr[i].i != 0 && out_attr[i].i != 0)
14903 {
14904 if (in_attr[i].i != out_attr[i].i)
14905 {
14906 _bfd_error_handler
14907 (_("error: fp16 format mismatch between %pB and %pB"),
14908 ibfd, obfd);
14909 result = FALSE;
14910 }
14911 }
14912 if (in_attr[i].i != 0)
14913 out_attr[i].i = in_attr[i].i;
14914 break;
14915
14916 case Tag_DIV_use:
14917 /* A value of zero on input means that the divide instruction may
14918 be used if available in the base architecture as specified via
14919 Tag_CPU_arch and Tag_CPU_arch_profile. A value of 1 means that
14920 the user did not want divide instructions. A value of 2
14921 explicitly means that divide instructions were allowed in ARM
14922 and Thumb state. */
14923 if (in_attr[i].i == out_attr[i].i)
14924 /* Do nothing. */ ;
14925 else if (elf32_arm_attributes_forbid_div (in_attr)
14926 && !elf32_arm_attributes_accept_div (out_attr))
14927 out_attr[i].i = 1;
14928 else if (elf32_arm_attributes_forbid_div (out_attr)
14929 && elf32_arm_attributes_accept_div (in_attr))
14930 out_attr[i].i = in_attr[i].i;
14931 else if (in_attr[i].i == 2)
14932 out_attr[i].i = in_attr[i].i;
14933 break;
14934
14935 case Tag_MPextension_use_legacy:
14936 /* We don't output objects with Tag_MPextension_use_legacy - we
14937 move the value to Tag_MPextension_use. */
14938 if (in_attr[i].i != 0 && in_attr[Tag_MPextension_use].i != 0)
14939 {
14940 if (in_attr[Tag_MPextension_use].i != in_attr[i].i)
14941 {
14942 _bfd_error_handler
14943 (_("%pB has both the current and legacy "
14944 "Tag_MPextension_use attributes"),
14945 ibfd);
14946 result = FALSE;
14947 }
14948 }
14949
14950 if (in_attr[i].i > out_attr[Tag_MPextension_use].i)
14951 out_attr[Tag_MPextension_use] = in_attr[i];
14952
14953 break;
14954
14955 case Tag_nodefaults:
14956 /* This tag is set if it exists, but the value is unused (and is
14957 typically zero). We don't actually need to do anything here -
14958 the merge happens automatically when the type flags are merged
14959 below. */
14960 break;
14961 case Tag_also_compatible_with:
14962 /* Already done in Tag_CPU_arch. */
14963 break;
14964 case Tag_conformance:
14965 /* Keep the attribute if it matches. Throw it away otherwise.
14966 No attribute means no claim to conform. */
14967 if (!in_attr[i].s || !out_attr[i].s
14968 || strcmp (in_attr[i].s, out_attr[i].s) != 0)
14969 out_attr[i].s = NULL;
14970 break;
14971
14972 default:
14973 result
14974 = result && _bfd_elf_merge_unknown_attribute_low (ibfd, obfd, i);
14975 }
14976
14977 /* If out_attr was copied from in_attr then it won't have a type yet. */
14978 if (in_attr[i].type && !out_attr[i].type)
14979 out_attr[i].type = in_attr[i].type;
14980 }
14981
14982 /* Merge Tag_compatibility attributes and any common GNU ones. */
14983 if (!_bfd_elf_merge_object_attributes (ibfd, info))
14984 return FALSE;
14985
14986 /* Check for any attributes not known on ARM. */
14987 result &= _bfd_elf_merge_unknown_attribute_list (ibfd, obfd);
14988
14989 return result;
14990 }
14991
14992
14993 /* Return TRUE if the two EABI versions are incompatible. */
14994
14995 static bfd_boolean
14996 elf32_arm_versions_compatible (unsigned iver, unsigned over)
14997 {
14998 /* v4 and v5 are the same spec before and after it was released,
14999 so allow mixing them. */
15000 if ((iver == EF_ARM_EABI_VER4 && over == EF_ARM_EABI_VER5)
15001 || (iver == EF_ARM_EABI_VER5 && over == EF_ARM_EABI_VER4))
15002 return TRUE;
15003
15004 return (iver == over);
15005 }
15006
15007 /* Merge backend specific data from an object file to the output
15008 object file when linking. */
15009
15010 static bfd_boolean
15011 elf32_arm_merge_private_bfd_data (bfd *, struct bfd_link_info *);
15012
15013 /* Display the flags field. */
15014
static bfd_boolean
elf32_arm_print_private_bfd_data (bfd *abfd, void * ptr)
{
  FILE * file = (FILE *) ptr;
  unsigned long flags;

  BFD_ASSERT (abfd != NULL && ptr != NULL);

  /* Print normal ELF private data.  */
  _bfd_elf_print_private_bfd_data (abfd, ptr);

  flags = elf_elfheader (abfd)->e_flags;
  /* Ignore init flag - it may not be set, despite the flags field
     containing valid data.  */

  fprintf (file, _("private flags = %lx:"), elf_elfheader (abfd)->e_flags);

  /* Each case below clears the bits it has decoded from FLAGS, so that
     any remaining set bits can be reported as unrecognised at the end.  */
  switch (EF_ARM_EABI_VERSION (flags))
    {
    case EF_ARM_EABI_UNKNOWN:
      /* The following flag bits are GNU extensions and not part of the
	 official ARM ELF extended ABI.  Hence they are only decoded if
	 the EABI version is not set.  */
      if (flags & EF_ARM_INTERWORK)
	fprintf (file, _(" [interworking enabled]"));

      if (flags & EF_ARM_APCS_26)
	fprintf (file, " [APCS-26]");
      else
	fprintf (file, " [APCS-32]");

      if (flags & EF_ARM_VFP_FLOAT)
	fprintf (file, _(" [VFP float format]"));
      else if (flags & EF_ARM_MAVERICK_FLOAT)
	fprintf (file, _(" [Maverick float format]"));
      else
	fprintf (file, _(" [FPA float format]"));

      if (flags & EF_ARM_APCS_FLOAT)
	fprintf (file, _(" [floats passed in float registers]"));

      if (flags & EF_ARM_PIC)
	fprintf (file, _(" [position independent]"));

      if (flags & EF_ARM_NEW_ABI)
	fprintf (file, _(" [new ABI]"));

      if (flags & EF_ARM_OLD_ABI)
	fprintf (file, _(" [old ABI]"));

      if (flags & EF_ARM_SOFT_FLOAT)
	fprintf (file, _(" [software FP]"));

      flags &= ~(EF_ARM_INTERWORK | EF_ARM_APCS_26 | EF_ARM_APCS_FLOAT
		 | EF_ARM_PIC | EF_ARM_NEW_ABI | EF_ARM_OLD_ABI
		 | EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT
		 | EF_ARM_MAVERICK_FLOAT);
      break;

    case EF_ARM_EABI_VER1:
      fprintf (file, _(" [Version1 EABI]"));

      if (flags & EF_ARM_SYMSARESORTED)
	fprintf (file, _(" [sorted symbol table]"));
      else
	fprintf (file, _(" [unsorted symbol table]"));

      flags &= ~ EF_ARM_SYMSARESORTED;
      break;

    case EF_ARM_EABI_VER2:
      fprintf (file, _(" [Version2 EABI]"));

      if (flags & EF_ARM_SYMSARESORTED)
	fprintf (file, _(" [sorted symbol table]"));
      else
	fprintf (file, _(" [unsorted symbol table]"));

      if (flags & EF_ARM_DYNSYMSUSESEGIDX)
	fprintf (file, _(" [dynamic symbols use segment index]"));

      if (flags & EF_ARM_MAPSYMSFIRST)
	fprintf (file, _(" [mapping symbols precede others]"));

      flags &= ~(EF_ARM_SYMSARESORTED | EF_ARM_DYNSYMSUSESEGIDX
		 | EF_ARM_MAPSYMSFIRST);
      break;

    case EF_ARM_EABI_VER3:
      fprintf (file, _(" [Version3 EABI]"));
      break;

    case EF_ARM_EABI_VER4:
      fprintf (file, _(" [Version4 EABI]"));
      /* Jump into the VER5 case to share the BE8/LE8 decoding below.  */
      goto eabi;

    case EF_ARM_EABI_VER5:
      fprintf (file, _(" [Version5 EABI]"));

      if (flags & EF_ARM_ABI_FLOAT_SOFT)
	fprintf (file, _(" [soft-float ABI]"));

      if (flags & EF_ARM_ABI_FLOAT_HARD)
	fprintf (file, _(" [hard-float ABI]"));

      flags &= ~(EF_ARM_ABI_FLOAT_SOFT | EF_ARM_ABI_FLOAT_HARD);

    eabi:
      /* Shared between VER4 and VER5 (see the goto above).  */
      if (flags & EF_ARM_BE8)
	fprintf (file, _(" [BE8]"));

      if (flags & EF_ARM_LE8)
	fprintf (file, _(" [LE8]"));

      flags &= ~(EF_ARM_LE8 | EF_ARM_BE8);
      break;

    default:
      fprintf (file, _(" <EABI version unrecognised>"));
      break;
    }

  flags &= ~ EF_ARM_EABIMASK;

  /* These two bits are valid regardless of EABI version.  */
  if (flags & EF_ARM_RELEXEC)
    fprintf (file, _(" [relocatable executable]"));

  if (flags & EF_ARM_PIC)
    fprintf (file, _(" [position independent]"));

  if (elf_elfheader (abfd)->e_ident[EI_OSABI] == ELFOSABI_ARM_FDPIC)
    fprintf (file, _(" [FDPIC ABI supplement]"));

  flags &= ~ (EF_ARM_RELEXEC | EF_ARM_PIC);

  /* Anything still set was not decoded above.  */
  if (flags)
    fprintf (file, _("<Unrecognised flag bits set>"));

  fputc ('\n', file);

  return TRUE;
}
15157
15158 static int
15159 elf32_arm_get_symbol_type (Elf_Internal_Sym * elf_sym, int type)
15160 {
15161 switch (ELF_ST_TYPE (elf_sym->st_info))
15162 {
15163 case STT_ARM_TFUNC:
15164 return ELF_ST_TYPE (elf_sym->st_info);
15165
15166 case STT_ARM_16BIT:
15167 /* If the symbol is not an object, return the STT_ARM_16BIT flag.
15168 This allows us to distinguish between data used by Thumb instructions
15169 and non-data (which is probably code) inside Thumb regions of an
15170 executable. */
15171 if (type != STT_OBJECT && type != STT_TLS)
15172 return ELF_ST_TYPE (elf_sym->st_info);
15173 break;
15174
15175 default:
15176 break;
15177 }
15178
15179 return type;
15180 }
15181
15182 static asection *
15183 elf32_arm_gc_mark_hook (asection *sec,
15184 struct bfd_link_info *info,
15185 Elf_Internal_Rela *rel,
15186 struct elf_link_hash_entry *h,
15187 Elf_Internal_Sym *sym)
15188 {
15189 if (h != NULL)
15190 switch (ELF32_R_TYPE (rel->r_info))
15191 {
15192 case R_ARM_GNU_VTINHERIT:
15193 case R_ARM_GNU_VTENTRY:
15194 return NULL;
15195 }
15196
15197 return _bfd_elf_gc_mark_hook (sec, info, rel, h, sym);
15198 }
15199
15200 /* Look through the relocs for a section during the first phase. */
15201
15202 static bfd_boolean
15203 elf32_arm_check_relocs (bfd *abfd, struct bfd_link_info *info,
15204 asection *sec, const Elf_Internal_Rela *relocs)
15205 {
15206 Elf_Internal_Shdr *symtab_hdr;
15207 struct elf_link_hash_entry **sym_hashes;
15208 const Elf_Internal_Rela *rel;
15209 const Elf_Internal_Rela *rel_end;
15210 bfd *dynobj;
15211 asection *sreloc;
15212 struct elf32_arm_link_hash_table *htab;
15213 bfd_boolean call_reloc_p;
15214 bfd_boolean may_become_dynamic_p;
15215 bfd_boolean may_need_local_target_p;
15216 unsigned long nsyms;
15217
15218 if (bfd_link_relocatable (info))
15219 return TRUE;
15220
15221 BFD_ASSERT (is_arm_elf (abfd));
15222
15223 htab = elf32_arm_hash_table (info);
15224 if (htab == NULL)
15225 return FALSE;
15226
15227 sreloc = NULL;
15228
15229 /* Create dynamic sections for relocatable executables so that we can
15230 copy relocations. */
15231 if (htab->root.is_relocatable_executable
15232 && ! htab->root.dynamic_sections_created)
15233 {
15234 if (! _bfd_elf_link_create_dynamic_sections (abfd, info))
15235 return FALSE;
15236 }
15237
15238 if (htab->root.dynobj == NULL)
15239 htab->root.dynobj = abfd;
15240 if (!create_ifunc_sections (info))
15241 return FALSE;
15242
15243 dynobj = htab->root.dynobj;
15244
15245 symtab_hdr = & elf_symtab_hdr (abfd);
15246 sym_hashes = elf_sym_hashes (abfd);
15247 nsyms = NUM_SHDR_ENTRIES (symtab_hdr);
15248
15249 rel_end = relocs + sec->reloc_count;
15250 for (rel = relocs; rel < rel_end; rel++)
15251 {
15252 Elf_Internal_Sym *isym;
15253 struct elf_link_hash_entry *h;
15254 struct elf32_arm_link_hash_entry *eh;
15255 unsigned int r_symndx;
15256 int r_type;
15257
15258 r_symndx = ELF32_R_SYM (rel->r_info);
15259 r_type = ELF32_R_TYPE (rel->r_info);
15260 r_type = arm_real_reloc_type (htab, r_type);
15261
15262 if (r_symndx >= nsyms
15263 /* PR 9934: It is possible to have relocations that do not
15264 refer to symbols, thus it is also possible to have an
15265 object file containing relocations but no symbol table. */
15266 && (r_symndx > STN_UNDEF || nsyms > 0))
15267 {
15268 _bfd_error_handler (_("%pB: bad symbol index: %d"), abfd,
15269 r_symndx);
15270 return FALSE;
15271 }
15272
15273 h = NULL;
15274 isym = NULL;
15275 if (nsyms > 0)
15276 {
15277 if (r_symndx < symtab_hdr->sh_info)
15278 {
15279 /* A local symbol. */
15280 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
15281 abfd, r_symndx);
15282 if (isym == NULL)
15283 return FALSE;
15284 }
15285 else
15286 {
15287 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
15288 while (h->root.type == bfd_link_hash_indirect
15289 || h->root.type == bfd_link_hash_warning)
15290 h = (struct elf_link_hash_entry *) h->root.u.i.link;
15291 }
15292 }
15293
15294 eh = (struct elf32_arm_link_hash_entry *) h;
15295
15296 call_reloc_p = FALSE;
15297 may_become_dynamic_p = FALSE;
15298 may_need_local_target_p = FALSE;
15299
15300 /* Could be done earlier, if h were already available. */
15301 r_type = elf32_arm_tls_transition (info, r_type, h);
15302 switch (r_type)
15303 {
15304 case R_ARM_GOTOFFFUNCDESC:
15305 {
15306 if (h == NULL)
15307 {
15308 if (!elf32_arm_allocate_local_sym_info (abfd))
15309 return FALSE;
15310 elf32_arm_local_fdpic_cnts(abfd)[r_symndx].gotofffuncdesc_cnt += 1;
15311 elf32_arm_local_fdpic_cnts(abfd)[r_symndx].funcdesc_offset = -1;
15312 }
15313 else
15314 {
15315 eh->fdpic_cnts.gotofffuncdesc_cnt++;
15316 }
15317 }
15318 break;
15319
15320 case R_ARM_GOTFUNCDESC:
15321 {
15322 if (h == NULL)
15323 {
15324 /* Such a relocation is not supposed to be generated
15325 by gcc on a static function. */
15326 /* Anyway if needed it could be handled. */
15327 abort();
15328 }
15329 else
15330 {
15331 eh->fdpic_cnts.gotfuncdesc_cnt++;
15332 }
15333 }
15334 break;
15335
15336 case R_ARM_FUNCDESC:
15337 {
15338 if (h == NULL)
15339 {
15340 if (!elf32_arm_allocate_local_sym_info (abfd))
15341 return FALSE;
15342 elf32_arm_local_fdpic_cnts(abfd)[r_symndx].funcdesc_cnt += 1;
15343 elf32_arm_local_fdpic_cnts(abfd)[r_symndx].funcdesc_offset = -1;
15344 }
15345 else
15346 {
15347 eh->fdpic_cnts.funcdesc_cnt++;
15348 }
15349 }
15350 break;
15351
15352 case R_ARM_GOT32:
15353 case R_ARM_GOT_PREL:
15354 case R_ARM_TLS_GD32:
15355 case R_ARM_TLS_GD32_FDPIC:
15356 case R_ARM_TLS_IE32:
15357 case R_ARM_TLS_IE32_FDPIC:
15358 case R_ARM_TLS_GOTDESC:
15359 case R_ARM_TLS_DESCSEQ:
15360 case R_ARM_THM_TLS_DESCSEQ:
15361 case R_ARM_TLS_CALL:
15362 case R_ARM_THM_TLS_CALL:
15363 /* This symbol requires a global offset table entry. */
15364 {
15365 int tls_type, old_tls_type;
15366
15367 switch (r_type)
15368 {
15369 case R_ARM_TLS_GD32: tls_type = GOT_TLS_GD; break;
15370 case R_ARM_TLS_GD32_FDPIC: tls_type = GOT_TLS_GD; break;
15371
15372 case R_ARM_TLS_IE32: tls_type = GOT_TLS_IE; break;
15373 case R_ARM_TLS_IE32_FDPIC: tls_type = GOT_TLS_IE; break;
15374
15375 case R_ARM_TLS_GOTDESC:
15376 case R_ARM_TLS_CALL: case R_ARM_THM_TLS_CALL:
15377 case R_ARM_TLS_DESCSEQ: case R_ARM_THM_TLS_DESCSEQ:
15378 tls_type = GOT_TLS_GDESC; break;
15379
15380 default: tls_type = GOT_NORMAL; break;
15381 }
15382
15383 if (!bfd_link_executable (info) && (tls_type & GOT_TLS_IE))
15384 info->flags |= DF_STATIC_TLS;
15385
15386 if (h != NULL)
15387 {
15388 h->got.refcount++;
15389 old_tls_type = elf32_arm_hash_entry (h)->tls_type;
15390 }
15391 else
15392 {
15393 /* This is a global offset table entry for a local symbol. */
15394 if (!elf32_arm_allocate_local_sym_info (abfd))
15395 return FALSE;
15396 elf_local_got_refcounts (abfd)[r_symndx] += 1;
15397 old_tls_type = elf32_arm_local_got_tls_type (abfd) [r_symndx];
15398 }
15399
15400 /* If a variable is accessed with both tls methods, two
15401 slots may be created. */
15402 if (GOT_TLS_GD_ANY_P (old_tls_type)
15403 && GOT_TLS_GD_ANY_P (tls_type))
15404 tls_type |= old_tls_type;
15405
15406 /* We will already have issued an error message if there
15407 is a TLS/non-TLS mismatch, based on the symbol
15408 type. So just combine any TLS types needed. */
15409 if (old_tls_type != GOT_UNKNOWN && old_tls_type != GOT_NORMAL
15410 && tls_type != GOT_NORMAL)
15411 tls_type |= old_tls_type;
15412
15413 /* If the symbol is accessed in both IE and GDESC
15414 method, we're able to relax. Turn off the GDESC flag,
15415 without messing up with any other kind of tls types
15416 that may be involved. */
15417 if ((tls_type & GOT_TLS_IE) && (tls_type & GOT_TLS_GDESC))
15418 tls_type &= ~GOT_TLS_GDESC;
15419
15420 if (old_tls_type != tls_type)
15421 {
15422 if (h != NULL)
15423 elf32_arm_hash_entry (h)->tls_type = tls_type;
15424 else
15425 elf32_arm_local_got_tls_type (abfd) [r_symndx] = tls_type;
15426 }
15427 }
15428 /* Fall through. */
15429
15430 case R_ARM_TLS_LDM32:
15431 case R_ARM_TLS_LDM32_FDPIC:
15432 if (r_type == R_ARM_TLS_LDM32 || r_type == R_ARM_TLS_LDM32_FDPIC)
15433 htab->tls_ldm_got.refcount++;
15434 /* Fall through. */
15435
15436 case R_ARM_GOTOFF32:
15437 case R_ARM_GOTPC:
15438 if (htab->root.sgot == NULL
15439 && !create_got_section (htab->root.dynobj, info))
15440 return FALSE;
15441 break;
15442
15443 case R_ARM_PC24:
15444 case R_ARM_PLT32:
15445 case R_ARM_CALL:
15446 case R_ARM_JUMP24:
15447 case R_ARM_PREL31:
15448 case R_ARM_THM_CALL:
15449 case R_ARM_THM_JUMP24:
15450 case R_ARM_THM_JUMP19:
15451 call_reloc_p = TRUE;
15452 may_need_local_target_p = TRUE;
15453 break;
15454
15455 case R_ARM_ABS12:
15456 /* VxWorks uses dynamic R_ARM_ABS12 relocations for
15457 ldr __GOTT_INDEX__ offsets. */
15458 if (!htab->vxworks_p)
15459 {
15460 may_need_local_target_p = TRUE;
15461 break;
15462 }
15463 else goto jump_over;
15464
15465 /* Fall through. */
15466
15467 case R_ARM_MOVW_ABS_NC:
15468 case R_ARM_MOVT_ABS:
15469 case R_ARM_THM_MOVW_ABS_NC:
15470 case R_ARM_THM_MOVT_ABS:
15471 if (bfd_link_pic (info))
15472 {
15473 _bfd_error_handler
15474 (_("%pB: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"),
15475 abfd, elf32_arm_howto_table_1[r_type].name,
15476 (h) ? h->root.root.string : "a local symbol");
15477 bfd_set_error (bfd_error_bad_value);
15478 return FALSE;
15479 }
15480
15481 /* Fall through. */
15482 case R_ARM_ABS32:
15483 case R_ARM_ABS32_NOI:
15484 jump_over:
15485 if (h != NULL && bfd_link_executable (info))
15486 {
15487 h->pointer_equality_needed = 1;
15488 }
15489 /* Fall through. */
15490 case R_ARM_REL32:
15491 case R_ARM_REL32_NOI:
15492 case R_ARM_MOVW_PREL_NC:
15493 case R_ARM_MOVT_PREL:
15494 case R_ARM_THM_MOVW_PREL_NC:
15495 case R_ARM_THM_MOVT_PREL:
15496
15497 /* Should the interworking branches be listed here? */
15498 if ((bfd_link_pic (info) || htab->root.is_relocatable_executable
15499 || htab->fdpic_p)
15500 && (sec->flags & SEC_ALLOC) != 0)
15501 {
15502 if (h == NULL
15503 && elf32_arm_howto_from_type (r_type)->pc_relative)
15504 {
15505 /* In shared libraries and relocatable executables,
15506 we treat local relative references as calls;
15507 see the related SYMBOL_CALLS_LOCAL code in
15508 allocate_dynrelocs. */
15509 call_reloc_p = TRUE;
15510 may_need_local_target_p = TRUE;
15511 }
15512 else
15513 /* We are creating a shared library or relocatable
15514 executable, and this is a reloc against a global symbol,
15515 or a non-PC-relative reloc against a local symbol.
15516 We may need to copy the reloc into the output. */
15517 may_become_dynamic_p = TRUE;
15518 }
15519 else
15520 may_need_local_target_p = TRUE;
15521 break;
15522
15523 /* This relocation describes the C++ object vtable hierarchy.
15524 Reconstruct it for later use during GC. */
15525 case R_ARM_GNU_VTINHERIT:
15526 if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
15527 return FALSE;
15528 break;
15529
15530 /* This relocation describes which C++ vtable entries are actually
15531 used. Record for later use during GC. */
15532 case R_ARM_GNU_VTENTRY:
15533 if (!bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_offset))
15534 return FALSE;
15535 break;
15536 }
15537
15538 if (h != NULL)
15539 {
15540 if (call_reloc_p)
15541 /* We may need a .plt entry if the function this reloc
15542 refers to is in a different object, regardless of the
15543 symbol's type. We can't tell for sure yet, because
15544 something later might force the symbol local. */
15545 h->needs_plt = 1;
15546 else if (may_need_local_target_p)
15547 /* If this reloc is in a read-only section, we might
15548 need a copy reloc. We can't check reliably at this
15549 stage whether the section is read-only, as input
15550 sections have not yet been mapped to output sections.
15551 Tentatively set the flag for now, and correct in
15552 adjust_dynamic_symbol. */
15553 h->non_got_ref = 1;
15554 }
15555
15556 if (may_need_local_target_p
15557 && (h != NULL || ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC))
15558 {
15559 union gotplt_union *root_plt;
15560 struct arm_plt_info *arm_plt;
15561 struct arm_local_iplt_info *local_iplt;
15562
15563 if (h != NULL)
15564 {
15565 root_plt = &h->plt;
15566 arm_plt = &eh->plt;
15567 }
15568 else
15569 {
15570 local_iplt = elf32_arm_create_local_iplt (abfd, r_symndx);
15571 if (local_iplt == NULL)
15572 return FALSE;
15573 root_plt = &local_iplt->root;
15574 arm_plt = &local_iplt->arm;
15575 }
15576
15577 /* If the symbol is a function that doesn't bind locally,
15578 this relocation will need a PLT entry. */
15579 if (root_plt->refcount != -1)
15580 root_plt->refcount += 1;
15581
15582 if (!call_reloc_p)
15583 arm_plt->noncall_refcount++;
15584
15585 /* It's too early to use htab->use_blx here, so we have to
15586 record possible blx references separately from
15587 relocs that definitely need a thumb stub. */
15588
15589 if (r_type == R_ARM_THM_CALL)
15590 arm_plt->maybe_thumb_refcount += 1;
15591
15592 if (r_type == R_ARM_THM_JUMP24
15593 || r_type == R_ARM_THM_JUMP19)
15594 arm_plt->thumb_refcount += 1;
15595 }
15596
15597 if (may_become_dynamic_p)
15598 {
15599 struct elf_dyn_relocs *p, **head;
15600
15601 /* Create a reloc section in dynobj. */
15602 if (sreloc == NULL)
15603 {
15604 sreloc = _bfd_elf_make_dynamic_reloc_section
15605 (sec, dynobj, 2, abfd, ! htab->use_rel);
15606
15607 if (sreloc == NULL)
15608 return FALSE;
15609
15610 /* BPABI objects never have dynamic relocations mapped. */
15611 if (htab->symbian_p)
15612 {
15613 flagword flags;
15614
15615 flags = bfd_get_section_flags (dynobj, sreloc);
15616 flags &= ~(SEC_LOAD | SEC_ALLOC);
15617 bfd_set_section_flags (dynobj, sreloc, flags);
15618 }
15619 }
15620
15621 /* If this is a global symbol, count the number of
15622 relocations we need for this symbol. */
15623 if (h != NULL)
15624 head = &((struct elf32_arm_link_hash_entry *) h)->dyn_relocs;
15625 else
15626 {
15627 head = elf32_arm_get_local_dynreloc_list (abfd, r_symndx, isym);
15628 if (head == NULL)
15629 return FALSE;
15630 }
15631
15632 p = *head;
15633 if (p == NULL || p->sec != sec)
15634 {
15635 bfd_size_type amt = sizeof *p;
15636
15637 p = (struct elf_dyn_relocs *) bfd_alloc (htab->root.dynobj, amt);
15638 if (p == NULL)
15639 return FALSE;
15640 p->next = *head;
15641 *head = p;
15642 p->sec = sec;
15643 p->count = 0;
15644 p->pc_count = 0;
15645 }
15646
15647 if (elf32_arm_howto_from_type (r_type)->pc_relative)
15648 p->pc_count += 1;
15649 p->count += 1;
15650 if (h == NULL && htab->fdpic_p && !bfd_link_pic(info)
15651 && r_type != R_ARM_ABS32 && r_type != R_ARM_ABS32_NOI) {
15652 /* Here we only support R_ARM_ABS32 and R_ARM_ABS32_NOI
15653 that will become rofixup. */
15654 /* This is due to the fact that we suppose all will become rofixup. */
15655 fprintf(stderr, "FDPIC does not yet support %d relocation to become dynamic for executable\n", r_type);
15656 _bfd_error_handler
15657 (_("FDPIC does not yet support %s relocation"
15658 " to become dynamic for executable"),
15659 elf32_arm_howto_table_1[r_type].name);
15660 abort();
15661 }
15662 }
15663 }
15664
15665 return TRUE;
15666 }
15667
/* Rewrite the relocations of the SHT_ARM_EXIDX output section O so
   that they match the unwind-table edits (deleted entries and an
   optional CANTUNWIND terminator) recorded in each input section's
   unwind edit list.  RELDATA is O's reloc bookkeeping.  Sections that
   are not .ARM.exidx are left untouched.  */

static void
elf32_arm_update_relocs (asection *o,
			 struct bfd_elf_section_reloc_data *reldata)
{
  void (*swap_in) (bfd *, const bfd_byte *, Elf_Internal_Rela *);
  void (*swap_out) (bfd *, const Elf_Internal_Rela *, bfd_byte *);
  const struct elf_backend_data *bed;
  _arm_elf_section_data *eado;
  struct bfd_link_order *p;
  bfd_byte *erela_head, *erela;
  Elf_Internal_Rela *irela_head, *irela;
  Elf_Internal_Shdr *rel_hdr;
  bfd *abfd;
  unsigned int count;

  eado = get_arm_elf_section_data (o);

  /* Only .ARM.exidx sections are subject to unwind edits.  */
  if (!eado || eado->elf.this_hdr.sh_type != SHT_ARM_EXIDX)
    return;

  abfd = o->owner;
  bed = get_elf_backend_data (abfd);
  rel_hdr = reldata->hdr;

  /* Select the swap routines matching the reloc entry size (REL or
     RELA); any other entry size is a malformed header.  */
  if (rel_hdr->sh_entsize == bed->s->sizeof_rel)
    {
      swap_in = bed->s->swap_reloc_in;
      swap_out = bed->s->swap_reloc_out;
    }
  else if (rel_hdr->sh_entsize == bed->s->sizeof_rela)
    {
      swap_in = bed->s->swap_reloca_in;
      swap_out = bed->s->swap_reloca_out;
    }
  else
    abort ();

  erela_head = rel_hdr->contents;
  /* One extra slot for a possible INSERT_EXIDX_CANTUNWIND_AT_END
     entry per input section.  NOTE(review): the bfd_zmalloc result is
     not checked for NULL; an allocation failure would be dereferenced
     below — confirm whether this can happen in practice.  */
  irela_head = (Elf_Internal_Rela *) bfd_zmalloc
    ((NUM_SHDR_ENTRIES (rel_hdr) + 1) * sizeof (*irela_head));

  erela = erela_head;
  irela = irela_head;
  count = 0;

  for (p = o->map_head.link_order; p; p = p->next)
    {
      if (p->type == bfd_section_reloc_link_order
	  || p->type == bfd_symbol_reloc_link_order)
	{
	  /* A reloc created by the linker itself: copy it through
	     unmodified.  */
	  (*swap_in) (abfd, erela, irela);
	  erela += rel_hdr->sh_entsize;
	  irela++;
	  count++;
	}
      else if (p->type == bfd_indirect_link_order)
	{
	  struct bfd_elf_section_reloc_data *input_reldata;
	  arm_unwind_table_edit *edit_list, *edit_tail;
	  _arm_elf_section_data *eadi;
	  bfd_size_type j;
	  bfd_vma offset;
	  asection *i;

	  i = p->u.indirect.section;

	  eadi = get_arm_elf_section_data (i);
	  edit_list = eadi->u.exidx.unwind_edit_list;
	  edit_tail = eadi->u.exidx.unwind_edit_tail;
	  offset = o->vma + i->output_offset;

	  /* Find this input section's reloc data with a matching
	     entry size.  */
	  if (eadi->elf.rel.hdr &&
	      eadi->elf.rel.hdr->sh_entsize == rel_hdr->sh_entsize)
	    input_reldata = &eadi->elf.rel;
	  else if (eadi->elf.rela.hdr &&
		   eadi->elf.rela.hdr->sh_entsize == rel_hdr->sh_entsize)
	    input_reldata = &eadi->elf.rela;
	  else
	    abort ();

	  if (edit_list)
	    {
	      for (j = 0; j < NUM_SHDR_ENTRIES (input_reldata->hdr); j++)
		{
		  arm_unwind_table_edit *edit_node, *edit_next;
		  bfd_vma bias;
		  bfd_vma reloc_index;

		  (*swap_in) (abfd, erela, irela);
		  /* EXIDX entries are 8 bytes each.  */
		  reloc_index = (irela->r_offset - offset) / 8;

		  /* Count the edit-list entries at or before this
		     reloc; that is the number of EXIDX entries that
		     were removed before it.  */
		  bias = 0;
		  edit_node = edit_list;
		  for (edit_next = edit_list;
		       edit_next && edit_next->index <= reloc_index;
		       edit_next = edit_node->next)
		    {
		      bias++;
		      edit_node = edit_next;
		    }

		  if (edit_node->type != DELETE_EXIDX_ENTRY
		      || edit_node->index != reloc_index)
		    {
		      /* Keep this reloc, shifted down over the
			 deleted entries.  */
		      irela->r_offset -= bias * 8;
		      irela++;
		      count++;
		    }

		  erela += rel_hdr->sh_entsize;
		}

	      if (edit_tail->type == INSERT_EXIDX_CANTUNWIND_AT_END)
		{
		  /* New relocation entity.  */
		  asection *text_sec = edit_tail->linked_section;
		  asection *text_out = text_sec->output_section;
		  bfd_vma exidx_offset = offset + i->size - 8;

		  irela->r_addend = 0;
		  irela->r_offset = exidx_offset;
		  irela->r_info = ELF32_R_INFO
		    (text_out->target_index, R_ARM_PREL31);
		  irela++;
		  count++;
		}
	    }
	  else
	    {
	      /* No edits for this input section; copy all its relocs
		 through unchanged.  */
	      for (j = 0; j < NUM_SHDR_ENTRIES (input_reldata->hdr); j++)
		{
		  (*swap_in) (abfd, erela, irela);
		  erela += rel_hdr->sh_entsize;
		  irela++;
		}

	      count += NUM_SHDR_ENTRIES (input_reldata->hdr);
	    }
	}
    }

  reldata->count = count;
  rel_hdr->sh_size = count * rel_hdr->sh_entsize;

  /* Write the surviving relocs back out over the original external
     contents.  */
  erela = erela_head;
  irela = irela_head;
  while (count > 0)
    {
      (*swap_out) (abfd, irela, erela);
      erela += rel_hdr->sh_entsize;
      irela++;
      count--;
    }

  free (irela_head);

  /* Hashes are no longer valid.  */
  free (reldata->hashes);
  reldata->hashes = NULL;
}
15828
15829 /* Unwinding tables are not referenced directly. This pass marks them as
15830 required if the corresponding code section is marked. Similarly, ARMv8-M
15831 secure entry functions can only be referenced by SG veneers which are
15832 created after the GC process. They need to be marked in case they reside in
15833 their own section (as would be the case if code was compiled with
15834 -ffunction-sections). */
15835
static bfd_boolean
elf32_arm_gc_mark_extra_sections (struct bfd_link_info *info,
				  elf_gc_mark_hook_fn gc_mark_hook)
{
  bfd *sub;
  Elf_Internal_Shdr **elf_shdrp;
  asection *cmse_sec;
  obj_attribute *out_attr;
  Elf_Internal_Shdr *symtab_hdr;
  unsigned i, sym_count, ext_start;
  const struct elf_backend_data *bed;
  struct elf_link_hash_entry **sym_hashes;
  struct elf32_arm_link_hash_entry *cmse_hash;
  bfd_boolean again, is_v8m, first_bfd_browse = TRUE;

  _bfd_elf_gc_mark_extra_sections (info, gc_mark_hook);

  /* Determine from the build attributes whether the output targets an
     ARMv8-M profile, i.e. whether CMSE secure entry symbols may be
     present.  */
  out_attr = elf_known_obj_attributes_proc (info->output_bfd);
  is_v8m = out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V8M_BASE
	   && out_attr[Tag_CPU_arch_profile].i == 'M';

  /* Marking EH data may cause additional code sections to be marked,
     requiring multiple passes.  */
  again = TRUE;
  while (again)
    {
      again = FALSE;
      for (sub = info->input_bfds; sub != NULL; sub = sub->link.next)
	{
	  asection *o;

	  if (! is_arm_elf (sub))
	    continue;

	  /* Mark any .ARM.exidx section whose associated code section
	     (referenced via sh_link) has itself been marked.  */
	  elf_shdrp = elf_elfsections (sub);
	  for (o = sub->sections; o != NULL; o = o->next)
	    {
	      Elf_Internal_Shdr *hdr;

	      hdr = &elf_section_data (o)->this_hdr;
	      if (hdr->sh_type == SHT_ARM_EXIDX
		  && hdr->sh_link
		  && hdr->sh_link < elf_numsections (sub)
		  && !o->gc_mark
		  && elf_shdrp[hdr->sh_link]->bfd_section->gc_mark)
		{
		  /* Newly marked sections can cascade, so request
		     another pass.  */
		  again = TRUE;
		  if (!_bfd_elf_gc_mark (info, o, gc_mark_hook))
		    return FALSE;
		}
	    }

	  /* Mark section holding ARMv8-M secure entry functions.  We mark all
	     of them so no need for a second browsing.  */
	  if (is_v8m && first_bfd_browse)
	    {
	      sym_hashes = elf_sym_hashes (sub);
	      bed = get_elf_backend_data (sub);
	      symtab_hdr = &elf_tdata (sub)->symtab_hdr;
	      sym_count = symtab_hdr->sh_size / bed->s->sizeof_sym;
	      ext_start = symtab_hdr->sh_info;

	      /* Scan symbols.  */
	      for (i = ext_start; i < sym_count; i++)
		{
		  cmse_hash = elf32_arm_hash_entry (sym_hashes[i - ext_start]);

		  /* Assume it is a special symbol.  If not, cmse_scan will
		     warn about it and user can do something about it.  */
		  if (ARM_GET_SYM_CMSE_SPCL (cmse_hash->root.target_internal))
		    {
		      cmse_sec = cmse_hash->root.root.u.def.section;
		      if (!cmse_sec->gc_mark
			  && !_bfd_elf_gc_mark (info, cmse_sec, gc_mark_hook))
			return FALSE;
		    }
		}
	    }
	}
      first_bfd_browse = FALSE;
    }

  return TRUE;
}
15920
15921 /* Treat mapping symbols as special target symbols. */
15922
15923 static bfd_boolean
15924 elf32_arm_is_target_special_symbol (bfd * abfd ATTRIBUTE_UNUSED, asymbol * sym)
15925 {
15926 return bfd_is_arm_special_symbol_name (sym->name,
15927 BFD_ARM_SPECIAL_SYM_TYPE_ANY);
15928 }
15929
15930 /* This is a copy of elf_find_function() from elf.c except that
15931 ARM mapping symbols are ignored when looking for function names
15932 and STT_ARM_TFUNC is considered to a function type. */
15933
15934 static bfd_boolean
15935 arm_elf_find_function (bfd * abfd ATTRIBUTE_UNUSED,
15936 asymbol ** symbols,
15937 asection * section,
15938 bfd_vma offset,
15939 const char ** filename_ptr,
15940 const char ** functionname_ptr)
15941 {
15942 const char * filename = NULL;
15943 asymbol * func = NULL;
15944 bfd_vma low_func = 0;
15945 asymbol ** p;
15946
15947 for (p = symbols; *p != NULL; p++)
15948 {
15949 elf_symbol_type *q;
15950
15951 q = (elf_symbol_type *) *p;
15952
15953 switch (ELF_ST_TYPE (q->internal_elf_sym.st_info))
15954 {
15955 default:
15956 break;
15957 case STT_FILE:
15958 filename = bfd_asymbol_name (&q->symbol);
15959 break;
15960 case STT_FUNC:
15961 case STT_ARM_TFUNC:
15962 case STT_NOTYPE:
15963 /* Skip mapping symbols. */
15964 if ((q->symbol.flags & BSF_LOCAL)
15965 && bfd_is_arm_special_symbol_name (q->symbol.name,
15966 BFD_ARM_SPECIAL_SYM_TYPE_ANY))
15967 continue;
15968 /* Fall through. */
15969 if (bfd_get_section (&q->symbol) == section
15970 && q->symbol.value >= low_func
15971 && q->symbol.value <= offset)
15972 {
15973 func = (asymbol *) q;
15974 low_func = q->symbol.value;
15975 }
15976 break;
15977 }
15978 }
15979
15980 if (func == NULL)
15981 return FALSE;
15982
15983 if (filename_ptr)
15984 *filename_ptr = filename;
15985 if (functionname_ptr)
15986 *functionname_ptr = bfd_asymbol_name (func);
15987
15988 return TRUE;
15989 }
15990
15991
15992 /* Find the nearest line to a particular section and offset, for error
15993 reporting. This code is a duplicate of the code in elf.c, except
15994 that it uses arm_elf_find_function. */
15995
static bfd_boolean
elf32_arm_find_nearest_line (bfd * abfd,
			     asymbol ** symbols,
			     asection * section,
			     bfd_vma offset,
			     const char ** filename_ptr,
			     const char ** functionname_ptr,
			     unsigned int * line_ptr,
			     unsigned int * discriminator_ptr)
{
  bfd_boolean found = FALSE;

  /* Prefer DWARF2 line information when it is available.  */
  if (_bfd_dwarf2_find_nearest_line (abfd, symbols, NULL, section, offset,
				     filename_ptr, functionname_ptr,
				     line_ptr, discriminator_ptr,
				     dwarf_debug_sections, 0,
				     & elf_tdata (abfd)->dwarf2_find_line_info))
    {
      /* DWARF2 may locate the line but not the function name; in that
	 case fall back to the symbol table for the name, filling in
	 the file name only if DWARF2 did not already provide one.  */
      if (!*functionname_ptr)
	arm_elf_find_function (abfd, symbols, section, offset,
			       *filename_ptr ? NULL : filename_ptr,
			       functionname_ptr);

      return TRUE;
    }

  /* Skip _bfd_dwarf1_find_nearest_line since no known ARM toolchain
     uses DWARF1.  */

  /* Next try stabs debug information.  */
  if (! _bfd_stab_section_find_nearest_line (abfd, symbols, section, offset,
					     & found, filename_ptr,
					     functionname_ptr, line_ptr,
					     & elf_tdata (abfd)->line_info))
    return FALSE;

  if (found && (*functionname_ptr || *line_ptr))
    return TRUE;

  /* As a last resort scan the symbol table; no line number is
     available from there.  */
  if (symbols == NULL)
    return FALSE;

  if (! arm_elf_find_function (abfd, symbols, section, offset,
			       filename_ptr, functionname_ptr))
    return FALSE;

  *line_ptr = 0;
  return TRUE;
}
16044
16045 static bfd_boolean
16046 elf32_arm_find_inliner_info (bfd * abfd,
16047 const char ** filename_ptr,
16048 const char ** functionname_ptr,
16049 unsigned int * line_ptr)
16050 {
16051 bfd_boolean found;
16052 found = _bfd_dwarf2_find_inliner_info (abfd, filename_ptr,
16053 functionname_ptr, line_ptr,
16054 & elf_tdata (abfd)->dwarf2_find_line_info);
16055 return found;
16056 }
16057
16058 /* Find dynamic relocs for H that apply to read-only sections. */
16059
16060 static asection *
16061 readonly_dynrelocs (struct elf_link_hash_entry *h)
16062 {
16063 struct elf_dyn_relocs *p;
16064
16065 for (p = elf32_arm_hash_entry (h)->dyn_relocs; p != NULL; p = p->next)
16066 {
16067 asection *s = p->sec->output_section;
16068
16069 if (s != NULL && (s->flags & SEC_READONLY) != 0)
16070 return p->sec;
16071 }
16072 return NULL;
16073 }
16074
16075 /* Adjust a symbol defined by a dynamic object and referenced by a
16076 regular object. The current definition is in some section of the
16077 dynamic object, but we're not including those sections. We have to
16078 change the definition to something the rest of the link can
16079 understand. */
16080
static bfd_boolean
elf32_arm_adjust_dynamic_symbol (struct bfd_link_info * info,
				 struct elf_link_hash_entry * h)
{
  bfd * dynobj;
  asection *s, *srel;
  struct elf32_arm_link_hash_entry * eh;
  struct elf32_arm_link_hash_table *globals;

  globals = elf32_arm_hash_table (info);
  if (globals == NULL)
    return FALSE;

  dynobj = elf_hash_table (info)->dynobj;

  /* Make sure we know what is going on here.  */
  BFD_ASSERT (dynobj != NULL
	      && (h->needs_plt
		  || h->type == STT_GNU_IFUNC
		  || h->is_weakalias
		  || (h->def_dynamic
		      && h->ref_regular
		      && !h->def_regular)));

  eh = (struct elf32_arm_link_hash_entry *) h;

  /* If this is a function, put it in the procedure linkage table.  We
     will fill in the contents of the procedure linkage table later,
     when we know the address of the .got section.  */
  if (h->type == STT_FUNC || h->type == STT_GNU_IFUNC || h->needs_plt)
    {
      /* Calls to STT_GNU_IFUNC symbols always use a PLT, even if the
	 symbol binds locally.  */
      if (h->plt.refcount <= 0
	  || (h->type != STT_GNU_IFUNC
	      && (SYMBOL_CALLS_LOCAL (info, h)
		  || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
		      && h->root.type == bfd_link_hash_undefweak))))
	{
	  /* This case can occur if we saw a PLT32 reloc in an input
	     file, but the symbol was never referred to by a dynamic
	     object, or if all references were garbage collected.  In
	     such a case, we don't actually need to build a procedure
	     linkage table, and we can just do a PC24 reloc instead.  */
	  h->plt.offset = (bfd_vma) -1;
	  eh->plt.thumb_refcount = 0;
	  eh->plt.maybe_thumb_refcount = 0;
	  eh->plt.noncall_refcount = 0;
	  h->needs_plt = 0;
	}

      return TRUE;
    }
  else
    {
      /* It's possible that we incorrectly decided a .plt reloc was
	 needed for an R_ARM_PC24 or similar reloc to a non-function sym
	 in check_relocs.  We can't decide accurately between function
	 and non-function syms in check-relocs; Objects loaded later in
	 the link may change h->type.  So fix it now.  */
      h->plt.offset = (bfd_vma) -1;
      eh->plt.thumb_refcount = 0;
      eh->plt.maybe_thumb_refcount = 0;
      eh->plt.noncall_refcount = 0;
    }

  /* If this is a weak symbol, and there is a real definition, the
     processor independent code will have arranged for us to see the
     real definition first, and we can just use the same value.  */
  if (h->is_weakalias)
    {
      struct elf_link_hash_entry *def = weakdef (h);
      BFD_ASSERT (def->root.type == bfd_link_hash_defined);
      h->root.u.def.section = def->root.u.def.section;
      h->root.u.def.value = def->root.u.def.value;
      return TRUE;
    }

  /* If there are no non-GOT references, we do not need a copy
     relocation.  */
  if (!h->non_got_ref)
    return TRUE;

  /* This is a reference to a symbol defined by a dynamic object which
     is not a function.  */

  /* If we are creating a shared library, we must presume that the
     only references to the symbol are via the global offset table.
     For such cases we need not do anything here; the relocations will
     be handled correctly by relocate_section.  Relocatable executables
     can reference data in shared objects directly, so we don't need to
     do anything here.  */
  if (bfd_link_pic (info) || globals->root.is_relocatable_executable)
    return TRUE;

  /* We must allocate the symbol in our .dynbss section, which will
     become part of the .bss section of the executable.  There will be
     an entry for this symbol in the .dynsym section.  The dynamic
     object will contain position independent code, so all references
     from the dynamic object to this symbol will go through the global
     offset table.  The dynamic linker will use the .dynsym entry to
     determine the address it must put in the global offset table, so
     both the dynamic object and the regular object will refer to the
     same memory location for the variable.  */
  /* If allowed, we must generate a R_ARM_COPY reloc to tell the dynamic
     linker to copy the initial value out of the dynamic object and into
     the runtime process image.  We need to remember the offset into the
     .rel(a).bss section we are going to use.  */
  if ((h->root.u.def.section->flags & SEC_READONLY) != 0)
    {
      /* Symbols defined in read-only sections are copied into the
	 relro data section so they can be write-protected again after
	 relocation.  */
      s = globals->root.sdynrelro;
      srel = globals->root.sreldynrelro;
    }
  else
    {
      s = globals->root.sdynbss;
      srel = globals->root.srelbss;
    }
  if (info->nocopyreloc == 0
      && (h->root.u.def.section->flags & SEC_ALLOC) != 0
      && h->size != 0)
    {
      /* Reserve one slot in the appropriate .rel(a) section for the
	 R_ARM_COPY relocation.  */
      elf32_arm_allocate_dynrelocs (info, srel, 1);
      h->needs_copy = 1;
    }

  return _bfd_elf_adjust_dynamic_copy (info, h, s);
}
16209
16210 /* Allocate space in .plt, .got and associated reloc sections for
16211 dynamic relocs. */
16212
static bfd_boolean
allocate_dynrelocs_for_symbol (struct elf_link_hash_entry *h, void * inf)
{
  struct bfd_link_info *info;
  struct elf32_arm_link_hash_table *htab;
  struct elf32_arm_link_hash_entry *eh;
  struct elf_dyn_relocs *p;

  /* Indirect symbols are handled via the symbol they point at;
     nothing to allocate here.  */
  if (h->root.type == bfd_link_hash_indirect)
    return TRUE;

  eh = (struct elf32_arm_link_hash_entry *) h;

  info = (struct bfd_link_info *) inf;
  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return FALSE;

  /* Decide whether H needs a PLT entry.  If not, any provisional PLT
     bookkeeping is cancelled in the else branches below.  */
  if ((htab->root.dynamic_sections_created || h->type == STT_GNU_IFUNC)
      && h->plt.refcount > 0)
    {
      /* Make sure this symbol is output as a dynamic symbol.
	 Undefined weak syms won't yet be marked as dynamic.  */
      if (h->dynindx == -1 && !h->forced_local
	  && h->root.type == bfd_link_hash_undefweak)
	{
	  if (! bfd_elf_link_record_dynamic_symbol (info, h))
	    return FALSE;
	}

      /* If the call in the PLT entry binds locally, the associated
	 GOT entry should use an R_ARM_IRELATIVE relocation instead of
	 the usual R_ARM_JUMP_SLOT.  Put it in the .iplt section rather
	 than the .plt section.  */
      if (h->type == STT_GNU_IFUNC && SYMBOL_CALLS_LOCAL (info, h))
	{
	  eh->is_iplt = 1;
	  if (eh->plt.noncall_refcount == 0
	      && SYMBOL_REFERENCES_LOCAL (info, h))
	    /* All non-call references can be resolved directly.
	       This means that they can (and in some cases, must)
	       resolve directly to the run-time target, rather than
	       to the PLT.  That in turn means that any .got entry
	       would be equal to the .igot.plt entry, so there's
	       no point having both.  */
	    h->got.refcount = 0;
	}

      if (bfd_link_pic (info)
	  || eh->is_iplt
	  || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
	{
	  elf32_arm_allocate_plt_entry (info, eh->is_iplt, &h->plt, &eh->plt);

	  /* If this symbol is not defined in a regular file, and we are
	     not generating a shared library, then set the symbol to this
	     location in the .plt.  This is required to make function
	     pointers compare as equal between the normal executable and
	     the shared library.  */
	  if (! bfd_link_pic (info)
	      && !h->def_regular)
	    {
	      h->root.u.def.section = htab->root.splt;
	      h->root.u.def.value = h->plt.offset;

	      /* Make sure the function is not marked as Thumb, in case
		 it is the target of an ABS32 relocation, which will
		 point to the PLT entry.  */
	      ARM_SET_SYM_BRANCH_TYPE (h->target_internal, ST_BRANCH_TO_ARM);
	    }

	  /* VxWorks executables have a second set of relocations for
	     each PLT entry.  They go in a separate relocation section,
	     which is processed by the kernel loader.  */
	  if (htab->vxworks_p && !bfd_link_pic (info))
	    {
	      /* There is a relocation for the initial PLT entry:
		 an R_ARM_32 relocation for _GLOBAL_OFFSET_TABLE_.  */
	      if (h->plt.offset == htab->plt_header_size)
		elf32_arm_allocate_dynrelocs (info, htab->srelplt2, 1);

	      /* There are two extra relocations for each subsequent
		 PLT entry: an R_ARM_32 relocation for the GOT entry,
		 and an R_ARM_32 relocation for the PLT entry.  */
	      elf32_arm_allocate_dynrelocs (info, htab->srelplt2, 2);
	    }
	}
      else
	{
	  /* No PLT entry will be emitted after all.  */
	  h->plt.offset = (bfd_vma) -1;
	  h->needs_plt = 0;
	}
    }
  else
    {
      /* Symbol is never called through the PLT.  */
      h->plt.offset = (bfd_vma) -1;
      h->needs_plt = 0;
    }

  eh = (struct elf32_arm_link_hash_entry *) h;
  /* -1 marks "no TLS descriptor GOT slot allocated yet".  */
  eh->tlsdesc_got = (bfd_vma) -1;

  /* Allocate GOT slots (and their dynamic relocations) for this
     symbol, including the TLS variants.  */
  if (h->got.refcount > 0)
    {
      asection *s;
      bfd_boolean dyn;
      int tls_type = elf32_arm_hash_entry (h)->tls_type;
      int indx;

      /* Make sure this symbol is output as a dynamic symbol.
	 Undefined weak syms won't yet be marked as dynamic.  */
      if (htab->root.dynamic_sections_created && h->dynindx == -1 && !h->forced_local
	  && h->root.type == bfd_link_hash_undefweak)
	{
	  if (! bfd_elf_link_record_dynamic_symbol (info, h))
	    return FALSE;
	}

      /* Symbian targets get no GOT allocation here.  */
      if (!htab->symbian_p)
	{
	  s = htab->root.sgot;
	  h->got.offset = s->size;

	  if (tls_type == GOT_UNKNOWN)
	    abort ();

	  if (tls_type == GOT_NORMAL)
	    /* Non-TLS symbols need one GOT slot.  */
	    s->size += 4;
	  else
	    {
	      if (tls_type & GOT_TLS_GDESC)
		{
		  /* R_ARM_TLS_DESC needs 2 GOT slots.  */
		  eh->tlsdesc_got
		    = (htab->root.sgotplt->size
		       - elf32_arm_compute_jump_table_size (htab));
		  htab->root.sgotplt->size += 8;
		  h->got.offset = (bfd_vma) -2;
		  /* plt.got_offset needs to know there's a TLS_DESC
		     reloc in the middle of .got.plt.  */
		  htab->num_tls_desc++;
		}

	      if (tls_type & GOT_TLS_GD)
		{
		  /* R_ARM_TLS_GD32 and R_ARM_TLS_GD32_FDPIC need two
		     consecutive GOT slots.  If the symbol is both GD
		     and GDESC, got.offset may have been
		     overwritten.  */
		  h->got.offset = s->size;
		  s->size += 8;
		}

	      if (tls_type & GOT_TLS_IE)
		/* R_ARM_TLS_IE32/R_ARM_TLS_IE32_FDPIC need one GOT
		   slot.  */
		s->size += 4;
	    }

	  dyn = htab->root.dynamic_sections_created;

	  /* INDX is the dynamic symbol index to relocate against, or 0
	     when the reference resolves locally.  */
	  indx = 0;
	  if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn,
					       bfd_link_pic (info),
					       h)
	      && (!bfd_link_pic (info)
		  || !SYMBOL_REFERENCES_LOCAL (info, h)))
	    indx = h->dynindx;

	  if (tls_type != GOT_NORMAL
	      && (bfd_link_pic (info) || indx != 0)
	      && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
		  || h->root.type != bfd_link_hash_undefweak))
	    {
	      if (tls_type & GOT_TLS_IE)
		elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);

	      if (tls_type & GOT_TLS_GD)
		elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);

	      if (tls_type & GOT_TLS_GDESC)
		{
		  elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1);
		  /* GDESC needs a trampoline to jump to.  */
		  htab->tls_trampoline = -1;
		}

	      /* Only GD needs it.  GDESC just emits one relocation per
		 2 entries.  */
	      if ((tls_type & GOT_TLS_GD) && indx != 0)
		elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
	    }
	  else if (((indx != -1) || htab->fdpic_p)
		   && !SYMBOL_REFERENCES_LOCAL (info, h))
	    {
	      if (htab->root.dynamic_sections_created)
		/* Reserve room for the GOT entry's R_ARM_GLOB_DAT relocation.  */
		elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
	    }
	  else if (h->type == STT_GNU_IFUNC
		   && eh->plt.noncall_refcount == 0)
	    /* No non-call references resolve the STT_GNU_IFUNC's PLT entry;
	       they all resolve dynamically instead.  Reserve room for the
	       GOT entry's R_ARM_IRELATIVE relocation.  */
	    elf32_arm_allocate_irelocs (info, htab->root.srelgot, 1);
	  else if (bfd_link_pic (info)
		   && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
		       || h->root.type != bfd_link_hash_undefweak))
	    /* Reserve room for the GOT entry's R_ARM_RELATIVE relocation.  */
	    elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
	  else if (htab->fdpic_p && tls_type == GOT_NORMAL)
	    /* Reserve room for rofixup for FDPIC executable.  */
	    /* TLS relocs do not need space since they are completely
	       resolved.  */
	    htab->srofixup->size += 4;
	}
    }
  else
    h->got.offset = (bfd_vma) -1;

  /* FDPIC support.  */
  if (eh->fdpic_cnts.gotofffuncdesc_cnt > 0)
    {
      /* Symbol mustn't be exported.  */
      if (h->dynindx != -1)
	abort();

      /* We only allocate one function descriptor with its associated relocation.  */
      if (eh->fdpic_cnts.funcdesc_offset == -1)
	{
	  asection *s = htab->root.sgot;

	  eh->fdpic_cnts.funcdesc_offset = s->size;
	  s->size += 8;
	  /* We will add an R_ARM_FUNCDESC_VALUE relocation or two rofixups.  */
	  if (bfd_link_pic(info))
	    elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
	  else
	    htab->srofixup->size += 8;
	}
    }

  /* Function descriptor referenced through the GOT (R_ARM_GOTFUNCDESC).  */
  if (eh->fdpic_cnts.gotfuncdesc_cnt > 0)
    {
      asection *s = htab->root.sgot;

      if (htab->root.dynamic_sections_created && h->dynindx == -1
	  && !h->forced_local)
	if (! bfd_elf_link_record_dynamic_symbol (info, h))
	  return FALSE;

      if (h->dynindx == -1)
	{
	  /* We only allocate one function descriptor with its associated relocation.  */
	  if (eh->fdpic_cnts.funcdesc_offset == -1)
	    {

	      eh->fdpic_cnts.funcdesc_offset = s->size;
	      s->size += 8;
	      /* We will add an R_ARM_FUNCDESC_VALUE relocation or two rofixups.  */
	      if (bfd_link_pic(info))
		elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
	      else
		htab->srofixup->size += 8;
	    }
	}

      /* Add one entry into the GOT and a R_ARM_FUNCDESC or
	 R_ARM_RELATIVE/rofixup relocation on it.  */
      eh->fdpic_cnts.gotfuncdesc_offset = s->size;
      s->size += 4;
      if (h->dynindx == -1 && !bfd_link_pic(info))
	htab->srofixup->size += 4;
      else
	elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
    }

  /* Direct function-descriptor references (R_ARM_FUNCDESC).  */
  if (eh->fdpic_cnts.funcdesc_cnt > 0)
    {
      if (htab->root.dynamic_sections_created && h->dynindx == -1
	  && !h->forced_local)
	if (! bfd_elf_link_record_dynamic_symbol (info, h))
	  return FALSE;

      if (h->dynindx == -1)
	{
	  /* We only allocate one function descriptor with its associated relocation.  */
	  if (eh->fdpic_cnts.funcdesc_offset == -1)
	    {
	      asection *s = htab->root.sgot;

	      eh->fdpic_cnts.funcdesc_offset = s->size;
	      s->size += 8;
	      /* We will add an R_ARM_FUNCDESC_VALUE relocation or two rofixups.  */
	      if (bfd_link_pic(info))
		elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
	      else
		htab->srofixup->size += 8;
	    }
	}
      if (h->dynindx == -1 && !bfd_link_pic(info))
	{
	  /* For FDPIC executable we replace R_ARM_RELATIVE with a rofixup.  */
	  htab->srofixup->size += 4 * eh->fdpic_cnts.funcdesc_cnt;
	}
      else
	{
	  /* Will need one dynamic reloc per reference.  Will be either
	     R_ARM_FUNCDESC or R_ARM_RELATIVE for hidden symbols.  */
	  elf32_arm_allocate_dynrelocs (info, htab->root.srelgot,
					eh->fdpic_cnts.funcdesc_cnt);
	}
    }

  /* Allocate stubs for exported Thumb functions on v4t.  */
  if (!htab->use_blx && h->dynindx != -1
      && h->def_regular
      && ARM_GET_SYM_BRANCH_TYPE (h->target_internal) == ST_BRANCH_TO_THUMB
      && ELF_ST_VISIBILITY (h->other) == STV_DEFAULT)
    {
      struct elf_link_hash_entry * th;
      struct bfd_link_hash_entry * bh;
      struct elf_link_hash_entry * myh;
      char name[1024];
      asection *s;
      bh = NULL;
      /* Create a new symbol to register the real location of the function.  */
      s = h->root.u.def.section;
      sprintf (name, "__real_%s", h->root.root.string);
      _bfd_generic_link_add_one_symbol (info, s->owner,
					name, BSF_GLOBAL, s,
					h->root.u.def.value,
					NULL, TRUE, FALSE, &bh);

      myh = (struct elf_link_hash_entry *) bh;
      myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
      myh->forced_local = 1;
      ARM_SET_SYM_BRANCH_TYPE (myh->target_internal, ST_BRANCH_TO_THUMB);
      eh->export_glue = myh;
      th = record_arm_to_thumb_glue (info, h);
      /* Point the symbol at the stub.  */
      h->type = ELF_ST_INFO (ELF_ST_BIND (h->type), STT_FUNC);
      ARM_SET_SYM_BRANCH_TYPE (h->target_internal, ST_BRANCH_TO_ARM);
      h->root.u.def.section = th->root.u.def.section;
      /* Clear the Thumb bit in the stub's address.  */
      h->root.u.def.value = th->root.u.def.value & ~1;
    }

  if (eh->dyn_relocs == NULL)
    return TRUE;

  /* In the shared -Bsymbolic case, discard space allocated for
     dynamic pc-relative relocs against symbols which turn out to be
     defined in regular objects.  For the normal shared case, discard
     space for pc-relative relocs that have become local due to symbol
     visibility changes.  */

  if (bfd_link_pic (info) || htab->root.is_relocatable_executable || htab->fdpic_p)
    {
      /* Relocs that use pc_count are PC-relative forms, which will appear
	 on something like ".long foo - ." or "movw REG, foo - .".  We want
	 calls to protected symbols to resolve directly to the function
	 rather than going via the plt.  If people want function pointer
	 comparisons to work as expected then they should avoid writing
	 assembly like ".long foo - .".  */
      if (SYMBOL_CALLS_LOCAL (info, h))
	{
	  struct elf_dyn_relocs **pp;

	  /* Unlink list entries whose count drops to zero.  */
	  for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
	    {
	      p->count -= p->pc_count;
	      p->pc_count = 0;
	      if (p->count == 0)
		*pp = p->next;
	      else
		pp = &p->next;
	    }
	}

      if (htab->vxworks_p)
	{
	  struct elf_dyn_relocs **pp;

	  /* Drop relocs destined for .tls_vars; the VxWorks loader
	     handles those specially.  */
	  for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
	    {
	      if (strcmp (p->sec->output_section->name, ".tls_vars") == 0)
		*pp = p->next;
	      else
		pp = &p->next;
	    }
	}

      /* Also discard relocs on undefined weak syms with non-default
	 visibility.  */
      if (eh->dyn_relocs != NULL
	  && h->root.type == bfd_link_hash_undefweak)
	{
	  if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
	      || UNDEFWEAK_NO_DYNAMIC_RELOC (info, h))
	    eh->dyn_relocs = NULL;

	  /* Make sure undefined weak symbols are output as a dynamic
	     symbol in PIEs.  */
	  else if (htab->root.dynamic_sections_created && h->dynindx == -1
		   && !h->forced_local)
	    {
	      if (! bfd_elf_link_record_dynamic_symbol (info, h))
		return FALSE;
	    }
	}

      else if (htab->root.is_relocatable_executable && h->dynindx == -1
	       && h->root.type == bfd_link_hash_new)
	{
	  /* Output absolute symbols so that we can create relocations
	     against them.  For normal symbols we output a relocation
	     against the section that contains them.  */
	  if (! bfd_elf_link_record_dynamic_symbol (info, h))
	    return FALSE;
	}

    }
  else
    {
      /* For the non-shared case, discard space for relocs against
	 symbols which turn out to need copy relocs or are not
	 dynamic.  */

      if (!h->non_got_ref
	  && ((h->def_dynamic
	       && !h->def_regular)
	      || (htab->root.dynamic_sections_created
		  && (h->root.type == bfd_link_hash_undefweak
		      || h->root.type == bfd_link_hash_undefined))))
	{
	  /* Make sure this symbol is output as a dynamic symbol.
	     Undefined weak syms won't yet be marked as dynamic.  */
	  if (h->dynindx == -1 && !h->forced_local
	      && h->root.type == bfd_link_hash_undefweak)
	    {
	      if (! bfd_elf_link_record_dynamic_symbol (info, h))
		return FALSE;
	    }

	  /* If that succeeded, we know we'll be keeping all the
	     relocs.  */
	  if (h->dynindx != -1)
	    goto keep;
	}

      eh->dyn_relocs = NULL;

    keep: ;
    }

  /* Finally, allocate space.  */
  for (p = eh->dyn_relocs; p != NULL; p = p->next)
    {
      asection *sreloc = elf_section_data (p->sec)->sreloc;

      if (h->type == STT_GNU_IFUNC
	  && eh->plt.noncall_refcount == 0
	  && SYMBOL_REFERENCES_LOCAL (info, h))
	elf32_arm_allocate_irelocs (info, sreloc, p->count);
      else if (h->dynindx != -1 && (!bfd_link_pic(info) || !info->symbolic || !h->def_regular))
	elf32_arm_allocate_dynrelocs (info, sreloc, p->count);
      else if (htab->fdpic_p && !bfd_link_pic(info))
	/* FDPIC executables use rofixups instead of dynamic relocs.  */
	htab->srofixup->size += 4 * p->count;
      else
	elf32_arm_allocate_dynrelocs (info, sreloc, p->count);
    }

  return TRUE;
}
16688
16689 /* Set DF_TEXTREL if we find any dynamic relocs that apply to
16690 read-only sections. */
16691
16692 static bfd_boolean
16693 maybe_set_textrel (struct elf_link_hash_entry *h, void *info_p)
16694 {
16695 asection *sec;
16696
16697 if (h->root.type == bfd_link_hash_indirect)
16698 return TRUE;
16699
16700 sec = readonly_dynrelocs (h);
16701 if (sec != NULL)
16702 {
16703 struct bfd_link_info *info = (struct bfd_link_info *) info_p;
16704
16705 info->flags |= DF_TEXTREL;
16706 info->callbacks->minfo
16707 (_("%pB: dynamic relocation against `%pT' in read-only section `%pA'\n"),
16708 sec->owner, h->root.root.string, sec);
16709
16710 /* Not an error, just cut short the traversal. */
16711 return FALSE;
16712 }
16713
16714 return TRUE;
16715 }
16716
16717 void
16718 bfd_elf32_arm_set_byteswap_code (struct bfd_link_info *info,
16719 int byteswap_code)
16720 {
16721 struct elf32_arm_link_hash_table *globals;
16722
16723 globals = elf32_arm_hash_table (info);
16724 if (globals == NULL)
16725 return;
16726
16727 globals->byteswap_code = byteswap_code;
16728 }
16729
16730 /* Set the sizes of the dynamic sections. */
16731
static bfd_boolean
elf32_arm_size_dynamic_sections (bfd * output_bfd ATTRIBUTE_UNUSED,
				 struct bfd_link_info * info)
{
  bfd * dynobj;
  asection * s;
  bfd_boolean plt;
  bfd_boolean relocs;
  bfd *ibfd;
  struct elf32_arm_link_hash_table *htab;

  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return FALSE;

  dynobj = elf_hash_table (info)->dynobj;
  BFD_ASSERT (dynobj != NULL);
  check_use_blx (htab);

  if (elf_hash_table (info)->dynamic_sections_created)
    {
      /* Set the contents of the .interp section to the interpreter.  */
      if (bfd_link_executable (info) && !info->nointerp)
	{
	  s = bfd_get_linker_section (dynobj, ".interp");
	  BFD_ASSERT (s != NULL);
	  s->size = sizeof ELF_DYNAMIC_INTERPRETER;
	  s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER;
	}
    }

  /* Set up .got offsets for local syms, and space for local dynamic
     relocs.  */
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
    {
      bfd_signed_vma *local_got;
      bfd_signed_vma *end_local_got;
      struct arm_local_iplt_info **local_iplt_ptr, *local_iplt;
      char *local_tls_type;
      bfd_vma *local_tlsdesc_gotent;
      bfd_size_type locsymcount;
      Elf_Internal_Shdr *symtab_hdr;
      asection *srel;
      bfd_boolean is_vxworks = htab->vxworks_p;
      unsigned int symndx;
      struct fdpic_local *local_fdpic_cnts;

      if (! is_arm_elf (ibfd))
	continue;

      /* First, allocate space for the dynamic relocs recorded against
	 local sections of this input bfd.  */
      for (s = ibfd->sections; s != NULL; s = s->next)
	{
	  struct elf_dyn_relocs *p;

	  for (p = (struct elf_dyn_relocs *)
		   elf_section_data (s)->local_dynrel; p != NULL; p = p->next)
	    {
	      if (!bfd_is_abs_section (p->sec)
		  && bfd_is_abs_section (p->sec->output_section))
		{
		  /* Input section has been discarded, either because
		     it is a copy of a linkonce section or due to
		     linker script /DISCARD/, so we'll be discarding
		     the relocs too.  */
		}
	      else if (is_vxworks
		       && strcmp (p->sec->output_section->name,
				  ".tls_vars") == 0)
		{
		  /* Relocations in vxworks .tls_vars sections are
		     handled specially by the loader.  */
		}
	      else if (p->count != 0)
		{
		  srel = elf_section_data (p->sec)->sreloc;
		  if (htab->fdpic_p && !bfd_link_pic(info))
		    htab->srofixup->size += 4 * p->count;
		  else
		    elf32_arm_allocate_dynrelocs (info, srel, p->count);
		  if ((p->sec->output_section->flags & SEC_READONLY) != 0)
		    info->flags |= DF_TEXTREL;
		}
	    }
	}

      local_got = elf_local_got_refcounts (ibfd);
      if (!local_got)
	continue;

      symtab_hdr = & elf_symtab_hdr (ibfd);
      locsymcount = symtab_hdr->sh_info;
      end_local_got = local_got + locsymcount;
      local_iplt_ptr = elf32_arm_local_iplt (ibfd);
      local_tls_type = elf32_arm_local_got_tls_type (ibfd);
      local_tlsdesc_gotent = elf32_arm_local_tlsdesc_gotent (ibfd);
      local_fdpic_cnts = elf32_arm_local_fdpic_cnts (ibfd);
      symndx = 0;
      s = htab->root.sgot;
      srel = htab->root.srelgot;
      /* Walk the per-local-symbol arrays in lockstep.  */
      for (; local_got < end_local_got;
	   ++local_got, ++local_iplt_ptr, ++local_tls_type,
	   ++local_tlsdesc_gotent, ++symndx, ++local_fdpic_cnts)
	{
	  /* -1 marks "no TLS descriptor GOT slot" for this symbol.  */
	  *local_tlsdesc_gotent = (bfd_vma) -1;
	  local_iplt = *local_iplt_ptr;

	  /* FDPIC support.  */
	  if (local_fdpic_cnts->gotofffuncdesc_cnt > 0)
	    {
	      if (local_fdpic_cnts->funcdesc_offset == -1)
		{
		  local_fdpic_cnts->funcdesc_offset = s->size;
		  s->size += 8;

		  /* We will add an R_ARM_FUNCDESC_VALUE relocation or two rofixups.  */
		  if (bfd_link_pic(info))
		    elf32_arm_allocate_dynrelocs (info, srel, 1);
		  else
		    htab->srofixup->size += 8;
		}
	    }

	  if (local_fdpic_cnts->funcdesc_cnt > 0)
	    {
	      if (local_fdpic_cnts->funcdesc_offset == -1)
		{
		  local_fdpic_cnts->funcdesc_offset = s->size;
		  s->size += 8;

		  /* We will add an R_ARM_FUNCDESC_VALUE relocation or two rofixups.  */
		  if (bfd_link_pic(info))
		    elf32_arm_allocate_dynrelocs (info, srel, 1);
		  else
		    htab->srofixup->size += 8;
		}

	      /* We will add n R_ARM_RELATIVE relocations or n rofixups.  */
	      if (bfd_link_pic(info))
		elf32_arm_allocate_dynrelocs (info, srel, local_fdpic_cnts->funcdesc_cnt);
	      else
		htab->srofixup->size += 4 * local_fdpic_cnts->funcdesc_cnt;
	    }

	  if (local_iplt != NULL)
	    {
	      struct elf_dyn_relocs *p;

	      if (local_iplt->root.refcount > 0)
		{
		  elf32_arm_allocate_plt_entry (info, TRUE,
						&local_iplt->root,
						&local_iplt->arm);
		  if (local_iplt->arm.noncall_refcount == 0)
		    /* All references to the PLT are calls, so all
		       non-call references can resolve directly to the
		       run-time target.  This means that the .got entry
		       would be the same as the .igot.plt entry, so there's
		       no point creating both.  */
		    *local_got = 0;
		}
	      else
		{
		  BFD_ASSERT (local_iplt->arm.noncall_refcount == 0);
		  local_iplt->root.offset = (bfd_vma) -1;
		}

	      for (p = local_iplt->dyn_relocs; p != NULL; p = p->next)
		{
		  asection *psrel;

		  psrel = elf_section_data (p->sec)->sreloc;
		  if (local_iplt->arm.noncall_refcount == 0)
		    elf32_arm_allocate_irelocs (info, psrel, p->count);
		  else
		    elf32_arm_allocate_dynrelocs (info, psrel, p->count);
		}
	    }
	  /* Allocate GOT slot(s) for this local symbol if referenced.  */
	  if (*local_got > 0)
	    {
	      Elf_Internal_Sym *isym;

	      *local_got = s->size;
	      if (*local_tls_type & GOT_TLS_GD)
		/* TLS_GD relocs need an 8-byte structure in the GOT.  */
		s->size += 8;
	      if (*local_tls_type & GOT_TLS_GDESC)
		{
		  *local_tlsdesc_gotent = htab->root.sgotplt->size
		    - elf32_arm_compute_jump_table_size (htab);
		  htab->root.sgotplt->size += 8;
		  *local_got = (bfd_vma) -2;
		  /* plt.got_offset needs to know there's a TLS_DESC
		     reloc in the middle of .got.plt.  */
		  htab->num_tls_desc++;
		}
	      if (*local_tls_type & GOT_TLS_IE)
		s->size += 4;

	      if (*local_tls_type & GOT_NORMAL)
		{
		  /* If the symbol is both GD and GDESC, *local_got
		     may have been overwritten.  */
		  *local_got = s->size;
		  s->size += 4;
		}

	      isym = bfd_sym_from_r_symndx (&htab->sym_cache, ibfd, symndx);
	      if (isym == NULL)
		return FALSE;

	      /* If all references to an STT_GNU_IFUNC PLT are calls,
		 then all non-call references, including this GOT entry,
		 resolve directly to the run-time target.  */
	      if (ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC
		  && (local_iplt == NULL
		      || local_iplt->arm.noncall_refcount == 0))
		elf32_arm_allocate_irelocs (info, srel, 1);
	      else if (bfd_link_pic (info) || output_bfd->flags & DYNAMIC || htab->fdpic_p)
		{
		  if ((bfd_link_pic (info) && !(*local_tls_type & GOT_TLS_GDESC)))
		    elf32_arm_allocate_dynrelocs (info, srel, 1);
		  else if (htab->fdpic_p && *local_tls_type & GOT_NORMAL)
		    htab->srofixup->size += 4;

		  if ((bfd_link_pic (info) || htab->fdpic_p)
		      && *local_tls_type & GOT_TLS_GDESC)
		    {
		      elf32_arm_allocate_dynrelocs (info,
						    htab->root.srelplt, 1);
		      /* GDESC needs a lazy-resolution trampoline.  */
		      htab->tls_trampoline = -1;
		    }
		}
	    }
	  else
	    *local_got = (bfd_vma) -1;
	}
    }

  if (htab->tls_ldm_got.refcount > 0)
    {
      /* Allocate two GOT entries and one dynamic relocation (if necessary)
	 for R_ARM_TLS_LDM32/R_ARM_TLS_LDM32_FDPIC relocations.  */
      htab->tls_ldm_got.offset = htab->root.sgot->size;
      htab->root.sgot->size += 8;
      if (bfd_link_pic (info))
	elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
    }
  else
    htab->tls_ldm_got.offset = -1;

  /* At the very end of the .rofixup section is a pointer to the GOT,
     reserve space for it.  */
  if (htab->fdpic_p && htab->srofixup != NULL)
    htab->srofixup->size += 4;

  /* Allocate global sym .plt and .got entries, and space for global
     sym dynamic relocs.  */
  elf_link_hash_traverse (& htab->root, allocate_dynrelocs_for_symbol, info);

  /* Here we rummage through the found bfds to collect glue information.  */
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
    {
      if (! is_arm_elf (ibfd))
	continue;

      /* Initialise mapping tables for code/data.  */
      bfd_elf32_arm_init_maps (ibfd);

      if (!bfd_elf32_arm_process_before_allocation (ibfd, info)
	  || !bfd_elf32_arm_vfp11_erratum_scan (ibfd, info)
	  || !bfd_elf32_arm_stm32l4xx_erratum_scan (ibfd, info))
	_bfd_error_handler (_("errors encountered processing file %pB"), ibfd);
    }

  /* Allocate space for the glue sections now that we've sized them.  */
  bfd_elf32_arm_allocate_interworking_sections (info);

  /* For every jump slot reserved in the sgotplt, reloc_count is
     incremented.  However, when we reserve space for TLS descriptors,
     it's not incremented, so in order to compute the space reserved
     for them, it suffices to multiply the reloc count by the jump
     slot size.  */
  if (htab->root.srelplt)
    htab->sgotplt_jump_table_size = elf32_arm_compute_jump_table_size(htab);

  /* tls_trampoline was set to -1 above when a GDESC reloc was seen;
     reserve the trampoline's PLT/GOT space now.  */
  if (htab->tls_trampoline)
    {
      if (htab->root.splt->size == 0)
	htab->root.splt->size += htab->plt_header_size;

      htab->tls_trampoline = htab->root.splt->size;
      htab->root.splt->size += htab->plt_entry_size;

      /* If we're not using lazy TLS relocations, don't generate the
	 PLT and GOT entries they require.  */
      if (!(info->flags & DF_BIND_NOW))
	{
	  htab->dt_tlsdesc_got = htab->root.sgot->size;
	  htab->root.sgot->size += 4;

	  htab->dt_tlsdesc_plt = htab->root.splt->size;
	  htab->root.splt->size += 4 * ARRAY_SIZE (dl_tlsdesc_lazy_trampoline);
	}
    }

  /* The check_relocs and adjust_dynamic_symbol entry points have
     determined the sizes of the various dynamic sections.  Allocate
     memory for them.  */
  plt = FALSE;
  relocs = FALSE;
  for (s = dynobj->sections; s != NULL; s = s->next)
    {
      const char * name;

      if ((s->flags & SEC_LINKER_CREATED) == 0)
	continue;

      /* It's OK to base decisions on the section name, because none
	 of the dynobj section names depend upon the input files.  */
      name = bfd_get_section_name (dynobj, s);

      if (s == htab->root.splt)
	{
	  /* Remember whether there is a PLT.  */
	  plt = s->size != 0;
	}
      else if (CONST_STRNEQ (name, ".rel"))
	{
	  if (s->size != 0)
	    {
	      /* Remember whether there are any reloc sections other
		 than .rel(a).plt and .rela.plt.unloaded.  */
	      if (s != htab->root.srelplt && s != htab->srelplt2)
		relocs = TRUE;

	      /* We use the reloc_count field as a counter if we need
		 to copy relocs into the output file.  */
	      s->reloc_count = 0;
	    }
	}
      else if (s != htab->root.sgot
	       && s != htab->root.sgotplt
	       && s != htab->root.iplt
	       && s != htab->root.igotplt
	       && s != htab->root.sdynbss
	       && s != htab->root.sdynrelro
	       && s != htab->srofixup)
	{
	  /* It's not one of our sections, so don't allocate space.  */
	  continue;
	}

      if (s->size == 0)
	{
	  /* If we don't need this section, strip it from the
	     output file.  This is mostly to handle .rel(a).bss and
	     .rel(a).plt.  We must create both sections in
	     create_dynamic_sections, because they must be created
	     before the linker maps input sections to output
	     sections.  The linker does that before
	     adjust_dynamic_symbol is called, and it is that
	     function which decides whether anything needs to go
	     into these sections.  */
	  s->flags |= SEC_EXCLUDE;
	  continue;
	}

      if ((s->flags & SEC_HAS_CONTENTS) == 0)
	continue;

      /* Allocate memory for the section contents.  */
      s->contents = (unsigned char *) bfd_zalloc (dynobj, s->size);
      if (s->contents == NULL)
	return FALSE;
    }

  if (elf_hash_table (info)->dynamic_sections_created)
    {
      /* Add some entries to the .dynamic section.  We fill in the
	 values later, in elf32_arm_finish_dynamic_sections, but we
	 must add the entries now so that we get the correct size for
	 the .dynamic section.  The DT_DEBUG entry is filled in by the
	 dynamic linker and used by the debugger.  */
#define add_dynamic_entry(TAG, VAL) \
  _bfd_elf_add_dynamic_entry (info, TAG, VAL)

      if (bfd_link_executable (info))
	{
	  if (!add_dynamic_entry (DT_DEBUG, 0))
	    return FALSE;
	}

      if (plt)
	{
	  if (   !add_dynamic_entry (DT_PLTGOT, 0)
	      || !add_dynamic_entry (DT_PLTRELSZ, 0)
	      || !add_dynamic_entry (DT_PLTREL,
				     htab->use_rel ? DT_REL : DT_RELA)
	      || !add_dynamic_entry (DT_JMPREL, 0))
	    return FALSE;

	  if (htab->dt_tlsdesc_plt
	      && (!add_dynamic_entry (DT_TLSDESC_PLT,0)
		  || !add_dynamic_entry (DT_TLSDESC_GOT,0)))
	    return FALSE;
	}

      if (relocs)
	{
	  if (htab->use_rel)
	    {
	      if (!add_dynamic_entry (DT_REL, 0)
		  || !add_dynamic_entry (DT_RELSZ, 0)
		  || !add_dynamic_entry (DT_RELENT, RELOC_SIZE (htab)))
		return FALSE;
	    }
	  else
	    {
	      if (!add_dynamic_entry (DT_RELA, 0)
		  || !add_dynamic_entry (DT_RELASZ, 0)
		  || !add_dynamic_entry (DT_RELAENT, RELOC_SIZE (htab)))
		return FALSE;
	    }
	}

      /* If any dynamic relocs apply to a read-only section,
	 then we need a DT_TEXTREL entry.  */
      if ((info->flags & DF_TEXTREL) == 0)
	elf_link_hash_traverse (&htab->root, maybe_set_textrel, info);

      if ((info->flags & DF_TEXTREL) != 0)
	{
	  if (!add_dynamic_entry (DT_TEXTREL, 0))
	    return FALSE;
	}
      if (htab->vxworks_p
	  && !elf_vxworks_add_dynamic_entries (output_bfd, info))
	return FALSE;
    }
#undef add_dynamic_entry

  return TRUE;
}
17175
17176 /* Size sections even though they're not dynamic. We use it to setup
17177 _TLS_MODULE_BASE_, if needed. */
17178
static bfd_boolean
elf32_arm_always_size_sections (bfd *output_bfd,
				struct bfd_link_info *info)
{
  asection *tls_sec;
  struct elf32_arm_link_hash_table *htab;

  htab = elf32_arm_hash_table (info);

  /* Nothing to do for relocatable links.  */
  if (bfd_link_relocatable (info))
    return TRUE;

  tls_sec = elf_hash_table (info)->tls_sec;

  if (tls_sec)
    {
      struct elf_link_hash_entry *tlsbase;

      /* Look up (creating if necessary) the _TLS_MODULE_BASE_ symbol
	 so TLS descriptor relocations have a module base to refer to.  */
      tlsbase = elf_link_hash_lookup
	(elf_hash_table (info), "_TLS_MODULE_BASE_", TRUE, TRUE, FALSE);

      if (tlsbase)
	{
	  struct bfd_link_hash_entry *bh = NULL;
	  const struct elf_backend_data *bed
	    = get_elf_backend_data (output_bfd);

	  /* Define it at offset 0 of the TLS section.  */
	  if (!(_bfd_generic_link_add_one_symbol
		(info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL,
		 tls_sec, 0, NULL, FALSE,
		 bed->collect, &bh)))
	    return FALSE;

	  /* Note: the type is set on the entry found by the lookup,
	     then TLSBASE is re-pointed at the entry just defined via
	     BH before the remaining flags are applied.  */
	  tlsbase->type = STT_TLS;
	  tlsbase = (struct elf_link_hash_entry *)bh;
	  tlsbase->def_regular = 1;
	  tlsbase->other = STV_HIDDEN;
	  (*bed->elf_backend_hide_symbol) (info, tlsbase, TRUE);
	}
    }

  /* For FDPIC, make sure a stack-size has been settled on (creating
     __stacksize if required).  */
  if (htab->fdpic_p && !bfd_link_relocatable (info)
      && !bfd_elf_stack_segment_size (output_bfd, info,
				      "__stacksize", DEFAULT_STACK_SIZE))
    return FALSE;

  return TRUE;
}
17227
17228 /* Finish up dynamic symbol handling. We set the contents of various
17229 dynamic sections here. */
17230
static bfd_boolean
elf32_arm_finish_dynamic_symbol (bfd * output_bfd,
				 struct bfd_link_info * info,
				 struct elf_link_hash_entry * h,
				 Elf_Internal_Sym * sym)
{
  struct elf32_arm_link_hash_table *htab;
  struct elf32_arm_link_hash_entry *eh;

  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return FALSE;

  eh = (struct elf32_arm_link_hash_entry *) h;

  if (h->plt.offset != (bfd_vma) -1)
    {
      /* Ordinary (non-.iplt) PLT entries are filled in here; .iplt
	 entries are handled elsewhere.  */
      if (!eh->is_iplt)
	{
	  BFD_ASSERT (h->dynindx != -1);
	  if (! elf32_arm_populate_plt_entry (output_bfd, info, &h->plt, &eh->plt,
					      h->dynindx, 0))
	    return FALSE;
	}

      if (!h->def_regular)
	{
	  /* Mark the symbol as undefined, rather than as defined in
	     the .plt section.  */
	  sym->st_shndx = SHN_UNDEF;
	  /* If the symbol is weak we need to clear the value.
	     Otherwise, the PLT entry would provide a definition for
	     the symbol even if the symbol wasn't defined anywhere,
	     and so the symbol would never be NULL.  Leave the value if
	     there were any relocations where pointer equality matters
	     (this is a clue for the dynamic linker, to make function
	     pointer comparisons work between an application and shared
	     library).  */
	  if (!h->ref_regular_nonweak || !h->pointer_equality_needed)
	    sym->st_value = 0;
	}
      else if (eh->is_iplt && eh->plt.noncall_refcount != 0)
	{
	  /* At least one non-call relocation references this .iplt entry,
	     so the .iplt entry is the function's canonical address.  */
	  sym->st_info = ELF_ST_INFO (ELF_ST_BIND (sym->st_info), STT_FUNC);
	  ARM_SET_SYM_BRANCH_TYPE (sym->st_target_internal, ST_BRANCH_TO_ARM);
	  sym->st_shndx = (_bfd_elf_section_from_bfd_section
			   (output_bfd, htab->root.iplt->output_section));
	  sym->st_value = (h->plt.offset
			   + htab->root.iplt->output_section->vma
			   + htab->root.iplt->output_offset);
	}
    }

  if (h->needs_copy)
    {
      asection * s;
      Elf_Internal_Rela rel;

      /* This symbol needs a copy reloc.  Set it up.  */
      BFD_ASSERT (h->dynindx != -1
		  && (h->root.type == bfd_link_hash_defined
		      || h->root.type == bfd_link_hash_defweak));

      rel.r_addend = 0;
      rel.r_offset = (h->root.u.def.value
		      + h->root.u.def.section->output_section->vma
		      + h->root.u.def.section->output_offset);
      rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_COPY);
      /* Copy relocs against relro data go in .rel(a).dyn.relro,
	 everything else in .rel(a).bss.  */
      if (h->root.u.def.section == htab->root.sdynrelro)
	s = htab->root.sreldynrelro;
      else
	s = htab->root.srelbss;
      elf32_arm_add_dynreloc (output_bfd, info, s, &rel);
    }

  /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute.  On VxWorks,
     and for FDPIC, the _GLOBAL_OFFSET_TABLE_ symbol is not absolute:
     it is relative to the ".got" section.  */
  if (h == htab->root.hdynamic
      || (!htab->fdpic_p && !htab->vxworks_p && h == htab->root.hgot))
    sym->st_shndx = SHN_ABS;

  return TRUE;
}
17317
17318 static void
17319 arm_put_trampoline (struct elf32_arm_link_hash_table *htab, bfd *output_bfd,
17320 void *contents,
17321 const unsigned long *template, unsigned count)
17322 {
17323 unsigned ix;
17324
17325 for (ix = 0; ix != count; ix++)
17326 {
17327 unsigned long insn = template[ix];
17328
17329 /* Emit mov pc,rx if bx is not permitted. */
17330 if (htab->fix_v4bx == 1 && (insn & 0x0ffffff0) == 0x012fff10)
17331 insn = (insn & 0xf000000f) | 0x01a0f000;
17332 put_arm_insn (htab, output_bfd, insn, (char *)contents + ix*4);
17333 }
17334 }
17335
17336 /* Install the special first PLT entry for elf32-arm-nacl. Unlike
17337 other variants, NaCl needs this entry in a static executable's
17338 .iplt too. When we're handling that case, GOT_DISPLACEMENT is
17339 zero. For .iplt really only the last bundle is useful, and .iplt
17340 could have a shorter first entry, with each individual PLT entry's
17341 relative branch calculated differently so it targets the last
17342 bundle instead of the instruction before it (labelled .Lplt_tail
17343 above). But it's simpler to keep the size and layout of PLT0
17344 consistent with the dynamic case, at the cost of some dead code at
17345 the start of .iplt and the one dead store to the stack at the start
17346 of .Lplt_tail. */
17347 static void
17348 arm_nacl_put_plt0 (struct elf32_arm_link_hash_table *htab, bfd *output_bfd,
17349 asection *plt, bfd_vma got_displacement)
17350 {
17351 unsigned int i;
17352
17353 put_arm_insn (htab, output_bfd,
17354 elf32_arm_nacl_plt0_entry[0]
17355 | arm_movw_immediate (got_displacement),
17356 plt->contents + 0);
17357 put_arm_insn (htab, output_bfd,
17358 elf32_arm_nacl_plt0_entry[1]
17359 | arm_movt_immediate (got_displacement),
17360 plt->contents + 4);
17361
17362 for (i = 2; i < ARRAY_SIZE (elf32_arm_nacl_plt0_entry); ++i)
17363 put_arm_insn (htab, output_bfd,
17364 elf32_arm_nacl_plt0_entry[i],
17365 plt->contents + (i * 4));
17366 }
17367
17368 /* Finish up the dynamic sections. */
17369
static bfd_boolean
elf32_arm_finish_dynamic_sections (bfd * output_bfd, struct bfd_link_info * info)
{
  bfd * dynobj;
  asection * sgot;
  asection * sdyn;
  struct elf32_arm_link_hash_table *htab;

  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return FALSE;

  dynobj = elf_hash_table (info)->dynobj;

  sgot = htab->root.sgotplt;
  /* A broken linker script might have discarded the dynamic sections.
     Catch this here so that we do not seg-fault later on.  */
  if (sgot != NULL && bfd_is_abs_section (sgot->output_section))
    return FALSE;
  sdyn = bfd_get_linker_section (dynobj, ".dynamic");

  if (elf_hash_table (info)->dynamic_sections_created)
    {
      asection *splt;
      Elf32_External_Dyn *dyncon, *dynconend;

      splt = htab->root.splt;
      BFD_ASSERT (splt != NULL && sdyn != NULL);
      BFD_ASSERT (htab->symbian_p || sgot != NULL);

      dyncon = (Elf32_External_Dyn *) sdyn->contents;
      dynconend = (Elf32_External_Dyn *) (sdyn->contents + sdyn->size);

      /* Walk every .dynamic entry and fill in the values that could
	 not be known until final section layout.  */
      for (; dyncon < dynconend; dyncon++)
	{
	  Elf_Internal_Dyn dyn;
	  const char * name;
	  asection * s;

	  bfd_elf32_swap_dyn_in (dynobj, dyncon, &dyn);

	  switch (dyn.d_tag)
	    {
	      unsigned int type;

	    default:
	      if (htab->vxworks_p
		  && elf_vxworks_finish_dynamic_entry (output_bfd, &dyn))
		bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
	      break;

	    /* These tags only need patching (to a file offset) for
	       Symbian/BPABI output; otherwise generic code has already
	       set them.  */
	    case DT_HASH:
	      name = ".hash";
	      goto get_vma_if_bpabi;
	    case DT_STRTAB:
	      name = ".dynstr";
	      goto get_vma_if_bpabi;
	    case DT_SYMTAB:
	      name = ".dynsym";
	      goto get_vma_if_bpabi;
	    case DT_VERSYM:
	      name = ".gnu.version";
	      goto get_vma_if_bpabi;
	    case DT_VERDEF:
	      name = ".gnu.version_d";
	      goto get_vma_if_bpabi;
	    case DT_VERNEED:
	      name = ".gnu.version_r";
	      goto get_vma_if_bpabi;

	    case DT_PLTGOT:
	      name = htab->symbian_p ? ".got" : ".got.plt";
	      goto get_vma;
	    case DT_JMPREL:
	      name = RELOC_SECTION (htab, ".plt");
	    get_vma:
	      s = bfd_get_linker_section (dynobj, name);
	      if (s == NULL)
		{
		  _bfd_error_handler
		    (_("could not find section %s"), name);
		  bfd_set_error (bfd_error_invalid_operation);
		  return FALSE;
		}
	      if (!htab->symbian_p)
		dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
	      else
		/* In the BPABI, tags in the PT_DYNAMIC section point
		   at the file offset, not the memory address, for the
		   convenience of the post linker.  */
		dyn.d_un.d_ptr = s->output_section->filepos + s->output_offset;
	      bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
	      break;

	    get_vma_if_bpabi:
	      if (htab->symbian_p)
		goto get_vma;
	      break;

	    case DT_PLTRELSZ:
	      s = htab->root.srelplt;
	      BFD_ASSERT (s != NULL);
	      dyn.d_un.d_val = s->size;
	      bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
	      break;

	    case DT_RELSZ:
	    case DT_RELASZ:
	    case DT_REL:
	    case DT_RELA:
	      /* In the BPABI, the DT_REL tag must point at the file
		 offset, not the VMA, of the first relocation
		 section.  So, we use code similar to that in
		 elflink.c, but do not check for SHF_ALLOC on the
		 relocation section, since relocation sections are
		 never allocated under the BPABI.  PLT relocs are also
		 included.  */
	      if (htab->symbian_p)
		{
		  unsigned int i;
		  type = ((dyn.d_tag == DT_REL || dyn.d_tag == DT_RELSZ)
			  ? SHT_REL : SHT_RELA);
		  dyn.d_un.d_val = 0;
		  /* Sum the sizes (for *SZ tags) or find the lowest
		     file offset (for DT_REL/DT_RELA) over all matching
		     relocation sections.  */
		  for (i = 1; i < elf_numsections (output_bfd); i++)
		    {
		      Elf_Internal_Shdr *hdr
			= elf_elfsections (output_bfd)[i];
		      if (hdr->sh_type == type)
			{
			  if (dyn.d_tag == DT_RELSZ
			      || dyn.d_tag == DT_RELASZ)
			    dyn.d_un.d_val += hdr->sh_size;
			  else if ((ufile_ptr) hdr->sh_offset
				   <= dyn.d_un.d_val - 1)
			    dyn.d_un.d_val = hdr->sh_offset;
			}
		    }
		  bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
		}
	      break;

	    case DT_TLSDESC_PLT:
	      s = htab->root.splt;
	      dyn.d_un.d_ptr = (s->output_section->vma + s->output_offset
				+ htab->dt_tlsdesc_plt);
	      bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
	      break;

	    case DT_TLSDESC_GOT:
	      s = htab->root.sgot;
	      dyn.d_un.d_ptr = (s->output_section->vma + s->output_offset
				+ htab->dt_tlsdesc_got);
	      bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
	      break;

	      /* Set the bottom bit of DT_INIT/FINI if the
		 corresponding function is Thumb.  */
	    case DT_INIT:
	      name = info->init_function;
	      goto get_sym;
	    case DT_FINI:
	      name = info->fini_function;
	    get_sym:
	      /* If it wasn't set by elf_bfd_final_link
		 then there is nothing to adjust.  */
	      if (dyn.d_un.d_val != 0)
		{
		  struct elf_link_hash_entry * eh;

		  eh = elf_link_hash_lookup (elf_hash_table (info), name,
					     FALSE, FALSE, TRUE);
		  if (eh != NULL
		      && ARM_GET_SYM_BRANCH_TYPE (eh->target_internal)
		      == ST_BRANCH_TO_THUMB)
		    {
		      dyn.d_un.d_val |= 1;
		      bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
		    }
		}
	      break;
	    }
	}

      /* Fill in the first entry in the procedure linkage table.  */
      if (splt->size > 0 && htab->plt_header_size)
	{
	  const bfd_vma *plt0_entry;
	  bfd_vma got_address, plt_address, got_displacement;

	  /* Calculate the addresses of the GOT and PLT.  */
	  got_address = sgot->output_section->vma + sgot->output_offset;
	  plt_address = splt->output_section->vma + splt->output_offset;

	  if (htab->vxworks_p)
	    {
	      /* The VxWorks GOT is relocated by the dynamic linker.
		 Therefore, we must emit relocations rather than simply
		 computing the values now.  */
	      Elf_Internal_Rela rel;

	      plt0_entry = elf32_arm_vxworks_exec_plt0_entry;
	      put_arm_insn (htab, output_bfd, plt0_entry[0],
			    splt->contents + 0);
	      put_arm_insn (htab, output_bfd, plt0_entry[1],
			    splt->contents + 4);
	      put_arm_insn (htab, output_bfd, plt0_entry[2],
			    splt->contents + 8);
	      bfd_put_32 (output_bfd, got_address, splt->contents + 12);

	      /* Generate a relocation for _GLOBAL_OFFSET_TABLE_.  */
	      rel.r_offset = plt_address + 12;
	      rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
	      rel.r_addend = 0;
	      SWAP_RELOC_OUT (htab) (output_bfd, &rel,
				     htab->srelplt2->contents);
	    }
	  else if (htab->nacl_p)
	    arm_nacl_put_plt0 (htab, output_bfd, splt,
			       got_address + 8 - (plt_address + 16));
	  else if (using_thumb_only (htab))
	    {
	      got_displacement = got_address - (plt_address + 12);

	      plt0_entry = elf32_thumb2_plt0_entry;
	      put_arm_insn (htab, output_bfd, plt0_entry[0],
			    splt->contents + 0);
	      put_arm_insn (htab, output_bfd, plt0_entry[1],
			    splt->contents + 4);
	      put_arm_insn (htab, output_bfd, plt0_entry[2],
			    splt->contents + 8);

	      bfd_put_32 (output_bfd, got_displacement, splt->contents + 12);
	    }
	  else
	    {
	      got_displacement = got_address - (plt_address + 16);

	      plt0_entry = elf32_arm_plt0_entry;
	      put_arm_insn (htab, output_bfd, plt0_entry[0],
			    splt->contents + 0);
	      put_arm_insn (htab, output_bfd, plt0_entry[1],
			    splt->contents + 4);
	      put_arm_insn (htab, output_bfd, plt0_entry[2],
			    splt->contents + 8);
	      put_arm_insn (htab, output_bfd, plt0_entry[3],
			    splt->contents + 12);

#ifdef FOUR_WORD_PLT
	      /* The displacement value goes in the otherwise-unused
		 last word of the second entry.  */
	      bfd_put_32 (output_bfd, got_displacement, splt->contents + 28);
#else
	      bfd_put_32 (output_bfd, got_displacement, splt->contents + 16);
#endif
	    }
	}

      /* UnixWare sets the entsize of .plt to 4, although that doesn't
	 really seem like the right value.  */
      if (splt->output_section->owner == output_bfd)
	elf_section_data (splt->output_section)->this_hdr.sh_entsize = 4;

      /* Emit the lazy TLS-descriptor resolution trampoline and patch
	 its two trailing data words (PC-relative offsets to the GOT
	 slots).  */
      if (htab->dt_tlsdesc_plt)
	{
	  bfd_vma got_address
	    = sgot->output_section->vma + sgot->output_offset;
	  bfd_vma gotplt_address = (htab->root.sgot->output_section->vma
				    + htab->root.sgot->output_offset);
	  bfd_vma plt_address
	    = splt->output_section->vma + splt->output_offset;

	  arm_put_trampoline (htab, output_bfd,
			      splt->contents + htab->dt_tlsdesc_plt,
			      dl_tlsdesc_lazy_trampoline, 6);

	  bfd_put_32 (output_bfd,
		      gotplt_address + htab->dt_tlsdesc_got
		      - (plt_address + htab->dt_tlsdesc_plt)
		      - dl_tlsdesc_lazy_trampoline[6],
		      splt->contents + htab->dt_tlsdesc_plt + 24);
	  bfd_put_32 (output_bfd,
		      got_address - (plt_address + htab->dt_tlsdesc_plt)
		      - dl_tlsdesc_lazy_trampoline[7],
		      splt->contents + htab->dt_tlsdesc_plt + 24 + 4);
	}

      if (htab->tls_trampoline)
	{
	  arm_put_trampoline (htab, output_bfd,
			      splt->contents + htab->tls_trampoline,
			      tls_trampoline, 3);
#ifdef FOUR_WORD_PLT
	  bfd_put_32 (output_bfd, 0x00000000,
		      splt->contents + htab->tls_trampoline + 12);
#endif
	}

      if (htab->vxworks_p
	  && !bfd_link_pic (info)
	  && htab->root.splt->size > 0)
	{
	  /* Correct the .rel(a).plt.unloaded relocations.  They will have
	     incorrect symbol indexes.  */
	  int num_plts;
	  unsigned char *p;

	  num_plts = ((htab->root.splt->size - htab->plt_header_size)
		      / htab->plt_entry_size);
	  p = htab->srelplt2->contents + RELOC_SIZE (htab);

	  /* Each PLT entry contributes two relocations: one against
	     _GLOBAL_OFFSET_TABLE_ and one against _PROCEDURE_LINKAGE_TABLE_.  */
	  for (; num_plts; num_plts--)
	    {
	      Elf_Internal_Rela rel;

	      SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
	      rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
	      SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
	      p += RELOC_SIZE (htab);

	      SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
	      rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
	      SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
	      p += RELOC_SIZE (htab);
	    }
	}
    }

  if (htab->nacl_p && htab->root.iplt != NULL && htab->root.iplt->size > 0)
    /* NaCl uses a special first entry in .iplt too.  */
    arm_nacl_put_plt0 (htab, output_bfd, htab->root.iplt, 0);

  /* Fill in the first three entries in the global offset table.  */
  if (sgot)
    {
      if (sgot->size > 0)
	{
	  /* GOT[0] holds the address of .dynamic (or 0 when there is
	     no dynamic section); GOT[1] and GOT[2] are reserved.  */
	  if (sdyn == NULL)
	    bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents);
	  else
	    bfd_put_32 (output_bfd,
			sdyn->output_section->vma + sdyn->output_offset,
			sgot->contents);
	  bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 4);
	  bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 8);
	}

      elf_section_data (sgot->output_section)->this_hdr.sh_entsize = 4;
    }

  /* At the very end of the .rofixup section is a pointer to the GOT.  */
  if (htab->fdpic_p && htab->srofixup != NULL)
    {
      struct elf_link_hash_entry *hgot = htab->root.hgot;

      bfd_vma got_value = hgot->root.u.def.value
	+ hgot->root.u.def.section->output_section->vma
	+ hgot->root.u.def.section->output_offset;

      arm_elf_add_rofixup(output_bfd, htab->srofixup, got_value);

      /* Make sure we allocated and generated the same number of fixups.  */
      BFD_ASSERT (htab->srofixup->reloc_count * 4 == htab->srofixup->size);
    }

  return TRUE;
}
17736
17737 static void
17738 elf32_arm_post_process_headers (bfd * abfd, struct bfd_link_info * link_info ATTRIBUTE_UNUSED)
17739 {
17740 Elf_Internal_Ehdr * i_ehdrp; /* ELF file header, internal form. */
17741 struct elf32_arm_link_hash_table *globals;
17742 struct elf_segment_map *m;
17743
17744 i_ehdrp = elf_elfheader (abfd);
17745
17746 if (EF_ARM_EABI_VERSION (i_ehdrp->e_flags) == EF_ARM_EABI_UNKNOWN)
17747 i_ehdrp->e_ident[EI_OSABI] = ELFOSABI_ARM;
17748 else
17749 _bfd_elf_post_process_headers (abfd, link_info);
17750 i_ehdrp->e_ident[EI_ABIVERSION] = ARM_ELF_ABI_VERSION;
17751
17752 if (link_info)
17753 {
17754 globals = elf32_arm_hash_table (link_info);
17755 if (globals != NULL && globals->byteswap_code)
17756 i_ehdrp->e_flags |= EF_ARM_BE8;
17757
17758 if (globals->fdpic_p)
17759 i_ehdrp->e_ident[EI_OSABI] |= ELFOSABI_ARM_FDPIC;
17760 }
17761
17762 if (EF_ARM_EABI_VERSION (i_ehdrp->e_flags) == EF_ARM_EABI_VER5
17763 && ((i_ehdrp->e_type == ET_DYN) || (i_ehdrp->e_type == ET_EXEC)))
17764 {
17765 int abi = bfd_elf_get_obj_attr_int (abfd, OBJ_ATTR_PROC, Tag_ABI_VFP_args);
17766 if (abi == AEABI_VFP_args_vfp)
17767 i_ehdrp->e_flags |= EF_ARM_ABI_FLOAT_HARD;
17768 else
17769 i_ehdrp->e_flags |= EF_ARM_ABI_FLOAT_SOFT;
17770 }
17771
17772 /* Scan segment to set p_flags attribute if it contains only sections with
17773 SHF_ARM_PURECODE flag. */
17774 for (m = elf_seg_map (abfd); m != NULL; m = m->next)
17775 {
17776 unsigned int j;
17777
17778 if (m->count == 0)
17779 continue;
17780 for (j = 0; j < m->count; j++)
17781 {
17782 if (!(elf_section_flags (m->sections[j]) & SHF_ARM_PURECODE))
17783 break;
17784 }
17785 if (j == m->count)
17786 {
17787 m->p_flags = PF_X;
17788 m->p_flags_valid = 1;
17789 }
17790 }
17791 }
17792
17793 static enum elf_reloc_type_class
17794 elf32_arm_reloc_type_class (const struct bfd_link_info *info ATTRIBUTE_UNUSED,
17795 const asection *rel_sec ATTRIBUTE_UNUSED,
17796 const Elf_Internal_Rela *rela)
17797 {
17798 switch ((int) ELF32_R_TYPE (rela->r_info))
17799 {
17800 case R_ARM_RELATIVE:
17801 return reloc_class_relative;
17802 case R_ARM_JUMP_SLOT:
17803 return reloc_class_plt;
17804 case R_ARM_COPY:
17805 return reloc_class_copy;
17806 case R_ARM_IRELATIVE:
17807 return reloc_class_ifunc;
17808 default:
17809 return reloc_class_normal;
17810 }
17811 }
17812
/* Final write hook: refresh the contents of the ARM note section
   (ARM_NOTE_SECTION) via bfd_arm_update_notes before the file is
   written out.  */
static void
elf32_arm_final_write_processing (bfd *abfd, bfd_boolean linker ATTRIBUTE_UNUSED)
{
  bfd_arm_update_notes (abfd, ARM_NOTE_SECTION);
}
17818
17819 /* Return TRUE if this is an unwinding table entry. */
17820
17821 static bfd_boolean
17822 is_arm_elf_unwind_section_name (bfd * abfd ATTRIBUTE_UNUSED, const char * name)
17823 {
17824 return (CONST_STRNEQ (name, ELF_STRING_ARM_unwind)
17825 || CONST_STRNEQ (name, ELF_STRING_ARM_unwind_once));
17826 }
17827
17828
17829 /* Set the type and flags for an ARM section. We do this by
17830 the section name, which is a hack, but ought to work. */
17831
17832 static bfd_boolean
17833 elf32_arm_fake_sections (bfd * abfd, Elf_Internal_Shdr * hdr, asection * sec)
17834 {
17835 const char * name;
17836
17837 name = bfd_get_section_name (abfd, sec);
17838
17839 if (is_arm_elf_unwind_section_name (abfd, name))
17840 {
17841 hdr->sh_type = SHT_ARM_EXIDX;
17842 hdr->sh_flags |= SHF_LINK_ORDER;
17843 }
17844
17845 if (sec->flags & SEC_ELF_PURECODE)
17846 hdr->sh_flags |= SHF_ARM_PURECODE;
17847
17848 return TRUE;
17849 }
17850
17851 /* Handle an ARM specific section when reading an object file. This is
17852 called when bfd_section_from_shdr finds a section with an unknown
17853 type. */
17854
17855 static bfd_boolean
17856 elf32_arm_section_from_shdr (bfd *abfd,
17857 Elf_Internal_Shdr * hdr,
17858 const char *name,
17859 int shindex)
17860 {
17861 /* There ought to be a place to keep ELF backend specific flags, but
17862 at the moment there isn't one. We just keep track of the
17863 sections by their name, instead. Fortunately, the ABI gives
17864 names for all the ARM specific sections, so we will probably get
17865 away with this. */
17866 switch (hdr->sh_type)
17867 {
17868 case SHT_ARM_EXIDX:
17869 case SHT_ARM_PREEMPTMAP:
17870 case SHT_ARM_ATTRIBUTES:
17871 break;
17872
17873 default:
17874 return FALSE;
17875 }
17876
17877 if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
17878 return FALSE;
17879
17880 return TRUE;
17881 }
17882
17883 static _arm_elf_section_data *
17884 get_arm_elf_section_data (asection * sec)
17885 {
17886 if (sec && sec->owner && is_arm_elf (sec->owner))
17887 return elf32_arm_section_data (sec);
17888 else
17889 return NULL;
17890 }
17891
/* State carried while emitting linker-generated local symbols (mapping
   symbols and stub symbols) through the output-symbol callback.  */
typedef struct
{
  void *flaginfo;		/* Opaque cookie passed through to FUNC.  */
  struct bfd_link_info *info;	/* The link being finished.  */
  asection *sec;		/* Section the symbols belong to.  */
  int sec_shndx;		/* Output section index of SEC.  */
  /* Callback that actually records each symbol.  */
  int (*func) (void *, const char *, Elf_Internal_Sym *,
	       asection *, struct elf_link_hash_entry *);
} output_arch_syminfo;
17901
/* Kind of AAELF mapping symbol to emit; indexes the "$a"/"$t"/"$d"
   name table in elf32_arm_output_map_sym.  */
enum map_symbol_type
{
  ARM_MAP_ARM,		/* $a: start of a run of Arm instructions.  */
  ARM_MAP_THUMB,	/* $t: start of a run of Thumb instructions.  */
  ARM_MAP_DATA		/* $d: start of a run of literal data.  */
};
17908
17909
17910 /* Output a single mapping symbol. */
17911
17912 static bfd_boolean
17913 elf32_arm_output_map_sym (output_arch_syminfo *osi,
17914 enum map_symbol_type type,
17915 bfd_vma offset)
17916 {
17917 static const char *names[3] = {"$a", "$t", "$d"};
17918 Elf_Internal_Sym sym;
17919
17920 sym.st_value = osi->sec->output_section->vma
17921 + osi->sec->output_offset
17922 + offset;
17923 sym.st_size = 0;
17924 sym.st_other = 0;
17925 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
17926 sym.st_shndx = osi->sec_shndx;
17927 sym.st_target_internal = 0;
17928 elf32_arm_section_map_add (osi->sec, names[type][1], offset);
17929 return osi->func (osi->flaginfo, names[type], &sym, osi->sec, NULL) == 1;
17930 }
17931
17932 /* Output mapping symbols for the PLT entry described by ROOT_PLT and ARM_PLT.
17933 IS_IPLT_ENTRY_P says whether the PLT is in .iplt rather than .plt. */
17934
static bfd_boolean
elf32_arm_output_plt_map_1 (output_arch_syminfo *osi,
			    bfd_boolean is_iplt_entry_p,
			    union gotplt_union *root_plt,
			    struct arm_plt_info *arm_plt)
{
  struct elf32_arm_link_hash_table *htab;
  bfd_vma addr, plt_header_size;

  /* No PLT entry was allocated for this symbol.  */
  if (root_plt->offset == (bfd_vma) -1)
    return TRUE;

  htab = elf32_arm_hash_table (osi->info);
  if (htab == NULL)
    return FALSE;

  if (is_iplt_entry_p)
    {
      osi->sec = htab->root.iplt;
      plt_header_size = 0;	/* .iplt has no PLT0 header.  */
    }
  else
    {
      osi->sec = htab->root.splt;
      plt_header_size = htab->plt_header_size;
    }
  osi->sec_shndx = (_bfd_elf_section_from_bfd_section
		    (osi->info->output_bfd, osi->sec->output_section));

  /* Strip the low bit of the entry offset.  NOTE(review): presumably
     this bit flags a Thumb-entered PLT entry — confirm against the
     code that sets root_plt->offset.  */
  addr = root_plt->offset & -2;
  /* The $a/$t/$d offsets below follow each target's PLT entry layout:
     where the instructions end and the literal data begins.  */
  if (htab->symbian_p)
    {
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
	return FALSE;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 4))
	return FALSE;
    }
  else if (htab->vxworks_p)
    {
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
	return FALSE;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 8))
	return FALSE;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr + 12))
	return FALSE;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 20))
	return FALSE;
    }
  else if (htab->nacl_p)
    {
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
	return FALSE;
    }
  else if (htab->fdpic_p)
    {
      enum map_symbol_type type = using_thumb_only(htab)
	? ARM_MAP_THUMB
	: ARM_MAP_ARM;

      /* The optional Thumb thunk sits 4 bytes before the entry.  */
      if (elf32_arm_plt_needs_thumb_stub_p (osi->info, arm_plt))
	if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr - 4))
	  return FALSE;
      if (!elf32_arm_output_map_sym (osi, type, addr))
	return FALSE;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 16))
	return FALSE;
      /* The larger FDPIC entry layout has a second code run.  */
      if (htab->plt_entry_size == 4 * ARRAY_SIZE(elf32_arm_fdpic_plt_entry))
	if (!elf32_arm_output_map_sym (osi, type, addr + 24))
	  return FALSE;
    }
  else if (using_thumb_only (htab))
    {
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr))
	return FALSE;
    }
  else
    {
      bfd_boolean thumb_stub_p;

      thumb_stub_p = elf32_arm_plt_needs_thumb_stub_p (osi->info, arm_plt);
      if (thumb_stub_p)
	{
	  if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr - 4))
	    return FALSE;
	}
#ifdef FOUR_WORD_PLT
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
	return FALSE;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 12))
	return FALSE;
#else
      /* A three-word PLT with no Thumb thunk contains only Arm code,
	 so only need to output a mapping symbol for the first PLT entry and
	 entries with thumb thunks.  */
      if (thumb_stub_p || addr == plt_header_size)
	{
	  if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
	    return FALSE;
	}
#endif
    }

  return TRUE;
}
18039
18040 /* Output mapping symbols for PLT entries associated with H. */
18041
18042 static bfd_boolean
18043 elf32_arm_output_plt_map (struct elf_link_hash_entry *h, void *inf)
18044 {
18045 output_arch_syminfo *osi = (output_arch_syminfo *) inf;
18046 struct elf32_arm_link_hash_entry *eh;
18047
18048 if (h->root.type == bfd_link_hash_indirect)
18049 return TRUE;
18050
18051 if (h->root.type == bfd_link_hash_warning)
18052 /* When warning symbols are created, they **replace** the "real"
18053 entry in the hash table, thus we never get to see the real
18054 symbol in a hash traversal. So look at it now. */
18055 h = (struct elf_link_hash_entry *) h->root.u.i.link;
18056
18057 eh = (struct elf32_arm_link_hash_entry *) h;
18058 return elf32_arm_output_plt_map_1 (osi, SYMBOL_CALLS_LOCAL (osi->info, h),
18059 &h->plt, &eh->plt);
18060 }
18061
18062 /* Bind a veneered symbol to its veneer identified by its hash entry
18063 STUB_ENTRY. The veneered location thus loose its symbol. */
18064
18065 static void
18066 arm_stub_claim_sym (struct elf32_arm_stub_hash_entry *stub_entry)
18067 {
18068 struct elf32_arm_link_hash_entry *hash = stub_entry->h;
18069
18070 BFD_ASSERT (hash);
18071 hash->root.root.u.def.section = stub_entry->stub_sec;
18072 hash->root.root.u.def.value = stub_entry->stub_offset;
18073 hash->root.size = stub_entry->stub_size;
18074 }
18075
18076 /* Output a single local symbol for a generated stub. */
18077
18078 static bfd_boolean
18079 elf32_arm_output_stub_sym (output_arch_syminfo *osi, const char *name,
18080 bfd_vma offset, bfd_vma size)
18081 {
18082 Elf_Internal_Sym sym;
18083
18084 sym.st_value = osi->sec->output_section->vma
18085 + osi->sec->output_offset
18086 + offset;
18087 sym.st_size = size;
18088 sym.st_other = 0;
18089 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
18090 sym.st_shndx = osi->sec_shndx;
18091 sym.st_target_internal = 0;
18092 return osi->func (osi->flaginfo, name, &sym, osi->sec, NULL) == 1;
18093 }
18094
/* Stub-hash traversal callback: emit the stub's name symbol and the
   mapping symbols describing the code/data runs inside one stub.  */
static bfd_boolean
arm_map_one_stub (struct bfd_hash_entry * gen_entry,
		  void * in_arg)
{
  struct elf32_arm_stub_hash_entry *stub_entry;
  asection *stub_sec;
  bfd_vma addr;
  char *stub_name;
  output_arch_syminfo *osi;
  const insn_sequence *template_sequence;
  enum stub_insn_type prev_type;
  int size;
  int i;
  enum map_symbol_type sym_type;

  /* Massage our args to the form they really have.  */
  stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
  osi = (output_arch_syminfo *) in_arg;

  stub_sec = stub_entry->stub_sec;

  /* Ensure this stub is attached to the current section being
     processed.  */
  if (stub_sec != osi->sec)
    return TRUE;

  addr = (bfd_vma) stub_entry->stub_offset;
  template_sequence = stub_entry->stub_template;

  if (arm_stub_sym_claimed (stub_entry->stub_type))
    /* The veneered symbol itself is rebound onto the stub.  */
    arm_stub_claim_sym (stub_entry);
  else
    {
      /* Emit a local symbol naming the stub; Thumb entry points get
	 the low bit set in the usual way.  */
      stub_name = stub_entry->output_name;
      switch (template_sequence[0].type)
	{
	case ARM_TYPE:
	  if (!elf32_arm_output_stub_sym (osi, stub_name, addr,
					  stub_entry->stub_size))
	    return FALSE;
	  break;
	case THUMB16_TYPE:
	case THUMB32_TYPE:
	  if (!elf32_arm_output_stub_sym (osi, stub_name, addr | 1,
					  stub_entry->stub_size))
	    return FALSE;
	  break;
	default:
	  BFD_FAIL ();
	  return 0;
	}
    }

  /* Walk the template, emitting a mapping symbol at each transition
     between Arm code, Thumb code and data, and accumulating the byte
     size of each template element as we go.  */
  prev_type = DATA_TYPE;
  size = 0;
  for (i = 0; i < stub_entry->stub_template_size; i++)
    {
      switch (template_sequence[i].type)
	{
	case ARM_TYPE:
	  sym_type = ARM_MAP_ARM;
	  break;

	case THUMB16_TYPE:
	case THUMB32_TYPE:
	  sym_type = ARM_MAP_THUMB;
	  break;

	case DATA_TYPE:
	  sym_type = ARM_MAP_DATA;
	  break;

	default:
	  BFD_FAIL ();
	  return FALSE;
	}

      if (template_sequence[i].type != prev_type)
	{
	  prev_type = template_sequence[i].type;
	  if (!elf32_arm_output_map_sym (osi, sym_type, addr + size))
	    return FALSE;
	}

      /* Advance by this element's encoded size: 2 bytes for a
	 Thumb-16 instruction, 4 bytes otherwise.  */
      switch (template_sequence[i].type)
	{
	case ARM_TYPE:
	case THUMB32_TYPE:
	  size += 4;
	  break;

	case THUMB16_TYPE:
	  size += 2;
	  break;

	case DATA_TYPE:
	  size += 4;
	  break;

	default:
	  BFD_FAIL ();
	  return FALSE;
	}
    }

  return TRUE;
}
18202
/* Output mapping symbols for linker generated sections,
   and for those data-only sections that do not have a
   $d.

   This is the backend's elf_backend_output_arch_local_syms hook.
   OUTPUT_BFD is the final link output.  FLAGINFO and FUNC come from
   the generic ELF linker; FUNC is the callback that actually emits a
   symbol and is invoked indirectly through elf32_arm_output_map_sym
   via the OSI bundle.  Returns FALSE on failure.  */

static bfd_boolean
elf32_arm_output_arch_local_syms (bfd *output_bfd,
				  struct bfd_link_info *info,
				  void *flaginfo,
				  int (*func) (void *, const char *,
					       Elf_Internal_Sym *,
					       asection *,
					       struct elf_link_hash_entry *))
{
  output_arch_syminfo osi;
  struct elf32_arm_link_hash_table *htab;
  bfd_vma offset;
  bfd_size_type size;
  bfd *input_bfd;

  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return FALSE;

  /* Recompute whether BLX may be used; the glue sizes below depend on it.  */
  check_use_blx (htab);

  /* Package the callback state once; OSI is reused for every section.  */
  osi.flaginfo = flaginfo;
  osi.info = info;
  osi.func = func;

  /* Add a $d mapping symbol to data-only sections that
     don't have any mapping symbol.  This may result in (harmless) redundant
     mapping symbols.  */
  for (input_bfd = info->input_bfds;
       input_bfd != NULL;
       input_bfd = input_bfd->link.next)
    {
      /* Only real input files with symbols; skip linker-created BFDs.  */
      if ((input_bfd->flags & (BFD_LINKER_CREATED | HAS_SYMS)) == HAS_SYMS)
	for (osi.sec = input_bfd->sections;
	     osi.sec != NULL;
	     osi.sec = osi.sec->next)
	  {
	    /* Candidate sections: allocated/code output, with contents,
	       not linker-created, non-empty, not excluded, and with no
	       mapping symbols of their own (mapcount == 0).  */
	    if (osi.sec->output_section != NULL
		&& ((osi.sec->output_section->flags & (SEC_ALLOC | SEC_CODE))
		    != 0)
		&& (osi.sec->flags & (SEC_HAS_CONTENTS | SEC_LINKER_CREATED))
		   == SEC_HAS_CONTENTS
		&& get_arm_elf_section_data (osi.sec) != NULL
		&& get_arm_elf_section_data (osi.sec)->mapcount == 0
		&& osi.sec->size > 0
		&& (osi.sec->flags & SEC_EXCLUDE) == 0)
	      {
		osi.sec_shndx = _bfd_elf_section_from_bfd_section
		  (output_bfd, osi.sec->output_section);
		if (osi.sec_shndx != (int)SHN_BAD)
		  elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 0);
	      }
	  }
    }

  /* ARM->Thumb glue.  */
  if (htab->arm_glue_size > 0)
    {
      osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
					ARM2THUMB_GLUE_SECTION_NAME);

      osi.sec_shndx = _bfd_elf_section_from_bfd_section
	(output_bfd, osi.sec->output_section);
      /* All veneers in this glue section share one size, chosen by the
	 same criteria used when the glue was sized.  */
      if (bfd_link_pic (info) || htab->root.is_relocatable_executable
	  || htab->pic_veneer)
	size = ARM2THUMB_PIC_GLUE_SIZE;
      else if (htab->use_blx)
	size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
      else
	size = ARM2THUMB_STATIC_GLUE_SIZE;

      /* Each veneer: $a at its start, $d covering its trailing
	 literal word.  */
      for (offset = 0; offset < htab->arm_glue_size; offset += size)
	{
	  elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset);
	  elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, offset + size - 4);
	}
    }

  /* Thumb->ARM glue.  */
  if (htab->thumb_glue_size > 0)
    {
      osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
					THUMB2ARM_GLUE_SECTION_NAME);

      osi.sec_shndx = _bfd_elf_section_from_bfd_section
	(output_bfd, osi.sec->output_section);
      size = THUMB2ARM_GLUE_SIZE;

      /* Each veneer starts with Thumb code and switches to ARM 4 bytes in.  */
      for (offset = 0; offset < htab->thumb_glue_size; offset += size)
	{
	  elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, offset);
	  elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset + 4);
	}
    }

  /* ARMv4 BX veneers.  */
  if (htab->bx_glue_size > 0)
    {
      osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
					ARM_BX_GLUE_SECTION_NAME);

      osi.sec_shndx = _bfd_elf_section_from_bfd_section
	(output_bfd, osi.sec->output_section);

      /* The whole section is ARM code; one $a at the start suffices.  */
      elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0);
    }

  /* Long calls stubs.  */
  if (htab->stub_bfd && htab->stub_bfd->sections)
    {
      asection* stub_sec;

      for (stub_sec = htab->stub_bfd->sections;
	   stub_sec != NULL;
	   stub_sec = stub_sec->next)
	{
	  /* Ignore non-stub sections.  */
	  if (!strstr (stub_sec->name, STUB_SUFFIX))
	    continue;

	  osi.sec = stub_sec;

	  osi.sec_shndx = _bfd_elf_section_from_bfd_section
	    (output_bfd, osi.sec->output_section);

	  /* arm_map_one_stub emits the per-stub mapping symbols.  */
	  bfd_hash_traverse (&htab->stub_hash_table, arm_map_one_stub, &osi);
	}
    }

  /* Finally, output mapping symbols for the PLT.  */
  if (htab->root.splt && htab->root.splt->size > 0)
    {
      osi.sec = htab->root.splt;
      osi.sec_shndx = (_bfd_elf_section_from_bfd_section
		       (output_bfd, osi.sec->output_section));

      /* Output mapping symbols for the plt header.  SymbianOS does not have a
	 plt header.  */
      if (htab->vxworks_p)
	{
	  /* VxWorks shared libraries have no PLT header.  */
	  if (!bfd_link_pic (info))
	    {
	      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
		return FALSE;
	      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 12))
		return FALSE;
	    }
	}
      else if (htab->nacl_p)
	{
	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
	    return FALSE;
	}
      else if (using_thumb_only (htab) && !htab->fdpic_p)
	{
	  /* Thumb-only PLT header: code, a literal word at 12, code at 16.  */
	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, 0))
	    return FALSE;
	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 12))
	    return FALSE;
	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, 16))
	    return FALSE;
	}
      else if (!htab->symbian_p && !htab->fdpic_p)
	{
	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
	    return FALSE;
#ifndef FOUR_WORD_PLT
	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 16))
	    return FALSE;
#endif
	}
    }
  if (htab->nacl_p && htab->root.iplt && htab->root.iplt->size > 0)
    {
      /* NaCl uses a special first entry in .iplt too.  */
      osi.sec = htab->root.iplt;
      osi.sec_shndx = (_bfd_elf_section_from_bfd_section
		       (output_bfd, osi.sec->output_section));
      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
	return FALSE;
    }
  if ((htab->root.splt && htab->root.splt->size > 0)
      || (htab->root.iplt && htab->root.iplt->size > 0))
    {
      /* Per-entry mapping symbols: global entries via the hash table
	 traversal, local ifunc entries via each input's iplt info.  */
      elf_link_hash_traverse (&htab->root, elf32_arm_output_plt_map, &osi);
      for (input_bfd = info->input_bfds;
	   input_bfd != NULL;
	   input_bfd = input_bfd->link.next)
	{
	  struct arm_local_iplt_info **local_iplt;
	  unsigned int i, num_syms;

	  local_iplt = elf32_arm_local_iplt (input_bfd);
	  if (local_iplt != NULL)
	    {
	      num_syms = elf_symtab_hdr (input_bfd).sh_info;
	      for (i = 0; i < num_syms; i++)
		if (local_iplt[i] != NULL
		    && !elf32_arm_output_plt_map_1 (&osi, TRUE,
						    &local_iplt[i]->root,
						    &local_iplt[i]->arm))
		  return FALSE;
	    }
	}
    }
  if (htab->dt_tlsdesc_plt != 0)
    {
      /* Mapping symbols for the lazy tls trampoline.  */
      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, htab->dt_tlsdesc_plt))
	return FALSE;

      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA,
				     htab->dt_tlsdesc_plt + 24))
	return FALSE;
    }
  if (htab->tls_trampoline != 0)
    {
      /* Mapping symbols for the tls trampoline.  */
      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, htab->tls_trampoline))
	return FALSE;
#ifdef FOUR_WORD_PLT
      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA,
				     htab->tls_trampoline + 12))
	return FALSE;
#endif
    }

  return TRUE;
}
18437
18438 /* Filter normal symbols of CMSE entry functions of ABFD to include in
18439 the import library. All SYMCOUNT symbols of ABFD can be examined
18440 from their pointers in SYMS. Pointers of symbols to keep should be
18441 stored continuously at the beginning of that array.
18442
18443 Returns the number of symbols to keep. */
18444
18445 static unsigned int
18446 elf32_arm_filter_cmse_symbols (bfd *abfd ATTRIBUTE_UNUSED,
18447 struct bfd_link_info *info,
18448 asymbol **syms, long symcount)
18449 {
18450 size_t maxnamelen;
18451 char *cmse_name;
18452 long src_count, dst_count = 0;
18453 struct elf32_arm_link_hash_table *htab;
18454
18455 htab = elf32_arm_hash_table (info);
18456 if (!htab->stub_bfd || !htab->stub_bfd->sections)
18457 symcount = 0;
18458
18459 maxnamelen = 128;
18460 cmse_name = (char *) bfd_malloc (maxnamelen);
18461 for (src_count = 0; src_count < symcount; src_count++)
18462 {
18463 struct elf32_arm_link_hash_entry *cmse_hash;
18464 asymbol *sym;
18465 flagword flags;
18466 char *name;
18467 size_t namelen;
18468
18469 sym = syms[src_count];
18470 flags = sym->flags;
18471 name = (char *) bfd_asymbol_name (sym);
18472
18473 if ((flags & BSF_FUNCTION) != BSF_FUNCTION)
18474 continue;
18475 if (!(flags & (BSF_GLOBAL | BSF_WEAK)))
18476 continue;
18477
18478 namelen = strlen (name) + sizeof (CMSE_PREFIX) + 1;
18479 if (namelen > maxnamelen)
18480 {
18481 cmse_name = (char *)
18482 bfd_realloc (cmse_name, namelen);
18483 maxnamelen = namelen;
18484 }
18485 snprintf (cmse_name, maxnamelen, "%s%s", CMSE_PREFIX, name);
18486 cmse_hash = (struct elf32_arm_link_hash_entry *)
18487 elf_link_hash_lookup (&(htab)->root, cmse_name, FALSE, FALSE, TRUE);
18488
18489 if (!cmse_hash
18490 || (cmse_hash->root.root.type != bfd_link_hash_defined
18491 && cmse_hash->root.root.type != bfd_link_hash_defweak)
18492 || cmse_hash->root.type != STT_FUNC)
18493 continue;
18494
18495 if (!ARM_GET_SYM_CMSE_SPCL (cmse_hash->root.target_internal))
18496 continue;
18497
18498 syms[dst_count++] = sym;
18499 }
18500 free (cmse_name);
18501
18502 syms[dst_count] = NULL;
18503
18504 return dst_count;
18505 }
18506
18507 /* Filter symbols of ABFD to include in the import library. All
18508 SYMCOUNT symbols of ABFD can be examined from their pointers in
18509 SYMS. Pointers of symbols to keep should be stored continuously at
18510 the beginning of that array.
18511
18512 Returns the number of symbols to keep. */
18513
18514 static unsigned int
18515 elf32_arm_filter_implib_symbols (bfd *abfd ATTRIBUTE_UNUSED,
18516 struct bfd_link_info *info,
18517 asymbol **syms, long symcount)
18518 {
18519 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
18520
18521 /* Requirement 8 of "ARM v8-M Security Extensions: Requirements on
18522 Development Tools" (ARM-ECM-0359818) mandates Secure Gateway import
18523 library to be a relocatable object file. */
18524 BFD_ASSERT (!(bfd_get_file_flags (info->out_implib_bfd) & EXEC_P));
18525 if (globals->cmse_implib)
18526 return elf32_arm_filter_cmse_symbols (abfd, info, syms, symcount);
18527 else
18528 return _bfd_elf_filter_global_symbols (abfd, info, syms, symcount);
18529 }
18530
18531 /* Allocate target specific section data. */
18532
18533 static bfd_boolean
18534 elf32_arm_new_section_hook (bfd *abfd, asection *sec)
18535 {
18536 if (!sec->used_by_bfd)
18537 {
18538 _arm_elf_section_data *sdata;
18539 bfd_size_type amt = sizeof (*sdata);
18540
18541 sdata = (_arm_elf_section_data *) bfd_zalloc (abfd, amt);
18542 if (sdata == NULL)
18543 return FALSE;
18544 sec->used_by_bfd = sdata;
18545 }
18546
18547 return _bfd_elf_new_section_hook (abfd, sec);
18548 }
18549
18550
18551 /* Used to order a list of mapping symbols by address. */
18552
18553 static int
18554 elf32_arm_compare_mapping (const void * a, const void * b)
18555 {
18556 const elf32_arm_section_map *amap = (const elf32_arm_section_map *) a;
18557 const elf32_arm_section_map *bmap = (const elf32_arm_section_map *) b;
18558
18559 if (amap->vma > bmap->vma)
18560 return 1;
18561 else if (amap->vma < bmap->vma)
18562 return -1;
18563 else if (amap->type > bmap->type)
18564 /* Ensure results do not depend on the host qsort for objects with
18565 multiple mapping symbols at the same address by sorting on type
18566 after vma. */
18567 return 1;
18568 else if (amap->type < bmap->type)
18569 return -1;
18570 else
18571 return 0;
18572 }
18573
18574 /* Add OFFSET to lower 31 bits of ADDR, leaving other bits unmodified. */
18575
18576 static unsigned long
18577 offset_prel31 (unsigned long addr, bfd_vma offset)
18578 {
18579 return (addr & ~0x7ffffffful) | ((addr + offset) & 0x7ffffffful);
18580 }
18581
/* Copy an .ARM.exidx table entry, adding OFFSET to (applied) PREL31
   relocations.  FROM and TO each point at an 8-byte exidx entry (two
   32-bit words); OUTPUT_BFD supplies the byte order.  */

static void
copy_exidx_entry (bfd *output_bfd, bfd_byte *to, bfd_byte *from, bfd_vma offset)
{
  unsigned long first_word = bfd_get_32 (output_bfd, from);
  unsigned long second_word = bfd_get_32 (output_bfd, from + 4);

  /* High bit of first word is supposed to be zero.  */
  if ((first_word & 0x80000000ul) == 0)
    first_word = offset_prel31 (first_word, offset);

  /* If the high bit of the second word is clear, and the bit pattern is not 0x1
     (EXIDX_CANTUNWIND), this is an offset to an .ARM.extab entry.  */
  if ((second_word != 0x1) && ((second_word & 0x80000000ul) == 0))
    second_word = offset_prel31 (second_word, offset);

  bfd_put_32 (output_bfd, first_word, to);
  bfd_put_32 (output_bfd, second_word, to + 4);
}
18603
/* Data for make_branch_to_a8_stub().  */

struct a8_branch_to_stub_data
{
  asection *writing_section;	/* Section whose contents are being patched.  */
  bfd_byte *contents;		/* In-memory contents of that section.  */
};
18611
18612
/* Helper to insert branches to Cortex-A8 erratum stubs in the right
   places for a particular section.

   Called from bfd_hash_traverse over the stub hash table: GEN_ENTRY is
   the stub entry, IN_ARG the struct a8_branch_to_stub_data identifying
   the section being written.  Rewrites the 32-bit Thumb branch at the
   erratum site so it targets the veneer instead.  Returns FALSE on
   error, TRUE otherwise (including when the entry is skipped).  */

static bfd_boolean
make_branch_to_a8_stub (struct bfd_hash_entry *gen_entry,
			void *in_arg)
{
  struct elf32_arm_stub_hash_entry *stub_entry;
  struct a8_branch_to_stub_data *data;
  bfd_byte *contents;
  unsigned long branch_insn;
  bfd_vma veneered_insn_loc, veneer_entry_loc;
  bfd_signed_vma branch_offset;
  bfd *abfd;
  unsigned int loc;

  stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
  data = (struct a8_branch_to_stub_data *) in_arg;

  /* Skip stubs for other sections and any stub type below
     arm_stub_a8_veneer_lwm (i.e. non-A8-erratum stubs — relies on the
     A8 veneer types being grouped at the end of the enum).  */
  if (stub_entry->target_section != data->writing_section
      || stub_entry->stub_type < arm_stub_a8_veneer_lwm)
    return TRUE;

  contents = data->contents;

  /* We use target_section as Cortex-A8 erratum workaround stubs are only
     generated when both source and target are in the same section.  */
  veneered_insn_loc = stub_entry->target_section->output_section->vma
		      + stub_entry->target_section->output_offset
		      + stub_entry->source_value;

  veneer_entry_loc = stub_entry->stub_sec->output_section->vma
		     + stub_entry->stub_sec->output_offset
		     + stub_entry->stub_offset;

  /* BLX targets ARM state, so the branch base is word-aligned.  */
  if (stub_entry->stub_type == arm_stub_a8_veneer_blx)
    veneered_insn_loc &= ~3u;

  branch_offset = veneer_entry_loc - veneered_insn_loc - 4;

  abfd = stub_entry->target_section->owner;
  loc = stub_entry->source_value;

  /* We attempt to avoid this condition by setting stubs_always_after_branch
     in elf32_arm_size_stubs if we've enabled the Cortex-A8 erratum workaround.
     This check is just to be on the safe side...  */
  if ((veneered_insn_loc & ~0xfff) == (veneer_entry_loc & ~0xfff))
    {
      _bfd_error_handler (_("%pB: error: Cortex-A8 erratum stub is "
			    "allocated in unsafe location"), abfd);
      return FALSE;
    }

  /* Select the Thumb-2 wide-branch opcode skeleton (B.W, BLX, BL); all
     three share the jump24 offset-encoding below.  */
  switch (stub_entry->stub_type)
    {
    case arm_stub_a8_veneer_b:
    case arm_stub_a8_veneer_b_cond:
      branch_insn = 0xf0009000;
      goto jump24;

    case arm_stub_a8_veneer_blx:
      branch_insn = 0xf000e800;
      goto jump24;

    case arm_stub_a8_veneer_bl:
      {
	unsigned int i1, j1, i2, j2, s;

	branch_insn = 0xf000d000;

      jump24:
	if (branch_offset < -16777216 || branch_offset > 16777214)
	  {
	    /* There's not much we can do apart from complain if this
	       happens.  */
	    _bfd_error_handler (_("%pB: error: Cortex-A8 erratum stub out "
				  "of range (input file too large)"), abfd);
	    return FALSE;
	  }

	/* i1 = not(j1 eor s), so:
	   not i1 = j1 eor s
	   j1 = (not i1) eor s.  */

	/* Pack S:I1:I2:imm10:imm11 into the encoding T4 bit positions.  */
	branch_insn |= (branch_offset >> 1) & 0x7ff;
	branch_insn |= ((branch_offset >> 12) & 0x3ff) << 16;
	i2 = (branch_offset >> 22) & 1;
	i1 = (branch_offset >> 23) & 1;
	s = (branch_offset >> 24) & 1;
	j1 = (!i1) ^ s;
	j2 = (!i2) ^ s;
	branch_insn |= j2 << 11;
	branch_insn |= j1 << 13;
	branch_insn |= s << 26;
      }
      break;

    default:
      BFD_FAIL ();
      return FALSE;
    }

  /* Thumb-2 32-bit insns are stored as two 16-bit halves, high half
     first.  */
  bfd_put_16 (abfd, (branch_insn >> 16) & 0xffff, &contents[loc]);
  bfd_put_16 (abfd, branch_insn & 0xffff, &contents[loc + 2]);

  return TRUE;
}
18720
18721 /* Beginning of stm32l4xx work-around. */
18722
18723 /* Functions encoding instructions necessary for the emission of the
18724 fix-stm32l4xx-629360.
18725 Encoding is extracted from the
18726 ARM (C) Architecture Reference Manual
18727 ARMv7-A and ARMv7-R edition
18728 ARM DDI 0406C.b (ID072512). */
18729
18730 static inline bfd_vma
18731 create_instruction_branch_absolute (int branch_offset)
18732 {
18733 /* A8.8.18 B (A8-334)
18734 B target_address (Encoding T4). */
18735 /* 1111 - 0Sii - iiii - iiii - 10J1 - Jiii - iiii - iiii. */
18736 /* jump offset is: S:I1:I2:imm10:imm11:0. */
18737 /* with : I1 = NOT (J1 EOR S) I2 = NOT (J2 EOR S). */
18738
18739 int s = ((branch_offset & 0x1000000) >> 24);
18740 int j1 = s ^ !((branch_offset & 0x800000) >> 23);
18741 int j2 = s ^ !((branch_offset & 0x400000) >> 22);
18742
18743 if (branch_offset < -(1 << 24) || branch_offset >= (1 << 24))
18744 BFD_ASSERT (0 && "Error: branch out of range. Cannot create branch.");
18745
18746 bfd_vma patched_inst = 0xf0009000
18747 | s << 26 /* S. */
18748 | (((unsigned long) (branch_offset) >> 12) & 0x3ff) << 16 /* imm10. */
18749 | j1 << 13 /* J1. */
18750 | j2 << 11 /* J2. */
18751 | (((unsigned long) (branch_offset) >> 1) & 0x7ff); /* imm11. */
18752
18753 return patched_inst;
18754 }
18755
18756 static inline bfd_vma
18757 create_instruction_ldmia (int base_reg, int wback, int reg_mask)
18758 {
18759 /* A8.8.57 LDM/LDMIA/LDMFD (A8-396)
18760 LDMIA Rn!, {Ra, Rb, Rc, ...} (Encoding T2). */
18761 bfd_vma patched_inst = 0xe8900000
18762 | (/*W=*/wback << 21)
18763 | (base_reg << 16)
18764 | (reg_mask & 0x0000ffff);
18765
18766 return patched_inst;
18767 }
18768
18769 static inline bfd_vma
18770 create_instruction_ldmdb (int base_reg, int wback, int reg_mask)
18771 {
18772 /* A8.8.60 LDMDB/LDMEA (A8-402)
18773 LDMDB Rn!, {Ra, Rb, Rc, ...} (Encoding T1). */
18774 bfd_vma patched_inst = 0xe9100000
18775 | (/*W=*/wback << 21)
18776 | (base_reg << 16)
18777 | (reg_mask & 0x0000ffff);
18778
18779 return patched_inst;
18780 }
18781
18782 static inline bfd_vma
18783 create_instruction_mov (int target_reg, int source_reg)
18784 {
18785 /* A8.8.103 MOV (register) (A8-486)
18786 MOV Rd, Rm (Encoding T1). */
18787 bfd_vma patched_inst = 0x4600
18788 | (target_reg & 0x7)
18789 | ((target_reg & 0x8) >> 3) << 7
18790 | (source_reg << 3);
18791
18792 return patched_inst;
18793 }
18794
18795 static inline bfd_vma
18796 create_instruction_sub (int target_reg, int source_reg, int value)
18797 {
18798 /* A8.8.221 SUB (immediate) (A8-708)
18799 SUB Rd, Rn, #value (Encoding T3). */
18800 bfd_vma patched_inst = 0xf1a00000
18801 | (target_reg << 8)
18802 | (source_reg << 16)
18803 | (/*S=*/0 << 20)
18804 | ((value & 0x800) >> 11) << 26
18805 | ((value & 0x700) >> 8) << 12
18806 | (value & 0x0ff);
18807
18808 return patched_inst;
18809 }
18810
18811 static inline bfd_vma
18812 create_instruction_vldmia (int base_reg, int is_dp, int wback, int num_words,
18813 int first_reg)
18814 {
18815 /* A8.8.332 VLDM (A8-922)
18816 VLMD{MODE} Rn{!}, {list} (Encoding T1 or T2). */
18817 bfd_vma patched_inst = (is_dp ? 0xec900b00 : 0xec900a00)
18818 | (/*W=*/wback << 21)
18819 | (base_reg << 16)
18820 | (num_words & 0x000000ff)
18821 | (((unsigned)first_reg >> 1) & 0x0000000f) << 12
18822 | (first_reg & 0x00000001) << 22;
18823
18824 return patched_inst;
18825 }
18826
18827 static inline bfd_vma
18828 create_instruction_vldmdb (int base_reg, int is_dp, int num_words,
18829 int first_reg)
18830 {
18831 /* A8.8.332 VLDM (A8-922)
18832 VLMD{MODE} Rn!, {} (Encoding T1 or T2). */
18833 bfd_vma patched_inst = (is_dp ? 0xed300b00 : 0xed300a00)
18834 | (base_reg << 16)
18835 | (num_words & 0x000000ff)
18836 | (((unsigned)first_reg >>1 ) & 0x0000000f) << 12
18837 | (first_reg & 0x00000001) << 22;
18838
18839 return patched_inst;
18840 }
18841
18842 static inline bfd_vma
18843 create_instruction_udf_w (int value)
18844 {
18845 /* A8.8.247 UDF (A8-758)
18846 Undefined (Encoding T2). */
18847 bfd_vma patched_inst = 0xf7f0a000
18848 | (value & 0x00000fff)
18849 | (value & 0x000f0000) << 16;
18850
18851 return patched_inst;
18852 }
18853
18854 static inline bfd_vma
18855 create_instruction_udf (int value)
18856 {
18857 /* A8.8.247 UDF (A8-758)
18858 Undefined (Encoding T1). */
18859 bfd_vma patched_inst = 0xde00
18860 | (value & 0xff);
18861
18862 return patched_inst;
18863 }
18864
18865 /* Functions writing an instruction in memory, returning the next
18866 memory position to write to. */
18867
18868 static inline bfd_byte *
18869 push_thumb2_insn32 (struct elf32_arm_link_hash_table * htab,
18870 bfd * output_bfd, bfd_byte *pt, insn32 insn)
18871 {
18872 put_thumb2_insn (htab, output_bfd, insn, pt);
18873 return pt + 4;
18874 }
18875
18876 static inline bfd_byte *
18877 push_thumb2_insn16 (struct elf32_arm_link_hash_table * htab,
18878 bfd * output_bfd, bfd_byte *pt, insn32 insn)
18879 {
18880 put_thumb_insn (htab, output_bfd, insn, pt);
18881 return pt + 2;
18882 }
18883
18884 /* Function filling up a region in memory with T1 and T2 UDFs taking
18885 care of alignment. */
18886
18887 static bfd_byte *
18888 stm32l4xx_fill_stub_udf (struct elf32_arm_link_hash_table * htab,
18889 bfd * output_bfd,
18890 const bfd_byte * const base_stub_contents,
18891 bfd_byte * const from_stub_contents,
18892 const bfd_byte * const end_stub_contents)
18893 {
18894 bfd_byte *current_stub_contents = from_stub_contents;
18895
18896 /* Fill the remaining of the stub with deterministic contents : UDF
18897 instructions.
18898 Check if realignment is needed on modulo 4 frontier using T1, to
18899 further use T2. */
18900 if ((current_stub_contents < end_stub_contents)
18901 && !((current_stub_contents - base_stub_contents) % 2)
18902 && ((current_stub_contents - base_stub_contents) % 4))
18903 current_stub_contents =
18904 push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
18905 create_instruction_udf (0));
18906
18907 for (; current_stub_contents < end_stub_contents;)
18908 current_stub_contents =
18909 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
18910 create_instruction_udf_w (0));
18911
18912 return current_stub_contents;
18913 }
18914
/* Functions writing the stream of instructions equivalent to the
   derived sequence for ldmia, ldmdb, vldm respectively.

   Emit, at BASE_STUB_CONTENTS, a veneer replacing the Thumb-2 LDMIA
   INITIAL_INSN found at INITIAL_INSN_ADDR, as the stm32l4xx erratum
   workaround.  Wide loads are split into two LDMs of at most 7
   registers each; the veneer ends with a branch back to the
   instruction after the original one (except when PC is in the list,
   in which case the final load itself transfers control).  */

static void
stm32l4xx_create_replacing_stub_ldmia (struct elf32_arm_link_hash_table * htab,
				       bfd * output_bfd,
				       const insn32 initial_insn,
				       const bfd_byte *const initial_insn_addr,
				       bfd_byte *const base_stub_contents)
{
  /* Decode the fields of the original LDMIA (encoding T2).  */
  int wback = (initial_insn & 0x00200000) >> 21;
  int ri, rn = (initial_insn & 0x000F0000) >> 16;
  int insn_all_registers = initial_insn & 0x0000ffff;
  int insn_low_registers, insn_high_registers;
  int usable_register_mask;
  int nb_registers = elf32_arm_popcount (insn_all_registers);
  int restore_pc = (insn_all_registers & (1 << 15)) ? 1 : 0;
  int restore_rn = (insn_all_registers & (1 << rn)) ? 1 : 0;
  bfd_byte *current_stub_contents = base_stub_contents;

  BFD_ASSERT (is_thumb2_ldmia (initial_insn));

  /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with
     smaller than 8 registers load sequences that do not cause the
     hardware issue.  */
  if (nb_registers <= 8)
    {
      /* UNTOUCHED : LDMIA Rn{!}, {R-all-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    initial_insn);

      /* B initial_insn_addr+4.  */
      if (!restore_pc)
	current_stub_contents =
	  push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			      create_instruction_branch_absolute
			      (initial_insn_addr - current_stub_contents));

      /* Fill the remaining of the stub with deterministic contents.  */
      current_stub_contents =
	stm32l4xx_fill_stub_udf (htab, output_bfd,
				 base_stub_contents, current_stub_contents,
				 base_stub_contents +
				 STM32L4XX_ERRATUM_LDM_VENEER_SIZE);

      return;
    }

  /* Preconditions guaranteed by the instruction's validity rules and
     the scanning phase that selected it.  */
  /* - reg_list[13] == 0.  */
  BFD_ASSERT ((insn_all_registers & (1 << 13))==0);

  /* - reg_list[14] & reg_list[15] != 1.  */
  BFD_ASSERT ((insn_all_registers & 0xC000) != 0xC000);

  /* - if (wback==1) reg_list[rn] == 0.  */
  BFD_ASSERT (!wback || !restore_rn);

  /* - nb_registers > 8.  */
  BFD_ASSERT (elf32_arm_popcount (insn_all_registers) > 8);

  /* At this point, LDMxx initial insn loads between 9 and 14 registers.  */

  /* In the following algorithm, we split this wide LDM using 2 LDM insns:
    - One with the 7 lowest registers (register mask 0x007F)
      This LDM will finally contain between 2 and 7 registers
    - One with the 7 highest registers (register mask 0xDF80)
      This ldm will finally contain between 2 and 7 registers.  */
  insn_low_registers = insn_all_registers & 0x007F;
  insn_high_registers = insn_all_registers & 0xDF80;

  /* A spare register may be needed during this veneer to temporarily
     handle the base register.  This register will be restored with the
     last LDM operation.
     The usable register may be any general purpose register (that
     excludes PC, SP, LR : register mask is 0x1FFF).  */
  usable_register_mask = 0x1FFF;

  /* Generate the stub function.  */
  if (wback)
    {
      /* LDMIA Rn!, {R-low-register-list} : (Encoding T2).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (rn, /*wback=*/1, insn_low_registers));

      /* LDMIA Rn!, {R-high-register-list} : (Encoding T2).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (rn, /*wback=*/1, insn_high_registers));
      if (!restore_pc)
	{
	  /* B initial_insn_addr+4.  */
	  current_stub_contents =
	    push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
				create_instruction_branch_absolute
				(initial_insn_addr - current_stub_contents));
	}
    }
  else /* if (!wback).  */
    {
      ri = rn;

      /* If Rn is not part of the high-register-list, move it there.  */
      if (!(insn_high_registers & (1 << rn)))
	{
	  /* Choose a Ri in the high-register-list that will be restored.  */
	  ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));

	  /* MOV Ri, Rn.  */
	  current_stub_contents =
	    push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
				create_instruction_mov (ri, rn));
	}

      /* LDMIA Ri!, {R-low-register-list} : (Encoding T2).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/1, insn_low_registers));

      /* LDMIA Ri, {R-high-register-list} : (Encoding T2).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/0, insn_high_registers));

      if (!restore_pc)
	{
	  /* B initial_insn_addr+4.  */
	  current_stub_contents =
	    push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
				create_instruction_branch_absolute
				(initial_insn_addr - current_stub_contents));
	}
    }

  /* Fill the remaining of the stub with deterministic contents.  */
  current_stub_contents =
    stm32l4xx_fill_stub_udf (htab, output_bfd,
			     base_stub_contents, current_stub_contents,
			     base_stub_contents +
			     STM32L4XX_ERRATUM_LDM_VENEER_SIZE);
}
19061
19062 static void
19063 stm32l4xx_create_replacing_stub_ldmdb (struct elf32_arm_link_hash_table * htab,
19064 bfd * output_bfd,
19065 const insn32 initial_insn,
19066 const bfd_byte *const initial_insn_addr,
19067 bfd_byte *const base_stub_contents)
19068 {
19069 int wback = (initial_insn & 0x00200000) >> 21;
19070 int ri, rn = (initial_insn & 0x000f0000) >> 16;
19071 int insn_all_registers = initial_insn & 0x0000ffff;
19072 int insn_low_registers, insn_high_registers;
19073 int usable_register_mask;
19074 int restore_pc = (insn_all_registers & (1 << 15)) ? 1 : 0;
19075 int restore_rn = (insn_all_registers & (1 << rn)) ? 1 : 0;
19076 int nb_registers = elf32_arm_popcount (insn_all_registers);
19077 bfd_byte *current_stub_contents = base_stub_contents;
19078
19079 BFD_ASSERT (is_thumb2_ldmdb (initial_insn));
19080
19081 /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with
19082 smaller than 8 registers load sequences that do not cause the
19083 hardware issue. */
19084 if (nb_registers <= 8)
19085 {
19086 /* UNTOUCHED : LDMIA Rn{!}, {R-all-register-list}. */
19087 current_stub_contents =
19088 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19089 initial_insn);
19090
19091 /* B initial_insn_addr+4. */
19092 current_stub_contents =
19093 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19094 create_instruction_branch_absolute
19095 (initial_insn_addr - current_stub_contents));
19096
19097 /* Fill the remaining of the stub with deterministic contents. */
19098 current_stub_contents =
19099 stm32l4xx_fill_stub_udf (htab, output_bfd,
19100 base_stub_contents, current_stub_contents,
19101 base_stub_contents +
19102 STM32L4XX_ERRATUM_LDM_VENEER_SIZE);
19103
19104 return;
19105 }
19106
19107 /* - reg_list[13] == 0. */
19108 BFD_ASSERT ((insn_all_registers & (1 << 13)) == 0);
19109
19110 /* - reg_list[14] & reg_list[15] != 1. */
19111 BFD_ASSERT ((insn_all_registers & 0xC000) != 0xC000);
19112
19113 /* - if (wback==1) reg_list[rn] == 0. */
19114 BFD_ASSERT (!wback || !restore_rn);
19115
19116 /* - nb_registers > 8. */
19117 BFD_ASSERT (elf32_arm_popcount (insn_all_registers) > 8);
19118
19119 /* At this point, LDMxx initial insn loads between 9 and 14 registers. */
19120
19121 /* In the following algorithm, we split this wide LDM using 2 LDM insn:
19122 - One with the 7 lowest registers (register mask 0x007F)
19123 This LDM will finally contain between 2 and 7 registers
19124 - One with the 7 highest registers (register mask 0xDF80)
19125 This ldm will finally contain between 2 and 7 registers. */
19126 insn_low_registers = insn_all_registers & 0x007F;
19127 insn_high_registers = insn_all_registers & 0xDF80;
19128
19129 /* A spare register may be needed during this veneer to temporarily
19130 handle the base register. This register will be restored with
19131 the last LDM operation.
19132 The usable register may be any general purpose register (that excludes
19133 PC, SP, LR : register mask is 0x1FFF). */
19134 usable_register_mask = 0x1FFF;
19135
19136 /* Generate the stub function. */
19137 if (!wback && !restore_pc && !restore_rn)
19138 {
19139 /* Choose a Ri in the low-register-list that will be restored. */
19140 ri = ctz (insn_low_registers & usable_register_mask & ~(1 << rn));
19141
19142 /* MOV Ri, Rn. */
19143 current_stub_contents =
19144 push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
19145 create_instruction_mov (ri, rn));
19146
19147 /* LDMDB Ri!, {R-high-register-list}. */
19148 current_stub_contents =
19149 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19150 create_instruction_ldmdb
19151 (ri, /*wback=*/1, insn_high_registers));
19152
19153 /* LDMDB Ri, {R-low-register-list}. */
19154 current_stub_contents =
19155 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19156 create_instruction_ldmdb
19157 (ri, /*wback=*/0, insn_low_registers));
19158
19159 /* B initial_insn_addr+4. */
19160 current_stub_contents =
19161 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19162 create_instruction_branch_absolute
19163 (initial_insn_addr - current_stub_contents));
19164 }
19165 else if (wback && !restore_pc && !restore_rn)
19166 {
19167 /* LDMDB Rn!, {R-high-register-list}. */
19168 current_stub_contents =
19169 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19170 create_instruction_ldmdb
19171 (rn, /*wback=*/1, insn_high_registers));
19172
19173 /* LDMDB Rn!, {R-low-register-list}. */
19174 current_stub_contents =
19175 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19176 create_instruction_ldmdb
19177 (rn, /*wback=*/1, insn_low_registers));
19178
19179 /* B initial_insn_addr+4. */
19180 current_stub_contents =
19181 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19182 create_instruction_branch_absolute
19183 (initial_insn_addr - current_stub_contents));
19184 }
19185 else if (!wback && restore_pc && !restore_rn)
19186 {
19187 /* Choose a Ri in the high-register-list that will be restored. */
19188 ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));
19189
19190 /* SUB Ri, Rn, #(4*nb_registers). */
19191 current_stub_contents =
19192 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19193 create_instruction_sub (ri, rn, (4 * nb_registers)));
19194
19195 /* LDMIA Ri!, {R-low-register-list}. */
19196 current_stub_contents =
19197 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19198 create_instruction_ldmia
19199 (ri, /*wback=*/1, insn_low_registers));
19200
19201 /* LDMIA Ri, {R-high-register-list}. */
19202 current_stub_contents =
19203 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19204 create_instruction_ldmia
19205 (ri, /*wback=*/0, insn_high_registers));
19206 }
19207 else if (wback && restore_pc && !restore_rn)
19208 {
19209 /* Choose a Ri in the high-register-list that will be restored. */
19210 ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));
19211
19212 /* SUB Rn, Rn, #(4*nb_registers) */
19213 current_stub_contents =
19214 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19215 create_instruction_sub (rn, rn, (4 * nb_registers)));
19216
19217 /* MOV Ri, Rn. */
19218 current_stub_contents =
19219 push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
19220 create_instruction_mov (ri, rn));
19221
19222 /* LDMIA Ri!, {R-low-register-list}. */
19223 current_stub_contents =
19224 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19225 create_instruction_ldmia
19226 (ri, /*wback=*/1, insn_low_registers));
19227
19228 /* LDMIA Ri, {R-high-register-list}. */
19229 current_stub_contents =
19230 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19231 create_instruction_ldmia
19232 (ri, /*wback=*/0, insn_high_registers));
19233 }
19234 else if (!wback && !restore_pc && restore_rn)
19235 {
19236 ri = rn;
19237 if (!(insn_low_registers & (1 << rn)))
19238 {
19239 /* Choose a Ri in the low-register-list that will be restored. */
19240 ri = ctz (insn_low_registers & usable_register_mask & ~(1 << rn));
19241
19242 /* MOV Ri, Rn. */
19243 current_stub_contents =
19244 push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
19245 create_instruction_mov (ri, rn));
19246 }
19247
19248 /* LDMDB Ri!, {R-high-register-list}. */
19249 current_stub_contents =
19250 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19251 create_instruction_ldmdb
19252 (ri, /*wback=*/1, insn_high_registers));
19253
19254 /* LDMDB Ri, {R-low-register-list}. */
19255 current_stub_contents =
19256 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19257 create_instruction_ldmdb
19258 (ri, /*wback=*/0, insn_low_registers));
19259
19260 /* B initial_insn_addr+4. */
19261 current_stub_contents =
19262 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19263 create_instruction_branch_absolute
19264 (initial_insn_addr - current_stub_contents));
19265 }
19266 else if (!wback && restore_pc && restore_rn)
19267 {
19268 ri = rn;
19269 if (!(insn_high_registers & (1 << rn)))
19270 {
19271 /* Choose a Ri in the high-register-list that will be restored. */
19272 ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));
19273 }
19274
19275 /* SUB Ri, Rn, #(4*nb_registers). */
19276 current_stub_contents =
19277 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19278 create_instruction_sub (ri, rn, (4 * nb_registers)));
19279
19280 /* LDMIA Ri!, {R-low-register-list}. */
19281 current_stub_contents =
19282 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19283 create_instruction_ldmia
19284 (ri, /*wback=*/1, insn_low_registers));
19285
19286 /* LDMIA Ri, {R-high-register-list}. */
19287 current_stub_contents =
19288 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19289 create_instruction_ldmia
19290 (ri, /*wback=*/0, insn_high_registers));
19291 }
19292 else if (wback && restore_rn)
19293 {
19294 /* The assembler should not have accepted to encode this. */
19295 BFD_ASSERT (0 && "Cannot patch an instruction that has an "
19296 "undefined behavior.\n");
19297 }
19298
19299 /* Fill the remaining of the stub with deterministic contents. */
19300 current_stub_contents =
19301 stm32l4xx_fill_stub_udf (htab, output_bfd,
19302 base_stub_contents, current_stub_contents,
19303 base_stub_contents +
19304 STM32L4XX_ERRATUM_LDM_VENEER_SIZE);
19305
19306 }
19307
/* Emit at BASE_STUB_CONTENTS the veneer replacing the faulty Thumb-2
   VLDM instruction whose encoding is INITIAL_INSN and whose address is
   INITIAL_INSN_ADDR, as a work-around for the STM32L4XX erratum.
   Loads of more than eight words are split into several VLDM
   instructions of at most eight words each; shorter loads are copied
   unchanged.  The veneer ends with a branch back to the instruction
   following the original one.  */

static void
stm32l4xx_create_replacing_stub_vldm (struct elf32_arm_link_hash_table * htab,
				      bfd * output_bfd,
				      const insn32 initial_insn,
				      const bfd_byte *const initial_insn_addr,
				      bfd_byte *const base_stub_contents)
{
  /* imm8 field (bits [7:0]): number of 32-bit words transferred.  */
  int num_words = ((unsigned int) initial_insn << 24) >> 24;
  bfd_byte *current_stub_contents = base_stub_contents;

  BFD_ASSERT (is_thumb2_vldm (initial_insn));

  /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with
     smaller than 8 words load sequences that do not cause the
     hardware issue.  */
  if (num_words <= 8)
    {
      /* Untouched instruction.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    initial_insn);

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }
  else
    {
      bfd_boolean is_dp = /* DP encoding. */
	(initial_insn & 0xfe100f00) == 0xec100b00;
      /* Addressing-mode discriminators, decoded from the P/U/W bits.  */
      bfd_boolean is_ia_nobang = /* (IA without !).  */
	(((initial_insn << 7) >> 28) & 0xd) == 0x4;
      bfd_boolean is_ia_bang = /* (IA with !) - includes VPOP.  */
	(((initial_insn << 7) >> 28) & 0xd) == 0x5;
      bfd_boolean is_db_bang = /* (DB with !).  */
	(((initial_insn << 7) >> 28) & 0xd) == 0x9;
      /* Rn field, bits [19:16]: the base register.  */
      int base_reg = ((unsigned int) initial_insn << 12) >> 28;
      /* d = UInt (Vd:D);.  */
      int first_reg = ((((unsigned int) initial_insn << 16) >> 28) << 1)
	| (((unsigned int)initial_insn << 9) >> 31);

      /* Compute the number of 8-words chunks needed to split.  */
      int chunks = (num_words % 8) ? (num_words / 8 + 1) : (num_words / 8);
      int chunk;

      /* The test coverage has been done assuming the following
	 hypothesis that exactly one of the previous is_ predicates is
	 true.  */
      BFD_ASSERT ( (is_ia_nobang ^ is_ia_bang ^ is_db_bang)
		  && !(is_ia_nobang & is_ia_bang & is_db_bang));

      /* We treat the cutting of the words in one pass for all
	 cases, then we emit the adjustments:

	 vldm rx, {...}
	 -> vldm rx!, {8_words_or_less} for each needed 8_word
	 -> sub rx, rx, #size (list)

	 vldm rx!, {...}
	 -> vldm rx!, {8_words_or_less} for each needed 8_word
	 This also handles vpop instruction (when rx is sp)

	 vldmd rx!, {...}
	 -> vldmb rx!, {8_words_or_less} for each needed 8_word.  */
      for (chunk = 0; chunk < chunks; ++chunk)
	{
	  bfd_vma new_insn = 0;

	  if (is_ia_nobang || is_ia_bang)
	    {
	      /* All chunks but the last transfer exactly 8 words.  */
	      new_insn = create_instruction_vldmia
		(base_reg,
		 is_dp,
		 /*wback= .  */1,
		 chunks - (chunk + 1) ?
		 8 : num_words - chunk * 8,
		 first_reg + chunk * 8);
	    }
	  else if (is_db_bang)
	    {
	      new_insn = create_instruction_vldmdb
		(base_reg,
		 is_dp,
		 chunks - (chunk + 1) ?
		 8 : num_words - chunk * 8,
		 first_reg + chunk * 8);
	    }

	  if (new_insn)
	    current_stub_contents =
	      push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
				  new_insn);
	}

      /* Only this case requires the base register compensation
	 subtract.  */
      if (is_ia_nobang)
	{
	  current_stub_contents =
	    push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
				create_instruction_sub
				(base_reg, base_reg, 4*num_words));
	}

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }

  /* Fill the remaining of the stub with deterministic contents.  */
  current_stub_contents =
    stm32l4xx_fill_stub_udf (htab, output_bfd,
			     base_stub_contents, current_stub_contents,
			     base_stub_contents +
			     STM32L4XX_ERRATUM_VLDM_VENEER_SIZE);
}
19428
19429 static void
19430 stm32l4xx_create_replacing_stub (struct elf32_arm_link_hash_table * htab,
19431 bfd * output_bfd,
19432 const insn32 wrong_insn,
19433 const bfd_byte *const wrong_insn_addr,
19434 bfd_byte *const stub_contents)
19435 {
19436 if (is_thumb2_ldmia (wrong_insn))
19437 stm32l4xx_create_replacing_stub_ldmia (htab, output_bfd,
19438 wrong_insn, wrong_insn_addr,
19439 stub_contents);
19440 else if (is_thumb2_ldmdb (wrong_insn))
19441 stm32l4xx_create_replacing_stub_ldmdb (htab, output_bfd,
19442 wrong_insn, wrong_insn_addr,
19443 stub_contents);
19444 else if (is_thumb2_vldm (wrong_insn))
19445 stm32l4xx_create_replacing_stub_vldm (htab, output_bfd,
19446 wrong_insn, wrong_insn_addr,
19447 stub_contents);
19448 }
19449
19450 /* End of stm32l4xx work-around. */
19451
19452
19453 /* Do code byteswapping. Return FALSE afterwards so that the section is
19454 written out as normal. */
19455
19456 static bfd_boolean
19457 elf32_arm_write_section (bfd *output_bfd,
19458 struct bfd_link_info *link_info,
19459 asection *sec,
19460 bfd_byte *contents)
19461 {
19462 unsigned int mapcount, errcount;
19463 _arm_elf_section_data *arm_data;
19464 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
19465 elf32_arm_section_map *map;
19466 elf32_vfp11_erratum_list *errnode;
19467 elf32_stm32l4xx_erratum_list *stm32l4xx_errnode;
19468 bfd_vma ptr;
19469 bfd_vma end;
19470 bfd_vma offset = sec->output_section->vma + sec->output_offset;
19471 bfd_byte tmp;
19472 unsigned int i;
19473
19474 if (globals == NULL)
19475 return FALSE;
19476
19477 /* If this section has not been allocated an _arm_elf_section_data
19478 structure then we cannot record anything. */
19479 arm_data = get_arm_elf_section_data (sec);
19480 if (arm_data == NULL)
19481 return FALSE;
19482
19483 mapcount = arm_data->mapcount;
19484 map = arm_data->map;
19485 errcount = arm_data->erratumcount;
19486
19487 if (errcount != 0)
19488 {
19489 unsigned int endianflip = bfd_big_endian (output_bfd) ? 3 : 0;
19490
19491 for (errnode = arm_data->erratumlist; errnode != 0;
19492 errnode = errnode->next)
19493 {
19494 bfd_vma target = errnode->vma - offset;
19495
19496 switch (errnode->type)
19497 {
19498 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
19499 {
19500 bfd_vma branch_to_veneer;
19501 /* Original condition code of instruction, plus bit mask for
19502 ARM B instruction. */
19503 unsigned int insn = (errnode->u.b.vfp_insn & 0xf0000000)
19504 | 0x0a000000;
19505
19506 /* The instruction is before the label. */
19507 target -= 4;
19508
19509 /* Above offset included in -4 below. */
19510 branch_to_veneer = errnode->u.b.veneer->vma
19511 - errnode->vma - 4;
19512
19513 if ((signed) branch_to_veneer < -(1 << 25)
19514 || (signed) branch_to_veneer >= (1 << 25))
19515 _bfd_error_handler (_("%pB: error: VFP11 veneer out of "
19516 "range"), output_bfd);
19517
19518 insn |= (branch_to_veneer >> 2) & 0xffffff;
19519 contents[endianflip ^ target] = insn & 0xff;
19520 contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
19521 contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
19522 contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;
19523 }
19524 break;
19525
19526 case VFP11_ERRATUM_ARM_VENEER:
19527 {
19528 bfd_vma branch_from_veneer;
19529 unsigned int insn;
19530
19531 /* Take size of veneer into account. */
19532 branch_from_veneer = errnode->u.v.branch->vma
19533 - errnode->vma - 12;
19534
19535 if ((signed) branch_from_veneer < -(1 << 25)
19536 || (signed) branch_from_veneer >= (1 << 25))
19537 _bfd_error_handler (_("%pB: error: VFP11 veneer out of "
19538 "range"), output_bfd);
19539
19540 /* Original instruction. */
19541 insn = errnode->u.v.branch->u.b.vfp_insn;
19542 contents[endianflip ^ target] = insn & 0xff;
19543 contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
19544 contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
19545 contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;
19546
19547 /* Branch back to insn after original insn. */
19548 insn = 0xea000000 | ((branch_from_veneer >> 2) & 0xffffff);
19549 contents[endianflip ^ (target + 4)] = insn & 0xff;
19550 contents[endianflip ^ (target + 5)] = (insn >> 8) & 0xff;
19551 contents[endianflip ^ (target + 6)] = (insn >> 16) & 0xff;
19552 contents[endianflip ^ (target + 7)] = (insn >> 24) & 0xff;
19553 }
19554 break;
19555
19556 default:
19557 abort ();
19558 }
19559 }
19560 }
19561
19562 if (arm_data->stm32l4xx_erratumcount != 0)
19563 {
19564 for (stm32l4xx_errnode = arm_data->stm32l4xx_erratumlist;
19565 stm32l4xx_errnode != 0;
19566 stm32l4xx_errnode = stm32l4xx_errnode->next)
19567 {
19568 bfd_vma target = stm32l4xx_errnode->vma - offset;
19569
19570 switch (stm32l4xx_errnode->type)
19571 {
19572 case STM32L4XX_ERRATUM_BRANCH_TO_VENEER:
19573 {
19574 unsigned int insn;
19575 bfd_vma branch_to_veneer =
19576 stm32l4xx_errnode->u.b.veneer->vma - stm32l4xx_errnode->vma;
19577
19578 if ((signed) branch_to_veneer < -(1 << 24)
19579 || (signed) branch_to_veneer >= (1 << 24))
19580 {
19581 bfd_vma out_of_range =
19582 ((signed) branch_to_veneer < -(1 << 24)) ?
19583 - branch_to_veneer - (1 << 24) :
19584 ((signed) branch_to_veneer >= (1 << 24)) ?
19585 branch_to_veneer - (1 << 24) : 0;
19586
19587 _bfd_error_handler
19588 (_("%pB(%#" PRIx64 "): error: "
19589 "cannot create STM32L4XX veneer; "
19590 "jump out of range by %" PRId64 " bytes; "
19591 "cannot encode branch instruction"),
19592 output_bfd,
19593 (uint64_t) (stm32l4xx_errnode->vma - 4),
19594 (int64_t) out_of_range);
19595 continue;
19596 }
19597
19598 insn = create_instruction_branch_absolute
19599 (stm32l4xx_errnode->u.b.veneer->vma - stm32l4xx_errnode->vma);
19600
19601 /* The instruction is before the label. */
19602 target -= 4;
19603
19604 put_thumb2_insn (globals, output_bfd,
19605 (bfd_vma) insn, contents + target);
19606 }
19607 break;
19608
19609 case STM32L4XX_ERRATUM_VENEER:
19610 {
19611 bfd_byte * veneer;
19612 bfd_byte * veneer_r;
19613 unsigned int insn;
19614
19615 veneer = contents + target;
19616 veneer_r = veneer
19617 + stm32l4xx_errnode->u.b.veneer->vma
19618 - stm32l4xx_errnode->vma - 4;
19619
19620 if ((signed) (veneer_r - veneer -
19621 STM32L4XX_ERRATUM_VLDM_VENEER_SIZE >
19622 STM32L4XX_ERRATUM_LDM_VENEER_SIZE ?
19623 STM32L4XX_ERRATUM_VLDM_VENEER_SIZE :
19624 STM32L4XX_ERRATUM_LDM_VENEER_SIZE) < -(1 << 24)
19625 || (signed) (veneer_r - veneer) >= (1 << 24))
19626 {
19627 _bfd_error_handler (_("%pB: error: cannot create STM32L4XX "
19628 "veneer"), output_bfd);
19629 continue;
19630 }
19631
19632 /* Original instruction. */
19633 insn = stm32l4xx_errnode->u.v.branch->u.b.insn;
19634
19635 stm32l4xx_create_replacing_stub
19636 (globals, output_bfd, insn, (void*)veneer_r, (void*)veneer);
19637 }
19638 break;
19639
19640 default:
19641 abort ();
19642 }
19643 }
19644 }
19645
19646 if (arm_data->elf.this_hdr.sh_type == SHT_ARM_EXIDX)
19647 {
19648 arm_unwind_table_edit *edit_node
19649 = arm_data->u.exidx.unwind_edit_list;
19650 /* Now, sec->size is the size of the section we will write. The original
19651 size (before we merged duplicate entries and inserted EXIDX_CANTUNWIND
19652 markers) was sec->rawsize. (This isn't the case if we perform no
19653 edits, then rawsize will be zero and we should use size). */
19654 bfd_byte *edited_contents = (bfd_byte *) bfd_malloc (sec->size);
19655 unsigned int input_size = sec->rawsize ? sec->rawsize : sec->size;
19656 unsigned int in_index, out_index;
19657 bfd_vma add_to_offsets = 0;
19658
19659 for (in_index = 0, out_index = 0; in_index * 8 < input_size || edit_node;)
19660 {
19661 if (edit_node)
19662 {
19663 unsigned int edit_index = edit_node->index;
19664
19665 if (in_index < edit_index && in_index * 8 < input_size)
19666 {
19667 copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
19668 contents + in_index * 8, add_to_offsets);
19669 out_index++;
19670 in_index++;
19671 }
19672 else if (in_index == edit_index
19673 || (in_index * 8 >= input_size
19674 && edit_index == UINT_MAX))
19675 {
19676 switch (edit_node->type)
19677 {
19678 case DELETE_EXIDX_ENTRY:
19679 in_index++;
19680 add_to_offsets += 8;
19681 break;
19682
19683 case INSERT_EXIDX_CANTUNWIND_AT_END:
19684 {
19685 asection *text_sec = edit_node->linked_section;
19686 bfd_vma text_offset = text_sec->output_section->vma
19687 + text_sec->output_offset
19688 + text_sec->size;
19689 bfd_vma exidx_offset = offset + out_index * 8;
19690 unsigned long prel31_offset;
19691
19692 /* Note: this is meant to be equivalent to an
19693 R_ARM_PREL31 relocation. These synthetic
19694 EXIDX_CANTUNWIND markers are not relocated by the
19695 usual BFD method. */
19696 prel31_offset = (text_offset - exidx_offset)
19697 & 0x7ffffffful;
19698 if (bfd_link_relocatable (link_info))
19699 {
19700 /* Here relocation for new EXIDX_CANTUNWIND is
19701 created, so there is no need to
19702 adjust offset by hand. */
19703 prel31_offset = text_sec->output_offset
19704 + text_sec->size;
19705 }
19706
19707 /* First address we can't unwind. */
19708 bfd_put_32 (output_bfd, prel31_offset,
19709 &edited_contents[out_index * 8]);
19710
19711 /* Code for EXIDX_CANTUNWIND. */
19712 bfd_put_32 (output_bfd, 0x1,
19713 &edited_contents[out_index * 8 + 4]);
19714
19715 out_index++;
19716 add_to_offsets -= 8;
19717 }
19718 break;
19719 }
19720
19721 edit_node = edit_node->next;
19722 }
19723 }
19724 else
19725 {
19726 /* No more edits, copy remaining entries verbatim. */
19727 copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
19728 contents + in_index * 8, add_to_offsets);
19729 out_index++;
19730 in_index++;
19731 }
19732 }
19733
19734 if (!(sec->flags & SEC_EXCLUDE) && !(sec->flags & SEC_NEVER_LOAD))
19735 bfd_set_section_contents (output_bfd, sec->output_section,
19736 edited_contents,
19737 (file_ptr) sec->output_offset, sec->size);
19738
19739 return TRUE;
19740 }
19741
19742 /* Fix code to point to Cortex-A8 erratum stubs. */
19743 if (globals->fix_cortex_a8)
19744 {
19745 struct a8_branch_to_stub_data data;
19746
19747 data.writing_section = sec;
19748 data.contents = contents;
19749
19750 bfd_hash_traverse (& globals->stub_hash_table, make_branch_to_a8_stub,
19751 & data);
19752 }
19753
19754 if (mapcount == 0)
19755 return FALSE;
19756
19757 if (globals->byteswap_code)
19758 {
19759 qsort (map, mapcount, sizeof (* map), elf32_arm_compare_mapping);
19760
19761 ptr = map[0].vma;
19762 for (i = 0; i < mapcount; i++)
19763 {
19764 if (i == mapcount - 1)
19765 end = sec->size;
19766 else
19767 end = map[i + 1].vma;
19768
19769 switch (map[i].type)
19770 {
19771 case 'a':
19772 /* Byte swap code words. */
19773 while (ptr + 3 < end)
19774 {
19775 tmp = contents[ptr];
19776 contents[ptr] = contents[ptr + 3];
19777 contents[ptr + 3] = tmp;
19778 tmp = contents[ptr + 1];
19779 contents[ptr + 1] = contents[ptr + 2];
19780 contents[ptr + 2] = tmp;
19781 ptr += 4;
19782 }
19783 break;
19784
19785 case 't':
19786 /* Byte swap code halfwords. */
19787 while (ptr + 1 < end)
19788 {
19789 tmp = contents[ptr];
19790 contents[ptr] = contents[ptr + 1];
19791 contents[ptr + 1] = tmp;
19792 ptr += 2;
19793 }
19794 break;
19795
19796 case 'd':
19797 /* Leave data alone. */
19798 break;
19799 }
19800 ptr = end;
19801 }
19802 }
19803
19804 free (map);
19805 arm_data->mapcount = -1;
19806 arm_data->mapsize = 0;
19807 arm_data->map = NULL;
19808
19809 return FALSE;
19810 }
19811
19812 /* Mangle thumb function symbols as we read them in. */
19813
19814 static bfd_boolean
19815 elf32_arm_swap_symbol_in (bfd * abfd,
19816 const void *psrc,
19817 const void *pshn,
19818 Elf_Internal_Sym *dst)
19819 {
19820 Elf_Internal_Shdr *symtab_hdr;
19821 const char *name = NULL;
19822
19823 if (!bfd_elf32_swap_symbol_in (abfd, psrc, pshn, dst))
19824 return FALSE;
19825 dst->st_target_internal = 0;
19826
19827 /* New EABI objects mark thumb function symbols by setting the low bit of
19828 the address. */
19829 if (ELF_ST_TYPE (dst->st_info) == STT_FUNC
19830 || ELF_ST_TYPE (dst->st_info) == STT_GNU_IFUNC)
19831 {
19832 if (dst->st_value & 1)
19833 {
19834 dst->st_value &= ~(bfd_vma) 1;
19835 ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal,
19836 ST_BRANCH_TO_THUMB);
19837 }
19838 else
19839 ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_TO_ARM);
19840 }
19841 else if (ELF_ST_TYPE (dst->st_info) == STT_ARM_TFUNC)
19842 {
19843 dst->st_info = ELF_ST_INFO (ELF_ST_BIND (dst->st_info), STT_FUNC);
19844 ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_TO_THUMB);
19845 }
19846 else if (ELF_ST_TYPE (dst->st_info) == STT_SECTION)
19847 ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_LONG);
19848 else
19849 ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_UNKNOWN);
19850
19851 /* Mark CMSE special symbols. */
19852 symtab_hdr = & elf_symtab_hdr (abfd);
19853 if (symtab_hdr->sh_size)
19854 name = bfd_elf_sym_name (abfd, symtab_hdr, dst, NULL);
19855 if (name && CONST_STRNEQ (name, CMSE_PREFIX))
19856 ARM_SET_SYM_CMSE_SPCL (dst->st_target_internal);
19857
19858 return TRUE;
19859 }
19860
19861
19862 /* Mangle thumb function symbols as we write them out. */
19863
19864 static void
19865 elf32_arm_swap_symbol_out (bfd *abfd,
19866 const Elf_Internal_Sym *src,
19867 void *cdst,
19868 void *shndx)
19869 {
19870 Elf_Internal_Sym newsym;
19871
19872 /* We convert STT_ARM_TFUNC symbols into STT_FUNC with the low bit
19873 of the address set, as per the new EABI. We do this unconditionally
19874 because objcopy does not set the elf header flags until after
19875 it writes out the symbol table. */
19876 if (ARM_GET_SYM_BRANCH_TYPE (src->st_target_internal) == ST_BRANCH_TO_THUMB)
19877 {
19878 newsym = *src;
19879 if (ELF_ST_TYPE (src->st_info) != STT_GNU_IFUNC)
19880 newsym.st_info = ELF_ST_INFO (ELF_ST_BIND (src->st_info), STT_FUNC);
19881 if (newsym.st_shndx != SHN_UNDEF)
19882 {
19883 /* Do this only for defined symbols. At link type, the static
19884 linker will simulate the work of dynamic linker of resolving
19885 symbols and will carry over the thumbness of found symbols to
19886 the output symbol table. It's not clear how it happens, but
19887 the thumbness of undefined symbols can well be different at
19888 runtime, and writing '1' for them will be confusing for users
19889 and possibly for dynamic linker itself.
19890 */
19891 newsym.st_value |= 1;
19892 }
19893
19894 src = &newsym;
19895 }
19896 bfd_elf32_swap_symbol_out (abfd, src, cdst, shndx);
19897 }
19898
19899 /* Add the PT_ARM_EXIDX program header. */
19900
19901 static bfd_boolean
19902 elf32_arm_modify_segment_map (bfd *abfd,
19903 struct bfd_link_info *info ATTRIBUTE_UNUSED)
19904 {
19905 struct elf_segment_map *m;
19906 asection *sec;
19907
19908 sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
19909 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
19910 {
19911 /* If there is already a PT_ARM_EXIDX header, then we do not
19912 want to add another one. This situation arises when running
19913 "strip"; the input binary already has the header. */
19914 m = elf_seg_map (abfd);
19915 while (m && m->p_type != PT_ARM_EXIDX)
19916 m = m->next;
19917 if (!m)
19918 {
19919 m = (struct elf_segment_map *)
19920 bfd_zalloc (abfd, sizeof (struct elf_segment_map));
19921 if (m == NULL)
19922 return FALSE;
19923 m->p_type = PT_ARM_EXIDX;
19924 m->count = 1;
19925 m->sections[0] = sec;
19926
19927 m->next = elf_seg_map (abfd);
19928 elf_seg_map (abfd) = m;
19929 }
19930 }
19931
19932 return TRUE;
19933 }
19934
19935 /* We may add a PT_ARM_EXIDX program header. */
19936
19937 static int
19938 elf32_arm_additional_program_headers (bfd *abfd,
19939 struct bfd_link_info *info ATTRIBUTE_UNUSED)
19940 {
19941 asection *sec;
19942
19943 sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
19944 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
19945 return 1;
19946 else
19947 return 0;
19948 }
19949
19950 /* Hook called by the linker routine which adds symbols from an object
19951 file. */
19952
19953 static bfd_boolean
19954 elf32_arm_add_symbol_hook (bfd *abfd, struct bfd_link_info *info,
19955 Elf_Internal_Sym *sym, const char **namep,
19956 flagword *flagsp, asection **secp, bfd_vma *valp)
19957 {
19958 if (elf32_arm_hash_table (info) == NULL)
19959 return FALSE;
19960
19961 if (elf32_arm_hash_table (info)->vxworks_p
19962 && !elf_vxworks_add_symbol_hook (abfd, info, sym, namep,
19963 flagsp, secp, valp))
19964 return FALSE;
19965
19966 return TRUE;
19967 }
19968
/* We use this to override swap_symbol_in and swap_symbol_out.  */
const struct elf_size_info elf32_arm_size_info =
{
  sizeof (Elf32_External_Ehdr),
  sizeof (Elf32_External_Phdr),
  sizeof (Elf32_External_Shdr),
  sizeof (Elf32_External_Rel),
  sizeof (Elf32_External_Rela),
  sizeof (Elf32_External_Sym),
  sizeof (Elf32_External_Dyn),
  sizeof (Elf_External_Note),
  4,		/* Hash table entry size.  */
  1,		/* Internal relocs per external relocs.  */
  32,		/* Arch size.  */
  2,		/* Log of file alignment.  */
  ELFCLASS32, EV_CURRENT,
  bfd_elf32_write_out_phdrs,
  bfd_elf32_write_shdrs_and_ehdr,
  bfd_elf32_checksum_contents,
  bfd_elf32_write_relocs,
  elf32_arm_swap_symbol_in,
  elf32_arm_swap_symbol_out,
  bfd_elf32_slurp_reloc_table,
  bfd_elf32_slurp_symbol_table,
  bfd_elf32_swap_dyn_in,
  bfd_elf32_swap_dyn_out,
  bfd_elf32_swap_reloc_in,
  bfd_elf32_swap_reloc_out,
  bfd_elf32_swap_reloca_in,
  bfd_elf32_swap_reloca_out
};
19999
20000 static bfd_vma
20001 read_code32 (const bfd *abfd, const bfd_byte *addr)
20002 {
20003 /* V7 BE8 code is always little endian. */
20004 if ((elf_elfheader (abfd)->e_flags & EF_ARM_BE8) != 0)
20005 return bfd_getl32 (addr);
20006
20007 return bfd_get_32 (abfd, addr);
20008 }
20009
20010 static bfd_vma
20011 read_code16 (const bfd *abfd, const bfd_byte *addr)
20012 {
20013 /* V7 BE8 code is always little endian. */
20014 if ((elf_elfheader (abfd)->e_flags & EF_ARM_BE8) != 0)
20015 return bfd_getl16 (addr);
20016
20017 return bfd_get_16 (abfd, addr);
20018 }
20019
20020 /* Return size of plt0 entry starting at ADDR
20021 or (bfd_vma) -1 if size can not be determined. */
20022
20023 static bfd_vma
20024 elf32_arm_plt0_size (const bfd *abfd, const bfd_byte *addr)
20025 {
20026 bfd_vma first_word;
20027 bfd_vma plt0_size;
20028
20029 first_word = read_code32 (abfd, addr);
20030
20031 if (first_word == elf32_arm_plt0_entry[0])
20032 plt0_size = 4 * ARRAY_SIZE (elf32_arm_plt0_entry);
20033 else if (first_word == elf32_thumb2_plt0_entry[0])
20034 plt0_size = 4 * ARRAY_SIZE (elf32_thumb2_plt0_entry);
20035 else
20036 /* We don't yet handle this PLT format. */
20037 return (bfd_vma) -1;
20038
20039 return plt0_size;
20040 }
20041
/* Return size of plt entry starting at offset OFFSET
   of plt section located at address START
   or (bfd_vma) -1 if size can not be determined.  */

static bfd_vma
elf32_arm_plt_size (const bfd *abfd, const bfd_byte *start, bfd_vma offset)
{
  bfd_vma first_insn;
  bfd_vma plt_size = 0;
  const bfd_byte *addr = start + offset;

  /* PLT entry size is fixed on Thumb-only platforms.  */
  if (read_code32 (abfd, start) == elf32_thumb2_plt0_entry[0])
    return 4 * ARRAY_SIZE (elf32_thumb2_plt_entry);

  /* Respect Thumb stub if necessary.  */
  if (read_code16 (abfd, addr) == elf32_arm_plt_thumb_stub[0])
    {
      plt_size += 2 * ARRAY_SIZE(elf32_arm_plt_thumb_stub);
    }

  /* Strip immediate from first add.  */
  first_insn = read_code32 (abfd, addr + plt_size) & 0xffffff00;

#ifdef FOUR_WORD_PLT
  if (first_insn == elf32_arm_plt_entry[0])
    plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry);
#else
  if (first_insn == elf32_arm_plt_entry_long[0])
    plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry_long);
  else if (first_insn == elf32_arm_plt_entry_short[0])
    plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry_short);
#endif
  else
    /* We don't yet handle this PLT format.  */
    return (bfd_vma) -1;

  return plt_size;
}
20081
/* Implementation is shamelessly borrowed from _bfd_elf_get_synthetic_symtab.
   Create synthetic "<sym>@plt" symbols for the PLT entries described by
   the ".rel.plt" relocations, storing them in *RET.  Returns the number
   of symbols created, 0 when there is nothing to do, or -1 on error.  */

static long
elf32_arm_get_synthetic_symtab (bfd *abfd,
			       long symcount ATTRIBUTE_UNUSED,
			       asymbol **syms ATTRIBUTE_UNUSED,
			       long dynsymcount,
			       asymbol **dynsyms,
			       asymbol **ret)
{
  asection *relplt;
  asymbol *s;
  arelent *p;
  long count, i, n;
  size_t size;
  Elf_Internal_Shdr *hdr;
  char *names;
  asection *plt;
  bfd_vma offset;
  bfd_byte *data;

  *ret = NULL;

  if ((abfd->flags & (DYNAMIC | EXEC_P)) == 0)
    return 0;

  if (dynsymcount <= 0)
    return 0;

  relplt = bfd_get_section_by_name (abfd, ".rel.plt");
  if (relplt == NULL)
    return 0;

  /* The PLT relocation section must reference the dynamic symbol table.  */
  hdr = &elf_section_data (relplt)->this_hdr;
  if (hdr->sh_link != elf_dynsymtab (abfd)
      || (hdr->sh_type != SHT_REL && hdr->sh_type != SHT_RELA))
    return 0;

  plt = bfd_get_section_by_name (abfd, ".plt");
  if (plt == NULL)
    return 0;

  if (!elf32_arm_size_info.slurp_reloc_table (abfd, relplt, dynsyms, TRUE))
    return -1;

  data = plt->contents;
  if (data == NULL)
    {
      if (!bfd_get_full_section_contents(abfd, (asection *) plt, &data) || data == NULL)
	return -1;
      bfd_cache_section_contents((asection *) plt, data);
    }

  /* First pass: compute the size of a single allocation holding both
     the asymbol array and all the "<name>@plt[+0xADDEND]" strings.  */
  count = relplt->size / hdr->sh_entsize;
  size = count * sizeof (asymbol);
  p = relplt->relocation;
  for (i = 0; i < count; i++, p += elf32_arm_size_info.int_rels_per_ext_rel)
    {
      size += strlen ((*p->sym_ptr_ptr)->name) + sizeof ("@plt");
      if (p->addend != 0)
	size += sizeof ("+0x") - 1 + 8;
    }

  s = *ret = (asymbol *) bfd_malloc (size);
  if (s == NULL)
    return -1;

  /* OFFSET tracks the current position within the PLT; start just
     past the PLT header.  */
  offset = elf32_arm_plt0_size (abfd, data);
  if (offset == (bfd_vma) -1)
    return -1;

  /* Second pass: fill in the symbols; the string area follows the
     asymbol array inside the same allocation.  */
  names = (char *) (s + count);
  p = relplt->relocation;
  n = 0;
  for (i = 0; i < count; i++, p += elf32_arm_size_info.int_rels_per_ext_rel)
    {
      size_t len;

      bfd_vma plt_size = elf32_arm_plt_size (abfd, data, offset);
      if (plt_size == (bfd_vma) -1)
	break;

      *s = **p->sym_ptr_ptr;
      /* Undefined syms won't have BSF_LOCAL or BSF_GLOBAL set.  Since
	 we are defining a symbol, ensure one of them is set.  */
      if ((s->flags & BSF_LOCAL) == 0)
	s->flags |= BSF_GLOBAL;
      s->flags |= BSF_SYNTHETIC;
      s->section = plt;
      s->value = offset;
      s->name = names;
      s->udata.p = NULL;
      len = strlen ((*p->sym_ptr_ptr)->name);
      memcpy (names, (*p->sym_ptr_ptr)->name, len);
      names += len;
      if (p->addend != 0)
	{
	  char buf[30], *a;

	  memcpy (names, "+0x", sizeof ("+0x") - 1);
	  names += sizeof ("+0x") - 1;
	  bfd_sprintf_vma (abfd, buf, p->addend);
	  /* Skip leading zeroes of the formatted addend.  */
	  for (a = buf; *a == '0'; ++a)
	    ;
	  len = strlen (a);
	  memcpy (names, a, len);
	  names += len;
	}
      memcpy (names, "@plt", sizeof ("@plt"));
      names += sizeof ("@plt");
      ++s, ++n;
      offset += plt_size;
    }

  return n;
}
20198
20199 static bfd_boolean
20200 elf32_arm_section_flags (flagword *flags, const Elf_Internal_Shdr * hdr)
20201 {
20202 if (hdr->sh_flags & SHF_ARM_PURECODE)
20203 *flags |= SEC_ELF_PURECODE;
20204 return TRUE;
20205 }
20206
20207 static flagword
20208 elf32_arm_lookup_section_flags (char *flag_name)
20209 {
20210 if (!strcmp (flag_name, "SHF_ARM_PURECODE"))
20211 return SHF_ARM_PURECODE;
20212
20213 return SEC_NO_FLAGS;
20214 }
20215
20216 static unsigned int
20217 elf32_arm_count_additional_relocs (asection *sec)
20218 {
20219 struct _arm_elf_section_data *arm_data;
20220 arm_data = get_arm_elf_section_data (sec);
20221
20222 return arm_data == NULL ? 0 : arm_data->additional_reloc_count;
20223 }
20224
20225 /* Called to set the sh_flags, sh_link and sh_info fields of OSECTION which
20226 has a type >= SHT_LOOS. Returns TRUE if these fields were initialised
20227 FALSE otherwise. ISECTION is the best guess matching section from the
20228 input bfd IBFD, but it might be NULL. */
20229
static bfd_boolean
elf32_arm_copy_special_section_fields (const bfd *ibfd ATTRIBUTE_UNUSED,
				       bfd *obfd ATTRIBUTE_UNUSED,
				       const Elf_Internal_Shdr *isection ATTRIBUTE_UNUSED,
				       Elf_Internal_Shdr *osection)
{
  switch (osection->sh_type)
    {
    case SHT_ARM_EXIDX:
      {
	Elf_Internal_Shdr **oheaders = elf_elfsections (obfd);
	Elf_Internal_Shdr **iheaders = elf_elfsections (ibfd);
	/* I is both the loop index and the "found" indicator: it stays 0
	   when no matching text section header has been located.  */
	unsigned i = 0;

	osection->sh_flags = SHF_ALLOC | SHF_LINK_ORDER;
	osection->sh_info = 0;

	/* The sh_link field must be set to the text section associated with
	   this index section.  Unfortunately the ARM EHABI does not specify
	   exactly how to determine this association.  Our caller does try
	   to match up OSECTION with its corresponding input section however
	   so that is a good first guess.  */
	if (isection != NULL
	    && osection->bfd_section != NULL
	    && isection->bfd_section != NULL
	    && isection->bfd_section->output_section != NULL
	    && isection->bfd_section->output_section == osection->bfd_section
	    && iheaders != NULL
	    && isection->sh_link > 0
	    && isection->sh_link < elf_numsections (ibfd)
	    && iheaders[isection->sh_link]->bfd_section != NULL
	    && iheaders[isection->sh_link]->bfd_section->output_section != NULL
	    )
	  {
	    /* Search the output headers for the one covering the output
	       section of the input's linked text section.  */
	    for (i = elf_numsections (obfd); i-- > 0;)
	      if (oheaders[i]->bfd_section
		  == iheaders[isection->sh_link]->bfd_section->output_section)
		break;
	  }

	/* NOTE(review): if the loop above runs to completion without a
	   match, the post-decrement leaves I as (unsigned) -1 rather
	   than 0, so this fallback is skipped and sh_link below would be
	   set to a bogus index — presumably the guard above guarantees
	   a match whenever the loop is entered; confirm.  */
	if (i == 0)
	  {
	    /* Failing that we have to find a matching section ourselves.  If
	       we had the output section name available we could compare that
	       with input section names.  Unfortunately we don't.  So instead
	       we use a simple heuristic and look for the nearest executable
	       section before this one.  */
	    for (i = elf_numsections (obfd); i-- > 0;)
	      if (oheaders[i] == osection)
		break;
	    if (i == 0)
	      break;

	    /* Walk backwards from OSECTION looking for an allocated,
	       executable PROGBITS section to associate with.  */
	    while (i-- > 0)
	      if (oheaders[i]->sh_type == SHT_PROGBITS
		  && (oheaders[i]->sh_flags & (SHF_ALLOC | SHF_EXECINSTR))
		  == (SHF_ALLOC | SHF_EXECINSTR))
		break;
	  }

	if (i)
	  {
	    osection->sh_link = i;
	    /* If the text section was part of a group
	       then the index section should be too.  */
	    if (oheaders[i]->sh_flags & SHF_GROUP)
	      osection->sh_flags |= SHF_GROUP;
	    return TRUE;
	  }
      }
      break;

    case SHT_ARM_PREEMPTMAP:
      osection->sh_flags = SHF_ALLOC;
      break;

    case SHT_ARM_ATTRIBUTES:
    case SHT_ARM_DEBUGOVERLAY:
    case SHT_ARM_OVERLAYSECTION:
    default:
      break;
    }

  return FALSE;
}
20315
20316 /* Returns TRUE if NAME is an ARM mapping symbol.
20317 Traditionally the symbols $a, $d and $t have been used.
20318 The ARM ELF standard also defines $x (for A64 code). It also allows a
20319 period initiated suffix to be added to the symbol: "$[adtx]\.[:sym_char]+".
20320 Other tools might also produce $b (Thumb BL), $f, $p, $m and $v, but we do
20321 not support them here. $t.x indicates the start of ThumbEE instructions. */
20322
20323 static bfd_boolean
20324 is_arm_mapping_symbol (const char * name)
20325 {
20326 return name != NULL /* Paranoia. */
20327 && name[0] == '$' /* Note: if objcopy --prefix-symbols has been used then
20328 the mapping symbols could have acquired a prefix.
20329 We do not support this here, since such symbols no
20330 longer conform to the ARM ELF ABI. */
20331 && (name[1] == 'a' || name[1] == 'd' || name[1] == 't' || name[1] == 'x')
20332 && (name[2] == 0 || name[2] == '.');
20333 /* FIXME: Strictly speaking the symbol is only a valid mapping symbol if
20334 any characters that follow the period are legal characters for the body
20335 of a symbol's name. For now we just assume that this is the case. */
20336 }
20337
20338 /* Make sure that mapping symbols in object files are not removed via the
20339 "strip --strip-unneeded" tool. These symbols are needed in order to
20340 correctly generate interworking veneers, and for byte swapping code
20341 regions. Once an object file has been linked, it is safe to remove the
20342 symbols as they will no longer be needed. */
20343
20344 static void
20345 elf32_arm_backend_symbol_processing (bfd *abfd, asymbol *sym)
20346 {
20347 if (((abfd->flags & (EXEC_P | DYNAMIC)) == 0)
20348 && sym->section != bfd_abs_section_ptr
20349 && is_arm_mapping_symbol (sym->name))
20350 sym->flags |= BSF_KEEP;
20351 }
20352
20353 #undef elf_backend_copy_special_section_fields
20354 #define elf_backend_copy_special_section_fields elf32_arm_copy_special_section_fields
20355
20356 #define ELF_ARCH bfd_arch_arm
20357 #define ELF_TARGET_ID ARM_ELF_DATA
20358 #define ELF_MACHINE_CODE EM_ARM
20359 #ifdef __QNXTARGET__
20360 #define ELF_MAXPAGESIZE 0x1000
20361 #else
20362 #define ELF_MAXPAGESIZE 0x10000
20363 #endif
20364 #define ELF_MINPAGESIZE 0x1000
20365 #define ELF_COMMONPAGESIZE 0x1000
20366
20367 #define bfd_elf32_mkobject elf32_arm_mkobject
20368
20369 #define bfd_elf32_bfd_copy_private_bfd_data elf32_arm_copy_private_bfd_data
20370 #define bfd_elf32_bfd_merge_private_bfd_data elf32_arm_merge_private_bfd_data
20371 #define bfd_elf32_bfd_set_private_flags elf32_arm_set_private_flags
20372 #define bfd_elf32_bfd_print_private_bfd_data elf32_arm_print_private_bfd_data
20373 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_link_hash_table_create
20374 #define bfd_elf32_bfd_reloc_type_lookup elf32_arm_reloc_type_lookup
20375 #define bfd_elf32_bfd_reloc_name_lookup elf32_arm_reloc_name_lookup
20376 #define bfd_elf32_find_nearest_line elf32_arm_find_nearest_line
20377 #define bfd_elf32_find_inliner_info elf32_arm_find_inliner_info
20378 #define bfd_elf32_new_section_hook elf32_arm_new_section_hook
20379 #define bfd_elf32_bfd_is_target_special_symbol elf32_arm_is_target_special_symbol
20380 #define bfd_elf32_bfd_final_link elf32_arm_final_link
20381 #define bfd_elf32_get_synthetic_symtab elf32_arm_get_synthetic_symtab
20382
20383 #define elf_backend_get_symbol_type elf32_arm_get_symbol_type
20384 #define elf_backend_gc_mark_hook elf32_arm_gc_mark_hook
20385 #define elf_backend_gc_mark_extra_sections elf32_arm_gc_mark_extra_sections
20386 #define elf_backend_check_relocs elf32_arm_check_relocs
20387 #define elf_backend_update_relocs elf32_arm_update_relocs
20388 #define elf_backend_relocate_section elf32_arm_relocate_section
20389 #define elf_backend_write_section elf32_arm_write_section
20390 #define elf_backend_adjust_dynamic_symbol elf32_arm_adjust_dynamic_symbol
20391 #define elf_backend_create_dynamic_sections elf32_arm_create_dynamic_sections
20392 #define elf_backend_finish_dynamic_symbol elf32_arm_finish_dynamic_symbol
20393 #define elf_backend_finish_dynamic_sections elf32_arm_finish_dynamic_sections
20394 #define elf_backend_size_dynamic_sections elf32_arm_size_dynamic_sections
20395 #define elf_backend_always_size_sections elf32_arm_always_size_sections
20396 #define elf_backend_init_index_section _bfd_elf_init_2_index_sections
20397 #define elf_backend_post_process_headers elf32_arm_post_process_headers
20398 #define elf_backend_reloc_type_class elf32_arm_reloc_type_class
20399 #define elf_backend_object_p elf32_arm_object_p
20400 #define elf_backend_fake_sections elf32_arm_fake_sections
20401 #define elf_backend_section_from_shdr elf32_arm_section_from_shdr
20402 #define elf_backend_final_write_processing elf32_arm_final_write_processing
20403 #define elf_backend_copy_indirect_symbol elf32_arm_copy_indirect_symbol
20404 #define elf_backend_size_info elf32_arm_size_info
20405 #define elf_backend_modify_segment_map elf32_arm_modify_segment_map
20406 #define elf_backend_additional_program_headers elf32_arm_additional_program_headers
20407 #define elf_backend_output_arch_local_syms elf32_arm_output_arch_local_syms
20408 #define elf_backend_filter_implib_symbols elf32_arm_filter_implib_symbols
20409 #define elf_backend_begin_write_processing elf32_arm_begin_write_processing
20410 #define elf_backend_add_symbol_hook elf32_arm_add_symbol_hook
20411 #define elf_backend_count_additional_relocs elf32_arm_count_additional_relocs
20412 #define elf_backend_symbol_processing elf32_arm_backend_symbol_processing
20413
20414 #define elf_backend_can_refcount 1
20415 #define elf_backend_can_gc_sections 1
20416 #define elf_backend_plt_readonly 1
20417 #define elf_backend_want_got_plt 1
20418 #define elf_backend_want_plt_sym 0
20419 #define elf_backend_want_dynrelro 1
20420 #define elf_backend_may_use_rel_p 1
20421 #define elf_backend_may_use_rela_p 0
20422 #define elf_backend_default_use_rela_p 0
20423 #define elf_backend_dtrel_excludes_plt 1
20424
20425 #define elf_backend_got_header_size 12
20426 #define elf_backend_extern_protected_data 1
20427
20428 #undef elf_backend_obj_attrs_vendor
20429 #define elf_backend_obj_attrs_vendor "aeabi"
20430 #undef elf_backend_obj_attrs_section
20431 #define elf_backend_obj_attrs_section ".ARM.attributes"
20432 #undef elf_backend_obj_attrs_arg_type
20433 #define elf_backend_obj_attrs_arg_type elf32_arm_obj_attrs_arg_type
20434 #undef elf_backend_obj_attrs_section_type
20435 #define elf_backend_obj_attrs_section_type SHT_ARM_ATTRIBUTES
20436 #define elf_backend_obj_attrs_order elf32_arm_obj_attrs_order
20437 #define elf_backend_obj_attrs_handle_unknown elf32_arm_obj_attrs_handle_unknown
20438
20439 #undef elf_backend_section_flags
20440 #define elf_backend_section_flags elf32_arm_section_flags
20441 #undef elf_backend_lookup_section_flags_hook
20442 #define elf_backend_lookup_section_flags_hook elf32_arm_lookup_section_flags
20443
20444 #define elf_backend_linux_prpsinfo32_ugid16 TRUE
20445
20446 #include "elf32-target.h"
20447
20448 /* Native Client targets. */
20449
20450 #undef TARGET_LITTLE_SYM
20451 #define TARGET_LITTLE_SYM arm_elf32_nacl_le_vec
20452 #undef TARGET_LITTLE_NAME
20453 #define TARGET_LITTLE_NAME "elf32-littlearm-nacl"
20454 #undef TARGET_BIG_SYM
20455 #define TARGET_BIG_SYM arm_elf32_nacl_be_vec
20456 #undef TARGET_BIG_NAME
20457 #define TARGET_BIG_NAME "elf32-bigarm-nacl"
20458
20459 /* Like elf32_arm_link_hash_table_create -- but overrides
20460 appropriately for NaCl. */
20461
20462 static struct bfd_link_hash_table *
20463 elf32_arm_nacl_link_hash_table_create (bfd *abfd)
20464 {
20465 struct bfd_link_hash_table *ret;
20466
20467 ret = elf32_arm_link_hash_table_create (abfd);
20468 if (ret)
20469 {
20470 struct elf32_arm_link_hash_table *htab
20471 = (struct elf32_arm_link_hash_table *) ret;
20472
20473 htab->nacl_p = 1;
20474
20475 htab->plt_header_size = 4 * ARRAY_SIZE (elf32_arm_nacl_plt0_entry);
20476 htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_arm_nacl_plt_entry);
20477 }
20478 return ret;
20479 }
20480
20481 /* Since NaCl doesn't use the ARM-specific unwind format, we don't
20482 really need to use elf32_arm_modify_segment_map. But we do it
20483 anyway just to reduce gratuitous differences with the stock ARM backend. */
20484
20485 static bfd_boolean
20486 elf32_arm_nacl_modify_segment_map (bfd *abfd, struct bfd_link_info *info)
20487 {
20488 return (elf32_arm_modify_segment_map (abfd, info)
20489 && nacl_modify_segment_map (abfd, info));
20490 }
20491
/* Final-write hook for NaCl targets: perform the standard ARM processing
   first, then the NaCl-specific processing.  Order matters — the NaCl
   pass runs on top of what the ARM pass produced.  */

static void
elf32_arm_nacl_final_write_processing (bfd *abfd, bfd_boolean linker)
{
  elf32_arm_final_write_processing (abfd, linker);
  nacl_final_write_processing (abfd, linker);
}
20498
20499 static bfd_vma
20500 elf32_arm_nacl_plt_sym_val (bfd_vma i, const asection *plt,
20501 const arelent *rel ATTRIBUTE_UNUSED)
20502 {
20503 return plt->vma
20504 + 4 * (ARRAY_SIZE (elf32_arm_nacl_plt0_entry) +
20505 i * ARRAY_SIZE (elf32_arm_nacl_plt_entry));
20506 }
20507
20508 #undef elf32_bed
20509 #define elf32_bed elf32_arm_nacl_bed
20510 #undef bfd_elf32_bfd_link_hash_table_create
20511 #define bfd_elf32_bfd_link_hash_table_create \
20512 elf32_arm_nacl_link_hash_table_create
20513 #undef elf_backend_plt_alignment
20514 #define elf_backend_plt_alignment 4
20515 #undef elf_backend_modify_segment_map
20516 #define elf_backend_modify_segment_map elf32_arm_nacl_modify_segment_map
20517 #undef elf_backend_modify_program_headers
20518 #define elf_backend_modify_program_headers nacl_modify_program_headers
20519 #undef elf_backend_final_write_processing
20520 #define elf_backend_final_write_processing elf32_arm_nacl_final_write_processing
20521 #undef bfd_elf32_get_synthetic_symtab
20522 #undef elf_backend_plt_sym_val
20523 #define elf_backend_plt_sym_val elf32_arm_nacl_plt_sym_val
20524 #undef elf_backend_copy_special_section_fields
20525
20526 #undef ELF_MINPAGESIZE
20527 #undef ELF_COMMONPAGESIZE
20528
20529
20530 #include "elf32-target.h"
20531
20532 /* Reset to defaults. */
20533 #undef elf_backend_plt_alignment
20534 #undef elf_backend_modify_segment_map
20535 #define elf_backend_modify_segment_map elf32_arm_modify_segment_map
20536 #undef elf_backend_modify_program_headers
20537 #undef elf_backend_final_write_processing
20538 #define elf_backend_final_write_processing elf32_arm_final_write_processing
20539 #undef ELF_MINPAGESIZE
20540 #define ELF_MINPAGESIZE 0x1000
20541 #undef ELF_COMMONPAGESIZE
20542 #define ELF_COMMONPAGESIZE 0x1000
20543
20544
20545 /* FDPIC Targets. */
20546
20547 #undef TARGET_LITTLE_SYM
20548 #define TARGET_LITTLE_SYM arm_elf32_fdpic_le_vec
20549 #undef TARGET_LITTLE_NAME
20550 #define TARGET_LITTLE_NAME "elf32-littlearm-fdpic"
20551 #undef TARGET_BIG_SYM
20552 #define TARGET_BIG_SYM arm_elf32_fdpic_be_vec
20553 #undef TARGET_BIG_NAME
20554 #define TARGET_BIG_NAME "elf32-bigarm-fdpic"
20555 #undef elf_match_priority
20556 #define elf_match_priority 128
20557 #undef ELF_OSABI
20558 #define ELF_OSABI ELFOSABI_ARM_FDPIC
20559
20560 /* Like elf32_arm_link_hash_table_create -- but overrides
20561 appropriately for FDPIC. */
20562
20563 static struct bfd_link_hash_table *
20564 elf32_arm_fdpic_link_hash_table_create (bfd *abfd)
20565 {
20566 struct bfd_link_hash_table *ret;
20567
20568 ret = elf32_arm_link_hash_table_create (abfd);
20569 if (ret)
20570 {
20571 struct elf32_arm_link_hash_table *htab = (struct elf32_arm_link_hash_table *) ret;
20572
20573 htab->fdpic_p = 1;
20574 }
20575 return ret;
20576 }
20577
20578 /* We need dynamic symbols for every section, since segments can
20579 relocate independently. */
20580 static bfd_boolean
20581 elf32_arm_fdpic_omit_section_dynsym (bfd *output_bfd ATTRIBUTE_UNUSED,
20582 struct bfd_link_info *info
20583 ATTRIBUTE_UNUSED,
20584 asection *p ATTRIBUTE_UNUSED)
20585 {
20586 switch (elf_section_data (p)->this_hdr.sh_type)
20587 {
20588 case SHT_PROGBITS:
20589 case SHT_NOBITS:
20590 /* If sh_type is yet undecided, assume it could be
20591 SHT_PROGBITS/SHT_NOBITS. */
20592 case SHT_NULL:
20593 return FALSE;
20594
20595 /* There shouldn't be section relative relocations
20596 against any other section. */
20597 default:
20598 return TRUE;
20599 }
20600 }
20601
20602 #undef elf32_bed
20603 #define elf32_bed elf32_arm_fdpic_bed
20604
20605 #undef bfd_elf32_bfd_link_hash_table_create
20606 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_fdpic_link_hash_table_create
20607
20608 #undef elf_backend_omit_section_dynsym
20609 #define elf_backend_omit_section_dynsym elf32_arm_fdpic_omit_section_dynsym
20610
20611 #include "elf32-target.h"
20612
20613 #undef elf_match_priority
20614 #undef ELF_OSABI
20615 #undef elf_backend_omit_section_dynsym
20616
20617 /* VxWorks Targets. */
20618
20619 #undef TARGET_LITTLE_SYM
20620 #define TARGET_LITTLE_SYM arm_elf32_vxworks_le_vec
20621 #undef TARGET_LITTLE_NAME
20622 #define TARGET_LITTLE_NAME "elf32-littlearm-vxworks"
20623 #undef TARGET_BIG_SYM
20624 #define TARGET_BIG_SYM arm_elf32_vxworks_be_vec
20625 #undef TARGET_BIG_NAME
20626 #define TARGET_BIG_NAME "elf32-bigarm-vxworks"
20627
20628 /* Like elf32_arm_link_hash_table_create -- but overrides
20629 appropriately for VxWorks. */
20630
20631 static struct bfd_link_hash_table *
20632 elf32_arm_vxworks_link_hash_table_create (bfd *abfd)
20633 {
20634 struct bfd_link_hash_table *ret;
20635
20636 ret = elf32_arm_link_hash_table_create (abfd);
20637 if (ret)
20638 {
20639 struct elf32_arm_link_hash_table *htab
20640 = (struct elf32_arm_link_hash_table *) ret;
20641 htab->use_rel = 0;
20642 htab->vxworks_p = 1;
20643 }
20644 return ret;
20645 }
20646
/* Final-write hook for VxWorks targets: perform the standard ARM
   processing first, then the generic VxWorks processing.  */

static void
elf32_arm_vxworks_final_write_processing (bfd *abfd, bfd_boolean linker)
{
  elf32_arm_final_write_processing (abfd, linker);
  elf_vxworks_final_write_processing (abfd, linker);
}
20653
20654 #undef elf32_bed
20655 #define elf32_bed elf32_arm_vxworks_bed
20656
20657 #undef bfd_elf32_bfd_link_hash_table_create
20658 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_vxworks_link_hash_table_create
20659 #undef elf_backend_final_write_processing
20660 #define elf_backend_final_write_processing elf32_arm_vxworks_final_write_processing
20661 #undef elf_backend_emit_relocs
20662 #define elf_backend_emit_relocs elf_vxworks_emit_relocs
20663
20664 #undef elf_backend_may_use_rel_p
20665 #define elf_backend_may_use_rel_p 0
20666 #undef elf_backend_may_use_rela_p
20667 #define elf_backend_may_use_rela_p 1
20668 #undef elf_backend_default_use_rela_p
20669 #define elf_backend_default_use_rela_p 1
20670 #undef elf_backend_want_plt_sym
20671 #define elf_backend_want_plt_sym 1
20672 #undef ELF_MAXPAGESIZE
20673 #define ELF_MAXPAGESIZE 0x1000
20674
20675 #include "elf32-target.h"
20676
20677
20678 /* Merge backend specific data from an object file to the output
20679 object file when linking. */
20680
static bfd_boolean
elf32_arm_merge_private_bfd_data (bfd *ibfd, struct bfd_link_info *info)
{
  bfd *obfd = info->output_bfd;
  flagword out_flags;
  flagword in_flags;
  /* Incompatibilities are accumulated: every mismatch is reported, and
     the final verdict is returned at the end.  */
  bfd_boolean flags_compatible = TRUE;
  asection *sec;

  /* Check if we have the same endianness.  */
  if (! _bfd_generic_verify_endian_match (ibfd, info))
    return FALSE;

  /* Non-ARM inputs (e.g. linker scripts' fake bfds) have nothing to
     merge; treat them as trivially compatible.  */
  if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
    return TRUE;

  if (!elf32_arm_merge_eabi_attributes (ibfd, info))
    return FALSE;

  /* The input BFD must have had its flags initialised.  */
  /* The following seems bogus to me -- The flags are initialized in
     the assembler but I don't think an elf_flags_init field is
     written into the object.  */
  /* BFD_ASSERT (elf_flags_init (ibfd)); */

  in_flags = elf_elfheader (ibfd)->e_flags;
  out_flags = elf_elfheader (obfd)->e_flags;

  /* In theory there is no reason why we couldn't handle this.  However
     in practice it isn't even close to working and there is no real
     reason to want it.  */
  if (EF_ARM_EABI_VERSION (in_flags) >= EF_ARM_EABI_VER4
      && !(ibfd->flags & DYNAMIC)
      && (in_flags & EF_ARM_BE8))
    {
      _bfd_error_handler (_("error: %pB is already in final BE8 format"),
			  ibfd);
      return FALSE;
    }

  if (!elf_flags_init (obfd))
    {
      /* If the input is the default architecture and had the default
	 flags then do not bother setting the flags for the output
	 architecture, instead allow future merges to do this.  If no
	 future merges ever set these flags then they will retain their
	 uninitialised values, which surprise surprise, correspond
	 to the default values.  */
      if (bfd_get_arch_info (ibfd)->the_default
	  && elf_elfheader (ibfd)->e_flags == 0)
	return TRUE;

      /* First non-trivial input: adopt its flags (and arch/mach if the
	 output still has the default architecture) wholesale.  */
      elf_flags_init (obfd) = TRUE;
      elf_elfheader (obfd)->e_flags = in_flags;

      if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
	  && bfd_get_arch_info (obfd)->the_default)
	return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd), bfd_get_mach (ibfd));

      return TRUE;
    }

  /* Determine what should happen if the input ARM architecture
     does not match the output ARM architecture.  */
  if (! bfd_arm_merge_machines (ibfd, obfd))
    return FALSE;

  /* Identical flags must be compatible.  */
  if (in_flags == out_flags)
    return TRUE;

  /* Check to see if the input BFD actually contains any sections.  If
     not, its flags may not have been initialised either, but it
     cannot actually cause any incompatiblity.  Do not short-circuit
     dynamic objects; their section list may be emptied by
     elf_link_add_object_symbols.

     Also check to see if there are no code sections in the input.
     In this case there is no need to check for code specific flags.
     XXX - do we need to worry about floating-point format compatability
     in data sections ?  */
  if (!(ibfd->flags & DYNAMIC))
    {
      bfd_boolean null_input_bfd = TRUE;
      bfd_boolean only_data_sections = TRUE;

      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	{
	  /* Ignore synthetic glue sections.  */
	  if (strcmp (sec->name, ".glue_7")
	      && strcmp (sec->name, ".glue_7t"))
	    {
	      if ((bfd_get_section_flags (ibfd, sec)
		   & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
		  == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
		only_data_sections = FALSE;

	      /* NOTE(review): this break means only the FIRST non-glue
		 section decides only_data_sections — presumably
		 intentional/historical, but confirm before relying on
		 the "only data" classification.  */
	      null_input_bfd = FALSE;
	      break;
	    }
	}

      if (null_input_bfd || only_data_sections)
	return TRUE;
    }

  /* Complain about various flag mismatches.  */
  if (!elf32_arm_versions_compatible (EF_ARM_EABI_VERSION (in_flags),
				      EF_ARM_EABI_VERSION (out_flags)))
    {
      _bfd_error_handler
	(_("error: source object %pB has EABI version %d, but target %pB has EABI version %d"),
	 ibfd, (in_flags & EF_ARM_EABIMASK) >> 24,
	 obfd, (out_flags & EF_ARM_EABIMASK) >> 24);
      return FALSE;
    }

  /* Not sure what needs to be checked for EABI versions >= 1.  */
  /* VxWorks libraries do not use these flags.  */
  if (get_elf_backend_data (obfd) != &elf32_arm_vxworks_bed
      && get_elf_backend_data (ibfd) != &elf32_arm_vxworks_bed
      && EF_ARM_EABI_VERSION (in_flags) == EF_ARM_EABI_UNKNOWN)
    {
      /* Legacy (pre-EABI) flag checks: APCS variant, float-passing
	 convention, FP unit flavour and interworking.  */
      if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
	{
	  _bfd_error_handler
	    (_("error: %pB is compiled for APCS-%d, whereas target %pB uses APCS-%d"),
	     ibfd, in_flags & EF_ARM_APCS_26 ? 26 : 32,
	     obfd, out_flags & EF_ARM_APCS_26 ? 26 : 32);
	  flags_compatible = FALSE;
	}

      if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
	{
	  if (in_flags & EF_ARM_APCS_FLOAT)
	    _bfd_error_handler
	      (_("error: %pB passes floats in float registers, whereas %pB passes them in integer registers"),
	       ibfd, obfd);
	  else
	    _bfd_error_handler
	      (_("error: %pB passes floats in integer registers, whereas %pB passes them in float registers"),
	       ibfd, obfd);

	  flags_compatible = FALSE;
	}

      if ((in_flags & EF_ARM_VFP_FLOAT) != (out_flags & EF_ARM_VFP_FLOAT))
	{
	  if (in_flags & EF_ARM_VFP_FLOAT)
	    _bfd_error_handler
	      (_("error: %pB uses %s instructions, whereas %pB does not"),
	       ibfd, "VFP", obfd);
	  else
	    _bfd_error_handler
	      (_("error: %pB uses %s instructions, whereas %pB does not"),
	       ibfd, "FPA", obfd);

	  flags_compatible = FALSE;
	}

      if ((in_flags & EF_ARM_MAVERICK_FLOAT) != (out_flags & EF_ARM_MAVERICK_FLOAT))
	{
	  if (in_flags & EF_ARM_MAVERICK_FLOAT)
	    _bfd_error_handler
	      (_("error: %pB uses %s instructions, whereas %pB does not"),
	       ibfd, "Maverick", obfd);
	  else
	    _bfd_error_handler
	      (_("error: %pB does not use %s instructions, whereas %pB does"),
	       ibfd, "Maverick", obfd);

	  flags_compatible = FALSE;
	}

#ifdef EF_ARM_SOFT_FLOAT
      if ((in_flags & EF_ARM_SOFT_FLOAT) != (out_flags & EF_ARM_SOFT_FLOAT))
	{
	  /* We can allow interworking between code that is VFP format
	     layout, and uses either soft float or integer regs for
	     passing floating point arguments and results.  We already
	     know that the APCS_FLOAT flags match; similarly for VFP
	     flags.  */
	  if ((in_flags & EF_ARM_APCS_FLOAT) != 0
	      || (in_flags & EF_ARM_VFP_FLOAT) == 0)
	    {
	      if (in_flags & EF_ARM_SOFT_FLOAT)
		_bfd_error_handler
		  (_("error: %pB uses software FP, whereas %pB uses hardware FP"),
		   ibfd, obfd);
	      else
		_bfd_error_handler
		  (_("error: %pB uses hardware FP, whereas %pB uses software FP"),
		   ibfd, obfd);

	      flags_compatible = FALSE;
	    }
	}
#endif

      /* Interworking mismatch is only a warning.  */
      if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
	{
	  if (in_flags & EF_ARM_INTERWORK)
	    {
	      _bfd_error_handler
		(_("warning: %pB supports interworking, whereas %pB does not"),
		 ibfd, obfd);
	    }
	  else
	    {
	      _bfd_error_handler
		(_("warning: %pB does not support interworking, whereas %pB does"),
		 ibfd, obfd);
	    }
	}
    }

  return flags_compatible;
}
20900
20901
20902 /* Symbian OS Targets. */
20903
20904 #undef TARGET_LITTLE_SYM
20905 #define TARGET_LITTLE_SYM arm_elf32_symbian_le_vec
20906 #undef TARGET_LITTLE_NAME
20907 #define TARGET_LITTLE_NAME "elf32-littlearm-symbian"
20908 #undef TARGET_BIG_SYM
20909 #define TARGET_BIG_SYM arm_elf32_symbian_be_vec
20910 #undef TARGET_BIG_NAME
20911 #define TARGET_BIG_NAME "elf32-bigarm-symbian"
20912
20913 /* Like elf32_arm_link_hash_table_create -- but overrides
20914 appropriately for Symbian OS. */
20915
20916 static struct bfd_link_hash_table *
20917 elf32_arm_symbian_link_hash_table_create (bfd *abfd)
20918 {
20919 struct bfd_link_hash_table *ret;
20920
20921 ret = elf32_arm_link_hash_table_create (abfd);
20922 if (ret)
20923 {
20924 struct elf32_arm_link_hash_table *htab
20925 = (struct elf32_arm_link_hash_table *)ret;
20926 /* There is no PLT header for Symbian OS. */
20927 htab->plt_header_size = 0;
20928 /* The PLT entries are each one instruction and one word. */
20929 htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry);
20930 htab->symbian_p = 1;
20931 /* Symbian uses armv5t or above, so use_blx is always true. */
20932 htab->use_blx = 1;
20933 htab->root.is_relocatable_executable = 1;
20934 }
20935 return ret;
20936 }
20937
/* Special-section table for Symbian OS (BPABI) targets; each entry gives
   a section-name prefix and the sh_type/sh_flags it should receive.  */
static const struct bfd_elf_special_section
elf32_arm_symbian_special_sections[] =
{
  /* In a BPABI executable, the dynamic linking sections do not go in
     the loadable read-only segment.  The post-linker may wish to
     refer to these sections, but they are not part of the final
     program image.  */
  { STRING_COMMA_LEN (".dynamic"), 0, SHT_DYNAMIC, 0 },
  { STRING_COMMA_LEN (".dynstr"), 0, SHT_STRTAB, 0 },
  { STRING_COMMA_LEN (".dynsym"), 0, SHT_DYNSYM, 0 },
  { STRING_COMMA_LEN (".got"), 0, SHT_PROGBITS, 0 },
  { STRING_COMMA_LEN (".hash"), 0, SHT_HASH, 0 },
  /* These sections do not need to be writable as the SymbianOS
     postlinker will arrange things so that no dynamic relocation is
     required.  */
  { STRING_COMMA_LEN (".init_array"), 0, SHT_INIT_ARRAY, SHF_ALLOC },
  { STRING_COMMA_LEN (".fini_array"), 0, SHT_FINI_ARRAY, SHF_ALLOC },
  { STRING_COMMA_LEN (".preinit_array"), 0, SHT_PREINIT_ARRAY, SHF_ALLOC },
  /* Sentinel terminating the table.  */
  { NULL, 0, 0, 0, 0 }
};
20958
/* Begin-write hook for Symbian OS: strip D_PAGED before the standard ARM
   processing runs, for the reasons explained below.  */

static void
elf32_arm_symbian_begin_write_processing (bfd *abfd,
					  struct bfd_link_info *link_info)
{
  /* BPABI objects are never loaded directly by an OS kernel; they are
     processed by a postlinker first, into an OS-specific format.  If
     the D_PAGED bit is set on the file, BFD will align segments on
     page boundaries, so that an OS can directly map the file.  With
     BPABI objects, that just results in wasted space.  In addition,
     because we clear the D_PAGED bit, map_sections_to_segments will
     recognize that the program headers should not be mapped into any
     loadable segment.  */
  abfd->flags &= ~D_PAGED;
  elf32_arm_begin_write_processing (abfd, link_info);
}
20974
20975 static bfd_boolean
20976 elf32_arm_symbian_modify_segment_map (bfd *abfd,
20977 struct bfd_link_info *info)
20978 {
20979 struct elf_segment_map *m;
20980 asection *dynsec;
20981
20982 /* BPABI shared libraries and executables should have a PT_DYNAMIC
20983 segment. However, because the .dynamic section is not marked
20984 with SEC_LOAD, the generic ELF code will not create such a
20985 segment. */
20986 dynsec = bfd_get_section_by_name (abfd, ".dynamic");
20987 if (dynsec)
20988 {
20989 for (m = elf_seg_map (abfd); m != NULL; m = m->next)
20990 if (m->p_type == PT_DYNAMIC)
20991 break;
20992
20993 if (m == NULL)
20994 {
20995 m = _bfd_elf_make_dynamic_segment (abfd, dynsec);
20996 m->next = elf_seg_map (abfd);
20997 elf_seg_map (abfd) = m;
20998 }
20999 }
21000
21001 /* Also call the generic arm routine. */
21002 return elf32_arm_modify_segment_map (abfd, info);
21003 }
21004
21005 /* Return address for Ith PLT stub in section PLT, for relocation REL
21006 or (bfd_vma) -1 if it should not be included. */
21007
21008 static bfd_vma
21009 elf32_arm_symbian_plt_sym_val (bfd_vma i, const asection *plt,
21010 const arelent *rel ATTRIBUTE_UNUSED)
21011 {
21012 return plt->vma + 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry) * i;
21013 }
21014
21015 #undef elf32_bed
21016 #define elf32_bed elf32_arm_symbian_bed
21017
21018 /* The dynamic sections are not allocated on SymbianOS; the postlinker
21019 will process them and then discard them. */
21020 #undef ELF_DYNAMIC_SEC_FLAGS
21021 #define ELF_DYNAMIC_SEC_FLAGS \
21022 (SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_LINKER_CREATED)
21023
21024 #undef elf_backend_emit_relocs
21025
21026 #undef bfd_elf32_bfd_link_hash_table_create
21027 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_symbian_link_hash_table_create
21028 #undef elf_backend_special_sections
21029 #define elf_backend_special_sections elf32_arm_symbian_special_sections
21030 #undef elf_backend_begin_write_processing
21031 #define elf_backend_begin_write_processing elf32_arm_symbian_begin_write_processing
21032 #undef elf_backend_final_write_processing
21033 #define elf_backend_final_write_processing elf32_arm_final_write_processing
21034
21035 #undef elf_backend_modify_segment_map
21036 #define elf_backend_modify_segment_map elf32_arm_symbian_modify_segment_map
21037
21038 /* There is no .got section for BPABI objects, and hence no header. */
21039 #undef elf_backend_got_header_size
21040 #define elf_backend_got_header_size 0
21041
21042 /* Similarly, there is no .got.plt section. */
21043 #undef elf_backend_want_got_plt
21044 #define elf_backend_want_got_plt 0
21045
21046 #undef elf_backend_plt_sym_val
21047 #define elf_backend_plt_sym_val elf32_arm_symbian_plt_sym_val
21048
21049 #undef elf_backend_may_use_rel_p
21050 #define elf_backend_may_use_rel_p 1
21051 #undef elf_backend_may_use_rela_p
21052 #define elf_backend_may_use_rela_p 0
21053 #undef elf_backend_default_use_rela_p
21054 #define elf_backend_default_use_rela_p 0
21055 #undef elf_backend_want_plt_sym
21056 #define elf_backend_want_plt_sym 0
21057 #undef elf_backend_dtrel_excludes_plt
21058 #define elf_backend_dtrel_excludes_plt 0
21059 #undef ELF_MAXPAGESIZE
21060 #define ELF_MAXPAGESIZE 0x8000
21061
21062 #include "elf32-target.h"