PR ld/15302
[binutils-gdb.git] / bfd / elf32-arm.c
1 /* 32-bit ELF support for ARM
2 Copyright 1998-2013 Free Software Foundation, Inc.
3
4 This file is part of BFD, the Binary File Descriptor library.
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
19 MA 02110-1301, USA. */
20
21 #include "sysdep.h"
22 #include <limits.h>
23
24 #include "bfd.h"
25 #include "bfd_stdint.h"
26 #include "libiberty.h"
27 #include "libbfd.h"
28 #include "elf-bfd.h"
29 #include "elf-nacl.h"
30 #include "elf-vxworks.h"
31 #include "elf/arm.h"
32
/* Return the relocation section associated with NAME.  HTAB is the
   bfd's elf32_arm_link_hash_table.  */
#define RELOC_SECTION(HTAB, NAME) \
  ((HTAB)->use_rel ? ".rel" NAME : ".rela" NAME)

/* Return size of a relocation entry.  HTAB is the bfd's
   elf32_arm_link_hash_table.  */
#define RELOC_SIZE(HTAB) \
  ((HTAB)->use_rel \
   ? sizeof (Elf32_External_Rel) \
   : sizeof (Elf32_External_Rela))

/* Return function to swap relocations in.  HTAB is the bfd's
   elf32_arm_link_hash_table.  */
#define SWAP_RELOC_IN(HTAB) \
  ((HTAB)->use_rel \
   ? bfd_elf32_swap_reloc_in \
   : bfd_elf32_swap_reloca_in)

/* Return function to swap relocations out.  HTAB is the bfd's
   elf32_arm_link_hash_table.  */
#define SWAP_RELOC_OUT(HTAB) \
  ((HTAB)->use_rel \
   ? bfd_elf32_swap_reloc_out \
   : bfd_elf32_swap_reloca_out)
58
/* Only the REL flavour of the info_to_howto hook is implemented for
   this target; the RELA hook is left unset (0).  */
#define elf_info_to_howto 0
#define elf_info_to_howto_rel elf32_arm_info_to_howto

/* ABI version and OS/ABI identifier recorded in the ELF header's
   e_ident field for ARM output.  */
#define ARM_ELF_ABI_VERSION 0
#define ARM_ELF_OS_ABI_VERSION ELFOSABI_ARM

/* The Adjusted Place, as defined by AAELF: the place with its bottom
   two bits cleared, i.e. rounded down to a 32-bit word boundary.  */
#define Pa(X) ((X) & 0xfffffffc)
67
/* Forward declaration: hook used when SEC's CONTENTS are written out to
   OUTPUT_BFD; defined later in this file.  */
static bfd_boolean elf32_arm_write_section (bfd *output_bfd,
					    struct bfd_link_info *link_info,
					    asection *sec,
					    bfd_byte *contents);
72
73 /* Note: code such as elf32_arm_reloc_type_lookup expect to use e.g.
74 R_ARM_PC24 as an index into this, and find the R_ARM_PC24 HOWTO
75 in that slot. */
76
77 static reloc_howto_type elf32_arm_howto_table_1[] =
78 {
79 /* No relocation. */
80 HOWTO (R_ARM_NONE, /* type */
81 0, /* rightshift */
82 0, /* size (0 = byte, 1 = short, 2 = long) */
83 0, /* bitsize */
84 FALSE, /* pc_relative */
85 0, /* bitpos */
86 complain_overflow_dont,/* complain_on_overflow */
87 bfd_elf_generic_reloc, /* special_function */
88 "R_ARM_NONE", /* name */
89 FALSE, /* partial_inplace */
90 0, /* src_mask */
91 0, /* dst_mask */
92 FALSE), /* pcrel_offset */
93
94 HOWTO (R_ARM_PC24, /* type */
95 2, /* rightshift */
96 2, /* size (0 = byte, 1 = short, 2 = long) */
97 24, /* bitsize */
98 TRUE, /* pc_relative */
99 0, /* bitpos */
100 complain_overflow_signed,/* complain_on_overflow */
101 bfd_elf_generic_reloc, /* special_function */
102 "R_ARM_PC24", /* name */
103 FALSE, /* partial_inplace */
104 0x00ffffff, /* src_mask */
105 0x00ffffff, /* dst_mask */
106 TRUE), /* pcrel_offset */
107
108 /* 32 bit absolute */
109 HOWTO (R_ARM_ABS32, /* type */
110 0, /* rightshift */
111 2, /* size (0 = byte, 1 = short, 2 = long) */
112 32, /* bitsize */
113 FALSE, /* pc_relative */
114 0, /* bitpos */
115 complain_overflow_bitfield,/* complain_on_overflow */
116 bfd_elf_generic_reloc, /* special_function */
117 "R_ARM_ABS32", /* name */
118 FALSE, /* partial_inplace */
119 0xffffffff, /* src_mask */
120 0xffffffff, /* dst_mask */
121 FALSE), /* pcrel_offset */
122
123 /* standard 32bit pc-relative reloc */
124 HOWTO (R_ARM_REL32, /* type */
125 0, /* rightshift */
126 2, /* size (0 = byte, 1 = short, 2 = long) */
127 32, /* bitsize */
128 TRUE, /* pc_relative */
129 0, /* bitpos */
130 complain_overflow_bitfield,/* complain_on_overflow */
131 bfd_elf_generic_reloc, /* special_function */
132 "R_ARM_REL32", /* name */
133 FALSE, /* partial_inplace */
134 0xffffffff, /* src_mask */
135 0xffffffff, /* dst_mask */
136 TRUE), /* pcrel_offset */
137
138 /* 8 bit absolute - R_ARM_LDR_PC_G0 in AAELF */
139 HOWTO (R_ARM_LDR_PC_G0, /* type */
140 0, /* rightshift */
141 0, /* size (0 = byte, 1 = short, 2 = long) */
142 32, /* bitsize */
143 TRUE, /* pc_relative */
144 0, /* bitpos */
145 complain_overflow_dont,/* complain_on_overflow */
146 bfd_elf_generic_reloc, /* special_function */
147 "R_ARM_LDR_PC_G0", /* name */
148 FALSE, /* partial_inplace */
149 0xffffffff, /* src_mask */
150 0xffffffff, /* dst_mask */
151 TRUE), /* pcrel_offset */
152
153 /* 16 bit absolute */
154 HOWTO (R_ARM_ABS16, /* type */
155 0, /* rightshift */
156 1, /* size (0 = byte, 1 = short, 2 = long) */
157 16, /* bitsize */
158 FALSE, /* pc_relative */
159 0, /* bitpos */
160 complain_overflow_bitfield,/* complain_on_overflow */
161 bfd_elf_generic_reloc, /* special_function */
162 "R_ARM_ABS16", /* name */
163 FALSE, /* partial_inplace */
164 0x0000ffff, /* src_mask */
165 0x0000ffff, /* dst_mask */
166 FALSE), /* pcrel_offset */
167
168 /* 12 bit absolute */
169 HOWTO (R_ARM_ABS12, /* type */
170 0, /* rightshift */
171 2, /* size (0 = byte, 1 = short, 2 = long) */
172 12, /* bitsize */
173 FALSE, /* pc_relative */
174 0, /* bitpos */
175 complain_overflow_bitfield,/* complain_on_overflow */
176 bfd_elf_generic_reloc, /* special_function */
177 "R_ARM_ABS12", /* name */
178 FALSE, /* partial_inplace */
179 0x00000fff, /* src_mask */
180 0x00000fff, /* dst_mask */
181 FALSE), /* pcrel_offset */
182
183 HOWTO (R_ARM_THM_ABS5, /* type */
184 6, /* rightshift */
185 1, /* size (0 = byte, 1 = short, 2 = long) */
186 5, /* bitsize */
187 FALSE, /* pc_relative */
188 0, /* bitpos */
189 complain_overflow_bitfield,/* complain_on_overflow */
190 bfd_elf_generic_reloc, /* special_function */
191 "R_ARM_THM_ABS5", /* name */
192 FALSE, /* partial_inplace */
193 0x000007e0, /* src_mask */
194 0x000007e0, /* dst_mask */
195 FALSE), /* pcrel_offset */
196
197 /* 8 bit absolute */
198 HOWTO (R_ARM_ABS8, /* type */
199 0, /* rightshift */
200 0, /* size (0 = byte, 1 = short, 2 = long) */
201 8, /* bitsize */
202 FALSE, /* pc_relative */
203 0, /* bitpos */
204 complain_overflow_bitfield,/* complain_on_overflow */
205 bfd_elf_generic_reloc, /* special_function */
206 "R_ARM_ABS8", /* name */
207 FALSE, /* partial_inplace */
208 0x000000ff, /* src_mask */
209 0x000000ff, /* dst_mask */
210 FALSE), /* pcrel_offset */
211
212 HOWTO (R_ARM_SBREL32, /* type */
213 0, /* rightshift */
214 2, /* size (0 = byte, 1 = short, 2 = long) */
215 32, /* bitsize */
216 FALSE, /* pc_relative */
217 0, /* bitpos */
218 complain_overflow_dont,/* complain_on_overflow */
219 bfd_elf_generic_reloc, /* special_function */
220 "R_ARM_SBREL32", /* name */
221 FALSE, /* partial_inplace */
222 0xffffffff, /* src_mask */
223 0xffffffff, /* dst_mask */
224 FALSE), /* pcrel_offset */
225
226 HOWTO (R_ARM_THM_CALL, /* type */
227 1, /* rightshift */
228 2, /* size (0 = byte, 1 = short, 2 = long) */
229 24, /* bitsize */
230 TRUE, /* pc_relative */
231 0, /* bitpos */
232 complain_overflow_signed,/* complain_on_overflow */
233 bfd_elf_generic_reloc, /* special_function */
234 "R_ARM_THM_CALL", /* name */
235 FALSE, /* partial_inplace */
236 0x07ff2fff, /* src_mask */
237 0x07ff2fff, /* dst_mask */
238 TRUE), /* pcrel_offset */
239
240 HOWTO (R_ARM_THM_PC8, /* type */
241 1, /* rightshift */
242 1, /* size (0 = byte, 1 = short, 2 = long) */
243 8, /* bitsize */
244 TRUE, /* pc_relative */
245 0, /* bitpos */
246 complain_overflow_signed,/* complain_on_overflow */
247 bfd_elf_generic_reloc, /* special_function */
248 "R_ARM_THM_PC8", /* name */
249 FALSE, /* partial_inplace */
250 0x000000ff, /* src_mask */
251 0x000000ff, /* dst_mask */
252 TRUE), /* pcrel_offset */
253
254 HOWTO (R_ARM_BREL_ADJ, /* type */
255 1, /* rightshift */
256 1, /* size (0 = byte, 1 = short, 2 = long) */
257 32, /* bitsize */
258 FALSE, /* pc_relative */
259 0, /* bitpos */
260 complain_overflow_signed,/* complain_on_overflow */
261 bfd_elf_generic_reloc, /* special_function */
262 "R_ARM_BREL_ADJ", /* name */
263 FALSE, /* partial_inplace */
264 0xffffffff, /* src_mask */
265 0xffffffff, /* dst_mask */
266 FALSE), /* pcrel_offset */
267
268 HOWTO (R_ARM_TLS_DESC, /* type */
269 0, /* rightshift */
270 2, /* size (0 = byte, 1 = short, 2 = long) */
271 32, /* bitsize */
272 FALSE, /* pc_relative */
273 0, /* bitpos */
274 complain_overflow_bitfield,/* complain_on_overflow */
275 bfd_elf_generic_reloc, /* special_function */
276 "R_ARM_TLS_DESC", /* name */
277 FALSE, /* partial_inplace */
278 0xffffffff, /* src_mask */
279 0xffffffff, /* dst_mask */
280 FALSE), /* pcrel_offset */
281
282 HOWTO (R_ARM_THM_SWI8, /* type */
283 0, /* rightshift */
284 0, /* size (0 = byte, 1 = short, 2 = long) */
285 0, /* bitsize */
286 FALSE, /* pc_relative */
287 0, /* bitpos */
288 complain_overflow_signed,/* complain_on_overflow */
289 bfd_elf_generic_reloc, /* special_function */
290 "R_ARM_SWI8", /* name */
291 FALSE, /* partial_inplace */
292 0x00000000, /* src_mask */
293 0x00000000, /* dst_mask */
294 FALSE), /* pcrel_offset */
295
296 /* BLX instruction for the ARM. */
297 HOWTO (R_ARM_XPC25, /* type */
298 2, /* rightshift */
299 2, /* size (0 = byte, 1 = short, 2 = long) */
300 24, /* bitsize */
301 TRUE, /* pc_relative */
302 0, /* bitpos */
303 complain_overflow_signed,/* complain_on_overflow */
304 bfd_elf_generic_reloc, /* special_function */
305 "R_ARM_XPC25", /* name */
306 FALSE, /* partial_inplace */
307 0x00ffffff, /* src_mask */
308 0x00ffffff, /* dst_mask */
309 TRUE), /* pcrel_offset */
310
311 /* BLX instruction for the Thumb. */
312 HOWTO (R_ARM_THM_XPC22, /* type */
313 2, /* rightshift */
314 2, /* size (0 = byte, 1 = short, 2 = long) */
315 24, /* bitsize */
316 TRUE, /* pc_relative */
317 0, /* bitpos */
318 complain_overflow_signed,/* complain_on_overflow */
319 bfd_elf_generic_reloc, /* special_function */
320 "R_ARM_THM_XPC22", /* name */
321 FALSE, /* partial_inplace */
322 0x07ff2fff, /* src_mask */
323 0x07ff2fff, /* dst_mask */
324 TRUE), /* pcrel_offset */
325
326 /* Dynamic TLS relocations. */
327
328 HOWTO (R_ARM_TLS_DTPMOD32, /* type */
329 0, /* rightshift */
330 2, /* size (0 = byte, 1 = short, 2 = long) */
331 32, /* bitsize */
332 FALSE, /* pc_relative */
333 0, /* bitpos */
334 complain_overflow_bitfield,/* complain_on_overflow */
335 bfd_elf_generic_reloc, /* special_function */
336 "R_ARM_TLS_DTPMOD32", /* name */
337 TRUE, /* partial_inplace */
338 0xffffffff, /* src_mask */
339 0xffffffff, /* dst_mask */
340 FALSE), /* pcrel_offset */
341
342 HOWTO (R_ARM_TLS_DTPOFF32, /* type */
343 0, /* rightshift */
344 2, /* size (0 = byte, 1 = short, 2 = long) */
345 32, /* bitsize */
346 FALSE, /* pc_relative */
347 0, /* bitpos */
348 complain_overflow_bitfield,/* complain_on_overflow */
349 bfd_elf_generic_reloc, /* special_function */
350 "R_ARM_TLS_DTPOFF32", /* name */
351 TRUE, /* partial_inplace */
352 0xffffffff, /* src_mask */
353 0xffffffff, /* dst_mask */
354 FALSE), /* pcrel_offset */
355
356 HOWTO (R_ARM_TLS_TPOFF32, /* type */
357 0, /* rightshift */
358 2, /* size (0 = byte, 1 = short, 2 = long) */
359 32, /* bitsize */
360 FALSE, /* pc_relative */
361 0, /* bitpos */
362 complain_overflow_bitfield,/* complain_on_overflow */
363 bfd_elf_generic_reloc, /* special_function */
364 "R_ARM_TLS_TPOFF32", /* name */
365 TRUE, /* partial_inplace */
366 0xffffffff, /* src_mask */
367 0xffffffff, /* dst_mask */
368 FALSE), /* pcrel_offset */
369
370 /* Relocs used in ARM Linux */
371
372 HOWTO (R_ARM_COPY, /* type */
373 0, /* rightshift */
374 2, /* size (0 = byte, 1 = short, 2 = long) */
375 32, /* bitsize */
376 FALSE, /* pc_relative */
377 0, /* bitpos */
378 complain_overflow_bitfield,/* complain_on_overflow */
379 bfd_elf_generic_reloc, /* special_function */
380 "R_ARM_COPY", /* name */
381 TRUE, /* partial_inplace */
382 0xffffffff, /* src_mask */
383 0xffffffff, /* dst_mask */
384 FALSE), /* pcrel_offset */
385
386 HOWTO (R_ARM_GLOB_DAT, /* type */
387 0, /* rightshift */
388 2, /* size (0 = byte, 1 = short, 2 = long) */
389 32, /* bitsize */
390 FALSE, /* pc_relative */
391 0, /* bitpos */
392 complain_overflow_bitfield,/* complain_on_overflow */
393 bfd_elf_generic_reloc, /* special_function */
394 "R_ARM_GLOB_DAT", /* name */
395 TRUE, /* partial_inplace */
396 0xffffffff, /* src_mask */
397 0xffffffff, /* dst_mask */
398 FALSE), /* pcrel_offset */
399
400 HOWTO (R_ARM_JUMP_SLOT, /* type */
401 0, /* rightshift */
402 2, /* size (0 = byte, 1 = short, 2 = long) */
403 32, /* bitsize */
404 FALSE, /* pc_relative */
405 0, /* bitpos */
406 complain_overflow_bitfield,/* complain_on_overflow */
407 bfd_elf_generic_reloc, /* special_function */
408 "R_ARM_JUMP_SLOT", /* name */
409 TRUE, /* partial_inplace */
410 0xffffffff, /* src_mask */
411 0xffffffff, /* dst_mask */
412 FALSE), /* pcrel_offset */
413
414 HOWTO (R_ARM_RELATIVE, /* type */
415 0, /* rightshift */
416 2, /* size (0 = byte, 1 = short, 2 = long) */
417 32, /* bitsize */
418 FALSE, /* pc_relative */
419 0, /* bitpos */
420 complain_overflow_bitfield,/* complain_on_overflow */
421 bfd_elf_generic_reloc, /* special_function */
422 "R_ARM_RELATIVE", /* name */
423 TRUE, /* partial_inplace */
424 0xffffffff, /* src_mask */
425 0xffffffff, /* dst_mask */
426 FALSE), /* pcrel_offset */
427
428 HOWTO (R_ARM_GOTOFF32, /* type */
429 0, /* rightshift */
430 2, /* size (0 = byte, 1 = short, 2 = long) */
431 32, /* bitsize */
432 FALSE, /* pc_relative */
433 0, /* bitpos */
434 complain_overflow_bitfield,/* complain_on_overflow */
435 bfd_elf_generic_reloc, /* special_function */
436 "R_ARM_GOTOFF32", /* name */
437 TRUE, /* partial_inplace */
438 0xffffffff, /* src_mask */
439 0xffffffff, /* dst_mask */
440 FALSE), /* pcrel_offset */
441
442 HOWTO (R_ARM_GOTPC, /* type */
443 0, /* rightshift */
444 2, /* size (0 = byte, 1 = short, 2 = long) */
445 32, /* bitsize */
446 TRUE, /* pc_relative */
447 0, /* bitpos */
448 complain_overflow_bitfield,/* complain_on_overflow */
449 bfd_elf_generic_reloc, /* special_function */
450 "R_ARM_GOTPC", /* name */
451 TRUE, /* partial_inplace */
452 0xffffffff, /* src_mask */
453 0xffffffff, /* dst_mask */
454 TRUE), /* pcrel_offset */
455
456 HOWTO (R_ARM_GOT32, /* type */
457 0, /* rightshift */
458 2, /* size (0 = byte, 1 = short, 2 = long) */
459 32, /* bitsize */
460 FALSE, /* pc_relative */
461 0, /* bitpos */
462 complain_overflow_bitfield,/* complain_on_overflow */
463 bfd_elf_generic_reloc, /* special_function */
464 "R_ARM_GOT32", /* name */
465 TRUE, /* partial_inplace */
466 0xffffffff, /* src_mask */
467 0xffffffff, /* dst_mask */
468 FALSE), /* pcrel_offset */
469
470 HOWTO (R_ARM_PLT32, /* type */
471 2, /* rightshift */
472 2, /* size (0 = byte, 1 = short, 2 = long) */
473 24, /* bitsize */
474 TRUE, /* pc_relative */
475 0, /* bitpos */
476 complain_overflow_bitfield,/* complain_on_overflow */
477 bfd_elf_generic_reloc, /* special_function */
478 "R_ARM_PLT32", /* name */
479 FALSE, /* partial_inplace */
480 0x00ffffff, /* src_mask */
481 0x00ffffff, /* dst_mask */
482 TRUE), /* pcrel_offset */
483
484 HOWTO (R_ARM_CALL, /* type */
485 2, /* rightshift */
486 2, /* size (0 = byte, 1 = short, 2 = long) */
487 24, /* bitsize */
488 TRUE, /* pc_relative */
489 0, /* bitpos */
490 complain_overflow_signed,/* complain_on_overflow */
491 bfd_elf_generic_reloc, /* special_function */
492 "R_ARM_CALL", /* name */
493 FALSE, /* partial_inplace */
494 0x00ffffff, /* src_mask */
495 0x00ffffff, /* dst_mask */
496 TRUE), /* pcrel_offset */
497
498 HOWTO (R_ARM_JUMP24, /* type */
499 2, /* rightshift */
500 2, /* size (0 = byte, 1 = short, 2 = long) */
501 24, /* bitsize */
502 TRUE, /* pc_relative */
503 0, /* bitpos */
504 complain_overflow_signed,/* complain_on_overflow */
505 bfd_elf_generic_reloc, /* special_function */
506 "R_ARM_JUMP24", /* name */
507 FALSE, /* partial_inplace */
508 0x00ffffff, /* src_mask */
509 0x00ffffff, /* dst_mask */
510 TRUE), /* pcrel_offset */
511
512 HOWTO (R_ARM_THM_JUMP24, /* type */
513 1, /* rightshift */
514 2, /* size (0 = byte, 1 = short, 2 = long) */
515 24, /* bitsize */
516 TRUE, /* pc_relative */
517 0, /* bitpos */
518 complain_overflow_signed,/* complain_on_overflow */
519 bfd_elf_generic_reloc, /* special_function */
520 "R_ARM_THM_JUMP24", /* name */
521 FALSE, /* partial_inplace */
522 0x07ff2fff, /* src_mask */
523 0x07ff2fff, /* dst_mask */
524 TRUE), /* pcrel_offset */
525
526 HOWTO (R_ARM_BASE_ABS, /* type */
527 0, /* rightshift */
528 2, /* size (0 = byte, 1 = short, 2 = long) */
529 32, /* bitsize */
530 FALSE, /* pc_relative */
531 0, /* bitpos */
532 complain_overflow_dont,/* complain_on_overflow */
533 bfd_elf_generic_reloc, /* special_function */
534 "R_ARM_BASE_ABS", /* name */
535 FALSE, /* partial_inplace */
536 0xffffffff, /* src_mask */
537 0xffffffff, /* dst_mask */
538 FALSE), /* pcrel_offset */
539
540 HOWTO (R_ARM_ALU_PCREL7_0, /* type */
541 0, /* rightshift */
542 2, /* size (0 = byte, 1 = short, 2 = long) */
543 12, /* bitsize */
544 TRUE, /* pc_relative */
545 0, /* bitpos */
546 complain_overflow_dont,/* complain_on_overflow */
547 bfd_elf_generic_reloc, /* special_function */
548 "R_ARM_ALU_PCREL_7_0", /* name */
549 FALSE, /* partial_inplace */
550 0x00000fff, /* src_mask */
551 0x00000fff, /* dst_mask */
552 TRUE), /* pcrel_offset */
553
554 HOWTO (R_ARM_ALU_PCREL15_8, /* type */
555 0, /* rightshift */
556 2, /* size (0 = byte, 1 = short, 2 = long) */
557 12, /* bitsize */
558 TRUE, /* pc_relative */
559 8, /* bitpos */
560 complain_overflow_dont,/* complain_on_overflow */
561 bfd_elf_generic_reloc, /* special_function */
562 "R_ARM_ALU_PCREL_15_8",/* name */
563 FALSE, /* partial_inplace */
564 0x00000fff, /* src_mask */
565 0x00000fff, /* dst_mask */
566 TRUE), /* pcrel_offset */
567
568 HOWTO (R_ARM_ALU_PCREL23_15, /* type */
569 0, /* rightshift */
570 2, /* size (0 = byte, 1 = short, 2 = long) */
571 12, /* bitsize */
572 TRUE, /* pc_relative */
573 16, /* bitpos */
574 complain_overflow_dont,/* complain_on_overflow */
575 bfd_elf_generic_reloc, /* special_function */
576 "R_ARM_ALU_PCREL_23_15",/* name */
577 FALSE, /* partial_inplace */
578 0x00000fff, /* src_mask */
579 0x00000fff, /* dst_mask */
580 TRUE), /* pcrel_offset */
581
582 HOWTO (R_ARM_LDR_SBREL_11_0, /* type */
583 0, /* rightshift */
584 2, /* size (0 = byte, 1 = short, 2 = long) */
585 12, /* bitsize */
586 FALSE, /* pc_relative */
587 0, /* bitpos */
588 complain_overflow_dont,/* complain_on_overflow */
589 bfd_elf_generic_reloc, /* special_function */
590 "R_ARM_LDR_SBREL_11_0",/* name */
591 FALSE, /* partial_inplace */
592 0x00000fff, /* src_mask */
593 0x00000fff, /* dst_mask */
594 FALSE), /* pcrel_offset */
595
596 HOWTO (R_ARM_ALU_SBREL_19_12, /* type */
597 0, /* rightshift */
598 2, /* size (0 = byte, 1 = short, 2 = long) */
599 8, /* bitsize */
600 FALSE, /* pc_relative */
601 12, /* bitpos */
602 complain_overflow_dont,/* complain_on_overflow */
603 bfd_elf_generic_reloc, /* special_function */
604 "R_ARM_ALU_SBREL_19_12",/* name */
605 FALSE, /* partial_inplace */
606 0x000ff000, /* src_mask */
607 0x000ff000, /* dst_mask */
608 FALSE), /* pcrel_offset */
609
610 HOWTO (R_ARM_ALU_SBREL_27_20, /* type */
611 0, /* rightshift */
612 2, /* size (0 = byte, 1 = short, 2 = long) */
613 8, /* bitsize */
614 FALSE, /* pc_relative */
615 20, /* bitpos */
616 complain_overflow_dont,/* complain_on_overflow */
617 bfd_elf_generic_reloc, /* special_function */
618 "R_ARM_ALU_SBREL_27_20",/* name */
619 FALSE, /* partial_inplace */
620 0x0ff00000, /* src_mask */
621 0x0ff00000, /* dst_mask */
622 FALSE), /* pcrel_offset */
623
624 HOWTO (R_ARM_TARGET1, /* type */
625 0, /* rightshift */
626 2, /* size (0 = byte, 1 = short, 2 = long) */
627 32, /* bitsize */
628 FALSE, /* pc_relative */
629 0, /* bitpos */
630 complain_overflow_dont,/* complain_on_overflow */
631 bfd_elf_generic_reloc, /* special_function */
632 "R_ARM_TARGET1", /* name */
633 FALSE, /* partial_inplace */
634 0xffffffff, /* src_mask */
635 0xffffffff, /* dst_mask */
636 FALSE), /* pcrel_offset */
637
638 HOWTO (R_ARM_ROSEGREL32, /* type */
639 0, /* rightshift */
640 2, /* size (0 = byte, 1 = short, 2 = long) */
641 32, /* bitsize */
642 FALSE, /* pc_relative */
643 0, /* bitpos */
644 complain_overflow_dont,/* complain_on_overflow */
645 bfd_elf_generic_reloc, /* special_function */
646 "R_ARM_ROSEGREL32", /* name */
647 FALSE, /* partial_inplace */
648 0xffffffff, /* src_mask */
649 0xffffffff, /* dst_mask */
650 FALSE), /* pcrel_offset */
651
652 HOWTO (R_ARM_V4BX, /* type */
653 0, /* rightshift */
654 2, /* size (0 = byte, 1 = short, 2 = long) */
655 32, /* bitsize */
656 FALSE, /* pc_relative */
657 0, /* bitpos */
658 complain_overflow_dont,/* complain_on_overflow */
659 bfd_elf_generic_reloc, /* special_function */
660 "R_ARM_V4BX", /* name */
661 FALSE, /* partial_inplace */
662 0xffffffff, /* src_mask */
663 0xffffffff, /* dst_mask */
664 FALSE), /* pcrel_offset */
665
666 HOWTO (R_ARM_TARGET2, /* type */
667 0, /* rightshift */
668 2, /* size (0 = byte, 1 = short, 2 = long) */
669 32, /* bitsize */
670 FALSE, /* pc_relative */
671 0, /* bitpos */
672 complain_overflow_signed,/* complain_on_overflow */
673 bfd_elf_generic_reloc, /* special_function */
674 "R_ARM_TARGET2", /* name */
675 FALSE, /* partial_inplace */
676 0xffffffff, /* src_mask */
677 0xffffffff, /* dst_mask */
678 TRUE), /* pcrel_offset */
679
680 HOWTO (R_ARM_PREL31, /* type */
681 0, /* rightshift */
682 2, /* size (0 = byte, 1 = short, 2 = long) */
683 31, /* bitsize */
684 TRUE, /* pc_relative */
685 0, /* bitpos */
686 complain_overflow_signed,/* complain_on_overflow */
687 bfd_elf_generic_reloc, /* special_function */
688 "R_ARM_PREL31", /* name */
689 FALSE, /* partial_inplace */
690 0x7fffffff, /* src_mask */
691 0x7fffffff, /* dst_mask */
692 TRUE), /* pcrel_offset */
693
694 HOWTO (R_ARM_MOVW_ABS_NC, /* type */
695 0, /* rightshift */
696 2, /* size (0 = byte, 1 = short, 2 = long) */
697 16, /* bitsize */
698 FALSE, /* pc_relative */
699 0, /* bitpos */
700 complain_overflow_dont,/* complain_on_overflow */
701 bfd_elf_generic_reloc, /* special_function */
702 "R_ARM_MOVW_ABS_NC", /* name */
703 FALSE, /* partial_inplace */
704 0x000f0fff, /* src_mask */
705 0x000f0fff, /* dst_mask */
706 FALSE), /* pcrel_offset */
707
708 HOWTO (R_ARM_MOVT_ABS, /* type */
709 0, /* rightshift */
710 2, /* size (0 = byte, 1 = short, 2 = long) */
711 16, /* bitsize */
712 FALSE, /* pc_relative */
713 0, /* bitpos */
714 complain_overflow_bitfield,/* complain_on_overflow */
715 bfd_elf_generic_reloc, /* special_function */
716 "R_ARM_MOVT_ABS", /* name */
717 FALSE, /* partial_inplace */
718 0x000f0fff, /* src_mask */
719 0x000f0fff, /* dst_mask */
720 FALSE), /* pcrel_offset */
721
722 HOWTO (R_ARM_MOVW_PREL_NC, /* type */
723 0, /* rightshift */
724 2, /* size (0 = byte, 1 = short, 2 = long) */
725 16, /* bitsize */
726 TRUE, /* pc_relative */
727 0, /* bitpos */
728 complain_overflow_dont,/* complain_on_overflow */
729 bfd_elf_generic_reloc, /* special_function */
730 "R_ARM_MOVW_PREL_NC", /* name */
731 FALSE, /* partial_inplace */
732 0x000f0fff, /* src_mask */
733 0x000f0fff, /* dst_mask */
734 TRUE), /* pcrel_offset */
735
736 HOWTO (R_ARM_MOVT_PREL, /* type */
737 0, /* rightshift */
738 2, /* size (0 = byte, 1 = short, 2 = long) */
739 16, /* bitsize */
740 TRUE, /* pc_relative */
741 0, /* bitpos */
742 complain_overflow_bitfield,/* complain_on_overflow */
743 bfd_elf_generic_reloc, /* special_function */
744 "R_ARM_MOVT_PREL", /* name */
745 FALSE, /* partial_inplace */
746 0x000f0fff, /* src_mask */
747 0x000f0fff, /* dst_mask */
748 TRUE), /* pcrel_offset */
749
750 HOWTO (R_ARM_THM_MOVW_ABS_NC, /* type */
751 0, /* rightshift */
752 2, /* size (0 = byte, 1 = short, 2 = long) */
753 16, /* bitsize */
754 FALSE, /* pc_relative */
755 0, /* bitpos */
756 complain_overflow_dont,/* complain_on_overflow */
757 bfd_elf_generic_reloc, /* special_function */
758 "R_ARM_THM_MOVW_ABS_NC",/* name */
759 FALSE, /* partial_inplace */
760 0x040f70ff, /* src_mask */
761 0x040f70ff, /* dst_mask */
762 FALSE), /* pcrel_offset */
763
764 HOWTO (R_ARM_THM_MOVT_ABS, /* type */
765 0, /* rightshift */
766 2, /* size (0 = byte, 1 = short, 2 = long) */
767 16, /* bitsize */
768 FALSE, /* pc_relative */
769 0, /* bitpos */
770 complain_overflow_bitfield,/* complain_on_overflow */
771 bfd_elf_generic_reloc, /* special_function */
772 "R_ARM_THM_MOVT_ABS", /* name */
773 FALSE, /* partial_inplace */
774 0x040f70ff, /* src_mask */
775 0x040f70ff, /* dst_mask */
776 FALSE), /* pcrel_offset */
777
778 HOWTO (R_ARM_THM_MOVW_PREL_NC,/* type */
779 0, /* rightshift */
780 2, /* size (0 = byte, 1 = short, 2 = long) */
781 16, /* bitsize */
782 TRUE, /* pc_relative */
783 0, /* bitpos */
784 complain_overflow_dont,/* complain_on_overflow */
785 bfd_elf_generic_reloc, /* special_function */
786 "R_ARM_THM_MOVW_PREL_NC",/* name */
787 FALSE, /* partial_inplace */
788 0x040f70ff, /* src_mask */
789 0x040f70ff, /* dst_mask */
790 TRUE), /* pcrel_offset */
791
792 HOWTO (R_ARM_THM_MOVT_PREL, /* type */
793 0, /* rightshift */
794 2, /* size (0 = byte, 1 = short, 2 = long) */
795 16, /* bitsize */
796 TRUE, /* pc_relative */
797 0, /* bitpos */
798 complain_overflow_bitfield,/* complain_on_overflow */
799 bfd_elf_generic_reloc, /* special_function */
800 "R_ARM_THM_MOVT_PREL", /* name */
801 FALSE, /* partial_inplace */
802 0x040f70ff, /* src_mask */
803 0x040f70ff, /* dst_mask */
804 TRUE), /* pcrel_offset */
805
806 HOWTO (R_ARM_THM_JUMP19, /* type */
807 1, /* rightshift */
808 2, /* size (0 = byte, 1 = short, 2 = long) */
809 19, /* bitsize */
810 TRUE, /* pc_relative */
811 0, /* bitpos */
812 complain_overflow_signed,/* complain_on_overflow */
813 bfd_elf_generic_reloc, /* special_function */
814 "R_ARM_THM_JUMP19", /* name */
815 FALSE, /* partial_inplace */
816 0x043f2fff, /* src_mask */
817 0x043f2fff, /* dst_mask */
818 TRUE), /* pcrel_offset */
819
820 HOWTO (R_ARM_THM_JUMP6, /* type */
821 1, /* rightshift */
822 1, /* size (0 = byte, 1 = short, 2 = long) */
823 6, /* bitsize */
824 TRUE, /* pc_relative */
825 0, /* bitpos */
826 complain_overflow_unsigned,/* complain_on_overflow */
827 bfd_elf_generic_reloc, /* special_function */
828 "R_ARM_THM_JUMP6", /* name */
829 FALSE, /* partial_inplace */
830 0x02f8, /* src_mask */
831 0x02f8, /* dst_mask */
832 TRUE), /* pcrel_offset */
833
834 /* These are declared as 13-bit signed relocations because we can
835 address -4095 .. 4095(base) by altering ADDW to SUBW or vice
836 versa. */
837 HOWTO (R_ARM_THM_ALU_PREL_11_0,/* type */
838 0, /* rightshift */
839 2, /* size (0 = byte, 1 = short, 2 = long) */
840 13, /* bitsize */
841 TRUE, /* pc_relative */
842 0, /* bitpos */
843 complain_overflow_dont,/* complain_on_overflow */
844 bfd_elf_generic_reloc, /* special_function */
845 "R_ARM_THM_ALU_PREL_11_0",/* name */
846 FALSE, /* partial_inplace */
847 0xffffffff, /* src_mask */
848 0xffffffff, /* dst_mask */
849 TRUE), /* pcrel_offset */
850
851 HOWTO (R_ARM_THM_PC12, /* type */
852 0, /* rightshift */
853 2, /* size (0 = byte, 1 = short, 2 = long) */
854 13, /* bitsize */
855 TRUE, /* pc_relative */
856 0, /* bitpos */
857 complain_overflow_dont,/* complain_on_overflow */
858 bfd_elf_generic_reloc, /* special_function */
859 "R_ARM_THM_PC12", /* name */
860 FALSE, /* partial_inplace */
861 0xffffffff, /* src_mask */
862 0xffffffff, /* dst_mask */
863 TRUE), /* pcrel_offset */
864
865 HOWTO (R_ARM_ABS32_NOI, /* type */
866 0, /* rightshift */
867 2, /* size (0 = byte, 1 = short, 2 = long) */
868 32, /* bitsize */
869 FALSE, /* pc_relative */
870 0, /* bitpos */
871 complain_overflow_dont,/* complain_on_overflow */
872 bfd_elf_generic_reloc, /* special_function */
873 "R_ARM_ABS32_NOI", /* name */
874 FALSE, /* partial_inplace */
875 0xffffffff, /* src_mask */
876 0xffffffff, /* dst_mask */
877 FALSE), /* pcrel_offset */
878
879 HOWTO (R_ARM_REL32_NOI, /* type */
880 0, /* rightshift */
881 2, /* size (0 = byte, 1 = short, 2 = long) */
882 32, /* bitsize */
883 TRUE, /* pc_relative */
884 0, /* bitpos */
885 complain_overflow_dont,/* complain_on_overflow */
886 bfd_elf_generic_reloc, /* special_function */
887 "R_ARM_REL32_NOI", /* name */
888 FALSE, /* partial_inplace */
889 0xffffffff, /* src_mask */
890 0xffffffff, /* dst_mask */
891 FALSE), /* pcrel_offset */
892
893 /* Group relocations. */
894
895 HOWTO (R_ARM_ALU_PC_G0_NC, /* type */
896 0, /* rightshift */
897 2, /* size (0 = byte, 1 = short, 2 = long) */
898 32, /* bitsize */
899 TRUE, /* pc_relative */
900 0, /* bitpos */
901 complain_overflow_dont,/* complain_on_overflow */
902 bfd_elf_generic_reloc, /* special_function */
903 "R_ARM_ALU_PC_G0_NC", /* name */
904 FALSE, /* partial_inplace */
905 0xffffffff, /* src_mask */
906 0xffffffff, /* dst_mask */
907 TRUE), /* pcrel_offset */
908
909 HOWTO (R_ARM_ALU_PC_G0, /* type */
910 0, /* rightshift */
911 2, /* size (0 = byte, 1 = short, 2 = long) */
912 32, /* bitsize */
913 TRUE, /* pc_relative */
914 0, /* bitpos */
915 complain_overflow_dont,/* complain_on_overflow */
916 bfd_elf_generic_reloc, /* special_function */
917 "R_ARM_ALU_PC_G0", /* name */
918 FALSE, /* partial_inplace */
919 0xffffffff, /* src_mask */
920 0xffffffff, /* dst_mask */
921 TRUE), /* pcrel_offset */
922
923 HOWTO (R_ARM_ALU_PC_G1_NC, /* type */
924 0, /* rightshift */
925 2, /* size (0 = byte, 1 = short, 2 = long) */
926 32, /* bitsize */
927 TRUE, /* pc_relative */
928 0, /* bitpos */
929 complain_overflow_dont,/* complain_on_overflow */
930 bfd_elf_generic_reloc, /* special_function */
931 "R_ARM_ALU_PC_G1_NC", /* name */
932 FALSE, /* partial_inplace */
933 0xffffffff, /* src_mask */
934 0xffffffff, /* dst_mask */
935 TRUE), /* pcrel_offset */
936
937 HOWTO (R_ARM_ALU_PC_G1, /* type */
938 0, /* rightshift */
939 2, /* size (0 = byte, 1 = short, 2 = long) */
940 32, /* bitsize */
941 TRUE, /* pc_relative */
942 0, /* bitpos */
943 complain_overflow_dont,/* complain_on_overflow */
944 bfd_elf_generic_reloc, /* special_function */
945 "R_ARM_ALU_PC_G1", /* name */
946 FALSE, /* partial_inplace */
947 0xffffffff, /* src_mask */
948 0xffffffff, /* dst_mask */
949 TRUE), /* pcrel_offset */
950
951 HOWTO (R_ARM_ALU_PC_G2, /* type */
952 0, /* rightshift */
953 2, /* size (0 = byte, 1 = short, 2 = long) */
954 32, /* bitsize */
955 TRUE, /* pc_relative */
956 0, /* bitpos */
957 complain_overflow_dont,/* complain_on_overflow */
958 bfd_elf_generic_reloc, /* special_function */
959 "R_ARM_ALU_PC_G2", /* name */
960 FALSE, /* partial_inplace */
961 0xffffffff, /* src_mask */
962 0xffffffff, /* dst_mask */
963 TRUE), /* pcrel_offset */
964
965 HOWTO (R_ARM_LDR_PC_G1, /* type */
966 0, /* rightshift */
967 2, /* size (0 = byte, 1 = short, 2 = long) */
968 32, /* bitsize */
969 TRUE, /* pc_relative */
970 0, /* bitpos */
971 complain_overflow_dont,/* complain_on_overflow */
972 bfd_elf_generic_reloc, /* special_function */
973 "R_ARM_LDR_PC_G1", /* name */
974 FALSE, /* partial_inplace */
975 0xffffffff, /* src_mask */
976 0xffffffff, /* dst_mask */
977 TRUE), /* pcrel_offset */
978
979 HOWTO (R_ARM_LDR_PC_G2, /* type */
980 0, /* rightshift */
981 2, /* size (0 = byte, 1 = short, 2 = long) */
982 32, /* bitsize */
983 TRUE, /* pc_relative */
984 0, /* bitpos */
985 complain_overflow_dont,/* complain_on_overflow */
986 bfd_elf_generic_reloc, /* special_function */
987 "R_ARM_LDR_PC_G2", /* name */
988 FALSE, /* partial_inplace */
989 0xffffffff, /* src_mask */
990 0xffffffff, /* dst_mask */
991 TRUE), /* pcrel_offset */
992
993 HOWTO (R_ARM_LDRS_PC_G0, /* type */
994 0, /* rightshift */
995 2, /* size (0 = byte, 1 = short, 2 = long) */
996 32, /* bitsize */
997 TRUE, /* pc_relative */
998 0, /* bitpos */
999 complain_overflow_dont,/* complain_on_overflow */
1000 bfd_elf_generic_reloc, /* special_function */
1001 "R_ARM_LDRS_PC_G0", /* name */
1002 FALSE, /* partial_inplace */
1003 0xffffffff, /* src_mask */
1004 0xffffffff, /* dst_mask */
1005 TRUE), /* pcrel_offset */
1006
1007 HOWTO (R_ARM_LDRS_PC_G1, /* type */
1008 0, /* rightshift */
1009 2, /* size (0 = byte, 1 = short, 2 = long) */
1010 32, /* bitsize */
1011 TRUE, /* pc_relative */
1012 0, /* bitpos */
1013 complain_overflow_dont,/* complain_on_overflow */
1014 bfd_elf_generic_reloc, /* special_function */
1015 "R_ARM_LDRS_PC_G1", /* name */
1016 FALSE, /* partial_inplace */
1017 0xffffffff, /* src_mask */
1018 0xffffffff, /* dst_mask */
1019 TRUE), /* pcrel_offset */
1020
1021 HOWTO (R_ARM_LDRS_PC_G2, /* type */
1022 0, /* rightshift */
1023 2, /* size (0 = byte, 1 = short, 2 = long) */
1024 32, /* bitsize */
1025 TRUE, /* pc_relative */
1026 0, /* bitpos */
1027 complain_overflow_dont,/* complain_on_overflow */
1028 bfd_elf_generic_reloc, /* special_function */
1029 "R_ARM_LDRS_PC_G2", /* name */
1030 FALSE, /* partial_inplace */
1031 0xffffffff, /* src_mask */
1032 0xffffffff, /* dst_mask */
1033 TRUE), /* pcrel_offset */
1034
1035 HOWTO (R_ARM_LDC_PC_G0, /* type */
1036 0, /* rightshift */
1037 2, /* size (0 = byte, 1 = short, 2 = long) */
1038 32, /* bitsize */
1039 TRUE, /* pc_relative */
1040 0, /* bitpos */
1041 complain_overflow_dont,/* complain_on_overflow */
1042 bfd_elf_generic_reloc, /* special_function */
1043 "R_ARM_LDC_PC_G0", /* name */
1044 FALSE, /* partial_inplace */
1045 0xffffffff, /* src_mask */
1046 0xffffffff, /* dst_mask */
1047 TRUE), /* pcrel_offset */
1048
1049 HOWTO (R_ARM_LDC_PC_G1, /* type */
1050 0, /* rightshift */
1051 2, /* size (0 = byte, 1 = short, 2 = long) */
1052 32, /* bitsize */
1053 TRUE, /* pc_relative */
1054 0, /* bitpos */
1055 complain_overflow_dont,/* complain_on_overflow */
1056 bfd_elf_generic_reloc, /* special_function */
1057 "R_ARM_LDC_PC_G1", /* name */
1058 FALSE, /* partial_inplace */
1059 0xffffffff, /* src_mask */
1060 0xffffffff, /* dst_mask */
1061 TRUE), /* pcrel_offset */
1062
1063 HOWTO (R_ARM_LDC_PC_G2, /* type */
1064 0, /* rightshift */
1065 2, /* size (0 = byte, 1 = short, 2 = long) */
1066 32, /* bitsize */
1067 TRUE, /* pc_relative */
1068 0, /* bitpos */
1069 complain_overflow_dont,/* complain_on_overflow */
1070 bfd_elf_generic_reloc, /* special_function */
1071 "R_ARM_LDC_PC_G2", /* name */
1072 FALSE, /* partial_inplace */
1073 0xffffffff, /* src_mask */
1074 0xffffffff, /* dst_mask */
1075 TRUE), /* pcrel_offset */
1076
1077 HOWTO (R_ARM_ALU_SB_G0_NC, /* type */
1078 0, /* rightshift */
1079 2, /* size (0 = byte, 1 = short, 2 = long) */
1080 32, /* bitsize */
1081 TRUE, /* pc_relative */
1082 0, /* bitpos */
1083 complain_overflow_dont,/* complain_on_overflow */
1084 bfd_elf_generic_reloc, /* special_function */
1085 "R_ARM_ALU_SB_G0_NC", /* name */
1086 FALSE, /* partial_inplace */
1087 0xffffffff, /* src_mask */
1088 0xffffffff, /* dst_mask */
1089 TRUE), /* pcrel_offset */
1090
1091 HOWTO (R_ARM_ALU_SB_G0, /* type */
1092 0, /* rightshift */
1093 2, /* size (0 = byte, 1 = short, 2 = long) */
1094 32, /* bitsize */
1095 TRUE, /* pc_relative */
1096 0, /* bitpos */
1097 complain_overflow_dont,/* complain_on_overflow */
1098 bfd_elf_generic_reloc, /* special_function */
1099 "R_ARM_ALU_SB_G0", /* name */
1100 FALSE, /* partial_inplace */
1101 0xffffffff, /* src_mask */
1102 0xffffffff, /* dst_mask */
1103 TRUE), /* pcrel_offset */
1104
1105 HOWTO (R_ARM_ALU_SB_G1_NC, /* type */
1106 0, /* rightshift */
1107 2, /* size (0 = byte, 1 = short, 2 = long) */
1108 32, /* bitsize */
1109 TRUE, /* pc_relative */
1110 0, /* bitpos */
1111 complain_overflow_dont,/* complain_on_overflow */
1112 bfd_elf_generic_reloc, /* special_function */
1113 "R_ARM_ALU_SB_G1_NC", /* name */
1114 FALSE, /* partial_inplace */
1115 0xffffffff, /* src_mask */
1116 0xffffffff, /* dst_mask */
1117 TRUE), /* pcrel_offset */
1118
1119 HOWTO (R_ARM_ALU_SB_G1, /* type */
1120 0, /* rightshift */
1121 2, /* size (0 = byte, 1 = short, 2 = long) */
1122 32, /* bitsize */
1123 TRUE, /* pc_relative */
1124 0, /* bitpos */
1125 complain_overflow_dont,/* complain_on_overflow */
1126 bfd_elf_generic_reloc, /* special_function */
1127 "R_ARM_ALU_SB_G1", /* name */
1128 FALSE, /* partial_inplace */
1129 0xffffffff, /* src_mask */
1130 0xffffffff, /* dst_mask */
1131 TRUE), /* pcrel_offset */
1132
1133 HOWTO (R_ARM_ALU_SB_G2, /* type */
1134 0, /* rightshift */
1135 2, /* size (0 = byte, 1 = short, 2 = long) */
1136 32, /* bitsize */
1137 TRUE, /* pc_relative */
1138 0, /* bitpos */
1139 complain_overflow_dont,/* complain_on_overflow */
1140 bfd_elf_generic_reloc, /* special_function */
1141 "R_ARM_ALU_SB_G2", /* name */
1142 FALSE, /* partial_inplace */
1143 0xffffffff, /* src_mask */
1144 0xffffffff, /* dst_mask */
1145 TRUE), /* pcrel_offset */
1146
1147 HOWTO (R_ARM_LDR_SB_G0, /* type */
1148 0, /* rightshift */
1149 2, /* size (0 = byte, 1 = short, 2 = long) */
1150 32, /* bitsize */
1151 TRUE, /* pc_relative */
1152 0, /* bitpos */
1153 complain_overflow_dont,/* complain_on_overflow */
1154 bfd_elf_generic_reloc, /* special_function */
1155 "R_ARM_LDR_SB_G0", /* name */
1156 FALSE, /* partial_inplace */
1157 0xffffffff, /* src_mask */
1158 0xffffffff, /* dst_mask */
1159 TRUE), /* pcrel_offset */
1160
1161 HOWTO (R_ARM_LDR_SB_G1, /* type */
1162 0, /* rightshift */
1163 2, /* size (0 = byte, 1 = short, 2 = long) */
1164 32, /* bitsize */
1165 TRUE, /* pc_relative */
1166 0, /* bitpos */
1167 complain_overflow_dont,/* complain_on_overflow */
1168 bfd_elf_generic_reloc, /* special_function */
1169 "R_ARM_LDR_SB_G1", /* name */
1170 FALSE, /* partial_inplace */
1171 0xffffffff, /* src_mask */
1172 0xffffffff, /* dst_mask */
1173 TRUE), /* pcrel_offset */
1174
1175 HOWTO (R_ARM_LDR_SB_G2, /* type */
1176 0, /* rightshift */
1177 2, /* size (0 = byte, 1 = short, 2 = long) */
1178 32, /* bitsize */
1179 TRUE, /* pc_relative */
1180 0, /* bitpos */
1181 complain_overflow_dont,/* complain_on_overflow */
1182 bfd_elf_generic_reloc, /* special_function */
1183 "R_ARM_LDR_SB_G2", /* name */
1184 FALSE, /* partial_inplace */
1185 0xffffffff, /* src_mask */
1186 0xffffffff, /* dst_mask */
1187 TRUE), /* pcrel_offset */
1188
1189 HOWTO (R_ARM_LDRS_SB_G0, /* type */
1190 0, /* rightshift */
1191 2, /* size (0 = byte, 1 = short, 2 = long) */
1192 32, /* bitsize */
1193 TRUE, /* pc_relative */
1194 0, /* bitpos */
1195 complain_overflow_dont,/* complain_on_overflow */
1196 bfd_elf_generic_reloc, /* special_function */
1197 "R_ARM_LDRS_SB_G0", /* name */
1198 FALSE, /* partial_inplace */
1199 0xffffffff, /* src_mask */
1200 0xffffffff, /* dst_mask */
1201 TRUE), /* pcrel_offset */
1202
1203 HOWTO (R_ARM_LDRS_SB_G1, /* type */
1204 0, /* rightshift */
1205 2, /* size (0 = byte, 1 = short, 2 = long) */
1206 32, /* bitsize */
1207 TRUE, /* pc_relative */
1208 0, /* bitpos */
1209 complain_overflow_dont,/* complain_on_overflow */
1210 bfd_elf_generic_reloc, /* special_function */
1211 "R_ARM_LDRS_SB_G1", /* name */
1212 FALSE, /* partial_inplace */
1213 0xffffffff, /* src_mask */
1214 0xffffffff, /* dst_mask */
1215 TRUE), /* pcrel_offset */
1216
1217 HOWTO (R_ARM_LDRS_SB_G2, /* type */
1218 0, /* rightshift */
1219 2, /* size (0 = byte, 1 = short, 2 = long) */
1220 32, /* bitsize */
1221 TRUE, /* pc_relative */
1222 0, /* bitpos */
1223 complain_overflow_dont,/* complain_on_overflow */
1224 bfd_elf_generic_reloc, /* special_function */
1225 "R_ARM_LDRS_SB_G2", /* name */
1226 FALSE, /* partial_inplace */
1227 0xffffffff, /* src_mask */
1228 0xffffffff, /* dst_mask */
1229 TRUE), /* pcrel_offset */
1230
1231 HOWTO (R_ARM_LDC_SB_G0, /* type */
1232 0, /* rightshift */
1233 2, /* size (0 = byte, 1 = short, 2 = long) */
1234 32, /* bitsize */
1235 TRUE, /* pc_relative */
1236 0, /* bitpos */
1237 complain_overflow_dont,/* complain_on_overflow */
1238 bfd_elf_generic_reloc, /* special_function */
1239 "R_ARM_LDC_SB_G0", /* name */
1240 FALSE, /* partial_inplace */
1241 0xffffffff, /* src_mask */
1242 0xffffffff, /* dst_mask */
1243 TRUE), /* pcrel_offset */
1244
1245 HOWTO (R_ARM_LDC_SB_G1, /* type */
1246 0, /* rightshift */
1247 2, /* size (0 = byte, 1 = short, 2 = long) */
1248 32, /* bitsize */
1249 TRUE, /* pc_relative */
1250 0, /* bitpos */
1251 complain_overflow_dont,/* complain_on_overflow */
1252 bfd_elf_generic_reloc, /* special_function */
1253 "R_ARM_LDC_SB_G1", /* name */
1254 FALSE, /* partial_inplace */
1255 0xffffffff, /* src_mask */
1256 0xffffffff, /* dst_mask */
1257 TRUE), /* pcrel_offset */
1258
1259 HOWTO (R_ARM_LDC_SB_G2, /* type */
1260 0, /* rightshift */
1261 2, /* size (0 = byte, 1 = short, 2 = long) */
1262 32, /* bitsize */
1263 TRUE, /* pc_relative */
1264 0, /* bitpos */
1265 complain_overflow_dont,/* complain_on_overflow */
1266 bfd_elf_generic_reloc, /* special_function */
1267 "R_ARM_LDC_SB_G2", /* name */
1268 FALSE, /* partial_inplace */
1269 0xffffffff, /* src_mask */
1270 0xffffffff, /* dst_mask */
1271 TRUE), /* pcrel_offset */
1272
1273 /* End of group relocations. */
1274
1275 HOWTO (R_ARM_MOVW_BREL_NC, /* type */
1276 0, /* rightshift */
1277 2, /* size (0 = byte, 1 = short, 2 = long) */
1278 16, /* bitsize */
1279 FALSE, /* pc_relative */
1280 0, /* bitpos */
1281 complain_overflow_dont,/* complain_on_overflow */
1282 bfd_elf_generic_reloc, /* special_function */
1283 "R_ARM_MOVW_BREL_NC", /* name */
1284 FALSE, /* partial_inplace */
1285 0x0000ffff, /* src_mask */
1286 0x0000ffff, /* dst_mask */
1287 FALSE), /* pcrel_offset */
1288
1289 HOWTO (R_ARM_MOVT_BREL, /* type */
1290 0, /* rightshift */
1291 2, /* size (0 = byte, 1 = short, 2 = long) */
1292 16, /* bitsize */
1293 FALSE, /* pc_relative */
1294 0, /* bitpos */
1295 complain_overflow_bitfield,/* complain_on_overflow */
1296 bfd_elf_generic_reloc, /* special_function */
1297 "R_ARM_MOVT_BREL", /* name */
1298 FALSE, /* partial_inplace */
1299 0x0000ffff, /* src_mask */
1300 0x0000ffff, /* dst_mask */
1301 FALSE), /* pcrel_offset */
1302
1303 HOWTO (R_ARM_MOVW_BREL, /* type */
1304 0, /* rightshift */
1305 2, /* size (0 = byte, 1 = short, 2 = long) */
1306 16, /* bitsize */
1307 FALSE, /* pc_relative */
1308 0, /* bitpos */
1309 complain_overflow_dont,/* complain_on_overflow */
1310 bfd_elf_generic_reloc, /* special_function */
1311 "R_ARM_MOVW_BREL", /* name */
1312 FALSE, /* partial_inplace */
1313 0x0000ffff, /* src_mask */
1314 0x0000ffff, /* dst_mask */
1315 FALSE), /* pcrel_offset */
1316
1317 HOWTO (R_ARM_THM_MOVW_BREL_NC,/* type */
1318 0, /* rightshift */
1319 2, /* size (0 = byte, 1 = short, 2 = long) */
1320 16, /* bitsize */
1321 FALSE, /* pc_relative */
1322 0, /* bitpos */
1323 complain_overflow_dont,/* complain_on_overflow */
1324 bfd_elf_generic_reloc, /* special_function */
1325 "R_ARM_THM_MOVW_BREL_NC",/* name */
1326 FALSE, /* partial_inplace */
1327 0x040f70ff, /* src_mask */
1328 0x040f70ff, /* dst_mask */
1329 FALSE), /* pcrel_offset */
1330
1331 HOWTO (R_ARM_THM_MOVT_BREL, /* type */
1332 0, /* rightshift */
1333 2, /* size (0 = byte, 1 = short, 2 = long) */
1334 16, /* bitsize */
1335 FALSE, /* pc_relative */
1336 0, /* bitpos */
1337 complain_overflow_bitfield,/* complain_on_overflow */
1338 bfd_elf_generic_reloc, /* special_function */
1339 "R_ARM_THM_MOVT_BREL", /* name */
1340 FALSE, /* partial_inplace */
1341 0x040f70ff, /* src_mask */
1342 0x040f70ff, /* dst_mask */
1343 FALSE), /* pcrel_offset */
1344
1345 HOWTO (R_ARM_THM_MOVW_BREL, /* type */
1346 0, /* rightshift */
1347 2, /* size (0 = byte, 1 = short, 2 = long) */
1348 16, /* bitsize */
1349 FALSE, /* pc_relative */
1350 0, /* bitpos */
1351 complain_overflow_dont,/* complain_on_overflow */
1352 bfd_elf_generic_reloc, /* special_function */
1353 "R_ARM_THM_MOVW_BREL", /* name */
1354 FALSE, /* partial_inplace */
1355 0x040f70ff, /* src_mask */
1356 0x040f70ff, /* dst_mask */
1357 FALSE), /* pcrel_offset */
1358
1359 HOWTO (R_ARM_TLS_GOTDESC, /* type */
1360 0, /* rightshift */
1361 2, /* size (0 = byte, 1 = short, 2 = long) */
1362 32, /* bitsize */
1363 FALSE, /* pc_relative */
1364 0, /* bitpos */
1365 complain_overflow_bitfield,/* complain_on_overflow */
1366 NULL, /* special_function */
1367 "R_ARM_TLS_GOTDESC", /* name */
1368 TRUE, /* partial_inplace */
1369 0xffffffff, /* src_mask */
1370 0xffffffff, /* dst_mask */
1371 FALSE), /* pcrel_offset */
1372
1373 HOWTO (R_ARM_TLS_CALL, /* type */
1374 0, /* rightshift */
1375 2, /* size (0 = byte, 1 = short, 2 = long) */
1376 24, /* bitsize */
1377 FALSE, /* pc_relative */
1378 0, /* bitpos */
1379 complain_overflow_dont,/* complain_on_overflow */
1380 bfd_elf_generic_reloc, /* special_function */
1381 "R_ARM_TLS_CALL", /* name */
1382 FALSE, /* partial_inplace */
1383 0x00ffffff, /* src_mask */
1384 0x00ffffff, /* dst_mask */
1385 FALSE), /* pcrel_offset */
1386
1387 HOWTO (R_ARM_TLS_DESCSEQ, /* type */
1388 0, /* rightshift */
1389 2, /* size (0 = byte, 1 = short, 2 = long) */
1390 0, /* bitsize */
1391 FALSE, /* pc_relative */
1392 0, /* bitpos */
1393 complain_overflow_bitfield,/* complain_on_overflow */
1394 bfd_elf_generic_reloc, /* special_function */
1395 "R_ARM_TLS_DESCSEQ", /* name */
1396 FALSE, /* partial_inplace */
1397 0x00000000, /* src_mask */
1398 0x00000000, /* dst_mask */
1399 FALSE), /* pcrel_offset */
1400
1401 HOWTO (R_ARM_THM_TLS_CALL, /* type */
1402 0, /* rightshift */
1403 2, /* size (0 = byte, 1 = short, 2 = long) */
1404 24, /* bitsize */
1405 FALSE, /* pc_relative */
1406 0, /* bitpos */
1407 complain_overflow_dont,/* complain_on_overflow */
1408 bfd_elf_generic_reloc, /* special_function */
1409 "R_ARM_THM_TLS_CALL", /* name */
1410 FALSE, /* partial_inplace */
1411 0x07ff07ff, /* src_mask */
1412 0x07ff07ff, /* dst_mask */
1413 FALSE), /* pcrel_offset */
1414
1415 HOWTO (R_ARM_PLT32_ABS, /* type */
1416 0, /* rightshift */
1417 2, /* size (0 = byte, 1 = short, 2 = long) */
1418 32, /* bitsize */
1419 FALSE, /* pc_relative */
1420 0, /* bitpos */
1421 complain_overflow_dont,/* complain_on_overflow */
1422 bfd_elf_generic_reloc, /* special_function */
1423 "R_ARM_PLT32_ABS", /* name */
1424 FALSE, /* partial_inplace */
1425 0xffffffff, /* src_mask */
1426 0xffffffff, /* dst_mask */
1427 FALSE), /* pcrel_offset */
1428
1429 HOWTO (R_ARM_GOT_ABS, /* type */
1430 0, /* rightshift */
1431 2, /* size (0 = byte, 1 = short, 2 = long) */
1432 32, /* bitsize */
1433 FALSE, /* pc_relative */
1434 0, /* bitpos */
1435 complain_overflow_dont,/* complain_on_overflow */
1436 bfd_elf_generic_reloc, /* special_function */
1437 "R_ARM_GOT_ABS", /* name */
1438 FALSE, /* partial_inplace */
1439 0xffffffff, /* src_mask */
1440 0xffffffff, /* dst_mask */
1441 FALSE), /* pcrel_offset */
1442
1443 HOWTO (R_ARM_GOT_PREL, /* type */
1444 0, /* rightshift */
1445 2, /* size (0 = byte, 1 = short, 2 = long) */
1446 32, /* bitsize */
1447 TRUE, /* pc_relative */
1448 0, /* bitpos */
1449 complain_overflow_dont, /* complain_on_overflow */
1450 bfd_elf_generic_reloc, /* special_function */
1451 "R_ARM_GOT_PREL", /* name */
1452 FALSE, /* partial_inplace */
1453 0xffffffff, /* src_mask */
1454 0xffffffff, /* dst_mask */
1455 TRUE), /* pcrel_offset */
1456
1457 HOWTO (R_ARM_GOT_BREL12, /* type */
1458 0, /* rightshift */
1459 2, /* size (0 = byte, 1 = short, 2 = long) */
1460 12, /* bitsize */
1461 FALSE, /* pc_relative */
1462 0, /* bitpos */
1463 complain_overflow_bitfield,/* complain_on_overflow */
1464 bfd_elf_generic_reloc, /* special_function */
1465 "R_ARM_GOT_BREL12", /* name */
1466 FALSE, /* partial_inplace */
1467 0x00000fff, /* src_mask */
1468 0x00000fff, /* dst_mask */
1469 FALSE), /* pcrel_offset */
1470
1471 HOWTO (R_ARM_GOTOFF12, /* type */
1472 0, /* rightshift */
1473 2, /* size (0 = byte, 1 = short, 2 = long) */
1474 12, /* bitsize */
1475 FALSE, /* pc_relative */
1476 0, /* bitpos */
1477 complain_overflow_bitfield,/* complain_on_overflow */
1478 bfd_elf_generic_reloc, /* special_function */
1479 "R_ARM_GOTOFF12", /* name */
1480 FALSE, /* partial_inplace */
1481 0x00000fff, /* src_mask */
1482 0x00000fff, /* dst_mask */
1483 FALSE), /* pcrel_offset */
1484
1485 EMPTY_HOWTO (R_ARM_GOTRELAX), /* reserved for future GOT-load optimizations */
1486
1487 /* GNU extension to record C++ vtable member usage */
1488 HOWTO (R_ARM_GNU_VTENTRY, /* type */
1489 0, /* rightshift */
1490 2, /* size (0 = byte, 1 = short, 2 = long) */
1491 0, /* bitsize */
1492 FALSE, /* pc_relative */
1493 0, /* bitpos */
1494 complain_overflow_dont, /* complain_on_overflow */
1495 _bfd_elf_rel_vtable_reloc_fn, /* special_function */
1496 "R_ARM_GNU_VTENTRY", /* name */
1497 FALSE, /* partial_inplace */
1498 0, /* src_mask */
1499 0, /* dst_mask */
1500 FALSE), /* pcrel_offset */
1501
1502 /* GNU extension to record C++ vtable hierarchy */
1503 HOWTO (R_ARM_GNU_VTINHERIT, /* type */
1504 0, /* rightshift */
1505 2, /* size (0 = byte, 1 = short, 2 = long) */
1506 0, /* bitsize */
1507 FALSE, /* pc_relative */
1508 0, /* bitpos */
1509 complain_overflow_dont, /* complain_on_overflow */
1510 NULL, /* special_function */
1511 "R_ARM_GNU_VTINHERIT", /* name */
1512 FALSE, /* partial_inplace */
1513 0, /* src_mask */
1514 0, /* dst_mask */
1515 FALSE), /* pcrel_offset */
1516
1517 HOWTO (R_ARM_THM_JUMP11, /* type */
1518 1, /* rightshift */
1519 1, /* size (0 = byte, 1 = short, 2 = long) */
1520 11, /* bitsize */
1521 TRUE, /* pc_relative */
1522 0, /* bitpos */
1523 complain_overflow_signed, /* complain_on_overflow */
1524 bfd_elf_generic_reloc, /* special_function */
1525 "R_ARM_THM_JUMP11", /* name */
1526 FALSE, /* partial_inplace */
1527 0x000007ff, /* src_mask */
1528 0x000007ff, /* dst_mask */
1529 TRUE), /* pcrel_offset */
1530
1531 HOWTO (R_ARM_THM_JUMP8, /* type */
1532 1, /* rightshift */
1533 1, /* size (0 = byte, 1 = short, 2 = long) */
1534 8, /* bitsize */
1535 TRUE, /* pc_relative */
1536 0, /* bitpos */
1537 complain_overflow_signed, /* complain_on_overflow */
1538 bfd_elf_generic_reloc, /* special_function */
1539 "R_ARM_THM_JUMP8", /* name */
1540 FALSE, /* partial_inplace */
1541 0x000000ff, /* src_mask */
1542 0x000000ff, /* dst_mask */
1543 TRUE), /* pcrel_offset */
1544
1545 /* TLS relocations */
1546 HOWTO (R_ARM_TLS_GD32, /* type */
1547 0, /* rightshift */
1548 2, /* size (0 = byte, 1 = short, 2 = long) */
1549 32, /* bitsize */
1550 FALSE, /* pc_relative */
1551 0, /* bitpos */
1552 complain_overflow_bitfield,/* complain_on_overflow */
1553 NULL, /* special_function */
1554 "R_ARM_TLS_GD32", /* name */
1555 TRUE, /* partial_inplace */
1556 0xffffffff, /* src_mask */
1557 0xffffffff, /* dst_mask */
1558 FALSE), /* pcrel_offset */
1559
1560 HOWTO (R_ARM_TLS_LDM32, /* type */
1561 0, /* rightshift */
1562 2, /* size (0 = byte, 1 = short, 2 = long) */
1563 32, /* bitsize */
1564 FALSE, /* pc_relative */
1565 0, /* bitpos */
1566 complain_overflow_bitfield,/* complain_on_overflow */
1567 bfd_elf_generic_reloc, /* special_function */
1568 "R_ARM_TLS_LDM32", /* name */
1569 TRUE, /* partial_inplace */
1570 0xffffffff, /* src_mask */
1571 0xffffffff, /* dst_mask */
1572 FALSE), /* pcrel_offset */
1573
1574 HOWTO (R_ARM_TLS_LDO32, /* type */
1575 0, /* rightshift */
1576 2, /* size (0 = byte, 1 = short, 2 = long) */
1577 32, /* bitsize */
1578 FALSE, /* pc_relative */
1579 0, /* bitpos */
1580 complain_overflow_bitfield,/* complain_on_overflow */
1581 bfd_elf_generic_reloc, /* special_function */
1582 "R_ARM_TLS_LDO32", /* name */
1583 TRUE, /* partial_inplace */
1584 0xffffffff, /* src_mask */
1585 0xffffffff, /* dst_mask */
1586 FALSE), /* pcrel_offset */
1587
1588 HOWTO (R_ARM_TLS_IE32, /* type */
1589 0, /* rightshift */
1590 2, /* size (0 = byte, 1 = short, 2 = long) */
1591 32, /* bitsize */
1592 FALSE, /* pc_relative */
1593 0, /* bitpos */
1594 complain_overflow_bitfield,/* complain_on_overflow */
1595 NULL, /* special_function */
1596 "R_ARM_TLS_IE32", /* name */
1597 TRUE, /* partial_inplace */
1598 0xffffffff, /* src_mask */
1599 0xffffffff, /* dst_mask */
1600 FALSE), /* pcrel_offset */
1601
1602 HOWTO (R_ARM_TLS_LE32, /* type */
1603 0, /* rightshift */
1604 2, /* size (0 = byte, 1 = short, 2 = long) */
1605 32, /* bitsize */
1606 FALSE, /* pc_relative */
1607 0, /* bitpos */
1608 complain_overflow_bitfield,/* complain_on_overflow */
1609 bfd_elf_generic_reloc, /* special_function */
1610 "R_ARM_TLS_LE32", /* name */
1611 TRUE, /* partial_inplace */
1612 0xffffffff, /* src_mask */
1613 0xffffffff, /* dst_mask */
1614 FALSE), /* pcrel_offset */
1615
1616 HOWTO (R_ARM_TLS_LDO12, /* type */
1617 0, /* rightshift */
1618 2, /* size (0 = byte, 1 = short, 2 = long) */
1619 12, /* bitsize */
1620 FALSE, /* pc_relative */
1621 0, /* bitpos */
1622 complain_overflow_bitfield,/* complain_on_overflow */
1623 bfd_elf_generic_reloc, /* special_function */
1624 "R_ARM_TLS_LDO12", /* name */
1625 FALSE, /* partial_inplace */
1626 0x00000fff, /* src_mask */
1627 0x00000fff, /* dst_mask */
1628 FALSE), /* pcrel_offset */
1629
1630 HOWTO (R_ARM_TLS_LE12, /* type */
1631 0, /* rightshift */
1632 2, /* size (0 = byte, 1 = short, 2 = long) */
1633 12, /* bitsize */
1634 FALSE, /* pc_relative */
1635 0, /* bitpos */
1636 complain_overflow_bitfield,/* complain_on_overflow */
1637 bfd_elf_generic_reloc, /* special_function */
1638 "R_ARM_TLS_LE12", /* name */
1639 FALSE, /* partial_inplace */
1640 0x00000fff, /* src_mask */
1641 0x00000fff, /* dst_mask */
1642 FALSE), /* pcrel_offset */
1643
1644 HOWTO (R_ARM_TLS_IE12GP, /* type */
1645 0, /* rightshift */
1646 2, /* size (0 = byte, 1 = short, 2 = long) */
1647 12, /* bitsize */
1648 FALSE, /* pc_relative */
1649 0, /* bitpos */
1650 complain_overflow_bitfield,/* complain_on_overflow */
1651 bfd_elf_generic_reloc, /* special_function */
1652 "R_ARM_TLS_IE12GP", /* name */
1653 FALSE, /* partial_inplace */
1654 0x00000fff, /* src_mask */
1655 0x00000fff, /* dst_mask */
1656 FALSE), /* pcrel_offset */
1657
1658 /* 112-127 private relocations. */
1659 EMPTY_HOWTO (112),
1660 EMPTY_HOWTO (113),
1661 EMPTY_HOWTO (114),
1662 EMPTY_HOWTO (115),
1663 EMPTY_HOWTO (116),
1664 EMPTY_HOWTO (117),
1665 EMPTY_HOWTO (118),
1666 EMPTY_HOWTO (119),
1667 EMPTY_HOWTO (120),
1668 EMPTY_HOWTO (121),
1669 EMPTY_HOWTO (122),
1670 EMPTY_HOWTO (123),
1671 EMPTY_HOWTO (124),
1672 EMPTY_HOWTO (125),
1673 EMPTY_HOWTO (126),
1674 EMPTY_HOWTO (127),
1675
1676 /* R_ARM_ME_TOO, obsolete. */
1677 EMPTY_HOWTO (128),
1678
1679 HOWTO (R_ARM_THM_TLS_DESCSEQ, /* type */
1680 0, /* rightshift */
1681 1, /* size (0 = byte, 1 = short, 2 = long) */
1682 0, /* bitsize */
1683 FALSE, /* pc_relative */
1684 0, /* bitpos */
1685 complain_overflow_bitfield,/* complain_on_overflow */
1686 bfd_elf_generic_reloc, /* special_function */
1687 "R_ARM_THM_TLS_DESCSEQ",/* name */
1688 FALSE, /* partial_inplace */
1689 0x00000000, /* src_mask */
1690 0x00000000, /* dst_mask */
1691 FALSE), /* pcrel_offset */
1692 };
1693
/* Second howto table, for relocation types 160 onwards.  Currently it
   holds only R_ARM_IRELATIVE; elf32_arm_howto_from_type maps type
   numbers starting at R_ARM_IRELATIVE into this table.  */
static reloc_howto_type elf32_arm_howto_table_2[1] =
{
  HOWTO (R_ARM_IRELATIVE,	/* type */
	 0,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 32,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_bitfield,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_IRELATIVE",	/* name */
	 TRUE,			/* partial_inplace */
	 0xffffffff,		/* src_mask */
	 0xffffffff,		/* dst_mask */
	 FALSE)			/* pcrel_offset */
};
1711
/* 249-255 extended, currently unused, relocations:  */
/* Third howto table, indexed from R_ARM_RREL32 by
   elf32_arm_howto_from_type.  All four entries are placeholders: zero
   size, bitsize and masks, and no overflow checking.  */
static reloc_howto_type elf32_arm_howto_table_3[4] =
{
  HOWTO (R_ARM_RREL32,		/* type */
	 0,			/* rightshift */
	 0,			/* size (0 = byte, 1 = short, 2 = long) */
	 0,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_RREL32",	/* name */
	 FALSE,			/* partial_inplace */
	 0,			/* src_mask */
	 0,			/* dst_mask */
	 FALSE),		/* pcrel_offset */

  HOWTO (R_ARM_RABS32,		/* type */
	 0,			/* rightshift */
	 0,			/* size (0 = byte, 1 = short, 2 = long) */
	 0,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_RABS32",	/* name */
	 FALSE,			/* partial_inplace */
	 0,			/* src_mask */
	 0,			/* dst_mask */
	 FALSE),		/* pcrel_offset */

  HOWTO (R_ARM_RPC24,		/* type */
	 0,			/* rightshift */
	 0,			/* size (0 = byte, 1 = short, 2 = long) */
	 0,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_RPC24",		/* name */
	 FALSE,			/* partial_inplace */
	 0,			/* src_mask */
	 0,			/* dst_mask */
	 FALSE),		/* pcrel_offset */

  HOWTO (R_ARM_RBASE,		/* type */
	 0,			/* rightshift */
	 0,			/* size (0 = byte, 1 = short, 2 = long) */
	 0,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_RBASE",		/* name */
	 FALSE,			/* partial_inplace */
	 0,			/* src_mask */
	 0,			/* dst_mask */
	 FALSE)			/* pcrel_offset */
};
1771
1772 static reloc_howto_type *
1773 elf32_arm_howto_from_type (unsigned int r_type)
1774 {
1775 if (r_type < ARRAY_SIZE (elf32_arm_howto_table_1))
1776 return &elf32_arm_howto_table_1[r_type];
1777
1778 if (r_type == R_ARM_IRELATIVE)
1779 return &elf32_arm_howto_table_2[r_type - R_ARM_IRELATIVE];
1780
1781 if (r_type >= R_ARM_RREL32
1782 && r_type < R_ARM_RREL32 + ARRAY_SIZE (elf32_arm_howto_table_3))
1783 return &elf32_arm_howto_table_3[r_type - R_ARM_RREL32];
1784
1785 return NULL;
1786 }
1787
1788 static void
1789 elf32_arm_info_to_howto (bfd * abfd ATTRIBUTE_UNUSED, arelent * bfd_reloc,
1790 Elf_Internal_Rela * elf_reloc)
1791 {
1792 unsigned int r_type;
1793
1794 r_type = ELF32_R_TYPE (elf_reloc->r_info);
1795 bfd_reloc->howto = elf32_arm_howto_from_type (r_type);
1796 }
1797
/* One entry of the BFD-reloc-code to ELF-reloc-number mapping used by
   elf32_arm_reloc_type_lookup.  */
struct elf32_arm_reloc_map
{
  bfd_reloc_code_real_type bfd_reloc_val;	/* Generic BFD reloc code.  */
  unsigned char elf_reloc_val;			/* Matching R_ARM_* number.  */
};
1803
1804 /* All entries in this list must also be present in elf32_arm_howto_table. */
1805 static const struct elf32_arm_reloc_map elf32_arm_reloc_map[] =
1806 {
1807 {BFD_RELOC_NONE, R_ARM_NONE},
1808 {BFD_RELOC_ARM_PCREL_BRANCH, R_ARM_PC24},
1809 {BFD_RELOC_ARM_PCREL_CALL, R_ARM_CALL},
1810 {BFD_RELOC_ARM_PCREL_JUMP, R_ARM_JUMP24},
1811 {BFD_RELOC_ARM_PCREL_BLX, R_ARM_XPC25},
1812 {BFD_RELOC_THUMB_PCREL_BLX, R_ARM_THM_XPC22},
1813 {BFD_RELOC_32, R_ARM_ABS32},
1814 {BFD_RELOC_32_PCREL, R_ARM_REL32},
1815 {BFD_RELOC_8, R_ARM_ABS8},
1816 {BFD_RELOC_16, R_ARM_ABS16},
1817 {BFD_RELOC_ARM_OFFSET_IMM, R_ARM_ABS12},
1818 {BFD_RELOC_ARM_THUMB_OFFSET, R_ARM_THM_ABS5},
1819 {BFD_RELOC_THUMB_PCREL_BRANCH25, R_ARM_THM_JUMP24},
1820 {BFD_RELOC_THUMB_PCREL_BRANCH23, R_ARM_THM_CALL},
1821 {BFD_RELOC_THUMB_PCREL_BRANCH12, R_ARM_THM_JUMP11},
1822 {BFD_RELOC_THUMB_PCREL_BRANCH20, R_ARM_THM_JUMP19},
1823 {BFD_RELOC_THUMB_PCREL_BRANCH9, R_ARM_THM_JUMP8},
1824 {BFD_RELOC_THUMB_PCREL_BRANCH7, R_ARM_THM_JUMP6},
1825 {BFD_RELOC_ARM_GLOB_DAT, R_ARM_GLOB_DAT},
1826 {BFD_RELOC_ARM_JUMP_SLOT, R_ARM_JUMP_SLOT},
1827 {BFD_RELOC_ARM_RELATIVE, R_ARM_RELATIVE},
1828 {BFD_RELOC_ARM_GOTOFF, R_ARM_GOTOFF32},
1829 {BFD_RELOC_ARM_GOTPC, R_ARM_GOTPC},
1830 {BFD_RELOC_ARM_GOT_PREL, R_ARM_GOT_PREL},
1831 {BFD_RELOC_ARM_GOT32, R_ARM_GOT32},
1832 {BFD_RELOC_ARM_PLT32, R_ARM_PLT32},
1833 {BFD_RELOC_ARM_TARGET1, R_ARM_TARGET1},
1834 {BFD_RELOC_ARM_ROSEGREL32, R_ARM_ROSEGREL32},
1835 {BFD_RELOC_ARM_SBREL32, R_ARM_SBREL32},
1836 {BFD_RELOC_ARM_PREL31, R_ARM_PREL31},
1837 {BFD_RELOC_ARM_TARGET2, R_ARM_TARGET2},
1838 {BFD_RELOC_ARM_PLT32, R_ARM_PLT32},
1839 {BFD_RELOC_ARM_TLS_GOTDESC, R_ARM_TLS_GOTDESC},
1840 {BFD_RELOC_ARM_TLS_CALL, R_ARM_TLS_CALL},
1841 {BFD_RELOC_ARM_THM_TLS_CALL, R_ARM_THM_TLS_CALL},
1842 {BFD_RELOC_ARM_TLS_DESCSEQ, R_ARM_TLS_DESCSEQ},
1843 {BFD_RELOC_ARM_THM_TLS_DESCSEQ, R_ARM_THM_TLS_DESCSEQ},
1844 {BFD_RELOC_ARM_TLS_DESC, R_ARM_TLS_DESC},
1845 {BFD_RELOC_ARM_TLS_GD32, R_ARM_TLS_GD32},
1846 {BFD_RELOC_ARM_TLS_LDO32, R_ARM_TLS_LDO32},
1847 {BFD_RELOC_ARM_TLS_LDM32, R_ARM_TLS_LDM32},
1848 {BFD_RELOC_ARM_TLS_DTPMOD32, R_ARM_TLS_DTPMOD32},
1849 {BFD_RELOC_ARM_TLS_DTPOFF32, R_ARM_TLS_DTPOFF32},
1850 {BFD_RELOC_ARM_TLS_TPOFF32, R_ARM_TLS_TPOFF32},
1851 {BFD_RELOC_ARM_TLS_IE32, R_ARM_TLS_IE32},
1852 {BFD_RELOC_ARM_TLS_LE32, R_ARM_TLS_LE32},
1853 {BFD_RELOC_ARM_IRELATIVE, R_ARM_IRELATIVE},
1854 {BFD_RELOC_VTABLE_INHERIT, R_ARM_GNU_VTINHERIT},
1855 {BFD_RELOC_VTABLE_ENTRY, R_ARM_GNU_VTENTRY},
1856 {BFD_RELOC_ARM_MOVW, R_ARM_MOVW_ABS_NC},
1857 {BFD_RELOC_ARM_MOVT, R_ARM_MOVT_ABS},
1858 {BFD_RELOC_ARM_MOVW_PCREL, R_ARM_MOVW_PREL_NC},
1859 {BFD_RELOC_ARM_MOVT_PCREL, R_ARM_MOVT_PREL},
1860 {BFD_RELOC_ARM_THUMB_MOVW, R_ARM_THM_MOVW_ABS_NC},
1861 {BFD_RELOC_ARM_THUMB_MOVT, R_ARM_THM_MOVT_ABS},
1862 {BFD_RELOC_ARM_THUMB_MOVW_PCREL, R_ARM_THM_MOVW_PREL_NC},
1863 {BFD_RELOC_ARM_THUMB_MOVT_PCREL, R_ARM_THM_MOVT_PREL},
1864 {BFD_RELOC_ARM_ALU_PC_G0_NC, R_ARM_ALU_PC_G0_NC},
1865 {BFD_RELOC_ARM_ALU_PC_G0, R_ARM_ALU_PC_G0},
1866 {BFD_RELOC_ARM_ALU_PC_G1_NC, R_ARM_ALU_PC_G1_NC},
1867 {BFD_RELOC_ARM_ALU_PC_G1, R_ARM_ALU_PC_G1},
1868 {BFD_RELOC_ARM_ALU_PC_G2, R_ARM_ALU_PC_G2},
1869 {BFD_RELOC_ARM_LDR_PC_G0, R_ARM_LDR_PC_G0},
1870 {BFD_RELOC_ARM_LDR_PC_G1, R_ARM_LDR_PC_G1},
1871 {BFD_RELOC_ARM_LDR_PC_G2, R_ARM_LDR_PC_G2},
1872 {BFD_RELOC_ARM_LDRS_PC_G0, R_ARM_LDRS_PC_G0},
1873 {BFD_RELOC_ARM_LDRS_PC_G1, R_ARM_LDRS_PC_G1},
1874 {BFD_RELOC_ARM_LDRS_PC_G2, R_ARM_LDRS_PC_G2},
1875 {BFD_RELOC_ARM_LDC_PC_G0, R_ARM_LDC_PC_G0},
1876 {BFD_RELOC_ARM_LDC_PC_G1, R_ARM_LDC_PC_G1},
1877 {BFD_RELOC_ARM_LDC_PC_G2, R_ARM_LDC_PC_G2},
1878 {BFD_RELOC_ARM_ALU_SB_G0_NC, R_ARM_ALU_SB_G0_NC},
1879 {BFD_RELOC_ARM_ALU_SB_G0, R_ARM_ALU_SB_G0},
1880 {BFD_RELOC_ARM_ALU_SB_G1_NC, R_ARM_ALU_SB_G1_NC},
1881 {BFD_RELOC_ARM_ALU_SB_G1, R_ARM_ALU_SB_G1},
1882 {BFD_RELOC_ARM_ALU_SB_G2, R_ARM_ALU_SB_G2},
1883 {BFD_RELOC_ARM_LDR_SB_G0, R_ARM_LDR_SB_G0},
1884 {BFD_RELOC_ARM_LDR_SB_G1, R_ARM_LDR_SB_G1},
1885 {BFD_RELOC_ARM_LDR_SB_G2, R_ARM_LDR_SB_G2},
1886 {BFD_RELOC_ARM_LDRS_SB_G0, R_ARM_LDRS_SB_G0},
1887 {BFD_RELOC_ARM_LDRS_SB_G1, R_ARM_LDRS_SB_G1},
1888 {BFD_RELOC_ARM_LDRS_SB_G2, R_ARM_LDRS_SB_G2},
1889 {BFD_RELOC_ARM_LDC_SB_G0, R_ARM_LDC_SB_G0},
1890 {BFD_RELOC_ARM_LDC_SB_G1, R_ARM_LDC_SB_G1},
1891 {BFD_RELOC_ARM_LDC_SB_G2, R_ARM_LDC_SB_G2},
1892 {BFD_RELOC_ARM_V4BX, R_ARM_V4BX}
1893 };
1894
1895 static reloc_howto_type *
1896 elf32_arm_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1897 bfd_reloc_code_real_type code)
1898 {
1899 unsigned int i;
1900
1901 for (i = 0; i < ARRAY_SIZE (elf32_arm_reloc_map); i ++)
1902 if (elf32_arm_reloc_map[i].bfd_reloc_val == code)
1903 return elf32_arm_howto_from_type (elf32_arm_reloc_map[i].elf_reloc_val);
1904
1905 return NULL;
1906 }
1907
1908 static reloc_howto_type *
1909 elf32_arm_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1910 const char *r_name)
1911 {
1912 unsigned int i;
1913
1914 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_1); i++)
1915 if (elf32_arm_howto_table_1[i].name != NULL
1916 && strcasecmp (elf32_arm_howto_table_1[i].name, r_name) == 0)
1917 return &elf32_arm_howto_table_1[i];
1918
1919 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_2); i++)
1920 if (elf32_arm_howto_table_2[i].name != NULL
1921 && strcasecmp (elf32_arm_howto_table_2[i].name, r_name) == 0)
1922 return &elf32_arm_howto_table_2[i];
1923
1924 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_3); i++)
1925 if (elf32_arm_howto_table_3[i].name != NULL
1926 && strcasecmp (elf32_arm_howto_table_3[i].name, r_name) == 0)
1927 return &elf32_arm_howto_table_3[i];
1928
1929 return NULL;
1930 }
1931
1932 /* Support for core dump NOTE sections. */
1933
1934 static bfd_boolean
1935 elf32_arm_nabi_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
1936 {
1937 int offset;
1938 size_t size;
1939
1940 switch (note->descsz)
1941 {
1942 default:
1943 return FALSE;
1944
1945 case 148: /* Linux/ARM 32-bit. */
1946 /* pr_cursig */
1947 elf_tdata (abfd)->core->signal = bfd_get_16 (abfd, note->descdata + 12);
1948
1949 /* pr_pid */
1950 elf_tdata (abfd)->core->lwpid = bfd_get_32 (abfd, note->descdata + 24);
1951
1952 /* pr_reg */
1953 offset = 72;
1954 size = 72;
1955
1956 break;
1957 }
1958
1959 /* Make a ".reg/999" section. */
1960 return _bfd_elfcore_make_pseudosection (abfd, ".reg",
1961 size, note->descpos + offset);
1962 }
1963
1964 static bfd_boolean
1965 elf32_arm_nabi_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
1966 {
1967 switch (note->descsz)
1968 {
1969 default:
1970 return FALSE;
1971
1972 case 124: /* Linux/ARM elf_prpsinfo. */
1973 elf_tdata (abfd)->core->pid
1974 = bfd_get_32 (abfd, note->descdata + 12);
1975 elf_tdata (abfd)->core->program
1976 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
1977 elf_tdata (abfd)->core->command
1978 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
1979 }
1980
1981 /* Note that for some reason, a spurious space is tacked
1982 onto the end of the args in some (at least one anyway)
1983 implementations, so strip it off if it exists. */
1984 {
1985 char *command = elf_tdata (abfd)->core->command;
1986 int n = strlen (command);
1987
1988 if (0 < n && command[n - 1] == ' ')
1989 command[n - 1] = '\0';
1990 }
1991
1992 return TRUE;
1993 }
1994
/* Core dump support: write an ELF core note of type NOTE_TYPE into BUF.
   The variable arguments depend on NOTE_TYPE:
     NT_PRPSINFO: (const char *) program name, (const char *) command line.
     NT_PRSTATUS: (long) pid, (int) current signal, (const void *) pointer
       to 72 bytes of general-register data.
   Returns the (possibly reallocated) note buffer, or NULL for note types
   this backend does not handle.  The field offsets written here mirror
   those read back by elf32_arm_nabi_grok_psinfo/grok_prstatus above.  */

static char *
elf32_arm_nabi_write_core_note (bfd *abfd, char *buf, int *bufsiz,
				int note_type, ...)
{
  switch (note_type)
    {
    default:
      return NULL;

    case NT_PRPSINFO:
      {
	char data[124];		/* Linux/ARM elf_prpsinfo size (see grok_psinfo).  */
	va_list ap;

	va_start (ap, note_type);
	memset (data, 0, sizeof (data));
	/* pr_fname (16 bytes) at offset 28, pr_psargs (80 bytes) at 44.
	   strncpy is deliberate: these are fixed-width fields that are
	   zero-padded and need not be NUL-terminated.  */
	strncpy (data + 28, va_arg (ap, const char *), 16);
	strncpy (data + 44, va_arg (ap, const char *), 80);
	va_end (ap);

	return elfcore_write_note (abfd, buf, bufsiz,
				   "CORE", note_type, data, sizeof (data));
      }

    case NT_PRSTATUS:
      {
	char data[148];		/* Linux/ARM elf_prstatus size (see grok_prstatus).  */
	va_list ap;
	long pid;
	int cursig;
	const void *greg;

	va_start (ap, note_type);
	memset (data, 0, sizeof (data));
	/* pr_pid at offset 24, pr_cursig at 12, pr_reg (72 bytes) at 72.
	   The va_arg calls must stay in this order: pid, signal, regs.  */
	pid = va_arg (ap, long);
	bfd_put_32 (abfd, pid, data + 24);
	cursig = va_arg (ap, int);
	bfd_put_16 (abfd, cursig, data + 12);
	greg = va_arg (ap, const void *);
	memcpy (data + 72, greg, 72);
	va_end (ap);

	return elfcore_write_note (abfd, buf, bufsiz,
				   "CORE", note_type, data, sizeof (data));
      }
    }
}
2042
2043 #define TARGET_LITTLE_SYM bfd_elf32_littlearm_vec
2044 #define TARGET_LITTLE_NAME "elf32-littlearm"
2045 #define TARGET_BIG_SYM bfd_elf32_bigarm_vec
2046 #define TARGET_BIG_NAME "elf32-bigarm"
2047
2048 #define elf_backend_grok_prstatus elf32_arm_nabi_grok_prstatus
2049 #define elf_backend_grok_psinfo elf32_arm_nabi_grok_psinfo
2050 #define elf_backend_write_core_note elf32_arm_nabi_write_core_note
2051
2052 typedef unsigned long int insn32;
2053 typedef unsigned short int insn16;
2054
2055 /* In lieu of proper flags, assume all EABIv4 or later objects are
2056 interworkable. */
2057 #define INTERWORK_FLAG(abfd) \
2058 (EF_ARM_EABI_VERSION (elf_elfheader (abfd)->e_flags) >= EF_ARM_EABI_VER4 \
2059 || (elf_elfheader (abfd)->e_flags & EF_ARM_INTERWORK) \
2060 || ((abfd)->flags & BFD_LINKER_CREATED))
2061
2062 /* The linker script knows the section names for placement.
2063 The entry_names are used to do simple name mangling on the stubs.
2064 Given a function name, and its type, the stub can be found. The
2065 name can be changed. The only requirement is the %s be present. */
2066 #define THUMB2ARM_GLUE_SECTION_NAME ".glue_7t"
2067 #define THUMB2ARM_GLUE_ENTRY_NAME "__%s_from_thumb"
2068
2069 #define ARM2THUMB_GLUE_SECTION_NAME ".glue_7"
2070 #define ARM2THUMB_GLUE_ENTRY_NAME "__%s_from_arm"
2071
2072 #define VFP11_ERRATUM_VENEER_SECTION_NAME ".vfp11_veneer"
2073 #define VFP11_ERRATUM_VENEER_ENTRY_NAME "__vfp11_veneer_%x"
2074
2075 #define ARM_BX_GLUE_SECTION_NAME ".v4_bx"
2076 #define ARM_BX_GLUE_ENTRY_NAME "__bx_r%d"
2077
2078 #define STUB_ENTRY_NAME "__%s_veneer"
2079
2080 /* The name of the dynamic interpreter. This is put in the .interp
2081 section. */
2082 #define ELF_DYNAMIC_INTERPRETER "/usr/lib/ld.so.1"
2083
/* ARM code sequence: computes an address relative to lr, loads a
   target from 4 bytes past it and branches there.  NOTE(review):
   presumably emitted as the TLS trampoline in the PLT — confirm
   against the code that copies this table out.  */
static const unsigned long tls_trampoline [] =
{
  0xe08e0000,		/* add r0, lr, r0 */
  0xe5901004,		/* ldr r1, [r0,#4] */
  0xe12fff11,		/* bx  r1 */
};
2090
/* Lazy TLS-descriptor resolution trampoline.  Loads the lazy resolver
   address out of the GOT via the two PC-relative data words at the end
   and tail-calls it.  The two trailing words (offsets 0x14 and 0x18
   from the start) are fixed up to point at the GOT.  */
static const unsigned long dl_tlsdesc_lazy_trampoline [] =
{
  0xe52d2004,		/* push {r2} */
  0xe59f200c,		/* ldr  r2, [pc, #3f - . - 8] */
  0xe59f100c,		/* ldr  r1, [pc, #4f - . - 8] */
  0xe79f2002,		/* 1:   ldr  r2, [pc, r2] */
  0xe081100f,		/* 2:   add  r1, pc */
  0xe12fff12,		/* bx   r2 */
  0x00000014,		/* 3:   .word  _GLOBAL_OFFSET_TABLE_ - 1b - 8
				+ dl_tlsdesc_lazy_resolver(GOT)  */
  0x00000018,		/* 4:   .word  _GLOBAL_OFFSET_TABLE_ - 2b - 8 */
};
2103
/* Two layouts for the standard ARM PLT are supported, selected at
   compile time by FOUR_WORD_PLT: a fixed four-word (16-byte) entry
   format, or a more compact variable form where the GOT displacement
   is carried as a trailing data word in the header.  Both define the
   same pair of tables, so the rest of the backend is agnostic.  */
#ifdef FOUR_WORD_PLT

/* The first entry in a procedure linkage table looks like
   this.  It is set up so that any shared library function that is
   called before the relocation has been set up calls the dynamic
   linker first.  */
static const bfd_vma elf32_arm_plt0_entry [] =
{
  0xe52de004,		/* str   lr, [sp, #-4]! */
  0xe59fe010,		/* ldr   lr, [pc, #16]  */
  0xe08fe00e,		/* add   lr, pc, lr     */
  0xe5bef008,		/* ldr   pc, [lr, #8]!  */
};

/* Subsequent entries in a procedure linkage table look like
   this.  */
static const bfd_vma elf32_arm_plt_entry [] =
{
  0xe28fc600,		/* add   ip, pc, #NN	*/
  0xe28cca00,		/* add	 ip, ip, #NN	*/
  0xe5bcf000,		/* ldr	 pc, [ip, #NN]! */
  0x00000000,		/* unused		*/
};

#else

/* The first entry in a procedure linkage table looks like
   this.  It is set up so that any shared library function that is
   called before the relocation has been set up calls the dynamic
   linker first.  */
static const bfd_vma elf32_arm_plt0_entry [] =
{
  0xe52de004,		/* str   lr, [sp, #-4]! */
  0xe59fe004,		/* ldr   lr, [pc, #4]   */
  0xe08fe00e,		/* add   lr, pc, lr     */
  0xe5bef008,		/* ldr   pc, [lr, #8]!  */
  0x00000000,		/* &GOT[0] - .          */
};

/* Subsequent entries in a procedure linkage table look like
   this.  The #0xNN00000/#0xNN000/#0xNNN immediates together encode a
   full 32-bit displacement to the symbol's GOT slot.  */
static const bfd_vma elf32_arm_plt_entry [] =
{
  0xe28fc600,		/* add   ip, pc, #0xNN00000 */
  0xe28cca00,		/* add   ip, ip, #0xNN000   */
  0xe5bcf000,		/* ldr   pc, [ip, #0xNNN]!  */
};

#endif
2153
/* The format of the first entry in the procedure linkage table
   for a VxWorks executable.  The trailing word is patched with the
   GOT address.  */
static const bfd_vma elf32_arm_vxworks_exec_plt0_entry[] =
{
  0xe52dc008,		/* str    ip,[sp,#-8]!			*/
  0xe59fc000,		/* ldr    ip,[pc]			*/
  0xe59cf008,		/* ldr    pc,[ip,#8]			*/
  0x00000000,		/* .long  _GLOBAL_OFFSET_TABLE_		*/
};

/* The format of subsequent entries in a VxWorks executable.  Each
   entry carries two patched data words: the symbol's GOT slot and the
   byte offset of its relocation in .rela.plt.  */
static const bfd_vma elf32_arm_vxworks_exec_plt_entry[] =
{
  0xe59fc000,		/* ldr    ip,[pc]			*/
  0xe59cf000,		/* ldr    pc,[ip]			*/
  0x00000000,		/* .long  @got				*/
  0xe59fc000,		/* ldr    ip,[pc]			*/
  0xea000000,		/* b      _PLT				*/
  0x00000000,		/* .long  @pltindex*sizeof(Elf32_Rela)	*/
};

/* The format of entries in a VxWorks shared library.  Like the
   executable form, but GOT accesses are indirected through r9.  */
static const bfd_vma elf32_arm_vxworks_shared_plt_entry[] =
{
  0xe59fc000,		/* ldr    ip,[pc]			*/
  0xe79cf009,		/* ldr    pc,[ip,r9]			*/
  0x00000000,		/* .long  @got				*/
  0xe59fc000,		/* ldr    ip,[pc]			*/
  0xe599f008,		/* ldr    pc,[r9,#8]			*/
  0x00000000,		/* .long  @pltindex*sizeof(Elf32_Rela)	*/
};
2185
/* An initial stub used if the PLT entry is referenced from Thumb code.
   Two 16-bit instructions (PLT_THUMB_STUB_SIZE bytes): "bx pc" falls
   through into the ARM-mode PLT entry that immediately follows.  */
#define PLT_THUMB_STUB_SIZE 4
static const bfd_vma elf32_arm_plt_thumb_stub [] =
{
  0x4778,		/* bx pc */
  0x46c0		/* nop   */
};

/* The entries in a PLT when using a DLL-based target with multiple
   address spaces (Symbian OS).  A single literal load through the
   R_ARM_GLOB_DAT word that follows the instruction.  */
static const bfd_vma elf32_arm_symbian_plt_entry [] =
{
  0xe51ff004,		/* ldr   pc, [pc, #-4] */
  0x00000000,		/* dcd   R_ARM_GLOB_DAT(X) */
};
2201
/* The first entry in a procedure linkage table looks like
   this.  It is set up so that any shared library function that is
   called before the relocation has been set up calls the dynamic
   linker first.  Native Client requires code to be laid out in
   16-byte "bundles" and indirect-branch targets to be masked, hence
   the bic/bx pairs and nop padding.  */
static const bfd_vma elf32_arm_nacl_plt0_entry [] =
{
  /* First bundle: */
  0xe300c000,		/* movw	ip, #:lower16:&GOT[2]-.+8	*/
  0xe340c000,		/* movt	ip, #:upper16:&GOT[2]-.+8	*/
  0xe08cc00f,		/* add	ip, ip, pc			*/
  0xe52dc008,		/* str	ip, [sp, #-8]!			*/
  /* Second bundle: */
  0xe3ccc103,		/* bic	ip, ip, #0xc0000000		*/
  0xe59cc000,		/* ldr	ip, [ip]			*/
  0xe3ccc13f,		/* bic	ip, ip, #0xc000000f		*/
  0xe12fff1c,		/* bx	ip				*/
  /* Third bundle: */
  0xe320f000,		/* nop					*/
  0xe320f000,		/* nop					*/
  0xe320f000,		/* nop					*/
  /* .Lplt_tail: */
  0xe50dc004,		/* str	ip, [sp, #-4]			*/
  /* Fourth bundle: */
  0xe3ccc103,		/* bic	ip, ip, #0xc0000000		*/
  0xe59cc000,		/* ldr	ip, [ip]			*/
  0xe3ccc13f,		/* bic	ip, ip, #0xc000000f		*/
  0xe12fff1c,		/* bx	ip				*/
};
/* Byte offset of the .Lplt_tail label above: 11 instruction words
   precede it.  Subsequent PLT entries branch here.  */
#define ARM_NACL_PLT_TAIL_OFFSET	(11 * 4)

/* Subsequent entries in a procedure linkage table look like this:
   materialize the GOT-slot address in ip, then share the masked
   load-and-branch code at .Lplt_tail.  */
static const bfd_vma elf32_arm_nacl_plt_entry [] =
{
  0xe300c000,		/* movw	ip, #:lower16:&GOT[n]-.+8	*/
  0xe340c000,		/* movt	ip, #:upper16:&GOT[n]-.+8	*/
  0xe08cc00f,		/* add	ip, ip, pc			*/
  0xea000000,		/* b	.Lplt_tail			*/
};
2240
/* Reachable displacement ranges, in bytes, of the various branch
   encodings, measured from the branch instruction itself.  The +8/+4
   terms account for the ARM/Thumb PC-read offset.  ARM B/BL has a
   24-bit immediate scaled by 4; Thumb-1 BL pairs give a 22-bit range;
   Thumb-2 B.W/BL extends that to 24 bits.  */
#define ARM_MAX_FWD_BRANCH_OFFSET ((((1 << 23) - 1) << 2) + 8)
#define ARM_MAX_BWD_BRANCH_OFFSET ((-((1 << 23) << 2)) + 8)
#define THM_MAX_FWD_BRANCH_OFFSET ((1 << 22) -2 + 4)
#define THM_MAX_BWD_BRANCH_OFFSET (-(1 << 22) + 4)
#define THM2_MAX_FWD_BRANCH_OFFSET (((1 << 24) - 2) + 4)
#define THM2_MAX_BWD_BRANCH_OFFSET (-(1 << 24) + 4)
2247
/* Classification of the elements that make up a stub template: 16-bit
   Thumb, 32-bit Thumb-2 or ARM instructions, or a literal data word.  */
enum stub_insn_type
{
  THUMB16_TYPE = 1,
  THUMB32_TYPE,
  ARM_TYPE,
  DATA_TYPE
};

/* Helpers for building stub templates.  Each expands to an
   insn_sequence initializer: the encoding, its type, the relocation
   (if any) to apply against the stub target, and the addend for that
   relocation.  */
#define THUMB16_INSN(X)		{(X), THUMB16_TYPE, R_ARM_NONE, 0}
/* A bit of a hack.  A Thumb conditional branch, in which the proper condition
   is inserted in arm_build_one_stub().  */
#define THUMB16_BCOND_INSN(X)	{(X), THUMB16_TYPE, R_ARM_NONE, 1}
#define THUMB32_INSN(X)		{(X), THUMB32_TYPE, R_ARM_NONE, 0}
#define THUMB32_B_INSN(X, Z)	{(X), THUMB32_TYPE, R_ARM_THM_JUMP24, (Z)}
#define ARM_INSN(X)		{(X), ARM_TYPE, R_ARM_NONE, 0}
#define ARM_REL_INSN(X, Z)	{(X), ARM_TYPE, R_ARM_JUMP24, (Z)}
#define DATA_WORD(X,Y,Z)	{(X), DATA_TYPE, (Y), (Z)}

/* One element of a stub template.  */
typedef struct
{
  bfd_vma data;			/* Instruction encoding or data word.  */
  enum stub_insn_type type;	/* How DATA should be emitted.  */
  unsigned int r_type;		/* ELF relocation applied to this element.  */
  int reloc_addend;		/* Addend for that relocation.  */
} insn_sequence;
2273
2274 /* Arm/Thumb -> Arm/Thumb long branch stub. On V5T and above, use blx
2275 to reach the stub if necessary. */
2276 static const insn_sequence elf32_arm_stub_long_branch_any_any[] =
2277 {
2278 ARM_INSN (0xe51ff004), /* ldr pc, [pc, #-4] */
2279 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2280 };
2281
2282 /* V4T Arm -> Thumb long branch stub. Used on V4T where blx is not
2283 available. */
2284 static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb[] =
2285 {
2286 ARM_INSN (0xe59fc000), /* ldr ip, [pc, #0] */
2287 ARM_INSN (0xe12fff1c), /* bx ip */
2288 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2289 };
2290
2291 /* Thumb -> Thumb long branch stub. Used on M-profile architectures. */
2292 static const insn_sequence elf32_arm_stub_long_branch_thumb_only[] =
2293 {
2294 THUMB16_INSN (0xb401), /* push {r0} */
2295 THUMB16_INSN (0x4802), /* ldr r0, [pc, #8] */
2296 THUMB16_INSN (0x4684), /* mov ip, r0 */
2297 THUMB16_INSN (0xbc01), /* pop {r0} */
2298 THUMB16_INSN (0x4760), /* bx ip */
2299 THUMB16_INSN (0xbf00), /* nop */
2300 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2301 };
2302
2303 /* V4T Thumb -> Thumb long branch stub. Using the stack is not
2304 allowed. */
2305 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb[] =
2306 {
2307 THUMB16_INSN (0x4778), /* bx pc */
2308 THUMB16_INSN (0x46c0), /* nop */
2309 ARM_INSN (0xe59fc000), /* ldr ip, [pc, #0] */
2310 ARM_INSN (0xe12fff1c), /* bx ip */
2311 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2312 };
2313
2314 /* V4T Thumb -> ARM long branch stub. Used on V4T where blx is not
2315 available. */
2316 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm[] =
2317 {
2318 THUMB16_INSN (0x4778), /* bx pc */
2319 THUMB16_INSN (0x46c0), /* nop */
2320 ARM_INSN (0xe51ff004), /* ldr pc, [pc, #-4] */
2321 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2322 };
2323
2324 /* V4T Thumb -> ARM short branch stub. Shorter variant of the above
2325 one, when the destination is close enough. */
2326 static const insn_sequence elf32_arm_stub_short_branch_v4t_thumb_arm[] =
2327 {
2328 THUMB16_INSN (0x4778), /* bx pc */
2329 THUMB16_INSN (0x46c0), /* nop */
2330 ARM_REL_INSN (0xea000000, -8), /* b (X-8) */
2331 };
2332
2333 /* ARM/Thumb -> ARM long branch stub, PIC. On V5T and above, use
2334 blx to reach the stub if necessary. */
2335 static const insn_sequence elf32_arm_stub_long_branch_any_arm_pic[] =
2336 {
2337 ARM_INSN (0xe59fc000), /* ldr ip, [pc] */
2338 ARM_INSN (0xe08ff00c), /* add pc, pc, ip */
2339 DATA_WORD (0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */
2340 };
2341
2342 /* ARM/Thumb -> Thumb long branch stub, PIC. On V5T and above, use
2343 blx to reach the stub if necessary. We can not add into pc;
2344 it is not guaranteed to mode switch (different in ARMv6 and
2345 ARMv7). */
2346 static const insn_sequence elf32_arm_stub_long_branch_any_thumb_pic[] =
2347 {
2348 ARM_INSN (0xe59fc004), /* ldr ip, [pc, #4] */
2349 ARM_INSN (0xe08fc00c), /* add ip, pc, ip */
2350 ARM_INSN (0xe12fff1c), /* bx ip */
2351 DATA_WORD (0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2352 };
2353
2354 /* V4T ARM -> ARM long branch stub, PIC. */
2355 static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb_pic[] =
2356 {
2357 ARM_INSN (0xe59fc004), /* ldr ip, [pc, #4] */
2358 ARM_INSN (0xe08fc00c), /* add ip, pc, ip */
2359 ARM_INSN (0xe12fff1c), /* bx ip */
2360 DATA_WORD (0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2361 };
2362
/* V4T Thumb -> ARM long branch stub, PIC.  The data word holds the
   displacement from the "add pc" instruction's PC read (hence the -4
   addend).  */
static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm_pic[] =
{
  THUMB16_INSN (0x4778),	     /* bx   pc */
  THUMB16_INSN (0x46c0),	     /* nop */
  ARM_INSN (0xe59fc000),	     /* ldr  ip, [pc, #0] */
  ARM_INSN (0xe08cf00f),	     /* add  pc, ip, pc */
  DATA_WORD (0, R_ARM_REL32, -4),    /* dcd  R_ARM_REL32(X-4) */
};
2372
2373 /* Thumb -> Thumb long branch stub, PIC. Used on M-profile
2374 architectures. */
2375 static const insn_sequence elf32_arm_stub_long_branch_thumb_only_pic[] =
2376 {
2377 THUMB16_INSN (0xb401), /* push {r0} */
2378 THUMB16_INSN (0x4802), /* ldr r0, [pc, #8] */
2379 THUMB16_INSN (0x46fc), /* mov ip, pc */
2380 THUMB16_INSN (0x4484), /* add ip, r0 */
2381 THUMB16_INSN (0xbc01), /* pop {r0} */
2382 THUMB16_INSN (0x4760), /* bx ip */
2383 DATA_WORD (0, R_ARM_REL32, 4), /* dcd R_ARM_REL32(X) */
2384 };
2385
2386 /* V4T Thumb -> Thumb long branch stub, PIC. Using the stack is not
2387 allowed. */
2388 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb_pic[] =
2389 {
2390 THUMB16_INSN (0x4778), /* bx pc */
2391 THUMB16_INSN (0x46c0), /* nop */
2392 ARM_INSN (0xe59fc004), /* ldr ip, [pc, #4] */
2393 ARM_INSN (0xe08fc00c), /* add ip, pc, ip */
2394 ARM_INSN (0xe12fff1c), /* bx ip */
2395 DATA_WORD (0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2396 };
2397
2398 /* Thumb2/ARM -> TLS trampoline. Lowest common denominator, which is a
2399 long PIC stub. We can use r1 as a scratch -- and cannot use ip. */
2400 static const insn_sequence elf32_arm_stub_long_branch_any_tls_pic[] =
2401 {
2402 ARM_INSN (0xe59f1000), /* ldr r1, [pc] */
2403 ARM_INSN (0xe08ff001), /* add pc, pc, r1 */
2404 DATA_WORD (0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */
2405 };
2406
/* V4T Thumb -> TLS trampoline.  lowest common denominator, which is a
   long PIC stub.  We can use r1 as a scratch -- and cannot use ip.
   The data word is relative to the "add pc" PC read, hence the -4
   addend.  */
static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_tls_pic[] =
{
  THUMB16_INSN (0x4778),	     /* bx   pc */
  THUMB16_INSN (0x46c0),	     /* nop */
  ARM_INSN (0xe59f1000),	     /* ldr  r1, [pc, #0] */
  ARM_INSN (0xe081f00f),	     /* add  pc, r1, pc */
  DATA_WORD (0, R_ARM_REL32, -4),    /* dcd  R_ARM_REL32(X-4) */
};
2417
2418 /* Cortex-A8 erratum-workaround stubs. */
2419
2420 /* Stub used for conditional branches (which may be beyond +/-1MB away, so we
2421 can't use a conditional branch to reach this stub). */
2422
2423 static const insn_sequence elf32_arm_stub_a8_veneer_b_cond[] =
2424 {
2425 THUMB16_BCOND_INSN (0xd001), /* b<cond>.n true. */
2426 THUMB32_B_INSN (0xf000b800, -4), /* b.w insn_after_original_branch. */
2427 THUMB32_B_INSN (0xf000b800, -4) /* true: b.w original_branch_dest. */
2428 };
2429
2430 /* Stub used for b.w and bl.w instructions. */
2431
2432 static const insn_sequence elf32_arm_stub_a8_veneer_b[] =
2433 {
2434 THUMB32_B_INSN (0xf000b800, -4) /* b.w original_branch_dest. */
2435 };
2436
2437 static const insn_sequence elf32_arm_stub_a8_veneer_bl[] =
2438 {
2439 THUMB32_B_INSN (0xf000b800, -4) /* b.w original_branch_dest. */
2440 };
2441
2442 /* Stub used for Thumb-2 blx.w instructions. We modified the original blx.w
2443 instruction (which switches to ARM mode) to point to this stub. Jump to the
2444 real destination using an ARM-mode branch. */
2445
2446 static const insn_sequence elf32_arm_stub_a8_veneer_blx[] =
2447 {
2448 ARM_REL_INSN (0xea000000, -8) /* b original_branch_dest. */
2449 };
2450
2451 /* For each section group there can be a specially created linker section
2452 to hold the stubs for that group. The name of the stub section is based
2453 upon the name of another section within that group with the suffix below
2454 applied.
2455
2456 PR 13049: STUB_SUFFIX used to be ".stub", but this allowed the user to
2457 create what appeared to be a linker stub section when it actually
2458 contained user code/data. For example, consider this fragment:
2459
2460 const char * stubborn_problems[] = { "np" };
2461
2462 If this is compiled with "-fPIC -fdata-sections" then gcc produces a
2463 section called:
2464
2465 .data.rel.local.stubborn_problems
2466
2467 This then causes problems in arm32_arm_build_stubs() as it triggers:
2468
2469 // Ignore non-stub sections.
2470 if (!strstr (stub_sec->name, STUB_SUFFIX))
2471 continue;
2472
2473 And so the section would be ignored instead of being processed. Hence
2474 the change in definition of STUB_SUFFIX to a name that cannot be a valid
2475 C identifier. */
2476 #define STUB_SUFFIX ".__stub"
2477
/* One entry per long/short branch stub defined above.  DEF_STUBS is an
   X-macro: it is expanded twice with different DEF_STUB definitions,
   once to build the elf32_arm_stub_type enum and once to build the
   parallel stub_definitions table, keeping the two in lockstep.  */
#define DEF_STUBS \
  DEF_STUB(long_branch_any_any)	\
  DEF_STUB(long_branch_v4t_arm_thumb) \
  DEF_STUB(long_branch_thumb_only) \
  DEF_STUB(long_branch_v4t_thumb_thumb)	\
  DEF_STUB(long_branch_v4t_thumb_arm) \
  DEF_STUB(short_branch_v4t_thumb_arm) \
  DEF_STUB(long_branch_any_arm_pic) \
  DEF_STUB(long_branch_any_thumb_pic) \
  DEF_STUB(long_branch_v4t_thumb_thumb_pic) \
  DEF_STUB(long_branch_v4t_arm_thumb_pic) \
  DEF_STUB(long_branch_v4t_thumb_arm_pic) \
  DEF_STUB(long_branch_thumb_only_pic) \
  DEF_STUB(long_branch_any_tls_pic) \
  DEF_STUB(long_branch_v4t_thumb_tls_pic) \
  DEF_STUB(a8_veneer_b_cond) \
  DEF_STUB(a8_veneer_b) \
  DEF_STUB(a8_veneer_bl) \
  DEF_STUB(a8_veneer_blx)

#define DEF_STUB(x) arm_stub_##x,
enum elf32_arm_stub_type
{
  arm_stub_none,
  DEF_STUBS
  /* Note the first a8_veneer type */
  arm_stub_a8_veneer_lwm = arm_stub_a8_veneer_b_cond
};
#undef DEF_STUB

/* A stub template and its length in insn_sequence elements.  */
typedef struct
{
  const insn_sequence* template_sequence;
  int template_size;
} stub_def;

/* Indexed by elf32_arm_stub_type; the leading {NULL, 0} entry
   corresponds to arm_stub_none.  */
#define DEF_STUB(x) {elf32_arm_stub_##x, ARRAY_SIZE(elf32_arm_stub_##x)},
static const stub_def stub_definitions[] =
{
  {NULL, 0},
  DEF_STUBS
};
2521
2522 struct elf32_arm_stub_hash_entry
2523 {
2524 /* Base hash table entry structure. */
2525 struct bfd_hash_entry root;
2526
2527 /* The stub section. */
2528 asection *stub_sec;
2529
2530 /* Offset within stub_sec of the beginning of this stub. */
2531 bfd_vma stub_offset;
2532
2533 /* Given the symbol's value and its section we can determine its final
2534 value when building the stubs (so the stub knows where to jump). */
2535 bfd_vma target_value;
2536 asection *target_section;
2537
2538 /* Offset to apply to relocation referencing target_value. */
2539 bfd_vma target_addend;
2540
2541 /* The instruction which caused this stub to be generated (only valid for
2542 Cortex-A8 erratum workaround stubs at present). */
2543 unsigned long orig_insn;
2544
2545 /* The stub type. */
2546 enum elf32_arm_stub_type stub_type;
2547 /* Its encoding size in bytes. */
2548 int stub_size;
2549 /* Its template. */
2550 const insn_sequence *stub_template;
2551 /* The size of the template (number of entries). */
2552 int stub_template_size;
2553
2554 /* The symbol table entry, if any, that this was derived from. */
2555 struct elf32_arm_link_hash_entry *h;
2556
2557 /* Type of branch. */
2558 enum arm_st_branch_type branch_type;
2559
2560 /* Where this stub is being called from, or, in the case of combined
2561 stub sections, the first input section in the group. */
2562 asection *id_sec;
2563
2564 /* The name for the local symbol at the start of this stub. The
2565 stub name in the hash table has to be unique; this does not, so
2566 it can be friendlier. */
2567 char *output_name;
2568 };
2569
2570 /* Used to build a map of a section. This is required for mixed-endian
2571 code/data. */
2572
2573 typedef struct elf32_elf_section_map
2574 {
2575 bfd_vma vma;
2576 char type;
2577 }
2578 elf32_arm_section_map;
2579
2580 /* Information about a VFP11 erratum veneer, or a branch to such a veneer. */
2581
2582 typedef enum
2583 {
2584 VFP11_ERRATUM_BRANCH_TO_ARM_VENEER,
2585 VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER,
2586 VFP11_ERRATUM_ARM_VENEER,
2587 VFP11_ERRATUM_THUMB_VENEER
2588 }
2589 elf32_vfp11_erratum_type;
2590
2591 typedef struct elf32_vfp11_erratum_list
2592 {
2593 struct elf32_vfp11_erratum_list *next;
2594 bfd_vma vma;
2595 union
2596 {
2597 struct
2598 {
2599 struct elf32_vfp11_erratum_list *veneer;
2600 unsigned int vfp_insn;
2601 } b;
2602 struct
2603 {
2604 struct elf32_vfp11_erratum_list *branch;
2605 unsigned int id;
2606 } v;
2607 } u;
2608 elf32_vfp11_erratum_type type;
2609 }
2610 elf32_vfp11_erratum_list;
2611
2612 typedef enum
2613 {
2614 DELETE_EXIDX_ENTRY,
2615 INSERT_EXIDX_CANTUNWIND_AT_END
2616 }
2617 arm_unwind_edit_type;
2618
2619 /* A (sorted) list of edits to apply to an unwind table. */
2620 typedef struct arm_unwind_table_edit
2621 {
2622 arm_unwind_edit_type type;
2623 /* Note: we sometimes want to insert an unwind entry corresponding to a
2624 section different from the one we're currently writing out, so record the
2625 (text) section this edit relates to here. */
2626 asection *linked_section;
2627 unsigned int index;
2628 struct arm_unwind_table_edit *next;
2629 }
2630 arm_unwind_table_edit;
2631
2632 typedef struct _arm_elf_section_data
2633 {
2634 /* Information about mapping symbols. */
2635 struct bfd_elf_section_data elf;
2636 unsigned int mapcount;
2637 unsigned int mapsize;
2638 elf32_arm_section_map *map;
2639 /* Information about CPU errata. */
2640 unsigned int erratumcount;
2641 elf32_vfp11_erratum_list *erratumlist;
2642 /* Information about unwind tables. */
2643 union
2644 {
2645 /* Unwind info attached to a text section. */
2646 struct
2647 {
2648 asection *arm_exidx_sec;
2649 } text;
2650
2651 /* Unwind info attached to an .ARM.exidx section. */
2652 struct
2653 {
2654 arm_unwind_table_edit *unwind_edit_list;
2655 arm_unwind_table_edit *unwind_edit_tail;
2656 } exidx;
2657 } u;
2658 }
2659 _arm_elf_section_data;
2660
2661 #define elf32_arm_section_data(sec) \
2662 ((_arm_elf_section_data *) elf_section_data (sec))
2663
2664 /* A fix which might be required for Cortex-A8 Thumb-2 branch/TLB erratum.
2665 These fixes are subject to a relaxation procedure (in elf32_arm_size_stubs),
2666 so may be created multiple times: we use an array of these entries whilst
2667 relaxing which we can refresh easily, then create stubs for each potentially
2668 erratum-triggering instruction once we've settled on a solution. */
2669
2670 struct a8_erratum_fix
2671 {
2672 bfd *input_bfd;
2673 asection *section;
2674 bfd_vma offset;
2675 bfd_vma addend;
2676 unsigned long orig_insn;
2677 char *stub_name;
2678 enum elf32_arm_stub_type stub_type;
2679 enum arm_st_branch_type branch_type;
2680 };
2681
2682 /* A table of relocs applied to branches which might trigger Cortex-A8
2683 erratum. */
2684
2685 struct a8_erratum_reloc
2686 {
2687 bfd_vma from;
2688 bfd_vma destination;
2689 struct elf32_arm_link_hash_entry *hash;
2690 const char *sym_name;
2691 unsigned int r_type;
2692 enum arm_st_branch_type branch_type;
2693 bfd_boolean non_a8_stub;
2694 };
2695
2696 /* The size of the thread control block. */
2697 #define TCB_SIZE 8
2698
2699 /* ARM-specific information about a PLT entry, over and above the usual
2700 gotplt_union. */
2701 struct arm_plt_info
2702 {
2703 /* We reference count Thumb references to a PLT entry separately,
2704 so that we can emit the Thumb trampoline only if needed. */
2705 bfd_signed_vma thumb_refcount;
2706
2707 /* Some references from Thumb code may be eliminated by BL->BLX
2708 conversion, so record them separately. */
2709 bfd_signed_vma maybe_thumb_refcount;
2710
2711 /* How many of the recorded PLT accesses were from non-call relocations.
2712 This information is useful when deciding whether anything takes the
2713 address of an STT_GNU_IFUNC PLT. A value of 0 means that all
2714 non-call references to the function should resolve directly to the
2715 real runtime target. */
2716 unsigned int noncall_refcount;
2717
2718 /* Since PLT entries have variable size if the Thumb prologue is
2719 used, we need to record the index into .got.plt instead of
2720 recomputing it from the PLT offset. */
2721 bfd_signed_vma got_offset;
2722 };
2723
2724 /* Information about an .iplt entry for a local STT_GNU_IFUNC symbol. */
2725 struct arm_local_iplt_info
2726 {
2727 /* The information that is usually found in the generic ELF part of
2728 the hash table entry. */
2729 union gotplt_union root;
2730
2731 /* The information that is usually found in the ARM-specific part of
2732 the hash table entry. */
2733 struct arm_plt_info arm;
2734
2735 /* A list of all potential dynamic relocations against this symbol. */
2736 struct elf_dyn_relocs *dyn_relocs;
2737 };
2738
2739 struct elf_arm_obj_tdata
2740 {
2741 struct elf_obj_tdata root;
2742
2743 /* tls_type for each local got entry. */
2744 char *local_got_tls_type;
2745
2746 /* GOTPLT entries for TLS descriptors. */
2747 bfd_vma *local_tlsdesc_gotent;
2748
2749 /* Information for local symbols that need entries in .iplt. */
2750 struct arm_local_iplt_info **local_iplt;
2751
2752 /* Zero to warn when linking objects with incompatible enum sizes. */
2753 int no_enum_size_warning;
2754
2755 /* Zero to warn when linking objects with incompatible wchar_t sizes. */
2756 int no_wchar_size_warning;
2757 };
2758
2759 #define elf_arm_tdata(bfd) \
2760 ((struct elf_arm_obj_tdata *) (bfd)->tdata.any)
2761
2762 #define elf32_arm_local_got_tls_type(bfd) \
2763 (elf_arm_tdata (bfd)->local_got_tls_type)
2764
2765 #define elf32_arm_local_tlsdesc_gotent(bfd) \
2766 (elf_arm_tdata (bfd)->local_tlsdesc_gotent)
2767
2768 #define elf32_arm_local_iplt(bfd) \
2769 (elf_arm_tdata (bfd)->local_iplt)
2770
2771 #define is_arm_elf(bfd) \
2772 (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
2773 && elf_tdata (bfd) != NULL \
2774 && elf_object_id (bfd) == ARM_ELF_DATA)
2775
/* Allocate ARM-specific per-object data (struct elf_arm_obj_tdata) for
   ABFD, tagged with ARM_ELF_DATA so is_arm_elf can identify it.  */

static bfd_boolean
elf32_arm_mkobject (bfd *abfd)
{
  return bfd_elf_allocate_object (abfd, sizeof (struct elf_arm_obj_tdata),
				  ARM_ELF_DATA);
}
2782
2783 #define elf32_arm_hash_entry(ent) ((struct elf32_arm_link_hash_entry *)(ent))
2784
2785 /* Arm ELF linker hash entry. */
2786 struct elf32_arm_link_hash_entry
2787 {
2788 struct elf_link_hash_entry root;
2789
2790 /* Track dynamic relocs copied for this symbol. */
2791 struct elf_dyn_relocs *dyn_relocs;
2792
2793 /* ARM-specific PLT information. */
2794 struct arm_plt_info plt;
2795
2796 #define GOT_UNKNOWN 0
2797 #define GOT_NORMAL 1
2798 #define GOT_TLS_GD 2
2799 #define GOT_TLS_IE 4
2800 #define GOT_TLS_GDESC 8
2801 #define GOT_TLS_GD_ANY_P(type) ((type & GOT_TLS_GD) || (type & GOT_TLS_GDESC))
2802 unsigned int tls_type : 8;
2803
2804 /* True if the symbol's PLT entry is in .iplt rather than .plt. */
2805 unsigned int is_iplt : 1;
2806
2807 unsigned int unused : 23;
2808
2809 /* Offset of the GOTPLT entry reserved for the TLS descriptor,
2810 starting at the end of the jump table. */
2811 bfd_vma tlsdesc_got;
2812
2813 /* The symbol marking the real symbol location for exported thumb
2814 symbols with Arm stubs. */
2815 struct elf_link_hash_entry *export_glue;
2816
2817 /* A pointer to the most recently used stub hash entry against this
2818 symbol. */
2819 struct elf32_arm_stub_hash_entry *stub_cache;
2820 };
2821
2822 /* Traverse an arm ELF linker hash table. */
2823 #define elf32_arm_link_hash_traverse(table, func, info) \
2824 (elf_link_hash_traverse \
2825 (&(table)->root, \
2826 (bfd_boolean (*) (struct elf_link_hash_entry *, void *)) (func), \
2827 (info)))
2828
2829 /* Get the ARM elf linker hash table from a link_info structure. */
2830 #define elf32_arm_hash_table(info) \
2831 (elf_hash_table_id ((struct elf_link_hash_table *) ((info)->hash)) \
2832 == ARM_ELF_DATA ? ((struct elf32_arm_link_hash_table *) ((info)->hash)) : NULL)
2833
2834 #define arm_stub_hash_lookup(table, string, create, copy) \
2835 ((struct elf32_arm_stub_hash_entry *) \
2836 bfd_hash_lookup ((table), (string), (create), (copy)))
2837
2838 /* Array to keep track of which stub sections have been created, and
2839 information on stub grouping. */
2840 struct map_stub
2841 {
2842 /* This is the section to which stubs in the group will be
2843 attached. */
2844 asection *link_sec;
2845 /* The stub section. */
2846 asection *stub_sec;
2847 };
2848
/* Bytes used so far by TLS-descriptor slots in the GOT jump table:
   one 4-byte slot per allocated R_ARM_TLS_DESC index.  */
#define elf32_arm_compute_jump_table_size(htab) \
  ((htab)->next_tls_desc_index * 4)
2851
/* ARM ELF linker hash table.  */
struct elf32_arm_link_hash_table
{
  /* The main hash table.  */
  struct elf_link_hash_table root;

  /* The size in bytes of the section containing the Thumb-to-ARM glue.  */
  bfd_size_type thumb_glue_size;

  /* The size in bytes of the section containing the ARM-to-Thumb glue.  */
  bfd_size_type arm_glue_size;

  /* The size in bytes of section containing the ARMv4 BX veneers.  */
  bfd_size_type bx_glue_size;

  /* Offsets of ARMv4 BX veneers.  Bit1 set if present, and Bit0 set when
     veneer has been populated.  */
  bfd_vma bx_glue_offset[15];

  /* The size in bytes of the section containing glue for VFP11 erratum
     veneers.  */
  bfd_size_type vfp11_erratum_glue_size;

  /* A table of fix locations for Cortex-A8 Thumb-2 branch/TLB erratum.  This
     holds Cortex-A8 erratum fix locations between elf32_arm_size_stubs() and
     elf32_arm_write_section().  */
  struct a8_erratum_fix *a8_erratum_fixes;
  unsigned int num_a8_erratum_fixes;

  /* An arbitrary input BFD chosen to hold the glue sections.  */
  bfd * bfd_of_glue_owner;

  /* Nonzero to output a BE8 image.  */
  int byteswap_code;

  /* Zero if R_ARM_TARGET1 means R_ARM_ABS32.
     Nonzero if R_ARM_TARGET1 means R_ARM_REL32.  */
  int target1_is_rel;

  /* The relocation to use for R_ARM_TARGET2 relocations.  */
  int target2_reloc;

  /* 0 = Ignore R_ARM_V4BX.
     1 = Convert BX to MOV PC.
     2 = Generate v4 interworking stubs.  */
  int fix_v4bx;

  /* Whether we should fix the Cortex-A8 Thumb-2 branch/TLB erratum.  */
  int fix_cortex_a8;

  /* Whether we should fix the ARM1176 BLX immediate issue.  */
  int fix_arm1176;

  /* Nonzero if the ARM/Thumb BLX instructions are available for use.  */
  int use_blx;

  /* What sort of code sequences we should look for which may trigger the
     VFP11 denorm erratum.  */
  bfd_arm_vfp11_fix vfp11_fix;

  /* Global counter for the number of fixes we have emitted.  */
  int num_vfp11_fixes;

  /* Nonzero to force PIC branch veneers.  */
  int pic_veneer;

  /* The number of bytes in the initial entry in the PLT.  */
  bfd_size_type plt_header_size;

  /* The number of bytes in the subsequent PLT entries.  */
  bfd_size_type plt_entry_size;

  /* True if the target system is VxWorks.  */
  int vxworks_p;

  /* True if the target system is Symbian OS.  */
  int symbian_p;

  /* True if the target system is Native Client.  */
  int nacl_p;

  /* True if the target uses REL relocations.  */
  int use_rel;

  /* The index of the next unused R_ARM_TLS_DESC slot in .rel.plt.  */
  bfd_vma next_tls_desc_index;

  /* How many R_ARM_TLS_DESC relocations were generated so far.  */
  bfd_vma num_tls_desc;

  /* Short-cuts to get to dynamic linker sections.  */
  asection *sdynbss;
  asection *srelbss;

  /* The (unloaded but important) VxWorks .rela.plt.unloaded section.  */
  asection *srelplt2;

  /* The offset into splt of the PLT entry for the TLS descriptor
     resolver.  Special values are 0, if not necessary (or not found
     to be necessary yet), and -1 if needed but not determined
     yet.  */
  bfd_vma dt_tlsdesc_plt;

  /* The offset into sgot of the GOT entry used by the PLT entry
     above.  */
  bfd_vma dt_tlsdesc_got;

  /* Offset in .plt section of tls_arm_trampoline.  */
  bfd_vma tls_trampoline;

  /* Data for R_ARM_TLS_LDM32 relocations.  */
  union
  {
    bfd_signed_vma refcount;
    bfd_vma offset;
  } tls_ldm_got;

  /* Small local sym cache.  */
  struct sym_cache sym_cache;

  /* For convenience in allocate_dynrelocs.  */
  bfd * obfd;

  /* The amount of space used by the reserved portion of the sgotplt
     section, plus whatever space is used by the jump slots.  */
  bfd_vma sgotplt_jump_table_size;

  /* The stub hash table.  */
  struct bfd_hash_table stub_hash_table;

  /* Linker stub bfd.  */
  bfd *stub_bfd;

  /* Linker call-backs.  */
  asection * (*add_stub_section) (const char *, asection *);
  void (*layout_sections_again) (void);

  /* Array to keep track of which stub sections have been created, and
     information on stub grouping.  */
  struct map_stub *stub_group;

  /* Number of elements in stub_group.  */
  int top_id;

  /* Assorted information used by elf32_arm_size_stubs.  */
  unsigned int bfd_count;
  int top_index;
  asection **input_list;
};
3001
/* Create an entry in an ARM ELF linker hash table.  Used as the
   newfunc callback when the hash table is initialised; allocates the
   ARM-sized entry, lets the generic ELF superclass initialise its
   part, then fills in the ARM-specific fields.  */

static struct bfd_hash_entry *
elf32_arm_link_hash_newfunc (struct bfd_hash_entry * entry,
			     struct bfd_hash_table * table,
			     const char * string)
{
  struct elf32_arm_link_hash_entry * ret =
    (struct elf32_arm_link_hash_entry *) entry;

  /* Allocate the structure if it has not already been allocated by a
     subclass.  */
  if (ret == NULL)
    ret = (struct elf32_arm_link_hash_entry *)
        bfd_hash_allocate (table, sizeof (struct elf32_arm_link_hash_entry));
  if (ret == NULL)
    return (struct bfd_hash_entry *) ret;

  /* Call the allocation method of the superclass.  */
  ret = ((struct elf32_arm_link_hash_entry *)
	 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret,
				     table, string));
  if (ret != NULL)
    {
      /* ARM-specific state: no dynamic relocs yet, GOT/TLS info
	 unknown, PLT reference counts zeroed, offsets "unset" (-1).  */
      ret->dyn_relocs = NULL;
      ret->tls_type = GOT_UNKNOWN;
      ret->tlsdesc_got = (bfd_vma) -1;
      ret->plt.thumb_refcount = 0;
      ret->plt.maybe_thumb_refcount = 0;
      ret->plt.noncall_refcount = 0;
      ret->plt.got_offset = -1;
      ret->is_iplt = FALSE;
      ret->export_glue = NULL;

      ret->stub_cache = NULL;
    }

  return (struct bfd_hash_entry *) ret;
}
3041
/* Ensure that we have allocated bookkeeping structures for ABFD's local
   symbols.  Returns FALSE only on allocation failure.  Idempotent: the
   presence of the local-GOT refcount array marks the whole set as
   allocated.  */

static bfd_boolean
elf32_arm_allocate_local_sym_info (bfd *abfd)
{
  if (elf_local_got_refcounts (abfd) == NULL)
    {
      bfd_size_type num_syms;
      bfd_size_type size;
      char *data;

      /* A single zeroed allocation is carved up into four parallel
	 per-local-symbol arrays: GOT refcounts, .iplt info pointers,
	 TLS descriptor GOT offsets, and GOT TLS-type bytes.  The
	 carve-up below must match this size computation exactly.  */
      num_syms = elf_tdata (abfd)->symtab_hdr.sh_info;
      size = num_syms * (sizeof (bfd_signed_vma)
			 + sizeof (struct arm_local_iplt_info *)
			 + sizeof (bfd_vma)
			 + sizeof (char));
      data = bfd_zalloc (abfd, size);
      if (data == NULL)
	return FALSE;

      elf_local_got_refcounts (abfd) = (bfd_signed_vma *) data;
      data += num_syms * sizeof (bfd_signed_vma);

      elf32_arm_local_iplt (abfd) = (struct arm_local_iplt_info **) data;
      data += num_syms * sizeof (struct arm_local_iplt_info *);

      elf32_arm_local_tlsdesc_gotent (abfd) = (bfd_vma *) data;
      data += num_syms * sizeof (bfd_vma);

      elf32_arm_local_got_tls_type (abfd) = data;
    }
  return TRUE;
}
3076
3077 /* Return the .iplt information for local symbol R_SYMNDX, which belongs
3078 to input bfd ABFD. Create the information if it doesn't already exist.
3079 Return null if an allocation fails. */
3080
3081 static struct arm_local_iplt_info *
3082 elf32_arm_create_local_iplt (bfd *abfd, unsigned long r_symndx)
3083 {
3084 struct arm_local_iplt_info **ptr;
3085
3086 if (!elf32_arm_allocate_local_sym_info (abfd))
3087 return NULL;
3088
3089 BFD_ASSERT (r_symndx < elf_tdata (abfd)->symtab_hdr.sh_info);
3090 ptr = &elf32_arm_local_iplt (abfd)[r_symndx];
3091 if (*ptr == NULL)
3092 *ptr = bfd_zalloc (abfd, sizeof (**ptr));
3093 return *ptr;
3094 }
3095
3096 /* Try to obtain PLT information for the symbol with index R_SYMNDX
3097 in ABFD's symbol table. If the symbol is global, H points to its
3098 hash table entry, otherwise H is null.
3099
3100 Return true if the symbol does have PLT information. When returning
3101 true, point *ROOT_PLT at the target-independent reference count/offset
3102 union and *ARM_PLT at the ARM-specific information. */
3103
3104 static bfd_boolean
3105 elf32_arm_get_plt_info (bfd *abfd, struct elf32_arm_link_hash_entry *h,
3106 unsigned long r_symndx, union gotplt_union **root_plt,
3107 struct arm_plt_info **arm_plt)
3108 {
3109 struct arm_local_iplt_info *local_iplt;
3110
3111 if (h != NULL)
3112 {
3113 *root_plt = &h->root.plt;
3114 *arm_plt = &h->plt;
3115 return TRUE;
3116 }
3117
3118 if (elf32_arm_local_iplt (abfd) == NULL)
3119 return FALSE;
3120
3121 local_iplt = elf32_arm_local_iplt (abfd)[r_symndx];
3122 if (local_iplt == NULL)
3123 return FALSE;
3124
3125 *root_plt = &local_iplt->root;
3126 *arm_plt = &local_iplt->arm;
3127 return TRUE;
3128 }
3129
3130 /* Return true if the PLT described by ARM_PLT requires a Thumb stub
3131 before it. */
3132
3133 static bfd_boolean
3134 elf32_arm_plt_needs_thumb_stub_p (struct bfd_link_info *info,
3135 struct arm_plt_info *arm_plt)
3136 {
3137 struct elf32_arm_link_hash_table *htab;
3138
3139 htab = elf32_arm_hash_table (info);
3140 return (arm_plt->thumb_refcount != 0
3141 || (!htab->use_blx && arm_plt->maybe_thumb_refcount != 0));
3142 }
3143
3144 /* Return a pointer to the head of the dynamic reloc list that should
3145 be used for local symbol ISYM, which is symbol number R_SYMNDX in
3146 ABFD's symbol table. Return null if an error occurs. */
3147
3148 static struct elf_dyn_relocs **
3149 elf32_arm_get_local_dynreloc_list (bfd *abfd, unsigned long r_symndx,
3150 Elf_Internal_Sym *isym)
3151 {
3152 if (ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
3153 {
3154 struct arm_local_iplt_info *local_iplt;
3155
3156 local_iplt = elf32_arm_create_local_iplt (abfd, r_symndx);
3157 if (local_iplt == NULL)
3158 return NULL;
3159 return &local_iplt->dyn_relocs;
3160 }
3161 else
3162 {
3163 /* Track dynamic relocs needed for local syms too.
3164 We really need local syms available to do this
3165 easily. Oh well. */
3166 asection *s;
3167 void *vpp;
3168
3169 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
3170 if (s == NULL)
3171 abort ();
3172
3173 vpp = &elf_section_data (s)->local_dynrel;
3174 return (struct elf_dyn_relocs **) vpp;
3175 }
3176 }
3177
/* Initialize an entry in the stub hash table.  Newfunc callback for
   the stub hash table: allocates the derived entry if needed, lets the
   generic bfd_hash superclass initialise its part, then clears the
   ARM stub fields.  */

static struct bfd_hash_entry *
stub_hash_newfunc (struct bfd_hash_entry *entry,
		   struct bfd_hash_table *table,
		   const char *string)
{
  /* Allocate the structure if it has not already been allocated by a
     subclass.  */
  if (entry == NULL)
    {
      entry = (struct bfd_hash_entry *)
          bfd_hash_allocate (table, sizeof (struct elf32_arm_stub_hash_entry));
      if (entry == NULL)
	return entry;
    }

  /* Call the allocation method of the superclass.  */
  entry = bfd_hash_newfunc (entry, table, string);
  if (entry != NULL)
    {
      struct elf32_arm_stub_hash_entry *eh;

      /* Initialize the local fields.  */
      eh = (struct elf32_arm_stub_hash_entry *) entry;
      eh->stub_sec = NULL;
      eh->stub_offset = 0;
      eh->target_value = 0;
      eh->target_section = NULL;
      eh->target_addend = 0;
      eh->orig_insn = 0;
      eh->stub_type = arm_stub_none;
      eh->stub_size = 0;
      eh->stub_template = NULL;
      eh->stub_template_size = 0;
      eh->h = NULL;
      eh->id_sec = NULL;
      eh->output_name = NULL;
    }

  return entry;
}
3220
3221 /* Create .got, .gotplt, and .rel(a).got sections in DYNOBJ, and set up
3222 shortcuts to them in our hash table. */
3223
3224 static bfd_boolean
3225 create_got_section (bfd *dynobj, struct bfd_link_info *info)
3226 {
3227 struct elf32_arm_link_hash_table *htab;
3228
3229 htab = elf32_arm_hash_table (info);
3230 if (htab == NULL)
3231 return FALSE;
3232
3233 /* BPABI objects never have a GOT, or associated sections. */
3234 if (htab->symbian_p)
3235 return TRUE;
3236
3237 if (! _bfd_elf_create_got_section (dynobj, info))
3238 return FALSE;
3239
3240 return TRUE;
3241 }
3242
3243 /* Create the .iplt, .rel(a).iplt and .igot.plt sections. */
3244
3245 static bfd_boolean
3246 create_ifunc_sections (struct bfd_link_info *info)
3247 {
3248 struct elf32_arm_link_hash_table *htab;
3249 const struct elf_backend_data *bed;
3250 bfd *dynobj;
3251 asection *s;
3252 flagword flags;
3253
3254 htab = elf32_arm_hash_table (info);
3255 dynobj = htab->root.dynobj;
3256 bed = get_elf_backend_data (dynobj);
3257 flags = bed->dynamic_sec_flags;
3258
3259 if (htab->root.iplt == NULL)
3260 {
3261 s = bfd_make_section_anyway_with_flags (dynobj, ".iplt",
3262 flags | SEC_READONLY | SEC_CODE);
3263 if (s == NULL
3264 || !bfd_set_section_alignment (dynobj, s, bed->plt_alignment))
3265 return FALSE;
3266 htab->root.iplt = s;
3267 }
3268
3269 if (htab->root.irelplt == NULL)
3270 {
3271 s = bfd_make_section_anyway_with_flags (dynobj,
3272 RELOC_SECTION (htab, ".iplt"),
3273 flags | SEC_READONLY);
3274 if (s == NULL
3275 || !bfd_set_section_alignment (dynobj, s, bed->s->log_file_align))
3276 return FALSE;
3277 htab->root.irelplt = s;
3278 }
3279
3280 if (htab->root.igotplt == NULL)
3281 {
3282 s = bfd_make_section_anyway_with_flags (dynobj, ".igot.plt", flags);
3283 if (s == NULL
3284 || !bfd_set_section_alignment (dynobj, s, bed->s->log_file_align))
3285 return FALSE;
3286 htab->root.igotplt = s;
3287 }
3288 return TRUE;
3289 }
3290
/* Create .plt, .rel(a).plt, .got, .got.plt, .rel(a).got, .dynbss, and
   .rel(a).bss sections in DYNOBJ, and set up shortcuts to them in our
   hash table.  The GOT sections are created first (if absent) because
   the generic dynamic-section creation relies on them; VxWorks targets
   additionally get .rela.plt.unloaded and their own PLT sizes.  */

static bfd_boolean
elf32_arm_create_dynamic_sections (bfd *dynobj, struct bfd_link_info *info)
{
  struct elf32_arm_link_hash_table *htab;

  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return FALSE;

  if (!htab->root.sgot && !create_got_section (dynobj, info))
    return FALSE;

  if (!_bfd_elf_create_dynamic_sections (dynobj, info))
    return FALSE;

  /* Cache shortcuts; .rel(a).bss is only needed for executables.  */
  htab->sdynbss = bfd_get_linker_section (dynobj, ".dynbss");
  if (!info->shared)
    htab->srelbss = bfd_get_linker_section (dynobj,
					    RELOC_SECTION (htab, ".bss"));

  if (htab->vxworks_p)
    {
      if (!elf_vxworks_create_dynamic_sections (dynobj, info, &htab->srelplt2))
	return FALSE;

      /* VxWorks uses its own PLT entry templates; shared links have no
	 PLT header.  */
      if (info->shared)
	{
	  htab->plt_header_size = 0;
	  htab->plt_entry_size
	    = 4 * ARRAY_SIZE (elf32_arm_vxworks_shared_plt_entry);
	}
      else
	{
	  htab->plt_header_size
	    = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt0_entry);
	  htab->plt_entry_size
	    = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt_entry);
	}
    }

  /* By this point the generic code must have created everything we
     rely on; a missing section indicates a linker bug.  */
  if (!htab->root.splt
      || !htab->root.srelplt
      || !htab->sdynbss
      || (!info->shared && !htab->srelbss))
    abort ();

  return TRUE;
}
3343
/* Copy the extra info we tack onto an elf_link_hash_entry.  Called when
   symbol IND becomes an indirect or warning reference to DIR; merges
   IND's ARM-specific bookkeeping (dynamic relocs, PLT refcounts, TLS
   type) into DIR before handing off to the generic ELF copier.  */

static void
elf32_arm_copy_indirect_symbol (struct bfd_link_info *info,
				struct elf_link_hash_entry *dir,
				struct elf_link_hash_entry *ind)
{
  struct elf32_arm_link_hash_entry *edir, *eind;

  edir = (struct elf32_arm_link_hash_entry *) dir;
  eind = (struct elf32_arm_link_hash_entry *) ind;

  if (eind->dyn_relocs != NULL)
    {
      if (edir->dyn_relocs != NULL)
	{
	  struct elf_dyn_relocs **pp;
	  struct elf_dyn_relocs *p;

	  /* Add reloc counts against the indirect sym to the direct sym
	     list.  Merge any entries against the same section.  */
	  for (pp = &eind->dyn_relocs; (p = *pp) != NULL; )
	    {
	      struct elf_dyn_relocs *q;

	      for (q = edir->dyn_relocs; q != NULL; q = q->next)
		if (q->sec == p->sec)
		  {
		    q->pc_count += p->pc_count;
		    q->count += p->count;
		    /* Unlink P from IND's list; do not advance PP.  */
		    *pp = p->next;
		    break;
		  }
	      if (q == NULL)
		pp = &p->next;
	    }
	  /* Splice DIR's old list onto the end of what remains.  */
	  *pp = edir->dyn_relocs;
	}

      edir->dyn_relocs = eind->dyn_relocs;
      eind->dyn_relocs = NULL;
    }

  if (ind->root.type == bfd_link_hash_indirect)
    {
      /* Copy over PLT info.  */
      edir->plt.thumb_refcount += eind->plt.thumb_refcount;
      eind->plt.thumb_refcount = 0;
      edir->plt.maybe_thumb_refcount += eind->plt.maybe_thumb_refcount;
      eind->plt.maybe_thumb_refcount = 0;
      edir->plt.noncall_refcount += eind->plt.noncall_refcount;
      eind->plt.noncall_refcount = 0;

      /* We should only allocate a function to .iplt once the final
	 symbol information is known.  */
      BFD_ASSERT (!eind->is_iplt);

      /* Only take IND's TLS type if DIR has no GOT references of its
	 own.  */
      if (dir->got.refcount <= 0)
	{
	  edir->tls_type = eind->tls_type;
	  eind->tls_type = GOT_UNKNOWN;
	}
    }

  _bfd_elf_link_hash_copy_indirect (info, dir, ind);
}
3410
3411 /* Create an ARM elf linker hash table. */
3412
3413 static struct bfd_link_hash_table *
3414 elf32_arm_link_hash_table_create (bfd *abfd)
3415 {
3416 struct elf32_arm_link_hash_table *ret;
3417 bfd_size_type amt = sizeof (struct elf32_arm_link_hash_table);
3418
3419 ret = (struct elf32_arm_link_hash_table *) bfd_zmalloc (amt);
3420 if (ret == NULL)
3421 return NULL;
3422
3423 if (!_bfd_elf_link_hash_table_init (& ret->root, abfd,
3424 elf32_arm_link_hash_newfunc,
3425 sizeof (struct elf32_arm_link_hash_entry),
3426 ARM_ELF_DATA))
3427 {
3428 free (ret);
3429 return NULL;
3430 }
3431
3432 ret->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
3433 #ifdef FOUR_WORD_PLT
3434 ret->plt_header_size = 16;
3435 ret->plt_entry_size = 16;
3436 #else
3437 ret->plt_header_size = 20;
3438 ret->plt_entry_size = 12;
3439 #endif
3440 ret->use_rel = 1;
3441 ret->obfd = abfd;
3442
3443 if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc,
3444 sizeof (struct elf32_arm_stub_hash_entry)))
3445 {
3446 free (ret);
3447 return NULL;
3448 }
3449
3450 return &ret->root.root;
3451 }
3452
3453 /* Free the derived linker hash table. */
3454
3455 static void
3456 elf32_arm_hash_table_free (struct bfd_link_hash_table *hash)
3457 {
3458 struct elf32_arm_link_hash_table *ret
3459 = (struct elf32_arm_link_hash_table *) hash;
3460
3461 bfd_hash_table_free (&ret->stub_hash_table);
3462 _bfd_elf_link_hash_table_free (hash);
3463 }
3464
3465 /* Determine if we're dealing with a Thumb only architecture. */
3466
3467 static bfd_boolean
3468 using_thumb_only (struct elf32_arm_link_hash_table *globals)
3469 {
3470 int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3471 Tag_CPU_arch);
3472 int profile;
3473
3474 if (arch == TAG_CPU_ARCH_V6_M || arch == TAG_CPU_ARCH_V6S_M)
3475 return TRUE;
3476
3477 if (arch != TAG_CPU_ARCH_V7 && arch != TAG_CPU_ARCH_V7E_M)
3478 return FALSE;
3479
3480 profile = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3481 Tag_CPU_arch_profile);
3482
3483 return profile == 'M';
3484 }
3485
3486 /* Determine if we're dealing with a Thumb-2 object. */
3487
3488 static bfd_boolean
3489 using_thumb2 (struct elf32_arm_link_hash_table *globals)
3490 {
3491 int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3492 Tag_CPU_arch);
3493 return arch == TAG_CPU_ARCH_V6T2 || arch >= TAG_CPU_ARCH_V7;
3494 }
3495
3496 /* Determine what kind of NOPs are available. */
3497
3498 static bfd_boolean
3499 arch_has_arm_nop (struct elf32_arm_link_hash_table *globals)
3500 {
3501 const int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3502 Tag_CPU_arch);
3503 return arch == TAG_CPU_ARCH_V6T2
3504 || arch == TAG_CPU_ARCH_V6K
3505 || arch == TAG_CPU_ARCH_V7
3506 || arch == TAG_CPU_ARCH_V7E_M;
3507 }
3508
3509 static bfd_boolean
3510 arch_has_thumb2_nop (struct elf32_arm_link_hash_table *globals)
3511 {
3512 const int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3513 Tag_CPU_arch);
3514 return (arch == TAG_CPU_ARCH_V6T2 || arch == TAG_CPU_ARCH_V7
3515 || arch == TAG_CPU_ARCH_V7E_M);
3516 }
3517
3518 static bfd_boolean
3519 arm_stub_is_thumb (enum elf32_arm_stub_type stub_type)
3520 {
3521 switch (stub_type)
3522 {
3523 case arm_stub_long_branch_thumb_only:
3524 case arm_stub_long_branch_v4t_thumb_arm:
3525 case arm_stub_short_branch_v4t_thumb_arm:
3526 case arm_stub_long_branch_v4t_thumb_arm_pic:
3527 case arm_stub_long_branch_v4t_thumb_tls_pic:
3528 case arm_stub_long_branch_thumb_only_pic:
3529 return TRUE;
3530 case arm_stub_none:
3531 BFD_FAIL ();
3532 return FALSE;
3533 break;
3534 default:
3535 return FALSE;
3536 }
3537 }
3538
/* Determine the type of stub needed, if any, for a call.

   INPUT_SEC/REL locate the branch; ST_TYPE, HASH, DESTINATION, SYM_SEC
   and NAME describe the target.  *ACTUAL_BRANCH_TYPE is both an input
   (the provisional branch type) and, when a stub is required, an output
   recording the destination type actually used (which may have been
   redirected to an ARM-mode PLT entry).  Returns arm_stub_none when no
   stub is needed.  */

static enum elf32_arm_stub_type
arm_type_of_stub (struct bfd_link_info *info,
		  asection *input_sec,
		  const Elf_Internal_Rela *rel,
		  unsigned char st_type,
		  enum arm_st_branch_type *actual_branch_type,
		  struct elf32_arm_link_hash_entry *hash,
		  bfd_vma destination,
		  asection *sym_sec,
		  bfd *input_bfd,
		  const char *name)
{
  bfd_vma location;
  bfd_signed_vma branch_offset;
  unsigned int r_type;
  struct elf32_arm_link_hash_table * globals;
  int thumb2;
  int thumb_only;
  enum elf32_arm_stub_type stub_type = arm_stub_none;
  int use_plt = 0;
  enum arm_st_branch_type branch_type = *actual_branch_type;
  union gotplt_union *root_plt;
  struct arm_plt_info *arm_plt;

  /* ST_BRANCH_LONG marks a branch already known to be handled
     elsewhere; no stub classification is done for it.  */
  if (branch_type == ST_BRANCH_LONG)
    return stub_type;

  globals = elf32_arm_hash_table (info);
  if (globals == NULL)
    return stub_type;

  thumb_only = using_thumb_only (globals);

  thumb2 = using_thumb2 (globals);

  /* Determine where the call point is.  */
  location = (input_sec->output_offset
	      + input_sec->output_section->vma
	      + rel->r_offset);

  r_type = ELF32_R_TYPE (rel->r_info);

  /* For TLS call relocs, it is the caller's responsibility to provide
     the address of the appropriate trampoline.  */
  if (r_type != R_ARM_TLS_CALL
      && r_type != R_ARM_THM_TLS_CALL
      && elf32_arm_get_plt_info (input_bfd, hash, ELF32_R_SYM (rel->r_info),
				 &root_plt, &arm_plt)
      && root_plt->offset != (bfd_vma) -1)
    {
      asection *splt;

      if (hash == NULL || hash->is_iplt)
	splt = globals->root.iplt;
      else
	splt = globals->root.splt;
      if (splt != NULL)
	{
	  use_plt = 1;

	  /* Note when dealing with PLT entries: the main PLT stub is in
	     ARM mode, so if the branch is in Thumb mode, another
	     Thumb->ARM stub will be inserted later just before the ARM
	     PLT stub.  We don't take this extra distance into account
	     here, because if a long branch stub is needed, we'll add a
	     Thumb->Arm one and branch directly to the ARM PLT entry
	     because it avoids spreading offset corrections in several
	     places.  */

	  destination = (splt->output_section->vma
			 + splt->output_offset
			 + root_plt->offset);
	  st_type = STT_FUNC;
	  branch_type = ST_BRANCH_TO_ARM;
	}
    }
  /* Calls to STT_GNU_IFUNC symbols should go through a PLT.  */
  BFD_ASSERT (st_type != STT_GNU_IFUNC);

  branch_offset = (bfd_signed_vma)(destination - location);

  if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24
      || r_type == R_ARM_THM_TLS_CALL)
    {
      /* Handle cases where:
	 - this call goes too far (different Thumb/Thumb2 max
	   distance)
	 - it's a Thumb->Arm call and blx is not available, or it's a
	   Thumb->Arm branch (not bl).  A stub is needed in this case,
	   but only if this call is not through a PLT entry.  Indeed,
	   PLT stubs handle mode switching already.
      */
      if ((!thumb2
	   && (branch_offset > THM_MAX_FWD_BRANCH_OFFSET
	       || (branch_offset < THM_MAX_BWD_BRANCH_OFFSET)))
	  || (thumb2
	      && (branch_offset > THM2_MAX_FWD_BRANCH_OFFSET
		  || (branch_offset < THM2_MAX_BWD_BRANCH_OFFSET)))
	  || (branch_type == ST_BRANCH_TO_ARM
	      && (((r_type == R_ARM_THM_CALL
		    || r_type == R_ARM_THM_TLS_CALL) && !globals->use_blx)
		  || (r_type == R_ARM_THM_JUMP24))
	      && !use_plt))
	{
	  if (branch_type == ST_BRANCH_TO_THUMB)
	    {
	      /* Thumb to thumb.  */
	      if (!thumb_only)
		{
		  /* NOTE(review): `|' (not `||') is used on these two
		     int flags here and below; presumably intentional
		     and equivalent since both are 0/1 — confirm.  */
		  stub_type = (info->shared | globals->pic_veneer)
		    /* PIC stubs.  */
		    ? ((globals->use_blx
			&& (r_type == R_ARM_THM_CALL))
		       /* V5T and above.  Stub starts with ARM code, so
			  we must be able to switch mode before
			  reaching it, which is only possible for 'bl'
			  (ie R_ARM_THM_CALL relocation).  */
		       ? arm_stub_long_branch_any_thumb_pic
		       /* On V4T, use Thumb code only.  */
		       : arm_stub_long_branch_v4t_thumb_thumb_pic)

		    /* non-PIC stubs.  */
		    : ((globals->use_blx
			&& (r_type == R_ARM_THM_CALL))
		       /* V5T and above.  */
		       ? arm_stub_long_branch_any_any
		       /* V4T.  */
		       : arm_stub_long_branch_v4t_thumb_thumb);
		}
	      else
		{
		  stub_type = (info->shared | globals->pic_veneer)
		    /* PIC stub.  */
		    ? arm_stub_long_branch_thumb_only_pic
		    /* non-PIC stub.  */
		    : arm_stub_long_branch_thumb_only;
		}
	    }
	  else
	    {
	      /* Thumb to arm.  */
	      if (sym_sec != NULL
		  && sym_sec->owner != NULL
		  && !INTERWORK_FLAG (sym_sec->owner))
		{
		  (*_bfd_error_handler)
		    (_("%B(%s): warning: interworking not enabled.\n"
		       "  first occurrence: %B: Thumb call to ARM"),
		     sym_sec->owner, input_bfd, name);
		}

	      stub_type =
		(info->shared | globals->pic_veneer)
		/* PIC stubs.  */
		? (r_type == R_ARM_THM_TLS_CALL
		   /* TLS PIC stubs */
		   ? (globals->use_blx ? arm_stub_long_branch_any_tls_pic
		      : arm_stub_long_branch_v4t_thumb_tls_pic)
		   : ((globals->use_blx && r_type == R_ARM_THM_CALL)
		      /* V5T PIC and above.  */
		      ? arm_stub_long_branch_any_arm_pic
		      /* V4T PIC stub.  */
		      : arm_stub_long_branch_v4t_thumb_arm_pic))

		/* non-PIC stubs.  */
		: ((globals->use_blx && r_type == R_ARM_THM_CALL)
		   /* V5T and above.  */
		   ? arm_stub_long_branch_any_any
		   /* V4T.  */
		   : arm_stub_long_branch_v4t_thumb_arm);

	      /* Handle v4t short branches.  */
	      if ((stub_type == arm_stub_long_branch_v4t_thumb_arm)
		  && (branch_offset <= THM_MAX_FWD_BRANCH_OFFSET)
		  && (branch_offset >= THM_MAX_BWD_BRANCH_OFFSET))
		stub_type = arm_stub_short_branch_v4t_thumb_arm;
	    }
	}
    }
  else if (r_type == R_ARM_CALL
	   || r_type == R_ARM_JUMP24
	   || r_type == R_ARM_PLT32
	   || r_type == R_ARM_TLS_CALL)
    {
      if (branch_type == ST_BRANCH_TO_THUMB)
	{
	  /* Arm to thumb.  */

	  if (sym_sec != NULL
	      && sym_sec->owner != NULL
	      && !INTERWORK_FLAG (sym_sec->owner))
	    {
	      (*_bfd_error_handler)
		(_("%B(%s): warning: interworking not enabled.\n"
		   "  first occurrence: %B: ARM call to Thumb"),
		 sym_sec->owner, input_bfd, name);
	    }

	  /* We have an extra 2-bytes reach because of
	     the mode change (bit 24 (H) of BLX encoding).  */
	  if (branch_offset > (ARM_MAX_FWD_BRANCH_OFFSET + 2)
	      || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET)
	      || (r_type == R_ARM_CALL && !globals->use_blx)
	      || (r_type == R_ARM_JUMP24)
	      || (r_type == R_ARM_PLT32))
	    {
	      stub_type = (info->shared | globals->pic_veneer)
		/* PIC stubs.  */
		? ((globals->use_blx)
		   /* V5T and above.  */
		   ? arm_stub_long_branch_any_thumb_pic
		   /* V4T stub.  */
		   : arm_stub_long_branch_v4t_arm_thumb_pic)

		/* non-PIC stubs.  */
		: ((globals->use_blx)
		   /* V5T and above.  */
		   ? arm_stub_long_branch_any_any
		   /* V4T.  */
		   : arm_stub_long_branch_v4t_arm_thumb);
	    }
	}
      else
	{
	  /* Arm to arm.  */
	  if (branch_offset > ARM_MAX_FWD_BRANCH_OFFSET
	      || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET))
	    {
	      stub_type =
		(info->shared | globals->pic_veneer)
		/* PIC stubs.  */
		? (r_type == R_ARM_TLS_CALL
		   /* TLS PIC Stub */
		   ? arm_stub_long_branch_any_tls_pic
		   : arm_stub_long_branch_any_arm_pic)
		/* non-PIC stubs.  */
		: arm_stub_long_branch_any_any;
	    }
	}
    }

  /* If a stub is needed, record the actual destination type.  */
  if (stub_type != arm_stub_none)
    *actual_branch_type = branch_type;

  return stub_type;
}
3788
3789 /* Build a name for an entry in the stub hash table. */
3790
3791 static char *
3792 elf32_arm_stub_name (const asection *input_section,
3793 const asection *sym_sec,
3794 const struct elf32_arm_link_hash_entry *hash,
3795 const Elf_Internal_Rela *rel,
3796 enum elf32_arm_stub_type stub_type)
3797 {
3798 char *stub_name;
3799 bfd_size_type len;
3800
3801 if (hash)
3802 {
3803 len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 8 + 1 + 2 + 1;
3804 stub_name = (char *) bfd_malloc (len);
3805 if (stub_name != NULL)
3806 sprintf (stub_name, "%08x_%s+%x_%d",
3807 input_section->id & 0xffffffff,
3808 hash->root.root.root.string,
3809 (int) rel->r_addend & 0xffffffff,
3810 (int) stub_type);
3811 }
3812 else
3813 {
3814 len = 8 + 1 + 8 + 1 + 8 + 1 + 8 + 1 + 2 + 1;
3815 stub_name = (char *) bfd_malloc (len);
3816 if (stub_name != NULL)
3817 sprintf (stub_name, "%08x_%x:%x+%x_%d",
3818 input_section->id & 0xffffffff,
3819 sym_sec->id & 0xffffffff,
3820 ELF32_R_TYPE (rel->r_info) == R_ARM_TLS_CALL
3821 || ELF32_R_TYPE (rel->r_info) == R_ARM_THM_TLS_CALL
3822 ? 0 : (int) ELF32_R_SYM (rel->r_info) & 0xffffffff,
3823 (int) rel->r_addend & 0xffffffff,
3824 (int) stub_type);
3825 }
3826
3827 return stub_name;
3828 }
3829
3830 /* Look up an entry in the stub hash. Stub entries are cached because
3831 creating the stub name takes a bit of time. */
3832
3833 static struct elf32_arm_stub_hash_entry *
3834 elf32_arm_get_stub_entry (const asection *input_section,
3835 const asection *sym_sec,
3836 struct elf_link_hash_entry *hash,
3837 const Elf_Internal_Rela *rel,
3838 struct elf32_arm_link_hash_table *htab,
3839 enum elf32_arm_stub_type stub_type)
3840 {
3841 struct elf32_arm_stub_hash_entry *stub_entry;
3842 struct elf32_arm_link_hash_entry *h = (struct elf32_arm_link_hash_entry *) hash;
3843 const asection *id_sec;
3844
3845 if ((input_section->flags & SEC_CODE) == 0)
3846 return NULL;
3847
3848 /* If this input section is part of a group of sections sharing one
3849 stub section, then use the id of the first section in the group.
3850 Stub names need to include a section id, as there may well be
3851 more than one stub used to reach say, printf, and we need to
3852 distinguish between them. */
3853 id_sec = htab->stub_group[input_section->id].link_sec;
3854
3855 if (h != NULL && h->stub_cache != NULL
3856 && h->stub_cache->h == h
3857 && h->stub_cache->id_sec == id_sec
3858 && h->stub_cache->stub_type == stub_type)
3859 {
3860 stub_entry = h->stub_cache;
3861 }
3862 else
3863 {
3864 char *stub_name;
3865
3866 stub_name = elf32_arm_stub_name (id_sec, sym_sec, h, rel, stub_type);
3867 if (stub_name == NULL)
3868 return NULL;
3869
3870 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table,
3871 stub_name, FALSE, FALSE);
3872 if (h != NULL)
3873 h->stub_cache = stub_entry;
3874
3875 free (stub_name);
3876 }
3877
3878 return stub_entry;
3879 }
3880
3881 /* Find or create a stub section. Returns a pointer to the stub section, and
3882 the section to which the stub section will be attached (in *LINK_SEC_P).
3883 LINK_SEC_P may be NULL. */
3884
3885 static asection *
3886 elf32_arm_create_or_find_stub_sec (asection **link_sec_p, asection *section,
3887 struct elf32_arm_link_hash_table *htab)
3888 {
3889 asection *link_sec;
3890 asection *stub_sec;
3891
3892 link_sec = htab->stub_group[section->id].link_sec;
3893 BFD_ASSERT (link_sec != NULL);
3894 stub_sec = htab->stub_group[section->id].stub_sec;
3895
3896 if (stub_sec == NULL)
3897 {
3898 stub_sec = htab->stub_group[link_sec->id].stub_sec;
3899 if (stub_sec == NULL)
3900 {
3901 size_t namelen;
3902 bfd_size_type len;
3903 char *s_name;
3904
3905 namelen = strlen (link_sec->name);
3906 len = namelen + sizeof (STUB_SUFFIX);
3907 s_name = (char *) bfd_alloc (htab->stub_bfd, len);
3908 if (s_name == NULL)
3909 return NULL;
3910
3911 memcpy (s_name, link_sec->name, namelen);
3912 memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
3913 stub_sec = (*htab->add_stub_section) (s_name, link_sec);
3914 if (stub_sec == NULL)
3915 return NULL;
3916 htab->stub_group[link_sec->id].stub_sec = stub_sec;
3917 }
3918 htab->stub_group[section->id].stub_sec = stub_sec;
3919 }
3920
3921 if (link_sec_p)
3922 *link_sec_p = link_sec;
3923
3924 return stub_sec;
3925 }
3926
3927 /* Add a new stub entry to the stub hash. Not all fields of the new
3928 stub entry are initialised. */
3929
3930 static struct elf32_arm_stub_hash_entry *
3931 elf32_arm_add_stub (const char *stub_name,
3932 asection *section,
3933 struct elf32_arm_link_hash_table *htab)
3934 {
3935 asection *link_sec;
3936 asection *stub_sec;
3937 struct elf32_arm_stub_hash_entry *stub_entry;
3938
3939 stub_sec = elf32_arm_create_or_find_stub_sec (&link_sec, section, htab);
3940 if (stub_sec == NULL)
3941 return NULL;
3942
3943 /* Enter this entry into the linker stub hash table. */
3944 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
3945 TRUE, FALSE);
3946 if (stub_entry == NULL)
3947 {
3948 (*_bfd_error_handler) (_("%s: cannot create stub entry %s"),
3949 section->owner,
3950 stub_name);
3951 return NULL;
3952 }
3953
3954 stub_entry->stub_sec = stub_sec;
3955 stub_entry->stub_offset = 0;
3956 stub_entry->id_sec = link_sec;
3957
3958 return stub_entry;
3959 }
3960
3961 /* Store an Arm insn into an output section not processed by
3962 elf32_arm_write_section. */
3963
3964 static void
3965 put_arm_insn (struct elf32_arm_link_hash_table * htab,
3966 bfd * output_bfd, bfd_vma val, void * ptr)
3967 {
3968 if (htab->byteswap_code != bfd_little_endian (output_bfd))
3969 bfd_putl32 (val, ptr);
3970 else
3971 bfd_putb32 (val, ptr);
3972 }
3973
3974 /* Store a 16-bit Thumb insn into an output section not processed by
3975 elf32_arm_write_section. */
3976
3977 static void
3978 put_thumb_insn (struct elf32_arm_link_hash_table * htab,
3979 bfd * output_bfd, bfd_vma val, void * ptr)
3980 {
3981 if (htab->byteswap_code != bfd_little_endian (output_bfd))
3982 bfd_putl16 (val, ptr);
3983 else
3984 bfd_putb16 (val, ptr);
3985 }
3986
3987 /* If it's possible to change R_TYPE to a more efficient access
3988 model, return the new reloc type. */
3989
3990 static unsigned
3991 elf32_arm_tls_transition (struct bfd_link_info *info, int r_type,
3992 struct elf_link_hash_entry *h)
3993 {
3994 int is_local = (h == NULL);
3995
3996 if (info->shared || (h && h->root.type == bfd_link_hash_undefweak))
3997 return r_type;
3998
3999 /* We do not support relaxations for Old TLS models. */
4000 switch (r_type)
4001 {
4002 case R_ARM_TLS_GOTDESC:
4003 case R_ARM_TLS_CALL:
4004 case R_ARM_THM_TLS_CALL:
4005 case R_ARM_TLS_DESCSEQ:
4006 case R_ARM_THM_TLS_DESCSEQ:
4007 return is_local ? R_ARM_TLS_LE32 : R_ARM_TLS_IE32;
4008 }
4009
4010 return r_type;
4011 }
4012
4013 static bfd_reloc_status_type elf32_arm_final_link_relocate
4014 (reloc_howto_type *, bfd *, bfd *, asection *, bfd_byte *,
4015 Elf_Internal_Rela *, bfd_vma, struct bfd_link_info *, asection *,
4016 const char *, unsigned char, enum arm_st_branch_type,
4017 struct elf_link_hash_entry *, bfd_boolean *, char **);
4018
4019 static unsigned int
4020 arm_stub_required_alignment (enum elf32_arm_stub_type stub_type)
4021 {
4022 switch (stub_type)
4023 {
4024 case arm_stub_a8_veneer_b_cond:
4025 case arm_stub_a8_veneer_b:
4026 case arm_stub_a8_veneer_bl:
4027 return 2;
4028
4029 case arm_stub_long_branch_any_any:
4030 case arm_stub_long_branch_v4t_arm_thumb:
4031 case arm_stub_long_branch_thumb_only:
4032 case arm_stub_long_branch_v4t_thumb_thumb:
4033 case arm_stub_long_branch_v4t_thumb_arm:
4034 case arm_stub_short_branch_v4t_thumb_arm:
4035 case arm_stub_long_branch_any_arm_pic:
4036 case arm_stub_long_branch_any_thumb_pic:
4037 case arm_stub_long_branch_v4t_thumb_thumb_pic:
4038 case arm_stub_long_branch_v4t_arm_thumb_pic:
4039 case arm_stub_long_branch_v4t_thumb_arm_pic:
4040 case arm_stub_long_branch_thumb_only_pic:
4041 case arm_stub_long_branch_any_tls_pic:
4042 case arm_stub_long_branch_v4t_thumb_tls_pic:
4043 case arm_stub_a8_veneer_blx:
4044 return 4;
4045
4046 default:
4047 abort (); /* Should be unreachable. */
4048 }
4049 }
4050
/* Build one stub, described by GEN_ENTRY, into its stub section:
   emit the template instructions and apply the relocations they
   carry.  Called via bfd_hash_traverse over the stub hash table;
   IN_ARG is really the struct bfd_link_info.  Returns TRUE on
   success (including the skip-this-pass case below), FALSE on
   failure.  */

static bfd_boolean
arm_build_one_stub (struct bfd_hash_entry *gen_entry,
		    void * in_arg)
{
#define MAXRELOCS 2
  struct elf32_arm_stub_hash_entry *stub_entry;
  struct elf32_arm_link_hash_table *globals;
  struct bfd_link_info *info;
  asection *stub_sec;
  bfd *stub_bfd;
  bfd_byte *loc;
  bfd_vma sym_value;
  int template_size;
  int size;
  const insn_sequence *template_sequence;
  int i;
  /* Indices into TEMPLATE_SEQUENCE of the entries needing relocation,
     and the byte offset of each within the stub.  */
  int stub_reloc_idx[MAXRELOCS] = {-1, -1};
  int stub_reloc_offset[MAXRELOCS] = {0, 0};
  int nrelocs = 0;

  /* Massage our args to the form they really have.  */
  stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
  info = (struct bfd_link_info *) in_arg;

  globals = elf32_arm_hash_table (info);
  if (globals == NULL)
    return FALSE;

  stub_sec = stub_entry->stub_sec;

  /* The traversal runs in two passes when the Cortex-A8 fix is
     enabled: skip stubs that do not belong to the current pass.  */
  if ((globals->fix_cortex_a8 < 0)
      != (arm_stub_required_alignment (stub_entry->stub_type) == 2))
    /* We have to do less-strictly-aligned fixes last.  */
    return TRUE;

  /* Make a note of the offset within the stubs for this entry.  */
  stub_entry->stub_offset = stub_sec->size;
  loc = stub_sec->contents + stub_entry->stub_offset;

  stub_bfd = stub_sec->owner;

  /* This is the address of the stub destination.  */
  sym_value = (stub_entry->target_value
	       + stub_entry->target_section->output_offset
	       + stub_entry->target_section->output_section->vma);

  template_sequence = stub_entry->stub_template;
  template_size = stub_entry->stub_template_size;

  /* Emit each template entry, recording which entries carry
     relocations to be applied below.  */
  size = 0;
  for (i = 0; i < template_size; i++)
    {
      switch (template_sequence[i].type)
	{
	case THUMB16_TYPE:
	  {
	    bfd_vma data = (bfd_vma) template_sequence[i].data;
	    if (template_sequence[i].reloc_addend != 0)
	      {
		/* We've borrowed the reloc_addend field to mean we should
		   insert a condition code into this (Thumb-1 branch)
		   instruction.  See THUMB16_BCOND_INSN.  */
		BFD_ASSERT ((data & 0xff00) == 0xd000);
		data |= ((stub_entry->orig_insn >> 22) & 0xf) << 8;
	      }
	    bfd_put_16 (stub_bfd, data, loc + size);
	    size += 2;
	  }
	  break;

	case THUMB32_TYPE:
	  /* A 32-bit Thumb insn is two 16-bit halfwords, most
	     significant halfword first.  */
	  bfd_put_16 (stub_bfd,
		      (template_sequence[i].data >> 16) & 0xffff,
		      loc + size);
	  bfd_put_16 (stub_bfd, template_sequence[i].data & 0xffff,
		      loc + size + 2);
	  if (template_sequence[i].r_type != R_ARM_NONE)
	    {
	      stub_reloc_idx[nrelocs] = i;
	      stub_reloc_offset[nrelocs++] = size;
	    }
	  size += 4;
	  break;

	case ARM_TYPE:
	  bfd_put_32 (stub_bfd, template_sequence[i].data,
		      loc + size);
	  /* Handle cases where the target is encoded within the
	     instruction.  */
	  if (template_sequence[i].r_type == R_ARM_JUMP24)
	    {
	      stub_reloc_idx[nrelocs] = i;
	      stub_reloc_offset[nrelocs++] = size;
	    }
	  size += 4;
	  break;

	case DATA_TYPE:
	  /* Literal-pool data always gets a relocation.  */
	  bfd_put_32 (stub_bfd, template_sequence[i].data, loc + size);
	  stub_reloc_idx[nrelocs] = i;
	  stub_reloc_offset[nrelocs++] = size;
	  size += 4;
	  break;

	default:
	  BFD_FAIL ();
	  return FALSE;
	}
    }

  stub_sec->size += size;

  /* Stub size has already been computed in arm_size_one_stub.  Check
     consistency.  */
  BFD_ASSERT (size == stub_entry->stub_size);

  /* Destination is Thumb.  Force bit 0 to 1 to reflect this.  */
  if (stub_entry->branch_type == ST_BRANCH_TO_THUMB)
    sym_value |= 1;

  /* Assume there is at least one and at most MAXRELOCS entries to relocate
     in each stub.  */
  BFD_ASSERT (nrelocs != 0 && nrelocs <= MAXRELOCS);

  /* Apply the recorded relocations.  Thumb branch relocations take the
     first arm of the if; everything else takes the else arm.  */
  for (i = 0; i < nrelocs; i++)
    if (template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_JUMP24
	|| template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_JUMP19
	|| template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_CALL
	|| template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_XPC22)
      {
	Elf_Internal_Rela rel;
	bfd_boolean unresolved_reloc;
	char *error_message;
	/* XPC22 switches to ARM state; the other three stay Thumb.  */
	enum arm_st_branch_type branch_type
	  = (template_sequence[stub_reloc_idx[i]].r_type != R_ARM_THM_XPC22
	     ? ST_BRANCH_TO_THUMB : ST_BRANCH_TO_ARM);
	bfd_vma points_to = sym_value + stub_entry->target_addend;

	rel.r_offset = stub_entry->stub_offset + stub_reloc_offset[i];
	rel.r_info = ELF32_R_INFO (0,
				   template_sequence[stub_reloc_idx[i]].r_type);
	rel.r_addend = template_sequence[stub_reloc_idx[i]].reloc_addend;

	if (stub_entry->stub_type == arm_stub_a8_veneer_b_cond && i == 0)
	  /* The first relocation in the elf32_arm_stub_a8_veneer_b_cond[]
	     template should refer back to the instruction after the original
	     branch.  */
	  points_to = sym_value;

	/* There may be unintended consequences if this is not true.  */
	BFD_ASSERT (stub_entry->h == NULL);

	/* Note: _bfd_final_link_relocate doesn't handle these relocations
	   properly.  We should probably use this function unconditionally,
	   rather than only for certain relocations listed in the enclosing
	   conditional, for the sake of consistency.  */
	elf32_arm_final_link_relocate (elf32_arm_howto_from_type
	    (template_sequence[stub_reloc_idx[i]].r_type),
	  stub_bfd, info->output_bfd, stub_sec, stub_sec->contents, &rel,
	  points_to, info, stub_entry->target_section, "", STT_FUNC,
	  branch_type, (struct elf_link_hash_entry *) stub_entry->h,
	  &unresolved_reloc, &error_message);
      }
    else
      {
	Elf_Internal_Rela rel;
	bfd_boolean unresolved_reloc;
	char *error_message;
	bfd_vma points_to = sym_value + stub_entry->target_addend
	  + template_sequence[stub_reloc_idx[i]].reloc_addend;

	rel.r_offset = stub_entry->stub_offset + stub_reloc_offset[i];
	rel.r_info = ELF32_R_INFO (0,
				   template_sequence[stub_reloc_idx[i]].r_type);
	rel.r_addend = 0;

	elf32_arm_final_link_relocate (elf32_arm_howto_from_type
	    (template_sequence[stub_reloc_idx[i]].r_type),
	  stub_bfd, info->output_bfd, stub_sec, stub_sec->contents, &rel,
	  points_to, info, stub_entry->target_section, "", STT_FUNC,
	  stub_entry->branch_type,
	  (struct elf_link_hash_entry *) stub_entry->h, &unresolved_reloc,
	  &error_message);
      }

  return TRUE;
#undef MAXRELOCS
}
4239
4240 /* Calculate the template, template size and instruction size for a stub.
4241 Return value is the instruction size. */
4242
4243 static unsigned int
4244 find_stub_size_and_template (enum elf32_arm_stub_type stub_type,
4245 const insn_sequence **stub_template,
4246 int *stub_template_size)
4247 {
4248 const insn_sequence *template_sequence = NULL;
4249 int template_size = 0, i;
4250 unsigned int size;
4251
4252 template_sequence = stub_definitions[stub_type].template_sequence;
4253 if (stub_template)
4254 *stub_template = template_sequence;
4255
4256 template_size = stub_definitions[stub_type].template_size;
4257 if (stub_template_size)
4258 *stub_template_size = template_size;
4259
4260 size = 0;
4261 for (i = 0; i < template_size; i++)
4262 {
4263 switch (template_sequence[i].type)
4264 {
4265 case THUMB16_TYPE:
4266 size += 2;
4267 break;
4268
4269 case ARM_TYPE:
4270 case THUMB32_TYPE:
4271 case DATA_TYPE:
4272 size += 4;
4273 break;
4274
4275 default:
4276 BFD_FAIL ();
4277 return 0;
4278 }
4279 }
4280
4281 return size;
4282 }
4283
4284 /* As above, but don't actually build the stub. Just bump offset so
4285 we know stub section sizes. */
4286
4287 static bfd_boolean
4288 arm_size_one_stub (struct bfd_hash_entry *gen_entry,
4289 void *in_arg ATTRIBUTE_UNUSED)
4290 {
4291 struct elf32_arm_stub_hash_entry *stub_entry;
4292 const insn_sequence *template_sequence;
4293 int template_size, size;
4294
4295 /* Massage our args to the form they really have. */
4296 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
4297
4298 BFD_ASSERT((stub_entry->stub_type > arm_stub_none)
4299 && stub_entry->stub_type < ARRAY_SIZE(stub_definitions));
4300
4301 size = find_stub_size_and_template (stub_entry->stub_type, &template_sequence,
4302 &template_size);
4303
4304 stub_entry->stub_size = size;
4305 stub_entry->stub_template = template_sequence;
4306 stub_entry->stub_template_size = template_size;
4307
4308 size = (size + 7) & ~7;
4309 stub_entry->stub_sec->size += size;
4310
4311 return TRUE;
4312 }
4313
/* External entry points for sizing and building linker stubs.  */

/* Set up various things so that we can make a list of input sections
   for each output section included in the link.  Returns -1 on error,
   0 when no stubs will be needed, and 1 on success.  */

int
elf32_arm_setup_section_lists (bfd *output_bfd,
			       struct bfd_link_info *info)
{
  bfd *input_bfd;
  unsigned int bfd_count;
  int top_id, top_index;
  asection *section;
  asection **input_list, **list;
  bfd_size_type amt;
  struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);

  if (htab == NULL)
    return 0;
  if (! is_elf_hash_table (htab))
    return 0;

  /* Count the number of input BFDs and find the top input section id.  */
  for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0;
       input_bfd != NULL;
       input_bfd = input_bfd->link_next)
    {
      bfd_count += 1;
      for (section = input_bfd->sections;
	   section != NULL;
	   section = section->next)
	{
	  if (top_id < section->id)
	    top_id = section->id;
	}
    }
  htab->bfd_count = bfd_count;

  /* Allocate the stub-group map, indexed by input section id
     (zeroed, so every link_sec/stub_sec starts out NULL).  */
  amt = sizeof (struct map_stub) * (top_id + 1);
  htab->stub_group = (struct map_stub *) bfd_zmalloc (amt);
  if (htab->stub_group == NULL)
    return -1;
  htab->top_id = top_id;

  /* We can't use output_bfd->section_count here to find the top output
     section index as some sections may have been removed, and
     _bfd_strip_section_from_output doesn't renumber the indices.  */
  for (section = output_bfd->sections, top_index = 0;
       section != NULL;
       section = section->next)
    {
      if (top_index < section->index)
	top_index = section->index;
    }

  htab->top_index = top_index;
  amt = sizeof (asection *) * (top_index + 1);
  input_list = (asection **) bfd_malloc (amt);
  htab->input_list = input_list;
  if (input_list == NULL)
    return -1;

  /* For sections we aren't interested in, mark their entries with a
     value we can check later.  */
  list = input_list + top_index;
  do
    *list = bfd_abs_section_ptr;
  while (list-- != input_list);

  /* Code output sections get a NULL marker instead: these are the
     slots elf32_arm_next_input_section will fill in.  */
  for (section = output_bfd->sections;
       section != NULL;
       section = section->next)
    {
      if ((section->flags & SEC_CODE) != 0)
	input_list[section->index] = NULL;
    }

  return 1;
}
4394
/* The linker repeatedly calls this function for each input section,
   in the order that input sections are linked into output sections.
   Build lists of input sections to determine groupings between which
   we may insert linker stubs.  */

void
elf32_arm_next_input_section (struct bfd_link_info *info,
			      asection *isec)
{
  struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);

  if (htab == NULL)
    return;

  if (isec->output_section->index <= htab->top_index)
    {
      asection **list = htab->input_list + isec->output_section->index;

      /* *list is bfd_abs_section_ptr for non-code output sections
	 (set up by elf32_arm_setup_section_lists); only code
	 sections participate in stub grouping.  */
      if (*list != bfd_abs_section_ptr && (isec->flags & SEC_CODE) != 0)
	{
	  /* Steal the link_sec pointer for our list.  */
#define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
	  /* This happens to make the list in reverse order,
	     which we reverse later.  */
	  PREV_SEC (isec) = *list;
	  *list = isec;
	}
    }
}
4424
/* See whether we can group stub sections together.  Grouping stub
   sections may result in fewer stubs.  More importantly, we need to
   put all .init* and .fini* stubs at the end of the .init or
   .fini output sections respectively, because glibc splits the
   _init and _fini functions into multiple parts.  Putting a stub in
   the middle of a function is not a good idea.  */

static void
group_sections (struct elf32_arm_link_hash_table *htab,
		bfd_size_type stub_group_size,
		bfd_boolean stubs_always_after_branch)
{
  asection **list = htab->input_list;

  /* Walk every slot of input_list (one per output section index).  */
  do
    {
      asection *tail = *list;
      asection *head;

      /* bfd_abs_section_ptr marks non-code output sections: skip.  */
      if (tail == bfd_abs_section_ptr)
	continue;

      /* Reverse the list: we must avoid placing stubs at the
	 beginning of the section because the beginning of the text
	 section may be required for an interrupt vector in bare metal
	 code.  */
#define NEXT_SEC PREV_SEC
      head = NULL;
      while (tail != NULL)
	{
	  /* Pop from tail.  */
	  asection *item = tail;
	  tail = PREV_SEC (item);

	  /* Push on head.  */
	  NEXT_SEC (item) = head;
	  head = item;
	}

      /* Now partition the (forward-ordered) list into groups, each at
	 most stub_group_size bytes from its start, and point every
	 member's link_sec at the group's last section (where the stub
	 section will be attached).  */
      while (head != NULL)
	{
	  asection *curr;
	  asection *next;
	  bfd_vma stub_group_start = head->output_offset;
	  bfd_vma end_of_next;

	  curr = head;
	  while (NEXT_SEC (curr) != NULL)
	    {
	      next = NEXT_SEC (curr);
	      end_of_next = next->output_offset + next->size;
	      if (end_of_next - stub_group_start >= stub_group_size)
		/* End of NEXT is too far from start, so stop.  */
		break;
	      /* Add NEXT to the group.  */
	      curr = next;
	    }

	  /* OK, the size from the start to the start of CURR is less
	     than stub_group_size and thus can be handled by one stub
	     section.  (Or the head section is itself larger than
	     stub_group_size, in which case we may be toast.)
	     We should really be keeping track of the total size of
	     stubs added here, as stubs contribute to the final output
	     section size.  */
	  do
	    {
	      next = NEXT_SEC (head);
	      /* Set up this stub group.  */
	      htab->stub_group[head->id].link_sec = curr;
	    }
	  while (head != curr && (head = next) != NULL);

	  /* But wait, there's more!  Input sections up to stub_group_size
	     bytes after the stub section can be handled by it too.  */
	  if (!stubs_always_after_branch)
	    {
	      stub_group_start = curr->output_offset + curr->size;

	      while (next != NULL)
		{
		  end_of_next = next->output_offset + next->size;
		  if (end_of_next - stub_group_start >= stub_group_size)
		    /* End of NEXT is too far from stubs, so stop.  */
		    break;
		  /* Add NEXT to the stub group.  */
		  head = next;
		  next = NEXT_SEC (head);
		  htab->stub_group[head->id].link_sec = curr;
		}
	    }
	  head = next;
	}
    }
  while (list++ != htab->input_list + htab->top_index);

  free (htab->input_list);
#undef PREV_SEC
#undef NEXT_SEC
}
4525
4526 /* Comparison function for sorting/searching relocations relating to Cortex-A8
4527 erratum fix. */
4528
4529 static int
4530 a8_reloc_compare (const void *a, const void *b)
4531 {
4532 const struct a8_erratum_reloc *ra = (const struct a8_erratum_reloc *) a;
4533 const struct a8_erratum_reloc *rb = (const struct a8_erratum_reloc *) b;
4534
4535 if (ra->from < rb->from)
4536 return -1;
4537 else if (ra->from > rb->from)
4538 return 1;
4539 else
4540 return 0;
4541 }
4542
4543 static struct elf_link_hash_entry *find_thumb_glue (struct bfd_link_info *,
4544 const char *, char **);
4545
/* Helper function to scan code for sequences which might trigger the Cortex-A8
   branch/TLB erratum.  Fill in the table described by A8_FIXES_P,
   NUM_A8_FIXES_P, A8_FIX_TABLE_SIZE_P.  Returns true if an error occurs, false
   otherwise.  */

static bfd_boolean
cortex_a8_erratum_scan (bfd *input_bfd,
			struct bfd_link_info *info,
			struct a8_erratum_fix **a8_fixes_p,
			unsigned int *num_a8_fixes_p,
			unsigned int *a8_fix_table_size_p,
			struct a8_erratum_reloc *a8_relocs,
			unsigned int num_a8_relocs,
			unsigned prev_num_a8_fixes,
			bfd_boolean *stub_changed_p)
{
  asection *section;
  struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
  struct a8_erratum_fix *a8_fixes = *a8_fixes_p;
  unsigned int num_a8_fixes = *num_a8_fixes_p;
  unsigned int a8_fix_table_size = *a8_fix_table_size_p;

  if (htab == NULL)
    return FALSE;

  for (section = input_bfd->sections;
       section != NULL;
       section = section->next)
    {
      bfd_byte *contents = NULL;
      struct _arm_elf_section_data *sec_data;
      unsigned int span;
      bfd_vma base_vma;

      /* Only executable, kept, allocated code sections are relevant.  */
      if (elf_section_type (section) != SHT_PROGBITS
	  || (elf_section_flags (section) & SHF_EXECINSTR) == 0
	  || (section->flags & SEC_EXCLUDE) != 0
	  || (section->sec_info_type == SEC_INFO_TYPE_JUST_SYMS)
	  || (section->output_section == bfd_abs_section_ptr))
	continue;

      base_vma = section->output_section->vma + section->output_offset;

      if (elf_section_data (section)->this_hdr.contents != NULL)
	contents = elf_section_data (section)->this_hdr.contents;
      else if (! bfd_malloc_and_get_section (input_bfd, section, &contents))
	return TRUE;

      sec_data = elf32_arm_section_data (section);

      /* Walk the mapping-symbol spans; only Thumb code spans ('t')
	 can contain the offending 32-bit Thumb-2 branches.  */
      for (span = 0; span < sec_data->mapcount; span++)
	{
	  unsigned int span_start = sec_data->map[span].vma;
	  unsigned int span_end = (span == sec_data->mapcount - 1)
	    ? section->size : sec_data->map[span + 1].vma;
	  unsigned int i;
	  char span_type = sec_data->map[span].type;
	  bfd_boolean last_was_32bit = FALSE, last_was_branch = FALSE;

	  if (span_type != 't')
	    continue;

	  /* Span is entirely within a single 4KB region: skip scanning.  */
	  if (((base_vma + span_start) & ~0xfff)
	      == ((base_vma + span_end) & ~0xfff))
	    continue;

	  /* Scan for 32-bit Thumb-2 branches which span two 4K regions, where:

	     * The opcode is BLX.W, BL.W, B.W, Bcc.W
	     * The branch target is in the same 4KB region as the
	       first half of the branch.
	     * The instruction before the branch is a 32-bit
	       length non-branch instruction.  */
	  for (i = span_start; i < span_end;)
	    {
	      unsigned int insn = bfd_getl16 (&contents[i]);
	      bfd_boolean insn_32bit = FALSE, is_blx = FALSE, is_b = FALSE;
	      bfd_boolean is_bl = FALSE, is_bcc = FALSE, is_32bit_branch;

	      /* First halfword 0b111xx... with xx != 00 marks a 32-bit
		 Thumb-2 instruction.  */
	      if ((insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000)
		insn_32bit = TRUE;

	      if (insn_32bit)
		{
		  /* Load the rest of the insn (in manual-friendly order).  */
		  insn = (insn << 16) | bfd_getl16 (&contents[i + 2]);

		  /* Encoding T4: B<c>.W.  */
		  is_b = (insn & 0xf800d000) == 0xf0009000;
		  /* Encoding T1: BL<c>.W.  */
		  is_bl = (insn & 0xf800d000) == 0xf000d000;
		  /* Encoding T2: BLX<c>.W.  */
		  is_blx = (insn & 0xf800d000) == 0xf000c000;
		  /* Encoding T3: B<c>.W (not permitted in IT block).  */
		  is_bcc = (insn & 0xf800d000) == 0xf0008000
			   && (insn & 0x07f00000) != 0x03800000;
		}

	      is_32bit_branch = is_b || is_bl || is_blx || is_bcc;

	      /* The erratum pattern: a 32-bit branch whose first
		 halfword ends a 4KB page, preceded by a 32-bit
		 non-branch instruction.  */
	      if (((base_vma + i) & 0xfff) == 0xffe
		  && insn_32bit
		  && is_32bit_branch
		  && last_was_32bit
		  && ! last_was_branch)
		{
		  bfd_signed_vma offset = 0;
		  bfd_boolean force_target_arm = FALSE;
		  bfd_boolean force_target_thumb = FALSE;
		  bfd_vma target;
		  enum elf32_arm_stub_type stub_type = arm_stub_none;
		  struct a8_erratum_reloc key, *found;
		  bfd_boolean use_plt = FALSE;

		  /* Look up a relocation on this branch, if any.  */
		  key.from = base_vma + i;
		  found = (struct a8_erratum_reloc *)
		      bsearch (&key, a8_relocs, num_a8_relocs,
			       sizeof (struct a8_erratum_reloc),
			       &a8_reloc_compare);

		  if (found)
		    {
		      char *error_message = NULL;
		      struct elf_link_hash_entry *entry;

		      /* We don't care about the error returned from this
			 function, only if there is glue or not.  */
		      entry = find_thumb_glue (info, found->sym_name,
					       &error_message);

		      if (entry)
			found->non_a8_stub = TRUE;

		      /* Keep a simpler condition, for the sake of clarity.  */
		      if (htab->root.splt != NULL && found->hash != NULL
			  && found->hash->root.plt.offset != (bfd_vma) -1)
			use_plt = TRUE;

		      if (found->r_type == R_ARM_THM_CALL)
			{
			  if (found->branch_type == ST_BRANCH_TO_ARM
			      || use_plt)
			    force_target_arm = TRUE;
			  else
			    force_target_thumb = TRUE;
			}
		    }

		  /* Check if we have an offending branch instruction.  */

		  if (found && found->non_a8_stub)
		    /* We've already made a stub for this instruction, e.g.
		       it's a long branch or a Thumb->ARM stub.  Assume that
		       stub will suffice to work around the A8 erratum (see
		       setting of always_after_branch above).  */
		    ;
		  else if (is_bcc)
		    {
		      /* Decode the Bcc.W (encoding T3) branch offset and
			 sign-extend from bit 20.  */
		      offset = (insn & 0x7ff) << 1;
		      offset |= (insn & 0x3f0000) >> 4;
		      offset |= (insn & 0x2000) ? 0x40000 : 0;
		      offset |= (insn & 0x800) ? 0x80000 : 0;
		      offset |= (insn & 0x4000000) ? 0x100000 : 0;
		      if (offset & 0x100000)
			offset |= ~ ((bfd_signed_vma) 0xfffff);
		      stub_type = arm_stub_a8_veneer_b_cond;
		    }
		  else if (is_b || is_bl || is_blx)
		    {
		      /* Decode the B/BL/BLX (encoding T4/T1/T2) offset:
			 I1 = NOT(J1 XOR S), I2 = NOT(J2 XOR S); then
			 sign-extend from bit 24.  */
		      int s = (insn & 0x4000000) != 0;
		      int j1 = (insn & 0x2000) != 0;
		      int j2 = (insn & 0x800) != 0;
		      int i1 = !(j1 ^ s);
		      int i2 = !(j2 ^ s);

		      offset = (insn & 0x7ff) << 1;
		      offset |= (insn & 0x3ff0000) >> 4;
		      offset |= i2 << 22;
		      offset |= i1 << 23;
		      offset |= s << 24;
		      if (offset & 0x1000000)
			offset |= ~ ((bfd_signed_vma) 0xffffff);

		      if (is_blx)
			offset &= ~ ((bfd_signed_vma) 3);

		      stub_type = is_blx ? arm_stub_a8_veneer_blx :
			is_bl ? arm_stub_a8_veneer_bl : arm_stub_a8_veneer_b;
		    }

		  if (stub_type != arm_stub_none)
		    {
		      bfd_vma pc_for_insn = base_vma + i + 4;

		      /* The original instruction is a BL, but the target is
			 an ARM instruction.  If we were not making a stub,
			 the BL would have been converted to a BLX.  Use the
			 BLX stub instead in that case.  */
		      if (htab->use_blx && force_target_arm
			  && stub_type == arm_stub_a8_veneer_bl)
			{
			  stub_type = arm_stub_a8_veneer_blx;
			  is_blx = TRUE;
			  is_bl = FALSE;
			}
		      /* Conversely, if the original instruction was
			 BLX but the target is Thumb mode, use the BL
			 stub.  */
		      else if (force_target_thumb
			       && stub_type == arm_stub_a8_veneer_blx)
			{
			  stub_type = arm_stub_a8_veneer_bl;
			  is_blx = FALSE;
			  is_bl = TRUE;
			}

		      if (is_blx)
			pc_for_insn &= ~ ((bfd_vma) 3);

		      /* If we found a relocation, use the proper destination,
			 not the offset in the (unrelocated) instruction.
			 Note this is always done if we switched the stub type
			 above.  */
		      if (found)
			offset =
			  (bfd_signed_vma) (found->destination - pc_for_insn);

		      /* If the stub will use a Thumb-mode branch to a
			 PLT target, redirect it to the preceding Thumb
			 entry point.  */
		      if (stub_type != arm_stub_a8_veneer_blx && use_plt)
			offset -= PLT_THUMB_STUB_SIZE;

		      target = pc_for_insn + offset;

		      /* The BLX stub is ARM-mode code.  Adjust the offset to
			 take the different PC value (+8 instead of +4) into
			 account.  */
		      if (stub_type == arm_stub_a8_veneer_blx)
			offset += 4;

		      /* Only a branch back into the same 4KB page as the
			 branch itself triggers the erratum.  */
		      if (((base_vma + i) & ~0xfff) == (target & ~0xfff))
			{
			  char *stub_name = NULL;

			  if (num_a8_fixes == a8_fix_table_size)
			    {
			      /* NOTE(review): the result of bfd_realloc is
				 not checked here; on allocation failure
				 A8_FIXES becomes NULL and is dereferenced
				 below.  Worth fixing separately.  */
			      a8_fix_table_size *= 2;
			      a8_fixes = (struct a8_erratum_fix *)
				  bfd_realloc (a8_fixes,
					       sizeof (struct a8_erratum_fix)
					       * a8_fix_table_size);
			    }

			  if (num_a8_fixes < prev_num_a8_fixes)
			    {
			      /* If we're doing a subsequent scan,
				 check if we've found the same fix as
				 before, and try and reuse the stub
				 name.  */
			      stub_name = a8_fixes[num_a8_fixes].stub_name;
			      if ((a8_fixes[num_a8_fixes].section != section)
				  || (a8_fixes[num_a8_fixes].offset != i))
				{
				  free (stub_name);
				  stub_name = NULL;
				  *stub_changed_p = TRUE;
				}
			    }

			  if (!stub_name)
			    {
			      /* "sectionid:offset", each at most 8 hex
				 digits plus ':' and NUL.  */
			      stub_name = (char *) bfd_malloc (8 + 1 + 8 + 1);
			      if (stub_name != NULL)
				sprintf (stub_name, "%x:%x", section->id, i);
			    }

			  a8_fixes[num_a8_fixes].input_bfd = input_bfd;
			  a8_fixes[num_a8_fixes].section = section;
			  a8_fixes[num_a8_fixes].offset = i;
			  a8_fixes[num_a8_fixes].addend = offset;
			  a8_fixes[num_a8_fixes].orig_insn = insn;
			  a8_fixes[num_a8_fixes].stub_name = stub_name;
			  a8_fixes[num_a8_fixes].stub_type = stub_type;
			  a8_fixes[num_a8_fixes].branch_type =
			    is_blx ? ST_BRANCH_TO_ARM : ST_BRANCH_TO_THUMB;

			  num_a8_fixes++;
			}
		    }
		}

	      i += insn_32bit ? 4 : 2;
	      last_was_32bit = insn_32bit;
	      last_was_branch = is_32bit_branch;
	    }
	}

      /* Free the contents only if we allocated them ourselves above.  */
      if (elf_section_data (section)->this_hdr.contents == NULL)
	free (contents);
    }

  /* Write back the (possibly reallocated) table to the caller.  */
  *a8_fixes_p = a8_fixes;
  *num_a8_fixes_p = num_a8_fixes;
  *a8_fix_table_size_p = a8_fix_table_size;

  return FALSE;
}
4855
4856 /* Determine and set the size of the stub section for a final link.
4857
4858 The basic idea here is to examine all the relocations looking for
4859 PC-relative calls to a target that is unreachable with a "bl"
4860 instruction. */
4861
4862 bfd_boolean
4863 elf32_arm_size_stubs (bfd *output_bfd,
4864 bfd *stub_bfd,
4865 struct bfd_link_info *info,
4866 bfd_signed_vma group_size,
4867 asection * (*add_stub_section) (const char *, asection *),
4868 void (*layout_sections_again) (void))
4869 {
4870 bfd_size_type stub_group_size;
4871 bfd_boolean stubs_always_after_branch;
4872 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
4873 struct a8_erratum_fix *a8_fixes = NULL;
4874 unsigned int num_a8_fixes = 0, a8_fix_table_size = 10;
4875 struct a8_erratum_reloc *a8_relocs = NULL;
4876 unsigned int num_a8_relocs = 0, a8_reloc_table_size = 10, i;
4877
4878 if (htab == NULL)
4879 return FALSE;
4880
4881 if (htab->fix_cortex_a8)
4882 {
4883 a8_fixes = (struct a8_erratum_fix *)
4884 bfd_zmalloc (sizeof (struct a8_erratum_fix) * a8_fix_table_size);
4885 a8_relocs = (struct a8_erratum_reloc *)
4886 bfd_zmalloc (sizeof (struct a8_erratum_reloc) * a8_reloc_table_size);
4887 }
4888
4889 /* Propagate mach to stub bfd, because it may not have been
4890 finalized when we created stub_bfd. */
4891 bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
4892 bfd_get_mach (output_bfd));
4893
4894 /* Stash our params away. */
4895 htab->stub_bfd = stub_bfd;
4896 htab->add_stub_section = add_stub_section;
4897 htab->layout_sections_again = layout_sections_again;
4898 stubs_always_after_branch = group_size < 0;
4899
4900 /* The Cortex-A8 erratum fix depends on stubs not being in the same 4K page
4901 as the first half of a 32-bit branch straddling two 4K pages. This is a
4902 crude way of enforcing that. */
4903 if (htab->fix_cortex_a8)
4904 stubs_always_after_branch = 1;
4905
4906 if (group_size < 0)
4907 stub_group_size = -group_size;
4908 else
4909 stub_group_size = group_size;
4910
4911 if (stub_group_size == 1)
4912 {
4913 /* Default values. */
4914 /* Thumb branch range is +-4MB has to be used as the default
4915 maximum size (a given section can contain both ARM and Thumb
4916 code, so the worst case has to be taken into account).
4917
4918 This value is 24K less than that, which allows for 2025
4919 12-byte stubs. If we exceed that, then we will fail to link.
4920 The user will have to relink with an explicit group size
4921 option. */
4922 stub_group_size = 4170000;
4923 }
4924
4925 group_sections (htab, stub_group_size, stubs_always_after_branch);
4926
4927 /* If we're applying the cortex A8 fix, we need to determine the
4928 program header size now, because we cannot change it later --
4929 that could alter section placements. Notice the A8 erratum fix
4930 ends up requiring the section addresses to remain unchanged
4931 modulo the page size. That's something we cannot represent
4932 inside BFD, and we don't want to force the section alignment to
4933 be the page size. */
4934 if (htab->fix_cortex_a8)
4935 (*htab->layout_sections_again) ();
4936
4937 while (1)
4938 {
4939 bfd *input_bfd;
4940 unsigned int bfd_indx;
4941 asection *stub_sec;
4942 bfd_boolean stub_changed = FALSE;
4943 unsigned prev_num_a8_fixes = num_a8_fixes;
4944
4945 num_a8_fixes = 0;
4946 for (input_bfd = info->input_bfds, bfd_indx = 0;
4947 input_bfd != NULL;
4948 input_bfd = input_bfd->link_next, bfd_indx++)
4949 {
4950 Elf_Internal_Shdr *symtab_hdr;
4951 asection *section;
4952 Elf_Internal_Sym *local_syms = NULL;
4953
4954 if (!is_arm_elf (input_bfd))
4955 continue;
4956
4957 num_a8_relocs = 0;
4958
4959 /* We'll need the symbol table in a second. */
4960 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
4961 if (symtab_hdr->sh_info == 0)
4962 continue;
4963
4964 /* Walk over each section attached to the input bfd. */
4965 for (section = input_bfd->sections;
4966 section != NULL;
4967 section = section->next)
4968 {
4969 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
4970
4971 /* If there aren't any relocs, then there's nothing more
4972 to do. */
4973 if ((section->flags & SEC_RELOC) == 0
4974 || section->reloc_count == 0
4975 || (section->flags & SEC_CODE) == 0)
4976 continue;
4977
4978 /* If this section is a link-once section that will be
4979 discarded, then don't create any stubs. */
4980 if (section->output_section == NULL
4981 || section->output_section->owner != output_bfd)
4982 continue;
4983
4984 /* Get the relocs. */
4985 internal_relocs
4986 = _bfd_elf_link_read_relocs (input_bfd, section, NULL,
4987 NULL, info->keep_memory);
4988 if (internal_relocs == NULL)
4989 goto error_ret_free_local;
4990
4991 /* Now examine each relocation. */
4992 irela = internal_relocs;
4993 irelaend = irela + section->reloc_count;
4994 for (; irela < irelaend; irela++)
4995 {
4996 unsigned int r_type, r_indx;
4997 enum elf32_arm_stub_type stub_type;
4998 struct elf32_arm_stub_hash_entry *stub_entry;
4999 asection *sym_sec;
5000 bfd_vma sym_value;
5001 bfd_vma destination;
5002 struct elf32_arm_link_hash_entry *hash;
5003 const char *sym_name;
5004 char *stub_name;
5005 const asection *id_sec;
5006 unsigned char st_type;
5007 enum arm_st_branch_type branch_type;
5008 bfd_boolean created_stub = FALSE;
5009
5010 r_type = ELF32_R_TYPE (irela->r_info);
5011 r_indx = ELF32_R_SYM (irela->r_info);
5012
5013 if (r_type >= (unsigned int) R_ARM_max)
5014 {
5015 bfd_set_error (bfd_error_bad_value);
5016 error_ret_free_internal:
5017 if (elf_section_data (section)->relocs == NULL)
5018 free (internal_relocs);
5019 goto error_ret_free_local;
5020 }
5021
5022 hash = NULL;
5023 if (r_indx >= symtab_hdr->sh_info)
5024 hash = elf32_arm_hash_entry
5025 (elf_sym_hashes (input_bfd)
5026 [r_indx - symtab_hdr->sh_info]);
5027
5028 /* Only look for stubs on branch instructions, or
5029 non-relaxed TLSCALL */
5030 if ((r_type != (unsigned int) R_ARM_CALL)
5031 && (r_type != (unsigned int) R_ARM_THM_CALL)
5032 && (r_type != (unsigned int) R_ARM_JUMP24)
5033 && (r_type != (unsigned int) R_ARM_THM_JUMP19)
5034 && (r_type != (unsigned int) R_ARM_THM_XPC22)
5035 && (r_type != (unsigned int) R_ARM_THM_JUMP24)
5036 && (r_type != (unsigned int) R_ARM_PLT32)
5037 && !((r_type == (unsigned int) R_ARM_TLS_CALL
5038 || r_type == (unsigned int) R_ARM_THM_TLS_CALL)
5039 && r_type == elf32_arm_tls_transition
5040 (info, r_type, &hash->root)
5041 && ((hash ? hash->tls_type
5042 : (elf32_arm_local_got_tls_type
5043 (input_bfd)[r_indx]))
5044 & GOT_TLS_GDESC) != 0))
5045 continue;
5046
5047 /* Now determine the call target, its name, value,
5048 section. */
5049 sym_sec = NULL;
5050 sym_value = 0;
5051 destination = 0;
5052 sym_name = NULL;
5053
5054 if (r_type == (unsigned int) R_ARM_TLS_CALL
5055 || r_type == (unsigned int) R_ARM_THM_TLS_CALL)
5056 {
5057 /* A non-relaxed TLS call. The target is the
5058 plt-resident trampoline and nothing to do
5059 with the symbol. */
5060 BFD_ASSERT (htab->tls_trampoline > 0);
5061 sym_sec = htab->root.splt;
5062 sym_value = htab->tls_trampoline;
5063 hash = 0;
5064 st_type = STT_FUNC;
5065 branch_type = ST_BRANCH_TO_ARM;
5066 }
5067 else if (!hash)
5068 {
5069 /* It's a local symbol. */
5070 Elf_Internal_Sym *sym;
5071
5072 if (local_syms == NULL)
5073 {
5074 local_syms
5075 = (Elf_Internal_Sym *) symtab_hdr->contents;
5076 if (local_syms == NULL)
5077 local_syms
5078 = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
5079 symtab_hdr->sh_info, 0,
5080 NULL, NULL, NULL);
5081 if (local_syms == NULL)
5082 goto error_ret_free_internal;
5083 }
5084
5085 sym = local_syms + r_indx;
5086 if (sym->st_shndx == SHN_UNDEF)
5087 sym_sec = bfd_und_section_ptr;
5088 else if (sym->st_shndx == SHN_ABS)
5089 sym_sec = bfd_abs_section_ptr;
5090 else if (sym->st_shndx == SHN_COMMON)
5091 sym_sec = bfd_com_section_ptr;
5092 else
5093 sym_sec =
5094 bfd_section_from_elf_index (input_bfd, sym->st_shndx);
5095
5096 if (!sym_sec)
5097 /* This is an undefined symbol. It can never
5098 be resolved. */
5099 continue;
5100
5101 if (ELF_ST_TYPE (sym->st_info) != STT_SECTION)
5102 sym_value = sym->st_value;
5103 destination = (sym_value + irela->r_addend
5104 + sym_sec->output_offset
5105 + sym_sec->output_section->vma);
5106 st_type = ELF_ST_TYPE (sym->st_info);
5107 branch_type = ARM_SYM_BRANCH_TYPE (sym);
5108 sym_name
5109 = bfd_elf_string_from_elf_section (input_bfd,
5110 symtab_hdr->sh_link,
5111 sym->st_name);
5112 }
5113 else
5114 {
5115 /* It's an external symbol. */
5116 while (hash->root.root.type == bfd_link_hash_indirect
5117 || hash->root.root.type == bfd_link_hash_warning)
5118 hash = ((struct elf32_arm_link_hash_entry *)
5119 hash->root.root.u.i.link);
5120
5121 if (hash->root.root.type == bfd_link_hash_defined
5122 || hash->root.root.type == bfd_link_hash_defweak)
5123 {
5124 sym_sec = hash->root.root.u.def.section;
5125 sym_value = hash->root.root.u.def.value;
5126
5127 struct elf32_arm_link_hash_table *globals =
5128 elf32_arm_hash_table (info);
5129
5130 /* For a destination in a shared library,
5131 use the PLT stub as target address to
5132 decide whether a branch stub is
5133 needed. */
5134 if (globals != NULL
5135 && globals->root.splt != NULL
5136 && hash != NULL
5137 && hash->root.plt.offset != (bfd_vma) -1)
5138 {
5139 sym_sec = globals->root.splt;
5140 sym_value = hash->root.plt.offset;
5141 if (sym_sec->output_section != NULL)
5142 destination = (sym_value
5143 + sym_sec->output_offset
5144 + sym_sec->output_section->vma);
5145 }
5146 else if (sym_sec->output_section != NULL)
5147 destination = (sym_value + irela->r_addend
5148 + sym_sec->output_offset
5149 + sym_sec->output_section->vma);
5150 }
5151 else if ((hash->root.root.type == bfd_link_hash_undefined)
5152 || (hash->root.root.type == bfd_link_hash_undefweak))
5153 {
5154 /* For a shared library, use the PLT stub as
5155 target address to decide whether a long
5156 branch stub is needed.
5157 For absolute code, they cannot be handled. */
5158 struct elf32_arm_link_hash_table *globals =
5159 elf32_arm_hash_table (info);
5160
5161 if (globals != NULL
5162 && globals->root.splt != NULL
5163 && hash != NULL
5164 && hash->root.plt.offset != (bfd_vma) -1)
5165 {
5166 sym_sec = globals->root.splt;
5167 sym_value = hash->root.plt.offset;
5168 if (sym_sec->output_section != NULL)
5169 destination = (sym_value
5170 + sym_sec->output_offset
5171 + sym_sec->output_section->vma);
5172 }
5173 else
5174 continue;
5175 }
5176 else
5177 {
5178 bfd_set_error (bfd_error_bad_value);
5179 goto error_ret_free_internal;
5180 }
5181 st_type = hash->root.type;
5182 branch_type = hash->root.target_internal;
5183 sym_name = hash->root.root.root.string;
5184 }
5185
5186 do
5187 {
5188 /* Determine what (if any) linker stub is needed. */
5189 stub_type = arm_type_of_stub (info, section, irela,
5190 st_type, &branch_type,
5191 hash, destination, sym_sec,
5192 input_bfd, sym_name);
5193 if (stub_type == arm_stub_none)
5194 break;
5195
5196 /* Support for grouping stub sections. */
5197 id_sec = htab->stub_group[section->id].link_sec;
5198
5199 /* Get the name of this stub. */
5200 stub_name = elf32_arm_stub_name (id_sec, sym_sec, hash,
5201 irela, stub_type);
5202 if (!stub_name)
5203 goto error_ret_free_internal;
5204
5205 /* We've either created a stub for this reloc already,
5206 or we are about to. */
5207 created_stub = TRUE;
5208
5209 stub_entry = arm_stub_hash_lookup
5210 (&htab->stub_hash_table, stub_name,
5211 FALSE, FALSE);
5212 if (stub_entry != NULL)
5213 {
5214 /* The proper stub has already been created. */
5215 free (stub_name);
5216 stub_entry->target_value = sym_value;
5217 break;
5218 }
5219
5220 stub_entry = elf32_arm_add_stub (stub_name, section,
5221 htab);
5222 if (stub_entry == NULL)
5223 {
5224 free (stub_name);
5225 goto error_ret_free_internal;
5226 }
5227
5228 stub_entry->target_value = sym_value;
5229 stub_entry->target_section = sym_sec;
5230 stub_entry->stub_type = stub_type;
5231 stub_entry->h = hash;
5232 stub_entry->branch_type = branch_type;
5233
5234 if (sym_name == NULL)
5235 sym_name = "unnamed";
5236 stub_entry->output_name = (char *)
5237 bfd_alloc (htab->stub_bfd,
5238 sizeof (THUMB2ARM_GLUE_ENTRY_NAME)
5239 + strlen (sym_name));
5240 if (stub_entry->output_name == NULL)
5241 {
5242 free (stub_name);
5243 goto error_ret_free_internal;
5244 }
5245
5246 /* For historical reasons, use the existing names for
5247 ARM-to-Thumb and Thumb-to-ARM stubs. */
5248 if ((r_type == (unsigned int) R_ARM_THM_CALL
5249 || r_type == (unsigned int) R_ARM_THM_JUMP24)
5250 && branch_type == ST_BRANCH_TO_ARM)
5251 sprintf (stub_entry->output_name,
5252 THUMB2ARM_GLUE_ENTRY_NAME, sym_name);
5253 else if ((r_type == (unsigned int) R_ARM_CALL
5254 || r_type == (unsigned int) R_ARM_JUMP24)
5255 && branch_type == ST_BRANCH_TO_THUMB)
5256 sprintf (stub_entry->output_name,
5257 ARM2THUMB_GLUE_ENTRY_NAME, sym_name);
5258 else
5259 sprintf (stub_entry->output_name, STUB_ENTRY_NAME,
5260 sym_name);
5261
5262 stub_changed = TRUE;
5263 }
5264 while (0);
5265
5266 /* Look for relocations which might trigger Cortex-A8
5267 erratum. */
5268 if (htab->fix_cortex_a8
5269 && (r_type == (unsigned int) R_ARM_THM_JUMP24
5270 || r_type == (unsigned int) R_ARM_THM_JUMP19
5271 || r_type == (unsigned int) R_ARM_THM_CALL
5272 || r_type == (unsigned int) R_ARM_THM_XPC22))
5273 {
5274 bfd_vma from = section->output_section->vma
5275 + section->output_offset
5276 + irela->r_offset;
5277
5278 if ((from & 0xfff) == 0xffe)
5279 {
5280 /* Found a candidate. Note we haven't checked the
5281 destination is within 4K here: if we do so (and
5282 don't create an entry in a8_relocs) we can't tell
5283 that a branch should have been relocated when
5284 scanning later. */
5285 if (num_a8_relocs == a8_reloc_table_size)
5286 {
5287 a8_reloc_table_size *= 2;
5288 a8_relocs = (struct a8_erratum_reloc *)
5289 bfd_realloc (a8_relocs,
5290 sizeof (struct a8_erratum_reloc)
5291 * a8_reloc_table_size);
5292 }
5293
5294 a8_relocs[num_a8_relocs].from = from;
5295 a8_relocs[num_a8_relocs].destination = destination;
5296 a8_relocs[num_a8_relocs].r_type = r_type;
5297 a8_relocs[num_a8_relocs].branch_type = branch_type;
5298 a8_relocs[num_a8_relocs].sym_name = sym_name;
5299 a8_relocs[num_a8_relocs].non_a8_stub = created_stub;
5300 a8_relocs[num_a8_relocs].hash = hash;
5301
5302 num_a8_relocs++;
5303 }
5304 }
5305 }
5306
5307 /* We're done with the internal relocs, free them. */
5308 if (elf_section_data (section)->relocs == NULL)
5309 free (internal_relocs);
5310 }
5311
5312 if (htab->fix_cortex_a8)
5313 {
5314 /* Sort relocs which might apply to Cortex-A8 erratum. */
5315 qsort (a8_relocs, num_a8_relocs,
5316 sizeof (struct a8_erratum_reloc),
5317 &a8_reloc_compare);
5318
5319 /* Scan for branches which might trigger Cortex-A8 erratum. */
5320 if (cortex_a8_erratum_scan (input_bfd, info, &a8_fixes,
5321 &num_a8_fixes, &a8_fix_table_size,
5322 a8_relocs, num_a8_relocs,
5323 prev_num_a8_fixes, &stub_changed)
5324 != 0)
5325 goto error_ret_free_local;
5326 }
5327 }
5328
5329 if (prev_num_a8_fixes != num_a8_fixes)
5330 stub_changed = TRUE;
5331
5332 if (!stub_changed)
5333 break;
5334
5335 /* OK, we've added some stubs. Find out the new size of the
5336 stub sections. */
5337 for (stub_sec = htab->stub_bfd->sections;
5338 stub_sec != NULL;
5339 stub_sec = stub_sec->next)
5340 {
5341 /* Ignore non-stub sections. */
5342 if (!strstr (stub_sec->name, STUB_SUFFIX))
5343 continue;
5344
5345 stub_sec->size = 0;
5346 }
5347
5348 bfd_hash_traverse (&htab->stub_hash_table, arm_size_one_stub, htab);
5349
5350 /* Add Cortex-A8 erratum veneers to stub section sizes too. */
5351 if (htab->fix_cortex_a8)
5352 for (i = 0; i < num_a8_fixes; i++)
5353 {
5354 stub_sec = elf32_arm_create_or_find_stub_sec (NULL,
5355 a8_fixes[i].section, htab);
5356
5357 if (stub_sec == NULL)
5358 goto error_ret_free_local;
5359
5360 stub_sec->size
5361 += find_stub_size_and_template (a8_fixes[i].stub_type, NULL,
5362 NULL);
5363 }
5364
5365
5366 /* Ask the linker to do its stuff. */
5367 (*htab->layout_sections_again) ();
5368 }
5369
5370 /* Add stubs for Cortex-A8 erratum fixes now. */
5371 if (htab->fix_cortex_a8)
5372 {
5373 for (i = 0; i < num_a8_fixes; i++)
5374 {
5375 struct elf32_arm_stub_hash_entry *stub_entry;
5376 char *stub_name = a8_fixes[i].stub_name;
5377 asection *section = a8_fixes[i].section;
5378 unsigned int section_id = a8_fixes[i].section->id;
5379 asection *link_sec = htab->stub_group[section_id].link_sec;
5380 asection *stub_sec = htab->stub_group[section_id].stub_sec;
5381 const insn_sequence *template_sequence;
5382 int template_size, size = 0;
5383
5384 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
5385 TRUE, FALSE);
5386 if (stub_entry == NULL)
5387 {
5388 (*_bfd_error_handler) (_("%s: cannot create stub entry %s"),
5389 section->owner,
5390 stub_name);
5391 return FALSE;
5392 }
5393
5394 stub_entry->stub_sec = stub_sec;
5395 stub_entry->stub_offset = 0;
5396 stub_entry->id_sec = link_sec;
5397 stub_entry->stub_type = a8_fixes[i].stub_type;
5398 stub_entry->target_section = a8_fixes[i].section;
5399 stub_entry->target_value = a8_fixes[i].offset;
5400 stub_entry->target_addend = a8_fixes[i].addend;
5401 stub_entry->orig_insn = a8_fixes[i].orig_insn;
5402 stub_entry->branch_type = a8_fixes[i].branch_type;
5403
5404 size = find_stub_size_and_template (a8_fixes[i].stub_type,
5405 &template_sequence,
5406 &template_size);
5407
5408 stub_entry->stub_size = size;
5409 stub_entry->stub_template = template_sequence;
5410 stub_entry->stub_template_size = template_size;
5411 }
5412
5413 /* Stash the Cortex-A8 erratum fix array for use later in
5414 elf32_arm_write_section(). */
5415 htab->a8_erratum_fixes = a8_fixes;
5416 htab->num_a8_erratum_fixes = num_a8_fixes;
5417 }
5418 else
5419 {
5420 htab->a8_erratum_fixes = NULL;
5421 htab->num_a8_erratum_fixes = 0;
5422 }
5423 return TRUE;
5424
5425 error_ret_free_local:
5426 return FALSE;
5427 }
5428
5429 /* Build all the stubs associated with the current output file. The
5430 stubs are kept in a hash table attached to the main linker hash
5431 table. We also set up the .plt entries for statically linked PIC
5432 functions here. This function is called via arm_elf_finish in the
5433 linker. */
5434
5435 bfd_boolean
5436 elf32_arm_build_stubs (struct bfd_link_info *info)
5437 {
5438 asection *stub_sec;
5439 struct bfd_hash_table *table;
5440 struct elf32_arm_link_hash_table *htab;
5441
5442 htab = elf32_arm_hash_table (info);
5443 if (htab == NULL)
5444 return FALSE;
5445
5446 for (stub_sec = htab->stub_bfd->sections;
5447 stub_sec != NULL;
5448 stub_sec = stub_sec->next)
5449 {
5450 bfd_size_type size;
5451
5452 /* Ignore non-stub sections. */
5453 if (!strstr (stub_sec->name, STUB_SUFFIX))
5454 continue;
5455
5456 /* Allocate memory to hold the linker stubs. */
5457 size = stub_sec->size;
5458 stub_sec->contents = (unsigned char *) bfd_zalloc (htab->stub_bfd, size);
5459 if (stub_sec->contents == NULL && size != 0)
5460 return FALSE;
5461 stub_sec->size = 0;
5462 }
5463
5464 /* Build the stubs as directed by the stub hash table. */
5465 table = &htab->stub_hash_table;
5466 bfd_hash_traverse (table, arm_build_one_stub, info);
5467 if (htab->fix_cortex_a8)
5468 {
5469 /* Place the cortex a8 stubs last. */
5470 htab->fix_cortex_a8 = -1;
5471 bfd_hash_traverse (table, arm_build_one_stub, info);
5472 }
5473
5474 return TRUE;
5475 }
5476
5477 /* Locate the Thumb encoded calling stub for NAME. */
5478
5479 static struct elf_link_hash_entry *
5480 find_thumb_glue (struct bfd_link_info *link_info,
5481 const char *name,
5482 char **error_message)
5483 {
5484 char *tmp_name;
5485 struct elf_link_hash_entry *hash;
5486 struct elf32_arm_link_hash_table *hash_table;
5487
5488 /* We need a pointer to the armelf specific hash table. */
5489 hash_table = elf32_arm_hash_table (link_info);
5490 if (hash_table == NULL)
5491 return NULL;
5492
5493 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
5494 + strlen (THUMB2ARM_GLUE_ENTRY_NAME) + 1);
5495
5496 BFD_ASSERT (tmp_name);
5497
5498 sprintf (tmp_name, THUMB2ARM_GLUE_ENTRY_NAME, name);
5499
5500 hash = elf_link_hash_lookup
5501 (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
5502
5503 if (hash == NULL
5504 && asprintf (error_message, _("unable to find THUMB glue '%s' for '%s'"),
5505 tmp_name, name) == -1)
5506 *error_message = (char *) bfd_errmsg (bfd_error_system_call);
5507
5508 free (tmp_name);
5509
5510 return hash;
5511 }
5512
5513 /* Locate the ARM encoded calling stub for NAME. */
5514
5515 static struct elf_link_hash_entry *
5516 find_arm_glue (struct bfd_link_info *link_info,
5517 const char *name,
5518 char **error_message)
5519 {
5520 char *tmp_name;
5521 struct elf_link_hash_entry *myh;
5522 struct elf32_arm_link_hash_table *hash_table;
5523
5524 /* We need a pointer to the elfarm specific hash table. */
5525 hash_table = elf32_arm_hash_table (link_info);
5526 if (hash_table == NULL)
5527 return NULL;
5528
5529 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
5530 + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
5531
5532 BFD_ASSERT (tmp_name);
5533
5534 sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);
5535
5536 myh = elf_link_hash_lookup
5537 (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
5538
5539 if (myh == NULL
5540 && asprintf (error_message, _("unable to find ARM glue '%s' for '%s'"),
5541 tmp_name, name) == -1)
5542 *error_message = (char *) bfd_errmsg (bfd_error_system_call);
5543
5544 free (tmp_name);
5545
5546 return myh;
5547 }
5548
/* ARM->Thumb glue (static images):

   .arm
   __func_from_arm:
     ldr r12, __func_addr
     bx  r12
   __func_addr:
     .word func    @ behave as if you saw a ARM_32 reloc.

   (v5t static images)
   .arm
   __func_from_arm:
     ldr pc, __func_addr
   __func_addr:
     .word func    @ behave as if you saw a ARM_32 reloc.

   (relocatable images)
   .arm
   __func_from_arm:
     ldr r12, __func_offset
     add r12, r12, pc
     bx  r12
   __func_offset:
     .word func - .  */

/* Pre-v5T static glue: ldr/bx pair plus the literal address word.  */
#define ARM2THUMB_STATIC_GLUE_SIZE 12
static const insn32 a2t1_ldr_insn = 0xe59fc000;		/* ldr r12, [pc]  */
static const insn32 a2t2_bx_r12_insn = 0xe12fff1c;	/* bx r12  */
static const insn32 a2t3_func_addr_insn = 0x00000001;	/* literal pool slot  */

/* ARMv5T static glue: a single "ldr pc" plus the literal address word.  */
#define ARM2THUMB_V5_STATIC_GLUE_SIZE 8
static const insn32 a2t1v5_ldr_insn = 0xe51ff004;	/* ldr pc, [pc, #-4]  */
static const insn32 a2t2v5_func_addr_insn = 0x00000001;	/* literal pool slot  */

/* Position-independent glue: the literal holds a pc-relative offset.  */
#define ARM2THUMB_PIC_GLUE_SIZE 16
static const insn32 a2t1p_ldr_insn = 0xe59fc004;	/* ldr r12, [pc, #4]  */
static const insn32 a2t2p_add_pc_insn = 0xe08cc00f;	/* add r12, r12, pc  */
static const insn32 a2t3p_bx_r12_insn = 0xe12fff1c;	/* bx r12  */

/* Thumb->ARM:                          Thumb->(non-interworking aware) ARM

     .thumb                             .thumb
     .align 2                           .align 2
   __func_from_thumb:                   __func_from_thumb:
     bx pc                                push {r6, lr}
     nop                                  ldr  r6, __func_addr
     .arm                                 mov  lr, pc
     b func                               bx   r6
                                          .arm
                                        ;; back_to_thumb
                                          ldmia r13! {r6, lr}
                                          bx    lr
                                        __func_addr:
                                          .word func  */

#define THUMB2ARM_GLUE_SIZE 8
static const insn16 t2a1_bx_pc_insn = 0x4778;		/* bx pc  */
static const insn16 t2a2_noop_insn = 0x46c0;		/* nop (mov r8, r8)  */
static const insn32 t2a3_b_insn = 0xea000000;		/* b <func>  */

/* Size of one ARM-mode VFP11 denorm-erratum veneer; see
   record_vfp11_erratum_veneer.  */
#define VFP11_ERRATUM_VENEER_SIZE 8

/* ARMv4 "bx Rn" emulation veneer.  The register fields below are zero;
   presumably the actual register is merged in when the veneer is
   output — confirm against the veneer-writing code.  */
#define ARM_BX_VENEER_SIZE 12
static const insn32 armbx1_tst_insn = 0xe3100001;	/* tst rN, #1  */
static const insn32 armbx2_moveq_insn = 0x01a0f000;	/* moveq pc, rN  */
static const insn32 armbx3_bx_insn = 0xe12fff10;	/* bx rN  */
5615
5616 #ifndef ELFARM_NABI_C_INCLUDED
5617 static void
5618 arm_allocate_glue_section_space (bfd * abfd, bfd_size_type size, const char * name)
5619 {
5620 asection * s;
5621 bfd_byte * contents;
5622
5623 if (size == 0)
5624 {
5625 /* Do not include empty glue sections in the output. */
5626 if (abfd != NULL)
5627 {
5628 s = bfd_get_linker_section (abfd, name);
5629 if (s != NULL)
5630 s->flags |= SEC_EXCLUDE;
5631 }
5632 return;
5633 }
5634
5635 BFD_ASSERT (abfd != NULL);
5636
5637 s = bfd_get_linker_section (abfd, name);
5638 BFD_ASSERT (s != NULL);
5639
5640 contents = (bfd_byte *) bfd_alloc (abfd, size);
5641
5642 BFD_ASSERT (s->size == size);
5643 s->contents = contents;
5644 }
5645
5646 bfd_boolean
5647 bfd_elf32_arm_allocate_interworking_sections (struct bfd_link_info * info)
5648 {
5649 struct elf32_arm_link_hash_table * globals;
5650
5651 globals = elf32_arm_hash_table (info);
5652 BFD_ASSERT (globals != NULL);
5653
5654 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
5655 globals->arm_glue_size,
5656 ARM2THUMB_GLUE_SECTION_NAME);
5657
5658 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
5659 globals->thumb_glue_size,
5660 THUMB2ARM_GLUE_SECTION_NAME);
5661
5662 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
5663 globals->vfp11_erratum_glue_size,
5664 VFP11_ERRATUM_VENEER_SECTION_NAME);
5665
5666 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
5667 globals->bx_glue_size,
5668 ARM_BX_GLUE_SECTION_NAME);
5669
5670 return TRUE;
5671 }
5672
/* Allocate space and symbols for calling a Thumb function from Arm mode.
   LINK_INFO is the current link; H is the Thumb-mode target symbol.
   Returns the symbol identifying the stub.  Idempotent: a second call
   for the same target returns the previously created glue symbol.  */

static struct elf_link_hash_entry *
record_arm_to_thumb_glue (struct bfd_link_info * link_info,
			  struct elf_link_hash_entry * h)
{
  const char * name = h->root.root.string;
  asection * s;
  char * tmp_name;
  struct elf_link_hash_entry * myh;
  struct bfd_link_hash_entry * bh;
  struct elf32_arm_link_hash_table * globals;
  bfd_vma val;
  bfd_size_type size;

  globals = elf32_arm_hash_table (link_info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  s = bfd_get_linker_section
    (globals->bfd_of_glue_owner, ARM2THUMB_GLUE_SECTION_NAME);

  BFD_ASSERT (s != NULL);

  /* Build the decorated name used for the glue symbol.  */
  tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
				  + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);

  BFD_ASSERT (tmp_name);

  sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);

  myh = elf_link_hash_lookup
    (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);

  if (myh != NULL)
    {
      /* We've already seen this guy.  */
      free (tmp_name);
      return myh;
    }

  /* The only trick here is using hash_table->arm_glue_size as the value.
     Even though the section isn't allocated yet, this is where we will be
     putting it.  The +1 on the value marks that the stub has not been
     output yet - not that it is a Thumb function.  */
  bh = NULL;
  val = globals->arm_glue_size + 1;
  _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
				    tmp_name, BSF_GLOBAL, s, val,
				    NULL, TRUE, FALSE, &bh);

  /* Force the glue symbol local so it never escapes the dynamic
     symbol table.  */
  myh = (struct elf_link_hash_entry *) bh;
  myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
  myh->forced_local = 1;

  free (tmp_name);

  /* Pick the glue variant: PIC glue for shared objects, relocatable
     executables or an explicit pic-veneer request; the short BLX-era
     variant when the architecture has BLX; otherwise the full static
     sequence.  */
  if (link_info->shared || globals->root.is_relocatable_executable
      || globals->pic_veneer)
    size = ARM2THUMB_PIC_GLUE_SIZE;
  else if (globals->use_blx)
    size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
  else
    size = ARM2THUMB_STATIC_GLUE_SIZE;

  /* Grow both the section and the running total of ARM glue.  */
  s->size += size;
  globals->arm_glue_size += size;

  return myh;
}
5744
/* Allocate space for ARMv4 BX veneers.  REG is the register operand of
   the BX instruction needing emulation; one veneer is created per
   register, on first use.  */

static void
record_arm_bx_glue (struct bfd_link_info * link_info, int reg)
{
  asection * s;
  struct elf32_arm_link_hash_table *globals;
  char *tmp_name;
  struct elf_link_hash_entry *myh;
  struct bfd_link_hash_entry *bh;
  bfd_vma val;

  /* BX PC does not need a veneer.  */
  if (reg == 15)
    return;

  globals = elf32_arm_hash_table (link_info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  /* Check if this veneer has already been allocated.  */
  if (globals->bx_glue_offset[reg])
    return;

  s = bfd_get_linker_section
    (globals->bfd_of_glue_owner, ARM_BX_GLUE_SECTION_NAME);

  BFD_ASSERT (s != NULL);

  /* Add symbol for veneer.  (The "%d" in the entry-name template is
     replaced by REG, at most two digits, so the template length plus
     one byte suffices.)  */
  tmp_name = (char *)
      bfd_malloc ((bfd_size_type) strlen (ARM_BX_GLUE_ENTRY_NAME) + 1);

  BFD_ASSERT (tmp_name);

  sprintf (tmp_name, ARM_BX_GLUE_ENTRY_NAME, reg);

  myh = elf_link_hash_lookup
    (&(globals)->root, tmp_name, FALSE, FALSE, FALSE);

  /* A first-time veneer must not already have a symbol.  */
  BFD_ASSERT (myh == NULL);

  bh = NULL;
  val = globals->bx_glue_size;
  _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
				    tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
				    NULL, TRUE, FALSE, &bh);

  /* Keep the veneer symbol out of the dynamic symbol table.  */
  myh = (struct elf_link_hash_entry *) bh;
  myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
  myh->forced_local = 1;

  /* Record the veneer's offset; the low bits (| 2) appear to be a
     marker rather than part of the offset — NOTE(review): confirm
     against the code that consumes bx_glue_offset.  */
  s->size += ARM_BX_VENEER_SIZE;
  globals->bx_glue_offset[reg] = globals->bx_glue_size | 2;
  globals->bx_glue_size += ARM_BX_VENEER_SIZE;
}
5801
5802
5803 /* Add an entry to the code/data map for section SEC. */
5804
5805 static void
5806 elf32_arm_section_map_add (asection *sec, char type, bfd_vma vma)
5807 {
5808 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
5809 unsigned int newidx;
5810
5811 if (sec_data->map == NULL)
5812 {
5813 sec_data->map = (elf32_arm_section_map *)
5814 bfd_malloc (sizeof (elf32_arm_section_map));
5815 sec_data->mapcount = 0;
5816 sec_data->mapsize = 1;
5817 }
5818
5819 newidx = sec_data->mapcount++;
5820
5821 if (sec_data->mapcount > sec_data->mapsize)
5822 {
5823 sec_data->mapsize *= 2;
5824 sec_data->map = (elf32_arm_section_map *)
5825 bfd_realloc_or_free (sec_data->map, sec_data->mapsize
5826 * sizeof (elf32_arm_section_map));
5827 }
5828
5829 if (sec_data->map)
5830 {
5831 sec_data->map[newidx].vma = vma;
5832 sec_data->map[newidx].type = type;
5833 }
5834 }
5835
5836
/* Record information about a VFP11 denorm-erratum veneer.  Only ARM-mode
   veneers are handled for now.  BRANCH describes the erratum site in
   BRANCH_SEC of BRANCH_BFD at OFFSET.  Creates a symbol for the veneer,
   a return-point symbol after the faulting location, links the veneer
   into the section's erratum list, and grows the veneer section.
   Returns the offset of the new veneer within the glue section.  */

static bfd_vma
record_vfp11_erratum_veneer (struct bfd_link_info *link_info,
			     elf32_vfp11_erratum_list *branch,
			     bfd *branch_bfd,
			     asection *branch_sec,
			     unsigned int offset)
{
  asection *s;
  struct elf32_arm_link_hash_table *hash_table;
  char *tmp_name;
  struct elf_link_hash_entry *myh;
  struct bfd_link_hash_entry *bh;
  bfd_vma val;
  struct _arm_elf_section_data *sec_data;
  elf32_vfp11_erratum_list *newerr;

  hash_table = elf32_arm_hash_table (link_info);
  BFD_ASSERT (hash_table != NULL);
  BFD_ASSERT (hash_table->bfd_of_glue_owner != NULL);

  s = bfd_get_linker_section
    (hash_table->bfd_of_glue_owner, VFP11_ERRATUM_VENEER_SECTION_NAME);

  /* NOTE(review): sec_data is derived from S before the S != NULL
     assertion below; harmless only if the glue section always exists
     by this point.  */
  sec_data = elf32_arm_section_data (s);

  BFD_ASSERT (s != NULL);

  /* Name the veneer after the running fix count (the +10 leaves room
     for the decimal expansion of the "%d" in the template).  */
  tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
				  (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);

  BFD_ASSERT (tmp_name);

  sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
	   hash_table->num_vfp11_fixes);

  myh = elf_link_hash_lookup
    (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);

  BFD_ASSERT (myh == NULL);

  /* Define the veneer symbol at the current end of the glue.  */
  bh = NULL;
  val = hash_table->vfp11_erratum_glue_size;
  _bfd_generic_link_add_one_symbol (link_info, hash_table->bfd_of_glue_owner,
				    tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
				    NULL, TRUE, FALSE, &bh);

  myh = (struct elf_link_hash_entry *) bh;
  myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
  myh->forced_local = 1;

  /* Link veneer back to calling location.  */
  sec_data->erratumcount += 1;
  newerr = (elf32_vfp11_erratum_list *)
      bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));

  newerr->type = VFP11_ERRATUM_ARM_VENEER;
  newerr->vma = -1;
  newerr->u.v.branch = branch;
  newerr->u.v.id = hash_table->num_vfp11_fixes;
  branch->u.b.veneer = newerr;

  /* Push onto the head of the section's erratum list.  */
  newerr->next = sec_data->erratumlist;
  sec_data->erratumlist = newerr;

  /* A symbol for the return from the veneer.  */
  sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
	   hash_table->num_vfp11_fixes);

  myh = elf_link_hash_lookup
    (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);

  if (myh != NULL)
    abort ();

  /* The return symbol sits just past the erratum site.  */
  bh = NULL;
  val = offset + 4;
  _bfd_generic_link_add_one_symbol (link_info, branch_bfd, tmp_name, BSF_LOCAL,
				    branch_sec, val, NULL, TRUE, FALSE, &bh);

  myh = (struct elf_link_hash_entry *) bh;
  myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
  myh->forced_local = 1;

  free (tmp_name);

  /* Generate a mapping symbol for the veneer section, and explicitly add an
     entry for that symbol to the code/data map for the section.  */
  if (hash_table->vfp11_erratum_glue_size == 0)
    {
      bh = NULL;
      /* FIXME: Creates an ARM symbol.  Thumb mode will need attention if it
	 ever requires this erratum fix.  */
      _bfd_generic_link_add_one_symbol (link_info,
					hash_table->bfd_of_glue_owner, "$a",
					BSF_LOCAL, s, 0, NULL,
					TRUE, FALSE, &bh);

      myh = (struct elf_link_hash_entry *) bh;
      myh->type = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
      myh->forced_local = 1;

      /* The elf32_arm_init_maps function only cares about symbols from input
	 BFDs.  We must make a note of this generated mapping symbol
	 ourselves so that code byteswapping works properly in
	 elf32_arm_write_section.  */
      elf32_arm_section_map_add (s, 'a', 0);
    }

  /* Account for the new veneer.  */
  s->size += VFP11_ERRATUM_VENEER_SIZE;
  hash_table->vfp11_erratum_glue_size += VFP11_ERRATUM_VENEER_SIZE;
  hash_table->num_vfp11_fixes++;

  /* The offset of the veneer.  */
  return val;
}
5955
/* Section flags common to all linker-created ARM glue sections:
   loadable read-only code whose contents are held in memory.  */
#define ARM_GLUE_SECTION_FLAGS \
  (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_CODE \
   | SEC_READONLY | SEC_LINKER_CREATED)
5959
5960 /* Create a fake section for use by the ARM backend of the linker. */
5961
5962 static bfd_boolean
5963 arm_make_glue_section (bfd * abfd, const char * name)
5964 {
5965 asection * sec;
5966
5967 sec = bfd_get_linker_section (abfd, name);
5968 if (sec != NULL)
5969 /* Already made. */
5970 return TRUE;
5971
5972 sec = bfd_make_section_anyway_with_flags (abfd, name, ARM_GLUE_SECTION_FLAGS);
5973
5974 if (sec == NULL
5975 || !bfd_set_section_alignment (abfd, sec, 2))
5976 return FALSE;
5977
5978 /* Set the gc mark to prevent the section from being removed by garbage
5979 collection, despite the fact that no relocs refer to this section. */
5980 sec->gc_mark = 1;
5981
5982 return TRUE;
5983 }
5984
5985 /* Add the glue sections to ABFD. This function is called from the
5986 linker scripts in ld/emultempl/{armelf}.em. */
5987
5988 bfd_boolean
5989 bfd_elf32_arm_add_glue_sections_to_bfd (bfd *abfd,
5990 struct bfd_link_info *info)
5991 {
5992 /* If we are only performing a partial
5993 link do not bother adding the glue. */
5994 if (info->relocatable)
5995 return TRUE;
5996
5997 return arm_make_glue_section (abfd, ARM2THUMB_GLUE_SECTION_NAME)
5998 && arm_make_glue_section (abfd, THUMB2ARM_GLUE_SECTION_NAME)
5999 && arm_make_glue_section (abfd, VFP11_ERRATUM_VENEER_SECTION_NAME)
6000 && arm_make_glue_section (abfd, ARM_BX_GLUE_SECTION_NAME);
6001 }
6002
6003 /* Select a BFD to be used to hold the sections used by the glue code.
6004 This function is called from the linker scripts in ld/emultempl/
6005 {armelf/pe}.em. */
6006
6007 bfd_boolean
6008 bfd_elf32_arm_get_bfd_for_interworking (bfd *abfd, struct bfd_link_info *info)
6009 {
6010 struct elf32_arm_link_hash_table *globals;
6011
6012 /* If we are only performing a partial link
6013 do not bother getting a bfd to hold the glue. */
6014 if (info->relocatable)
6015 return TRUE;
6016
6017 /* Make sure we don't attach the glue sections to a dynamic object. */
6018 BFD_ASSERT (!(abfd->flags & DYNAMIC));
6019
6020 globals = elf32_arm_hash_table (info);
6021 BFD_ASSERT (globals != NULL);
6022
6023 if (globals->bfd_of_glue_owner != NULL)
6024 return TRUE;
6025
6026 /* Save the bfd for later use. */
6027 globals->bfd_of_glue_owner = abfd;
6028
6029 return TRUE;
6030 }
6031
6032 static void
6033 check_use_blx (struct elf32_arm_link_hash_table *globals)
6034 {
6035 int cpu_arch;
6036
6037 cpu_arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
6038 Tag_CPU_arch);
6039
6040 if (globals->fix_arm1176)
6041 {
6042 if (cpu_arch == TAG_CPU_ARCH_V6T2 || cpu_arch > TAG_CPU_ARCH_V6K)
6043 globals->use_blx = 1;
6044 }
6045 else
6046 {
6047 if (cpu_arch > TAG_CPU_ARCH_V4T)
6048 globals->use_blx = 1;
6049 }
6050 }
6051
/* Scan every reloc of every section of ABFD before section sizes are
   fixed, recording which interworking glue will be needed: BX-emulation
   glue for R_ARM_V4BX when fix_v4bx >= 2, and ARM->Thumb glue for
   R_ARM_PC24 calls whose target is marked as Thumb.  Called once per
   input bfd from the linker.  Returns FALSE on error.  */

bfd_boolean
bfd_elf32_arm_process_before_allocation (bfd *abfd,
					 struct bfd_link_info *link_info)
{
  Elf_Internal_Shdr *symtab_hdr;
  Elf_Internal_Rela *internal_relocs = NULL;
  Elf_Internal_Rela *irel, *irelend;
  bfd_byte *contents = NULL;

  asection *sec;
  struct elf32_arm_link_hash_table *globals;

  /* If we are only performing a partial link do not bother
     to construct any glue.  */
  if (link_info->relocatable)
    return TRUE;

  /* Here we have a bfd that is to be included on the link.  We have a
     hook to do reloc rummaging, before section sizes are nailed down.  */
  globals = elf32_arm_hash_table (link_info);
  BFD_ASSERT (globals != NULL);

  check_use_blx (globals);

  if (globals->byteswap_code && !bfd_big_endian (abfd))
    {
      _bfd_error_handler (_("%B: BE8 images only valid in big-endian mode."),
			  abfd);
      return FALSE;
    }

  /* PR 5398: If we have not decided to include any loadable sections in
     the output then we will not have a glue owner bfd.  This is OK, it
     just means that there is nothing else for us to do here.  */
  if (globals->bfd_of_glue_owner == NULL)
    return TRUE;

  /* Rummage around all the relocs and map the glue vectors.  */
  sec = abfd->sections;

  if (sec == NULL)
    return TRUE;

  for (; sec != NULL; sec = sec->next)
    {
      if (sec->reloc_count == 0)
	continue;

      if ((sec->flags & SEC_EXCLUDE) != 0)
	continue;

      symtab_hdr = & elf_symtab_hdr (abfd);

      /* Load the relocs.  */
      internal_relocs
	= _bfd_elf_link_read_relocs (abfd, sec, NULL, NULL, FALSE);

      if (internal_relocs == NULL)
	goto error_return;

      irelend = internal_relocs + sec->reloc_count;
      for (irel = internal_relocs; irel < irelend; irel++)
	{
	  long r_type;
	  unsigned long r_index;

	  struct elf_link_hash_entry *h;

	  r_type = ELF32_R_TYPE (irel->r_info);
	  r_index = ELF32_R_SYM (irel->r_info);

	  /* These are the only relocation types we care about.  */
	  if ( r_type != R_ARM_PC24
	      && (r_type != R_ARM_V4BX || globals->fix_v4bx < 2))
	    continue;

	  /* Get the section contents if we haven't done so already.  */
	  if (contents == NULL)
	    {
	      /* Get cached copy if it exists.  */
	      if (elf_section_data (sec)->this_hdr.contents != NULL)
		contents = elf_section_data (sec)->this_hdr.contents;
	      else
		{
		  /* Go get them off disk.  */
		  if (! bfd_malloc_and_get_section (abfd, sec, &contents))
		    goto error_return;
		}
	    }

	  if (r_type == R_ARM_V4BX)
	    {
	      int reg;

	      /* Low four bits of the BX instruction hold the register
		 operand; remember it so a veneer can be emitted.  */
	      reg = bfd_get_32 (abfd, contents + irel->r_offset) & 0xf;
	      record_arm_bx_glue (link_info, reg);
	      continue;
	    }

	  /* If the relocation is not against a symbol it cannot concern us.  */
	  h = NULL;

	  /* We don't care about local symbols.  */
	  if (r_index < symtab_hdr->sh_info)
	    continue;

	  /* This is an external symbol.  */
	  r_index -= symtab_hdr->sh_info;
	  h = (struct elf_link_hash_entry *)
	    elf_sym_hashes (abfd)[r_index];

	  /* If the relocation is against a static symbol it must be within
	     the current section and so cannot be a cross ARM/Thumb relocation.  */
	  if (h == NULL)
	    continue;

	  /* If the call will go through a PLT entry then we do not need
	     glue.  */
	  if (globals->root.splt != NULL && h->plt.offset != (bfd_vma) -1)
	    continue;

	  switch (r_type)
	    {
	    case R_ARM_PC24:
	      /* This one is a call from arm code.  We need to look up
		 the target of the call.  If it is a thumb target, we
		 insert glue.  */
	      if (h->target_internal == ST_BRANCH_TO_THUMB)
		record_arm_to_thumb_glue (link_info, h);
	      break;

	    default:
	      abort ();
	    }
	}

      /* Free the loaded contents/relocs unless they are the cached
	 copies owned by the section itself.  */
      if (contents != NULL
	  && elf_section_data (sec)->this_hdr.contents != contents)
	free (contents);
      contents = NULL;

      if (internal_relocs != NULL
	  && elf_section_data (sec)->relocs != internal_relocs)
	free (internal_relocs);
      internal_relocs = NULL;
    }

  return TRUE;

error_return:
  if (contents != NULL
      && elf_section_data (sec)->this_hdr.contents != contents)
    free (contents);
  if (internal_relocs != NULL
      && elf_section_data (sec)->relocs != internal_relocs)
    free (internal_relocs);

  return FALSE;
}
6211 #endif
6212
6213
6214 /* Initialise maps of ARM/Thumb/data for input BFDs. */
6215
6216 void
6217 bfd_elf32_arm_init_maps (bfd *abfd)
6218 {
6219 Elf_Internal_Sym *isymbuf;
6220 Elf_Internal_Shdr *hdr;
6221 unsigned int i, localsyms;
6222
6223 /* PR 7093: Make sure that we are dealing with an arm elf binary. */
6224 if (! is_arm_elf (abfd))
6225 return;
6226
6227 if ((abfd->flags & DYNAMIC) != 0)
6228 return;
6229
6230 hdr = & elf_symtab_hdr (abfd);
6231 localsyms = hdr->sh_info;
6232
6233 /* Obtain a buffer full of symbols for this BFD. The hdr->sh_info field
6234 should contain the number of local symbols, which should come before any
6235 global symbols. Mapping symbols are always local. */
6236 isymbuf = bfd_elf_get_elf_syms (abfd, hdr, localsyms, 0, NULL, NULL,
6237 NULL);
6238
6239 /* No internal symbols read? Skip this BFD. */
6240 if (isymbuf == NULL)
6241 return;
6242
6243 for (i = 0; i < localsyms; i++)
6244 {
6245 Elf_Internal_Sym *isym = &isymbuf[i];
6246 asection *sec = bfd_section_from_elf_index (abfd, isym->st_shndx);
6247 const char *name;
6248
6249 if (sec != NULL
6250 && ELF_ST_BIND (isym->st_info) == STB_LOCAL)
6251 {
6252 name = bfd_elf_string_from_elf_section (abfd,
6253 hdr->sh_link, isym->st_name);
6254
6255 if (bfd_is_arm_special_symbol_name (name,
6256 BFD_ARM_SPECIAL_SYM_TYPE_MAP))
6257 elf32_arm_section_map_add (sec, name[1], isym->st_value);
6258 }
6259 }
6260 }
6261
6262
6263 /* Auto-select enabling of Cortex-A8 erratum fix if the user didn't explicitly
6264 say what they wanted. */
6265
6266 void
6267 bfd_elf32_arm_set_cortex_a8_fix (bfd *obfd, struct bfd_link_info *link_info)
6268 {
6269 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
6270 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
6271
6272 if (globals == NULL)
6273 return;
6274
6275 if (globals->fix_cortex_a8 == -1)
6276 {
6277 /* Turn on Cortex-A8 erratum workaround for ARMv7-A. */
6278 if (out_attr[Tag_CPU_arch].i == TAG_CPU_ARCH_V7
6279 && (out_attr[Tag_CPU_arch_profile].i == 'A'
6280 || out_attr[Tag_CPU_arch_profile].i == 0))
6281 globals->fix_cortex_a8 = 1;
6282 else
6283 globals->fix_cortex_a8 = 0;
6284 }
6285 }
6286
6287
6288 void
6289 bfd_elf32_arm_set_vfp11_fix (bfd *obfd, struct bfd_link_info *link_info)
6290 {
6291 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
6292 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
6293
6294 if (globals == NULL)
6295 return;
6296 /* We assume that ARMv7+ does not need the VFP11 denorm erratum fix. */
6297 if (out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V7)
6298 {
6299 switch (globals->vfp11_fix)
6300 {
6301 case BFD_ARM_VFP11_FIX_DEFAULT:
6302 case BFD_ARM_VFP11_FIX_NONE:
6303 globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
6304 break;
6305
6306 default:
6307 /* Give a warning, but do as the user requests anyway. */
6308 (*_bfd_error_handler) (_("%B: warning: selected VFP11 erratum "
6309 "workaround is not necessary for target architecture"), obfd);
6310 }
6311 }
6312 else if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_DEFAULT)
6313 /* For earlier architectures, we might need the workaround, but do not
6314 enable it by default. If users is running with broken hardware, they
6315 must enable the erratum fix explicitly. */
6316 globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
6317 }
6318
6319
/* Classification of VFP11 instructions by the execution pipeline that
   handles them; used by the VFP11 erratum scanner below.  */
enum bfd_arm_vfp11_pipe
{
  VFP11_FMAC,	/* Multiply/accumulate pipeline.  */
  VFP11_LS,	/* Load/store pipeline.  */
  VFP11_DS,	/* Divide/sqrt pipeline.  */
  VFP11_BAD	/* Not a recognized VFP11 instruction.  */
};
6327
6328 /* Return a VFP register number. This is encoded as RX:X for single-precision
6329 registers, or X:RX for double-precision registers, where RX is the group of
6330 four bits in the instruction encoding and X is the single extension bit.
6331 RX and X fields are specified using their lowest (starting) bit. The return
6332 value is:
6333
6334 0...31: single-precision registers s0...s31
6335 32...63: double-precision registers d0...d31.
6336
6337 Although X should be zero for VFP11 (encoding d0...d15 only), we might
6338 encounter VFP3 instructions, so we allow the full range for DP registers. */
6339
6340 static unsigned int
6341 bfd_arm_vfp11_regno (unsigned int insn, bfd_boolean is_double, unsigned int rx,
6342 unsigned int x)
6343 {
6344 if (is_double)
6345 return (((insn >> rx) & 0xf) | (((insn >> x) & 1) << 4)) + 32;
6346 else
6347 return (((insn >> rx) & 0xf) << 1) | ((insn >> x) & 1);
6348 }
6349
/* Set bits in *WMASK according to a register number REG as encoded by
   bfd_arm_vfp11_regno().  Ignore d16-d31 (REG >= 48).  A DP register
   marks both of its overlapping SP registers.  */

static void
bfd_arm_vfp11_write_mask (unsigned int *wmask, unsigned int reg)
{
  if (reg < 32)
    /* s0..s31: one bit per register.  The unsigned constant keeps the
       shift well-defined when s31 lands in bit 31 (1 << 31 on a signed
       int is undefined behaviour).  */
    *wmask |= 1u << reg;
  else if (reg < 48)
    /* d0..d15: two adjacent bits per register.  3u likewise avoids
       undefined behaviour when d15 shifts into bits 30-31.  */
    *wmask |= 3u << ((reg - 32) * 2);
}
6361
6362 /* Return TRUE if WMASK overwrites anything in REGS. */
6363
6364 static bfd_boolean
6365 bfd_arm_vfp11_antidependency (unsigned int wmask, int *regs, int numregs)
6366 {
6367 int i;
6368
6369 for (i = 0; i < numregs; i++)
6370 {
6371 unsigned int reg = regs[i];
6372
6373 if (reg < 32 && (wmask & (1 << reg)) != 0)
6374 return TRUE;
6375
6376 reg -= 32;
6377
6378 if (reg >= 16)
6379 continue;
6380
6381 if ((wmask & (3 << (reg * 2))) != 0)
6382 return TRUE;
6383 }
6384
6385 return FALSE;
6386 }
6387
6388 /* In this function, we're interested in two things: finding input registers
6389 for VFP data-processing instructions, and finding the set of registers which
6390 arbitrary VFP instructions may write to. We use a 32-bit unsigned int to
6391 hold the written set, so FLDM etc. are easy to deal with (we're only
6392 interested in 32 SP registers or 16 dp registers, due to the VFP version
6393 implemented by the chip in question). DP registers are marked by setting
6394 both SP registers in the write mask). */
6395
/* Decode INSN (an ARM-mode VFP instruction word).  On return, *DESTMASK
   has bits set for every SP/DP register the instruction may write (via
   bfd_arm_vfp11_write_mask), REGS[0..*NUMREGS-1] holds the input
   registers of data-processing instructions, and the return value names
   the pipeline the instruction executes on (VFP11_BAD if INSN is not a
   VFP11 instruction we recognise).  */

static enum bfd_arm_vfp11_pipe
bfd_arm_vfp11_insn_decode (unsigned int insn, unsigned int *destmask, int *regs,
			   int *numregs)
{
  enum bfd_arm_vfp11_pipe vpipe = VFP11_BAD;
  /* Bits 8-11 == 0xb selects the double-precision variants.  */
  bfd_boolean is_double = ((insn & 0xf00) == 0xb00) ? 1 : 0;

  if ((insn & 0x0f000e10) == 0x0e000a00) /* A data-processing insn. */
    {
      unsigned int pqrs;
      unsigned int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
      unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);

      /* Gather the p, q, r, s opcode bits into one selector value.  */
      pqrs = ((insn & 0x00800000) >> 20)
	   | ((insn & 0x00300000) >> 19)
	   | ((insn & 0x00000040) >> 6);

      switch (pqrs)
	{
	case 0: /* fmac[sd]. */
	case 1: /* fnmac[sd]. */
	case 2: /* fmsc[sd]. */
	case 3: /* fnmsc[sd]. */
	  /* Multiply-accumulate forms read the destination too.  */
	  vpipe = VFP11_FMAC;
	  bfd_arm_vfp11_write_mask (destmask, fd);
	  regs[0] = fd;
	  regs[1] = bfd_arm_vfp11_regno (insn, is_double, 16, 7); /* Fn. */
	  regs[2] = fm;
	  *numregs = 3;
	  break;

	case 4: /* fmul[sd]. */
	case 5: /* fnmul[sd]. */
	case 6: /* fadd[sd]. */
	case 7: /* fsub[sd]. */
	  vpipe = VFP11_FMAC;
	  goto vfp_binop;

	case 8: /* fdiv[sd]. */
	  vpipe = VFP11_DS;
	  vfp_binop:
	  /* Plain two-operand forms: Fd = Fn op Fm.  */
	  bfd_arm_vfp11_write_mask (destmask, fd);
	  regs[0] = bfd_arm_vfp11_regno (insn, is_double, 16, 7); /* Fn. */
	  regs[1] = fm;
	  *numregs = 2;
	  break;

	case 15: /* extended opcode. */
	  {
	    unsigned int extn = ((insn >> 15) & 0x1e)
			      | ((insn >> 7) & 1);

	    switch (extn)
	      {
	      case 0: /* fcpy[sd]. */
	      case 1: /* fabs[sd]. */
	      case 2: /* fneg[sd]. */
	      case 8: /* fcmp[sd]. */
	      case 9: /* fcmpe[sd]. */
	      case 10: /* fcmpz[sd]. */
	      case 11: /* fcmpez[sd]. */
	      case 16: /* fuito[sd]. */
	      case 17: /* fsito[sd]. */
	      case 24: /* ftoui[sd]. */
	      case 25: /* ftouiz[sd]. */
	      case 26: /* ftosi[sd]. */
	      case 27: /* ftosiz[sd]. */
		/* These instructions will not bounce due to underflow. */
		*numregs = 0;
		vpipe = VFP11_FMAC;
		break;

	      case 3: /* fsqrt[sd]. */
		/* fsqrt cannot underflow, but it can (perhaps) overwrite
		   registers to cause the erratum in previous instructions. */
		bfd_arm_vfp11_write_mask (destmask, fd);
		vpipe = VFP11_DS;
		break;

	      case 15: /* fcvt{ds,sd}. */
		{
		  int rnum = 0;

		  bfd_arm_vfp11_write_mask (destmask, fd);

		  /* Only FCVTSD can underflow. */
		  if ((insn & 0x100) != 0)
		    regs[rnum++] = fm;

		  *numregs = rnum;

		  vpipe = VFP11_FMAC;
		}
		break;

	      default:
		return VFP11_BAD;
	      }
	  }
	  break;

	default:
	  return VFP11_BAD;
	}
    }
  /* Two-register transfer. */
  else if ((insn & 0x0fe00ed0) == 0x0c400a10)
    {
      unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);

      /* Bit 20 clear means the transfer writes the VFP registers.  */
      if ((insn & 0x100000) == 0)
	{
	  if (is_double)
	    bfd_arm_vfp11_write_mask (destmask, fm);
	  else
	    {
	      /* An SP pair: mark both destination registers.  */
	      bfd_arm_vfp11_write_mask (destmask, fm);
	      bfd_arm_vfp11_write_mask (destmask, fm + 1);
	    }
	}

      vpipe = VFP11_LS;
    }
  else if ((insn & 0x0e100e00) == 0x0c100a00) /* A load insn. */
    {
      int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
      /* Addressing-mode bits P, U, W select single vs. multiple load.  */
      unsigned int puw = ((insn >> 21) & 0x1) | (((insn >> 23) & 3) << 1);

      switch (puw)
	{
	case 0: /* Two-reg transfer. We should catch these above. */
	  abort ();

	case 2: /* fldm[sdx]. */
	case 3:
	case 5:
	  {
	    /* The immediate field counts words; halve it for DP regs.  */
	    unsigned int i, offset = insn & 0xff;

	    if (is_double)
	      offset >>= 1;

	    for (i = fd; i < fd + offset; i++)
	      bfd_arm_vfp11_write_mask (destmask, i);
	  }
	  break;

	case 4: /* fld[sd]. */
	case 6:
	  bfd_arm_vfp11_write_mask (destmask, fd);
	  break;

	default:
	  return VFP11_BAD;
	}

      vpipe = VFP11_LS;
    }
  /* Single-register transfer. Note L==0. */
  else if ((insn & 0x0f100e10) == 0x0e000a10)
    {
      unsigned int opcode = (insn >> 21) & 7;
      unsigned int fn = bfd_arm_vfp11_regno (insn, is_double, 16, 7);

      switch (opcode)
	{
	case 0: /* fmsr/fmdlr. */
	case 1: /* fmdhr. */
	  /* Mark fmdhr and fmdlr as writing to the whole of the DP
	     destination register. I don't know if this is exactly right,
	     but it is the conservative choice. */
	  bfd_arm_vfp11_write_mask (destmask, fn);
	  break;

	case 7: /* fmxr. */
	  break;
	}

      vpipe = VFP11_LS;
    }

  return vpipe;
}
6579
6580
6581 static int elf32_arm_compare_mapping (const void * a, const void * b);
6582
6583
6584 /* Look for potentially-troublesome code sequences which might trigger the
6585 VFP11 denormal/antidependency erratum. See, e.g., the ARM1136 errata sheet
6586 (available from ARM) for details of the erratum. A short version is
6587 described in ld.texinfo. */
6588
6589 bfd_boolean
6590 bfd_elf32_arm_vfp11_erratum_scan (bfd *abfd, struct bfd_link_info *link_info)
6591 {
6592 asection *sec;
6593 bfd_byte *contents = NULL;
6594 int state = 0;
6595 int regs[3], numregs = 0;
6596 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
6597 int use_vector = (globals->vfp11_fix == BFD_ARM_VFP11_FIX_VECTOR);
6598
6599 if (globals == NULL)
6600 return FALSE;
6601
6602 /* We use a simple FSM to match troublesome VFP11 instruction sequences.
6603 The states transition as follows:
6604
6605 0 -> 1 (vector) or 0 -> 2 (scalar)
6606 A VFP FMAC-pipeline instruction has been seen. Fill
6607 regs[0]..regs[numregs-1] with its input operands. Remember this
6608 instruction in 'first_fmac'.
6609
6610 1 -> 2
6611 Any instruction, except for a VFP instruction which overwrites
6612 regs[*].
6613
6614 1 -> 3 [ -> 0 ] or
6615 2 -> 3 [ -> 0 ]
6616 A VFP instruction has been seen which overwrites any of regs[*].
6617 We must make a veneer! Reset state to 0 before examining next
6618 instruction.
6619
6620 2 -> 0
6621 If we fail to match anything in state 2, reset to state 0 and reset
6622 the instruction pointer to the instruction after 'first_fmac'.
6623
6624 If the VFP11 vector mode is in use, there must be at least two unrelated
6625 instructions between anti-dependent VFP11 instructions to properly avoid
6626 triggering the erratum, hence the use of the extra state 1. */
6627
6628 /* If we are only performing a partial link do not bother
6629 to construct any glue. */
6630 if (link_info->relocatable)
6631 return TRUE;
6632
6633 /* Skip if this bfd does not correspond to an ELF image. */
6634 if (! is_arm_elf (abfd))
6635 return TRUE;
6636
6637 /* We should have chosen a fix type by the time we get here. */
6638 BFD_ASSERT (globals->vfp11_fix != BFD_ARM_VFP11_FIX_DEFAULT);
6639
6640 if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_NONE)
6641 return TRUE;
6642
6643 /* Skip this BFD if it corresponds to an executable or dynamic object. */
6644 if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
6645 return TRUE;
6646
6647 for (sec = abfd->sections; sec != NULL; sec = sec->next)
6648 {
6649 unsigned int i, span, first_fmac = 0, veneer_of_insn = 0;
6650 struct _arm_elf_section_data *sec_data;
6651
6652 /* If we don't have executable progbits, we're not interested in this
6653 section. Also skip if section is to be excluded. */
6654 if (elf_section_type (sec) != SHT_PROGBITS
6655 || (elf_section_flags (sec) & SHF_EXECINSTR) == 0
6656 || (sec->flags & SEC_EXCLUDE) != 0
6657 || sec->sec_info_type == SEC_INFO_TYPE_JUST_SYMS
6658 || sec->output_section == bfd_abs_section_ptr
6659 || strcmp (sec->name, VFP11_ERRATUM_VENEER_SECTION_NAME) == 0)
6660 continue;
6661
6662 sec_data = elf32_arm_section_data (sec);
6663
6664 if (sec_data->mapcount == 0)
6665 continue;
6666
6667 if (elf_section_data (sec)->this_hdr.contents != NULL)
6668 contents = elf_section_data (sec)->this_hdr.contents;
6669 else if (! bfd_malloc_and_get_section (abfd, sec, &contents))
6670 goto error_return;
6671
6672 qsort (sec_data->map, sec_data->mapcount, sizeof (elf32_arm_section_map),
6673 elf32_arm_compare_mapping);
6674
6675 for (span = 0; span < sec_data->mapcount; span++)
6676 {
6677 unsigned int span_start = sec_data->map[span].vma;
6678 unsigned int span_end = (span == sec_data->mapcount - 1)
6679 ? sec->size : sec_data->map[span + 1].vma;
6680 char span_type = sec_data->map[span].type;
6681
6682 /* FIXME: Only ARM mode is supported at present. We may need to
6683 support Thumb-2 mode also at some point. */
6684 if (span_type != 'a')
6685 continue;
6686
6687 for (i = span_start; i < span_end;)
6688 {
6689 unsigned int next_i = i + 4;
6690 unsigned int insn = bfd_big_endian (abfd)
6691 ? (contents[i] << 24)
6692 | (contents[i + 1] << 16)
6693 | (contents[i + 2] << 8)
6694 | contents[i + 3]
6695 : (contents[i + 3] << 24)
6696 | (contents[i + 2] << 16)
6697 | (contents[i + 1] << 8)
6698 | contents[i];
6699 unsigned int writemask = 0;
6700 enum bfd_arm_vfp11_pipe vpipe;
6701
6702 switch (state)
6703 {
6704 case 0:
6705 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask, regs,
6706 &numregs);
6707 /* I'm assuming the VFP11 erratum can trigger with denorm
6708 operands on either the FMAC or the DS pipeline. This might
6709 lead to slightly overenthusiastic veneer insertion. */
6710 if (vpipe == VFP11_FMAC || vpipe == VFP11_DS)
6711 {
6712 state = use_vector ? 1 : 2;
6713 first_fmac = i;
6714 veneer_of_insn = insn;
6715 }
6716 break;
6717
6718 case 1:
6719 {
6720 int other_regs[3], other_numregs;
6721 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
6722 other_regs,
6723 &other_numregs);
6724 if (vpipe != VFP11_BAD
6725 && bfd_arm_vfp11_antidependency (writemask, regs,
6726 numregs))
6727 state = 3;
6728 else
6729 state = 2;
6730 }
6731 break;
6732
6733 case 2:
6734 {
6735 int other_regs[3], other_numregs;
6736 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
6737 other_regs,
6738 &other_numregs);
6739 if (vpipe != VFP11_BAD
6740 && bfd_arm_vfp11_antidependency (writemask, regs,
6741 numregs))
6742 state = 3;
6743 else
6744 {
6745 state = 0;
6746 next_i = first_fmac + 4;
6747 }
6748 }
6749 break;
6750
6751 case 3:
6752 abort (); /* Should be unreachable. */
6753 }
6754
6755 if (state == 3)
6756 {
6757 elf32_vfp11_erratum_list *newerr =(elf32_vfp11_erratum_list *)
6758 bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
6759
6760 elf32_arm_section_data (sec)->erratumcount += 1;
6761
6762 newerr->u.b.vfp_insn = veneer_of_insn;
6763
6764 switch (span_type)
6765 {
6766 case 'a':
6767 newerr->type = VFP11_ERRATUM_BRANCH_TO_ARM_VENEER;
6768 break;
6769
6770 default:
6771 abort ();
6772 }
6773
6774 record_vfp11_erratum_veneer (link_info, newerr, abfd, sec,
6775 first_fmac);
6776
6777 newerr->vma = -1;
6778
6779 newerr->next = sec_data->erratumlist;
6780 sec_data->erratumlist = newerr;
6781
6782 state = 0;
6783 }
6784
6785 i = next_i;
6786 }
6787 }
6788
6789 if (contents != NULL
6790 && elf_section_data (sec)->this_hdr.contents != contents)
6791 free (contents);
6792 contents = NULL;
6793 }
6794
6795 return TRUE;
6796
6797 error_return:
6798 if (contents != NULL
6799 && elf_section_data (sec)->this_hdr.contents != contents)
6800 free (contents);
6801
6802 return FALSE;
6803 }
6804
6805 /* Find virtual-memory addresses for VFP11 erratum veneers and return locations
6806 after sections have been laid out, using specially-named symbols. */
6807
6808 void
6809 bfd_elf32_arm_vfp11_fix_veneer_locations (bfd *abfd,
6810 struct bfd_link_info *link_info)
6811 {
6812 asection *sec;
6813 struct elf32_arm_link_hash_table *globals;
6814 char *tmp_name;
6815
6816 if (link_info->relocatable)
6817 return;
6818
6819 /* Skip if this bfd does not correspond to an ELF image. */
6820 if (! is_arm_elf (abfd))
6821 return;
6822
6823 globals = elf32_arm_hash_table (link_info);
6824 if (globals == NULL)
6825 return;
6826
6827 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
6828 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
6829
6830 for (sec = abfd->sections; sec != NULL; sec = sec->next)
6831 {
6832 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
6833 elf32_vfp11_erratum_list *errnode = sec_data->erratumlist;
6834
6835 for (; errnode != NULL; errnode = errnode->next)
6836 {
6837 struct elf_link_hash_entry *myh;
6838 bfd_vma vma;
6839
6840 switch (errnode->type)
6841 {
6842 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
6843 case VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER:
6844 /* Find veneer symbol. */
6845 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
6846 errnode->u.b.veneer->u.v.id);
6847
6848 myh = elf_link_hash_lookup
6849 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
6850
6851 if (myh == NULL)
6852 (*_bfd_error_handler) (_("%B: unable to find VFP11 veneer "
6853 "`%s'"), abfd, tmp_name);
6854
6855 vma = myh->root.u.def.section->output_section->vma
6856 + myh->root.u.def.section->output_offset
6857 + myh->root.u.def.value;
6858
6859 errnode->u.b.veneer->vma = vma;
6860 break;
6861
6862 case VFP11_ERRATUM_ARM_VENEER:
6863 case VFP11_ERRATUM_THUMB_VENEER:
6864 /* Find return location. */
6865 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
6866 errnode->u.v.id);
6867
6868 myh = elf_link_hash_lookup
6869 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
6870
6871 if (myh == NULL)
6872 (*_bfd_error_handler) (_("%B: unable to find VFP11 veneer "
6873 "`%s'"), abfd, tmp_name);
6874
6875 vma = myh->root.u.def.section->output_section->vma
6876 + myh->root.u.def.section->output_offset
6877 + myh->root.u.def.value;
6878
6879 errnode->u.v.branch->vma = vma;
6880 break;
6881
6882 default:
6883 abort ();
6884 }
6885 }
6886 }
6887
6888 free (tmp_name);
6889 }
6890
6891
6892 /* Set target relocation values needed during linking. */
6893
6894 void
6895 bfd_elf32_arm_set_target_relocs (struct bfd *output_bfd,
6896 struct bfd_link_info *link_info,
6897 int target1_is_rel,
6898 char * target2_type,
6899 int fix_v4bx,
6900 int use_blx,
6901 bfd_arm_vfp11_fix vfp11_fix,
6902 int no_enum_warn, int no_wchar_warn,
6903 int pic_veneer, int fix_cortex_a8,
6904 int fix_arm1176)
6905 {
6906 struct elf32_arm_link_hash_table *globals;
6907
6908 globals = elf32_arm_hash_table (link_info);
6909 if (globals == NULL)
6910 return;
6911
6912 globals->target1_is_rel = target1_is_rel;
6913 if (strcmp (target2_type, "rel") == 0)
6914 globals->target2_reloc = R_ARM_REL32;
6915 else if (strcmp (target2_type, "abs") == 0)
6916 globals->target2_reloc = R_ARM_ABS32;
6917 else if (strcmp (target2_type, "got-rel") == 0)
6918 globals->target2_reloc = R_ARM_GOT_PREL;
6919 else
6920 {
6921 _bfd_error_handler (_("Invalid TARGET2 relocation type '%s'."),
6922 target2_type);
6923 }
6924 globals->fix_v4bx = fix_v4bx;
6925 globals->use_blx |= use_blx;
6926 globals->vfp11_fix = vfp11_fix;
6927 globals->pic_veneer = pic_veneer;
6928 globals->fix_cortex_a8 = fix_cortex_a8;
6929 globals->fix_arm1176 = fix_arm1176;
6930
6931 BFD_ASSERT (is_arm_elf (output_bfd));
6932 elf_arm_tdata (output_bfd)->no_enum_size_warning = no_enum_warn;
6933 elf_arm_tdata (output_bfd)->no_wchar_size_warning = no_wchar_warn;
6934 }
6935
6936 /* Replace the target offset of a Thumb bl or b.w instruction. */
6937
/* Rewrite the 32-bit Thumb BL/B.W at INSN (two little 16-bit halves)
   so that it branches by OFFSET bytes.  OFFSET must be halfword-aligned.
   The upper half receives the sign bit (bit 10) and imm10; the lower
   half receives imm11 plus the two high offset bits encoded relative to
   the sign bit (NOTE(review): this matches the Thumb-2 J1/J2 = NOT(I)
   XOR S encoding — confirm against the ARM ARM).  Bit 12 of the lower
   half is deliberately preserved by the 0x2fff mask.  */

static void
insert_thumb_branch (bfd *abfd, long int offset, bfd_byte *insn)
{
  bfd_vma upper;
  bfd_vma lower;
  int reloc_sign;

  BFD_ASSERT ((offset & 1) == 0);

  upper = bfd_get_16 (abfd, insn);
  lower = bfd_get_16 (abfd, insn + 2);
  reloc_sign = (offset < 0) ? 1 : 0;
  /* Upper half: keep everything above the low 11 bits, insert the
     offset's bits 12-21 and the sign bit.  */
  upper = (upper & ~(bfd_vma) 0x7ff)
	  | ((offset >> 12) & 0x3ff)
	  | (reloc_sign << 10);
  /* Lower half: insert imm11 and the transformed bits 22/23.  */
  lower = (lower & ~(bfd_vma) 0x2fff)
	  | (((!((offset >> 23) & 1)) ^ reloc_sign) << 13)
	  | (((!((offset >> 22) & 1)) ^ reloc_sign) << 11)
	  | ((offset >> 1) & 0x7ff);
  bfd_put_16 (abfd, upper, insn);
  bfd_put_16 (abfd, lower, insn + 2);
}
6960
6961 /* Thumb code calling an ARM function. */
6962
/* Handle a Thumb BL to the ARM function NAME: lazily emit the
   Thumb->ARM glue stub (BX PC; NOP; B target) into the glue section the
   first time it is needed, then retarget the original BL at HIT_DATA to
   branch to the stub.  An odd stored symbol value marks a stub not yet
   written.  Returns TRUE on success, FALSE if interworking is disabled
   for the target's bfd.  */

static int
elf32_thumb_to_arm_stub (struct bfd_link_info * info,
			 const char * name,
			 bfd * input_bfd,
			 bfd * output_bfd,
			 asection * input_section,
			 bfd_byte * hit_data,
			 asection * sym_sec,
			 bfd_vma offset,
			 bfd_signed_vma addend,
			 bfd_vma val,
			 char **error_message)
{
  asection * s = 0;
  bfd_vma my_offset;
  long int ret_offset;
  struct elf_link_hash_entry * myh;
  struct elf32_arm_link_hash_table * globals;

  myh = find_thumb_glue (info, name, error_message);
  if (myh == NULL)
    return FALSE;

  globals = elf32_arm_hash_table (info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  my_offset = myh->root.u.def.value;

  s = bfd_get_linker_section (globals->bfd_of_glue_owner,
			      THUMB2ARM_GLUE_SECTION_NAME);

  BFD_ASSERT (s != NULL);
  BFD_ASSERT (s->contents != NULL);
  BFD_ASSERT (s->output_section != NULL);

  /* The low bit set means the stub contents have not been emitted yet.  */
  if ((my_offset & 0x01) == 0x01)
    {
      if (sym_sec != NULL
	  && sym_sec->owner != NULL
	  && !INTERWORK_FLAG (sym_sec->owner))
	{
	  (*_bfd_error_handler)
	    (_("%B(%s): warning: interworking not enabled.\n"
	       " first occurrence: %B: Thumb call to ARM"),
	     sym_sec->owner, input_bfd, name);

	  return FALSE;
	}

      /* Clear the marker bit and write the stub instructions.  */
      --my_offset;
      myh->root.u.def.value = my_offset;

      put_thumb_insn (globals, output_bfd, (bfd_vma) t2a1_bx_pc_insn,
		      s->contents + my_offset);

      put_thumb_insn (globals, output_bfd, (bfd_vma) t2a2_noop_insn,
		      s->contents + my_offset + 2);

      ret_offset =
	/* Address of destination of the stub.  */
	((bfd_signed_vma) val)
	- ((bfd_signed_vma)
	   /* Offset from the start of the current section
	      to the start of the stubs.  */
	   (s->output_offset
	    /* Offset of the start of this stub from the start of the stubs.  */
	    + my_offset
	    /* Address of the start of the current section.  */
	    + s->output_section->vma)
	   /* The branch instruction is 4 bytes into the stub.  */
	   + 4
	   /* ARM branches work from the pc of the instruction + 8.  */
	   + 8);

      put_arm_insn (globals, output_bfd,
		    (bfd_vma) t2a3_b_insn | ((ret_offset >> 2) & 0x00FFFFFF),
		    s->contents + my_offset + 4);
    }

  BFD_ASSERT (my_offset <= globals->thumb_glue_size);

  /* Now go back and fix up the original BL insn to point to here.  */
  ret_offset =
    /* Address of where the stub is located.  */
    (s->output_section->vma + s->output_offset + my_offset)
    /* Address of where the BL is located.  */
    - (input_section->output_section->vma + input_section->output_offset
       + offset)
    /* Addend in the relocation.  */
    - addend
    /* Biassing for PC-relative addressing.  */
    - 8;

  insert_thumb_branch (input_bfd, ret_offset, hit_data - input_section->vma);

  return TRUE;
}
7061
7062 /* Populate an Arm to Thumb stub. Returns the stub symbol. */
7063
/* Populate an Arm to Thumb stub.  Lazily writes the ARM->Thumb glue
   for the Thumb function NAME into section S the first time it is
   needed (an odd stored symbol value marks an unwritten stub), choosing
   a PIC sequence, a BLX sequence, or the classic LDR/BX sequence
   depending on the link type and use_blx.  VAL is the Thumb target
   address.  Returns the stub symbol, or NULL if no glue entry exists.  */

static struct elf_link_hash_entry *
elf32_arm_create_thumb_stub (struct bfd_link_info * info,
			     const char * name,
			     bfd * input_bfd,
			     bfd * output_bfd,
			     asection * sym_sec,
			     bfd_vma val,
			     asection * s,
			     char ** error_message)
{
  bfd_vma my_offset;
  long int ret_offset;
  struct elf_link_hash_entry * myh;
  struct elf32_arm_link_hash_table * globals;

  myh = find_arm_glue (info, name, error_message);
  if (myh == NULL)
    return NULL;

  globals = elf32_arm_hash_table (info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  my_offset = myh->root.u.def.value;

  /* The low bit set means the stub contents have not been emitted yet.  */
  if ((my_offset & 0x01) == 0x01)
    {
      if (sym_sec != NULL
	  && sym_sec->owner != NULL
	  && !INTERWORK_FLAG (sym_sec->owner))
	{
	  /* Warn, but still emit the stub (unlike the Thumb->ARM case).  */
	  (*_bfd_error_handler)
	    (_("%B(%s): warning: interworking not enabled.\n"
	       " first occurrence: %B: arm call to thumb"),
	     sym_sec->owner, input_bfd, name);
	}

      --my_offset;
      myh->root.u.def.value = my_offset;

      if (info->shared || globals->root.is_relocatable_executable
	  || globals->pic_veneer)
	{
	  /* For relocatable objects we can't use absolute addresses,
	     so construct the address from a relative offset.  */
	  /* TODO: If the offset is small it's probably worth
	     constructing the address with adds.  */
	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t1p_ldr_insn,
			s->contents + my_offset);
	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t2p_add_pc_insn,
			s->contents + my_offset + 4);
	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t3p_bx_r12_insn,
			s->contents + my_offset + 8);
	  /* Adjust the offset by 4 for the position of the add,
	     and 8 for the pipeline offset.  */
	  ret_offset = (val - (s->output_offset
			       + s->output_section->vma
			       + my_offset + 12))
		       | 1;
	  bfd_put_32 (output_bfd, ret_offset,
		      s->contents + my_offset + 12);
	}
      else if (globals->use_blx)
	{
	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t1v5_ldr_insn,
			s->contents + my_offset);

	  /* It's a thumb address.  Add the low order bit.  */
	  bfd_put_32 (output_bfd, val | a2t2v5_func_addr_insn,
		      s->contents + my_offset + 4);
	}
      else
	{
	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t1_ldr_insn,
			s->contents + my_offset);

	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t2_bx_r12_insn,
			s->contents + my_offset + 4);

	  /* It's a thumb address.  Add the low order bit.  */
	  bfd_put_32 (output_bfd, val | a2t3_func_addr_insn,
		      s->contents + my_offset + 8);

	  my_offset += 12;
	}
    }

  BFD_ASSERT (my_offset <= globals->arm_glue_size);

  return myh;
}
7155
/* Arm code calling a Thumb function.  Redirect the branch at HIT_DATA
   in INPUT_SECTION so that it targets the Arm-to-Thumb glue stub for
   NAME, emitting the stub first if it has not been written yet.  VAL
   is the address of the Thumb target; OFFSET and ADDEND locate the
   original branch within the section.  Returns TRUE on success,
   FALSE if the glue symbol cannot be found.  */

static int
elf32_arm_to_thumb_stub (struct bfd_link_info * info,
			 const char * name,
			 bfd * input_bfd,
			 bfd * output_bfd,
			 asection * input_section,
			 bfd_byte * hit_data,
			 asection * sym_sec,
			 bfd_vma offset,
			 bfd_signed_vma addend,
			 bfd_vma val,
			 char **error_message)
{
  unsigned long int tmp;
  bfd_vma my_offset;
  asection * s;
  long int ret_offset;
  struct elf_link_hash_entry * myh;
  struct elf32_arm_link_hash_table * globals;

  globals = elf32_arm_hash_table (info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  s = bfd_get_linker_section (globals->bfd_of_glue_owner,
			      ARM2THUMB_GLUE_SECTION_NAME);
  BFD_ASSERT (s != NULL);
  BFD_ASSERT (s->contents != NULL);
  BFD_ASSERT (s->output_section != NULL);

  /* Make sure the stub for NAME exists and has been populated.  */
  myh = elf32_arm_create_thumb_stub (info, name, input_bfd, output_bfd,
				     sym_sec, val, s, error_message);
  if (!myh)
    return FALSE;

  my_offset = myh->root.u.def.value;
  tmp = bfd_get_32 (input_bfd, hit_data);
  /* Keep only the condition/opcode byte of the original branch; the
     24-bit offset field is recomputed below to reach the stub.  */
  tmp = tmp & 0xFF000000;

  /* Somehow these are both 4 too far, so subtract 8.  */
  ret_offset = (s->output_offset
		+ my_offset
		+ s->output_section->vma
		- (input_section->output_offset
		   + input_section->output_section->vma
		   + offset + addend)
		- 8);

  tmp = tmp | ((ret_offset >> 2) & 0x00FFFFFF);

  /* NOTE(review): HIT_DATA appears to carry INPUT_SECTION's vma folded
     in by the caller; subtracting it recovers the position in the
     contents buffer -- confirm against the call site (PR ld/15302).  */
  bfd_put_32 (output_bfd, (bfd_vma) tmp, hit_data - input_section->vma);

  return TRUE;
}
7212
/* Populate Arm stub for an exported Thumb function.  Called for each
   hash-table symbol via elf_link_hash_traverse; INF is really the
   struct bfd_link_info for the link.  Always returns TRUE so the
   traversal visits every symbol.  */

static bfd_boolean
elf32_arm_to_thumb_export_stub (struct elf_link_hash_entry *h, void * inf)
{
  struct bfd_link_info * info = (struct bfd_link_info *) inf;
  asection * s;
  struct elf_link_hash_entry * myh;
  struct elf32_arm_link_hash_entry *eh;
  struct elf32_arm_link_hash_table * globals;
  asection *sec;
  bfd_vma val;
  char *error_message;

  eh = elf32_arm_hash_entry (h);
  /* Allocate stubs for exported Thumb functions on v4t.  Symbols with
     no glue recorded need no stub.  */
  if (eh->export_glue == NULL)
    return TRUE;

  globals = elf32_arm_hash_table (info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  s = bfd_get_linker_section (globals->bfd_of_glue_owner,
			      ARM2THUMB_GLUE_SECTION_NAME);
  BFD_ASSERT (s != NULL);
  BFD_ASSERT (s->contents != NULL);
  BFD_ASSERT (s->output_section != NULL);

  sec = eh->export_glue->root.u.def.section;

  BFD_ASSERT (sec->output_section != NULL);

  /* Absolute address of the glue previously reserved for this
     symbol.  */
  val = eh->export_glue->root.u.def.value + sec->output_offset
	+ sec->output_section->vma;

  myh = elf32_arm_create_thumb_stub (info, h->root.root.string,
				     h->root.u.def.section->owner,
				     globals->obfd, sec, val, s,
				     &error_message);
  BFD_ASSERT (myh);
  return TRUE;
}
7256
/* Populate ARMv4 BX veneers.  Returns the absolute address of the
   veneer for register REG.  The veneer's section offset was recorded
   earlier in globals->bx_glue_offset[REG], with two low flag bits:
   bit 1 marks an allocated slot, bit 0 marks a slot whose code has
   already been written.  */

static bfd_vma
elf32_arm_bx_glue (struct bfd_link_info * info, int reg)
{
  bfd_byte *p;
  bfd_vma glue_addr;
  asection *s;
  struct elf32_arm_link_hash_table *globals;

  globals = elf32_arm_hash_table (info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  s = bfd_get_linker_section (globals->bfd_of_glue_owner,
			      ARM_BX_GLUE_SECTION_NAME);
  BFD_ASSERT (s != NULL);
  BFD_ASSERT (s->contents != NULL);
  BFD_ASSERT (s->output_section != NULL);

  /* The slot must have been allocated during the earlier scan.  */
  BFD_ASSERT (globals->bx_glue_offset[reg] & 2);

  /* Strip the flag bits to recover the section offset proper.  */
  glue_addr = globals->bx_glue_offset[reg] & ~(bfd_vma)3;

  /* Emit the three-instruction veneer (tst/moveq/bx templates, with
     REG substituted into each) only the first time through.  */
  if ((globals->bx_glue_offset[reg] & 1) == 0)
    {
      p = s->contents + glue_addr;
      bfd_put_32 (globals->obfd, armbx1_tst_insn + (reg << 16), p);
      bfd_put_32 (globals->obfd, armbx2_moveq_insn + reg, p + 4);
      bfd_put_32 (globals->obfd, armbx3_bx_insn + reg, p + 8);
      globals->bx_glue_offset[reg] |= 1;
    }

  return glue_addr + s->output_section->vma + s->output_offset;
}
7292
7293 /* Generate Arm stubs for exported Thumb symbols. */
7294 static void
7295 elf32_arm_begin_write_processing (bfd *abfd ATTRIBUTE_UNUSED,
7296 struct bfd_link_info *link_info)
7297 {
7298 struct elf32_arm_link_hash_table * globals;
7299
7300 if (link_info == NULL)
7301 /* Ignore this if we are not called by the ELF backend linker. */
7302 return;
7303
7304 globals = elf32_arm_hash_table (link_info);
7305 if (globals == NULL)
7306 return;
7307
7308 /* If blx is available then exported Thumb symbols are OK and there is
7309 nothing to do. */
7310 if (globals->use_blx)
7311 return;
7312
7313 elf_link_hash_traverse (&globals->root, elf32_arm_to_thumb_export_stub,
7314 link_info);
7315 }
7316
7317 /* Reserve space for COUNT dynamic relocations in relocation selection
7318 SRELOC. */
7319
7320 static void
7321 elf32_arm_allocate_dynrelocs (struct bfd_link_info *info, asection *sreloc,
7322 bfd_size_type count)
7323 {
7324 struct elf32_arm_link_hash_table *htab;
7325
7326 htab = elf32_arm_hash_table (info);
7327 BFD_ASSERT (htab->root.dynamic_sections_created);
7328 if (sreloc == NULL)
7329 abort ();
7330 sreloc->size += RELOC_SIZE (htab) * count;
7331 }
7332
7333 /* Reserve space for COUNT R_ARM_IRELATIVE relocations. If the link is
7334 dynamic, the relocations should go in SRELOC, otherwise they should
7335 go in the special .rel.iplt section. */
7336
7337 static void
7338 elf32_arm_allocate_irelocs (struct bfd_link_info *info, asection *sreloc,
7339 bfd_size_type count)
7340 {
7341 struct elf32_arm_link_hash_table *htab;
7342
7343 htab = elf32_arm_hash_table (info);
7344 if (!htab->root.dynamic_sections_created)
7345 htab->root.irelplt->size += RELOC_SIZE (htab) * count;
7346 else
7347 {
7348 BFD_ASSERT (sreloc != NULL);
7349 sreloc->size += RELOC_SIZE (htab) * count;
7350 }
7351 }
7352
7353 /* Add relocation REL to the end of relocation section SRELOC. */
7354
7355 static void
7356 elf32_arm_add_dynreloc (bfd *output_bfd, struct bfd_link_info *info,
7357 asection *sreloc, Elf_Internal_Rela *rel)
7358 {
7359 bfd_byte *loc;
7360 struct elf32_arm_link_hash_table *htab;
7361
7362 htab = elf32_arm_hash_table (info);
7363 if (!htab->root.dynamic_sections_created
7364 && ELF32_R_TYPE (rel->r_info) == R_ARM_IRELATIVE)
7365 sreloc = htab->root.irelplt;
7366 if (sreloc == NULL)
7367 abort ();
7368 loc = sreloc->contents;
7369 loc += sreloc->reloc_count++ * RELOC_SIZE (htab);
7370 if (sreloc->reloc_count * RELOC_SIZE (htab) > sreloc->size)
7371 abort ();
7372 SWAP_RELOC_OUT (htab) (output_bfd, rel, loc);
7373 }
7374
/* Allocate room for a PLT entry described by ROOT_PLT and ARM_PLT.
   IS_IPLT_ENTRY says whether the entry belongs to .iplt rather than
   to .plt.  On return ROOT_PLT->offset holds the entry's offset
   within its section, and (except on Symbian) ARM_PLT->got_offset
   holds the matching .got.plt offset.  */

static void
elf32_arm_allocate_plt_entry (struct bfd_link_info *info,
			      bfd_boolean is_iplt_entry,
			      union gotplt_union *root_plt,
			      struct arm_plt_info *arm_plt)
{
  struct elf32_arm_link_hash_table *htab;
  asection *splt;
  asection *sgotplt;

  htab = elf32_arm_hash_table (info);

  if (is_iplt_entry)
    {
      splt = htab->root.iplt;
      sgotplt = htab->root.igotplt;

      /* NaCl uses a special first entry in .iplt too.  */
      if (htab->nacl_p && splt->size == 0)
	splt->size += htab->plt_header_size;

      /* Allocate room for an R_ARM_IRELATIVE relocation in .rel.iplt.  */
      elf32_arm_allocate_irelocs (info, htab->root.irelplt, 1);
    }
  else
    {
      splt = htab->root.splt;
      sgotplt = htab->root.sgotplt;

      /* Allocate room for an R_JUMP_SLOT relocation in .rel.plt.  */
      elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1);

      /* If this is the first .plt entry, make room for the special
	 first entry.  */
      if (splt->size == 0)
	splt->size += htab->plt_header_size;
    }

  /* Allocate the PLT entry itself, including any leading Thumb stub.
     The recorded offset points at the ARM entry, after the stub.  */
  if (elf32_arm_plt_needs_thumb_stub_p (info, arm_plt))
    splt->size += PLT_THUMB_STUB_SIZE;
  root_plt->offset = splt->size;
  splt->size += htab->plt_entry_size;

  if (!htab->symbian_p)
    {
      /* We also need to make an entry in the .got.plt section, which
	 will be placed in the .got section by the linker script.
	 NOTE(review): the 8 * num_tls_desc term presumably discounts
	 the 8-byte TLS descriptor entries sharing the section --
	 confirm against the TLS-descriptor allocation code.  */
      arm_plt->got_offset = sgotplt->size - 8 * htab->num_tls_desc;
      sgotplt->size += 4;
    }
}
7431
7432 static bfd_vma
7433 arm_movw_immediate (bfd_vma value)
7434 {
7435 return (value & 0x00000fff) | ((value & 0x0000f000) << 4);
7436 }
7437
7438 static bfd_vma
7439 arm_movt_immediate (bfd_vma value)
7440 {
7441 return ((value & 0x0fff0000) >> 16) | ((value & 0xf0000000) >> 12);
7442 }
7443
/* Fill in a PLT entry and its associated GOT slot.  If DYNINDX == -1,
   the entry lives in .iplt and resolves to (*SYM_VALUE)().
   Otherwise, DYNINDX is the index of the symbol in the dynamic
   symbol table and SYM_VALUE is undefined.

   ROOT_PLT points to the offset of the PLT entry from the start of its
   section (.iplt or .plt).  ARM_PLT points to the symbol's ARM-specific
   bookkeeping information.

   The entry layout differs per platform: Symbian, VxWorks (shared and
   executable), NaCl, and the generic ARM PLT are all handled below.
   The matching dynamic relocation (JUMP_SLOT or IRELATIVE) is written
   at the end.  */

static void
elf32_arm_populate_plt_entry (bfd *output_bfd, struct bfd_link_info *info,
			      union gotplt_union *root_plt,
			      struct arm_plt_info *arm_plt,
			      int dynindx, bfd_vma sym_value)
{
  struct elf32_arm_link_hash_table *htab;
  asection *sgot;
  asection *splt;
  asection *srel;
  bfd_byte *loc;
  bfd_vma plt_index;
  Elf_Internal_Rela rel;
  bfd_vma plt_header_size;
  bfd_vma got_header_size;

  htab = elf32_arm_hash_table (info);

  /* Pick the appropriate sections and sizes.  */
  if (dynindx == -1)
    {
      splt = htab->root.iplt;
      sgot = htab->root.igotplt;
      srel = htab->root.irelplt;

      /* There are no reserved entries in .igot.plt, and no special
	 first entry in .iplt.  */
      got_header_size = 0;
      plt_header_size = 0;
    }
  else
    {
      splt = htab->root.splt;
      sgot = htab->root.sgotplt;
      srel = htab->root.srelplt;

      got_header_size = get_elf_backend_data (output_bfd)->got_header_size;
      plt_header_size = htab->plt_header_size;
    }
  BFD_ASSERT (splt != NULL && srel != NULL);

  /* Fill in the entry in the procedure linkage table.  */
  if (htab->symbian_p)
    {
      /* Symbian entries are two words and use GLOB_DAT instead of
	 JUMP_SLOT; no GOT slot is written for them.  */
      BFD_ASSERT (dynindx >= 0);
      put_arm_insn (htab, output_bfd,
		    elf32_arm_symbian_plt_entry[0],
		    splt->contents + root_plt->offset);
      bfd_put_32 (output_bfd,
		  elf32_arm_symbian_plt_entry[1],
		  splt->contents + root_plt->offset + 4);

      /* Fill in the entry in the .rel.plt section.  */
      rel.r_offset = (splt->output_section->vma
		      + splt->output_offset
		      + root_plt->offset + 4);
      rel.r_info = ELF32_R_INFO (dynindx, R_ARM_GLOB_DAT);

      /* Get the index in the procedure linkage table which
	 corresponds to this symbol.  This is the index of this symbol
	 in all the symbols for which we are making plt entries.  The
	 first entry in the procedure linkage table is reserved.  */
      plt_index = ((root_plt->offset - plt_header_size)
		   / htab->plt_entry_size);
    }
  else
    {
      bfd_vma got_offset, got_address, plt_address;
      bfd_vma got_displacement, initial_got_entry;
      bfd_byte * ptr;

      BFD_ASSERT (sgot != NULL);

      /* Get the offset into the .(i)got.plt table of the entry that
	 corresponds to this function.  The low bit of got_offset is
	 used elsewhere as a flag, so mask it off here.  */
      got_offset = (arm_plt->got_offset & -2);

      /* Get the index in the procedure linkage table which
	 corresponds to this symbol.  This is the index of this symbol
	 in all the symbols for which we are making plt entries.
	 After the reserved .got.plt entries, all symbols appear in
	 the same order as in .plt.  */
      plt_index = (got_offset - got_header_size) / 4;

      /* Calculate the address of the GOT entry.  */
      got_address = (sgot->output_section->vma
		     + sgot->output_offset
		     + got_offset);

      /* ...and the address of the PLT entry.  */
      plt_address = (splt->output_section->vma
		     + splt->output_offset
		     + root_plt->offset);

      ptr = splt->contents + root_plt->offset;
      if (htab->vxworks_p && info->shared)
	{
	  /* VxWorks shared entry: slots 2 and 5 of the template carry
	     data words (GOT offset and .rel.plt offset); the other
	     slots are instructions.  */
	  unsigned int i;
	  bfd_vma val;

	  for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
	    {
	      val = elf32_arm_vxworks_shared_plt_entry[i];
	      if (i == 2)
		val |= got_address - sgot->output_section->vma;
	      if (i == 5)
		val |= plt_index * RELOC_SIZE (htab);
	      if (i == 2 || i == 5)
		bfd_put_32 (output_bfd, val, ptr);
	      else
		put_arm_insn (htab, output_bfd, val, ptr);
	    }
	}
      else if (htab->vxworks_p)
	{
	  /* VxWorks executable entry: slot 2 holds the absolute GOT
	     address, slot 4 gets a branch offset patched in, slot 5
	     holds the .rel.plt offset.  */
	  unsigned int i;
	  bfd_vma val;

	  for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
	    {
	      val = elf32_arm_vxworks_exec_plt_entry[i];
	      if (i == 2)
		val |= got_address;
	      if (i == 4)
		val |= 0xffffff & -((root_plt->offset + i * 4 + 8) >> 2);
	      if (i == 5)
		val |= plt_index * RELOC_SIZE (htab);
	      if (i == 2 || i == 5)
		bfd_put_32 (output_bfd, val, ptr);
	      else
		put_arm_insn (htab, output_bfd, val, ptr);
	    }

	  /* Two unloaded relocations per entry; skip past the first
	     pair's worth for preceding entries.  */
	  loc = (htab->srelplt2->contents
		 + (plt_index * 2 + 1) * RELOC_SIZE (htab));

	  /* Create the .rela.plt.unloaded R_ARM_ABS32 relocation
	     referencing the GOT for this PLT entry.  */
	  rel.r_offset = plt_address + 8;
	  rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
	  rel.r_addend = got_offset;
	  SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
	  loc += RELOC_SIZE (htab);

	  /* Create the R_ARM_ABS32 relocation referencing the
	     beginning of the PLT for this GOT entry.  */
	  rel.r_offset = got_address;
	  rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
	  rel.r_addend = 0;
	  SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
	}
      else if (htab->nacl_p)
	{
	  /* Calculate the displacement between the PLT slot and the
	     common tail that's part of the special initial PLT slot.  */
	  int32_t tail_displacement
	    = ((splt->output_section->vma + splt->output_offset
		+ ARM_NACL_PLT_TAIL_OFFSET)
	       - (plt_address + htab->plt_entry_size + 4));
	  BFD_ASSERT ((tail_displacement & 3) == 0);
	  tail_displacement >>= 2;

	  /* The displacement must fit the 24-bit branch field.  */
	  BFD_ASSERT ((tail_displacement & 0xff000000) == 0
		      || (-tail_displacement & 0xff000000) == 0);

	  /* Calculate the displacement between the PLT slot and the entry
	     in the GOT.  The offset accounts for the value produced by
	     adding to pc in the penultimate instruction of the PLT stub.  */
	  got_displacement = (got_address
			      - (plt_address + htab->plt_entry_size));

	  /* NaCl does not support interworking at all.  */
	  BFD_ASSERT (!elf32_arm_plt_needs_thumb_stub_p (info, arm_plt));

	  /* movw/movt build the GOT displacement; the final word is a
	     branch to the common tail.  */
	  put_arm_insn (htab, output_bfd,
			elf32_arm_nacl_plt_entry[0]
			| arm_movw_immediate (got_displacement),
			ptr + 0);
	  put_arm_insn (htab, output_bfd,
			elf32_arm_nacl_plt_entry[1]
			| arm_movt_immediate (got_displacement),
			ptr + 4);
	  put_arm_insn (htab, output_bfd,
			elf32_arm_nacl_plt_entry[2],
			ptr + 8);
	  put_arm_insn (htab, output_bfd,
			elf32_arm_nacl_plt_entry[3]
			| (tail_displacement & 0x00ffffff),
			ptr + 12);
	}
      else
	{
	  /* Calculate the displacement between the PLT slot and the
	     entry in the GOT.  The eight-byte offset accounts for the
	     value produced by adding to pc in the first instruction
	     of the PLT stub.  */
	  got_displacement = got_address - (plt_address + 8);

	  BFD_ASSERT ((got_displacement & 0xf0000000) == 0);

	  /* The optional Thumb stub sits in the four bytes before the
	     ARM entry proper.  */
	  if (elf32_arm_plt_needs_thumb_stub_p (info, arm_plt))
	    {
	      put_thumb_insn (htab, output_bfd,
			      elf32_arm_plt_thumb_stub[0], ptr - 4);
	      put_thumb_insn (htab, output_bfd,
			      elf32_arm_plt_thumb_stub[1], ptr - 2);
	    }

	  /* The GOT displacement is split 8/8/12 bits across the three
	     instructions of the standard entry.  */
	  put_arm_insn (htab, output_bfd,
			elf32_arm_plt_entry[0]
			| ((got_displacement & 0x0ff00000) >> 20),
			ptr + 0);
	  put_arm_insn (htab, output_bfd,
			elf32_arm_plt_entry[1]
			| ((got_displacement & 0x000ff000) >> 12),
			ptr+ 4);
	  put_arm_insn (htab, output_bfd,
			elf32_arm_plt_entry[2]
			| (got_displacement & 0x00000fff),
			ptr + 8);
#ifdef FOUR_WORD_PLT
	  bfd_put_32 (output_bfd, elf32_arm_plt_entry[3], ptr + 12);
#endif
	}

      /* Fill in the entry in the .rel(a).(i)plt section.  */
      rel.r_offset = got_address;
      rel.r_addend = 0;
      if (dynindx == -1)
	{
	  /* .igot.plt entries use IRELATIVE relocations against SYM_VALUE.
	     The dynamic linker or static executable then calls SYM_VALUE
	     to determine the correct run-time value of the .igot.plt entry.  */
	  rel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
	  initial_got_entry = sym_value;
	}
      else
	{
	  rel.r_info = ELF32_R_INFO (dynindx, R_ARM_JUMP_SLOT);
	  initial_got_entry = (splt->output_section->vma
			       + splt->output_offset);
	}

      /* Fill in the entry in the global offset table.  */
      bfd_put_32 (output_bfd, initial_got_entry,
		  sgot->contents + got_offset);
    }

  if (dynindx == -1)
    elf32_arm_add_dynreloc (output_bfd, info, srel, &rel);
  else
    {
      /* Dynamic entries are written at a fixed index rather than
	 appended, since finish_dynamic_symbol visits symbols in an
	 arbitrary order.  */
      loc = srel->contents + plt_index * RELOC_SIZE (htab);
      SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
    }
}
7709
7710 /* Some relocations map to different relocations depending on the
7711 target. Return the real relocation. */
7712
7713 static int
7714 arm_real_reloc_type (struct elf32_arm_link_hash_table * globals,
7715 int r_type)
7716 {
7717 switch (r_type)
7718 {
7719 case R_ARM_TARGET1:
7720 if (globals->target1_is_rel)
7721 return R_ARM_REL32;
7722 else
7723 return R_ARM_ABS32;
7724
7725 case R_ARM_TARGET2:
7726 return globals->target2_reloc;
7727
7728 default:
7729 return r_type;
7730 }
7731 }
7732
7733 /* Return the base VMA address which should be subtracted from real addresses
7734 when resolving @dtpoff relocation.
7735 This is PT_TLS segment p_vaddr. */
7736
7737 static bfd_vma
7738 dtpoff_base (struct bfd_link_info *info)
7739 {
7740 /* If tls_sec is NULL, we should have signalled an error already. */
7741 if (elf_hash_table (info)->tls_sec == NULL)
7742 return 0;
7743 return elf_hash_table (info)->tls_sec->vma;
7744 }
7745
7746 /* Return the relocation value for @tpoff relocation
7747 if STT_TLS virtual address is ADDRESS. */
7748
7749 static bfd_vma
7750 tpoff (struct bfd_link_info *info, bfd_vma address)
7751 {
7752 struct elf_link_hash_table *htab = elf_hash_table (info);
7753 bfd_vma base;
7754
7755 /* If tls_sec is NULL, we should have signalled an error already. */
7756 if (htab->tls_sec == NULL)
7757 return 0;
7758 base = align_power ((bfd_vma) TCB_SIZE, htab->tls_sec->alignment_power);
7759 return address - htab->tls_sec->vma + base;
7760 }
7761
7762 /* Perform an R_ARM_ABS12 relocation on the field pointed to by DATA.
7763 VALUE is the relocation value. */
7764
7765 static bfd_reloc_status_type
7766 elf32_arm_abs12_reloc (bfd *abfd, void *data, bfd_vma value)
7767 {
7768 if (value > 0xfff)
7769 return bfd_reloc_overflow;
7770
7771 value |= bfd_get_32 (abfd, data) & 0xfffff000;
7772 bfd_put_32 (abfd, value, data);
7773 return bfd_reloc_ok;
7774 }
7775
/* Handle TLS relaxations.  Relaxing is possible for symbols that use
   R_ARM_GOTDESC, R_ARM_{,THM_}TLS_CALL or
   R_ARM_{,THM_}TLS_DESCSEQ relocations, during a static link.

   IS_LOCAL is nonzero when the symbol binds locally, in which case
   the descriptor sequence can be reduced further than for a global.

   Return bfd_reloc_ok if we're done, bfd_reloc_continue if the caller
   is to then call final_link_relocate.  Return other values in the
   case of error.

   FIXME:When --emit-relocs is in effect, we'll emit relocs describing
   the pre-relaxed code.  It would be nice if the relocs were updated
   to match the optimization.  */

static bfd_reloc_status_type
elf32_arm_tls_relax (struct elf32_arm_link_hash_table *globals,
		     bfd *input_bfd, asection *input_sec, bfd_byte *contents,
		     Elf_Internal_Rela *rel, unsigned long is_local)
{
  unsigned long insn;

  switch (ELF32_R_TYPE (rel->r_info))
    {
    default:
      return bfd_reloc_notsupported;

    case R_ARM_TLS_GOTDESC:
      /* For a local symbol the stored value becomes 0; otherwise bias
	 the existing value back by the PC-read offset (Thumb reads
	 PC as .+4 with bit 0 set, hence -5; ARM as .+8).  */
      if (is_local)
	insn = 0;
      else
	{
	  insn = bfd_get_32 (input_bfd, contents + rel->r_offset);
	  if (insn & 1)
	    insn -= 5; /* THUMB */
	  else
	    insn -= 8; /* ARM */
	}
      bfd_put_32 (input_bfd, insn, contents + rel->r_offset);
      return bfd_reloc_continue;

    case R_ARM_THM_TLS_DESCSEQ:
      /* Thumb insn.  Rewrite each instruction of the descriptor
	 call sequence in place, depending on which one this
	 relocation covers.  */
      insn = bfd_get_16 (input_bfd, contents + rel->r_offset);
      if ((insn & 0xff78) == 0x4478)	  /* add rx, pc */
	{
	  if (is_local)
	    /* nop */
	    bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
	}
      else if ((insn & 0xffc0) == 0x6840)  /* ldr rx,[ry,#4] */
	{
	  if (is_local)
	    /* nop */
	    bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
	  else
	    /* ldr rx,[ry] */
	    bfd_put_16 (input_bfd, insn & 0xf83f, contents + rel->r_offset);
	}
      else if ((insn & 0xff87) == 0x4780)  /* blx rx */
	{
	  if (is_local)
	    /* nop */
	    bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
	  else
	    /* mov r0, rx */
	    bfd_put_16 (input_bfd, 0x4600 | (insn & 0x78),
			contents + rel->r_offset);
	}
      else
	{
	  /* Anything else in the sequence is unexpected; report it.  */
	  if ((insn & 0xf000) == 0xf000 || (insn & 0xf800) == 0xe800)
	    /* It's a 32 bit instruction, fetch the rest of it for
	       error generation.  */
	    insn = (insn << 16)
	      | bfd_get_16 (input_bfd, contents + rel->r_offset + 2);
	  (*_bfd_error_handler)
	    (_("%B(%A+0x%lx):unexpected Thumb instruction '0x%x' in TLS trampoline"),
	     input_bfd, input_sec, (unsigned long)rel->r_offset, insn);
	  return bfd_reloc_notsupported;
	}
      break;

    case R_ARM_TLS_DESCSEQ:
      /* arm insn.  Same rewriting as above, for the ARM encoding of
	 the descriptor sequence.  */
      insn = bfd_get_32 (input_bfd, contents + rel->r_offset);
      if ((insn & 0xffff0ff0) == 0xe08f0000) /* add rx,pc,ry */
	{
	  if (is_local)
	    /* mov rx, ry */
	    bfd_put_32 (input_bfd, 0xe1a00000 | (insn & 0xffff),
			contents + rel->r_offset);
	}
      else if ((insn & 0xfff00fff) == 0xe5900004) /* ldr rx,[ry,#4]*/
	{
	  if (is_local)
	    /* nop */
	    bfd_put_32 (input_bfd, 0xe1a00000, contents + rel->r_offset);
	  else
	    /* ldr rx,[ry] */
	    bfd_put_32 (input_bfd, insn & 0xfffff000,
			contents + rel->r_offset);
	}
      else if ((insn & 0xfffffff0) == 0xe12fff30) /* blx rx */
	{
	  if (is_local)
	    /* nop */
	    bfd_put_32 (input_bfd, 0xe1a00000, contents + rel->r_offset);
	  else
	    /* mov r0, rx */
	    bfd_put_32 (input_bfd, 0xe1a00000 | (insn & 0xf),
			contents + rel->r_offset);
	}
      else
	{
	  (*_bfd_error_handler)
	    (_("%B(%A+0x%lx):unexpected ARM instruction '0x%x' in TLS trampoline"),
	     input_bfd, input_sec, (unsigned long)rel->r_offset, insn);
	  return bfd_reloc_notsupported;
	}
      break;

    case R_ARM_TLS_CALL:
      /* GD->IE relaxation, turn the instruction into 'nop' or
	 'ldr r0, [pc,r0]' */
      insn = is_local ? 0xe1a00000 : 0xe79f0000;
      bfd_put_32 (input_bfd, insn, contents + rel->r_offset);
      break;

    case R_ARM_THM_TLS_CALL:
      /* GD->IE relaxation */
      if (!is_local)
	/* add r0,pc; ldr r0, [r0] */
	insn = 0x44786800;
      else if (arch_has_thumb2_nop (globals))
	/* nop.w */
	insn = 0xf3af8000;
      else
	/* nop; nop */
	insn = 0xbf00bf00;

      /* Write the replacement as two halfwords, high half first.  */
      bfd_put_16 (input_bfd, insn >> 16, contents + rel->r_offset);
      bfd_put_16 (input_bfd, insn & 0xffff, contents + rel->r_offset + 2);
      break;
    }
  return bfd_reloc_ok;
}
7920
7921 /* For a given value of n, calculate the value of G_n as required to
7922 deal with group relocations. We return it in the form of an
7923 encoded constant-and-rotation, together with the final residual. If n is
7924 specified as less than zero, then final_residual is filled with the
7925 input value and no further action is performed. */
7926
7927 static bfd_vma
7928 calculate_group_reloc_mask (bfd_vma value, int n, bfd_vma *final_residual)
7929 {
7930 int current_n;
7931 bfd_vma g_n;
7932 bfd_vma encoded_g_n = 0;
7933 bfd_vma residual = value; /* Also known as Y_n. */
7934
7935 for (current_n = 0; current_n <= n; current_n++)
7936 {
7937 int shift;
7938
7939 /* Calculate which part of the value to mask. */
7940 if (residual == 0)
7941 shift = 0;
7942 else
7943 {
7944 int msb;
7945
7946 /* Determine the most significant bit in the residual and
7947 align the resulting value to a 2-bit boundary. */
7948 for (msb = 30; msb >= 0; msb -= 2)
7949 if (residual & (3 << msb))
7950 break;
7951
7952 /* The desired shift is now (msb - 6), or zero, whichever
7953 is the greater. */
7954 shift = msb - 6;
7955 if (shift < 0)
7956 shift = 0;
7957 }
7958
7959 /* Calculate g_n in 32-bit as well as encoded constant+rotation form. */
7960 g_n = residual & (0xff << shift);
7961 encoded_g_n = (g_n >> shift)
7962 | ((g_n <= 0xff ? 0 : (32 - shift) / 2) << 8);
7963
7964 /* Calculate the residual for the next time around. */
7965 residual &= ~g_n;
7966 }
7967
7968 *final_residual = residual;
7969
7970 return encoded_g_n;
7971 }
7972
7973 /* Given an ARM instruction, determine whether it is an ADD or a SUB.
7974 Returns 1 if it is an ADD, -1 if it is a SUB, and 0 otherwise. */
7975
7976 static int
7977 identify_add_or_sub (bfd_vma insn)
7978 {
7979 int opcode = insn & 0x1e00000;
7980
7981 if (opcode == 1 << 23) /* ADD */
7982 return 1;
7983
7984 if (opcode == 1 << 22) /* SUB */
7985 return -1;
7986
7987 return 0;
7988 }
7989
7990 /* Perform a relocation as part of a final link. */
7991
7992 static bfd_reloc_status_type
7993 elf32_arm_final_link_relocate (reloc_howto_type * howto,
7994 bfd * input_bfd,
7995 bfd * output_bfd,
7996 asection * input_section,
7997 bfd_byte * contents,
7998 Elf_Internal_Rela * rel,
7999 bfd_vma value,
8000 struct bfd_link_info * info,
8001 asection * sym_sec,
8002 const char * sym_name,
8003 unsigned char st_type,
8004 enum arm_st_branch_type branch_type,
8005 struct elf_link_hash_entry * h,
8006 bfd_boolean * unresolved_reloc_p,
8007 char ** error_message)
8008 {
8009 unsigned long r_type = howto->type;
8010 unsigned long r_symndx;
8011 bfd_byte * hit_data = contents + rel->r_offset;
8012 bfd_vma * local_got_offsets;
8013 bfd_vma * local_tlsdesc_gotents;
8014 asection * sgot;
8015 asection * splt;
8016 asection * sreloc = NULL;
8017 asection * srelgot;
8018 bfd_vma addend;
8019 bfd_signed_vma signed_addend;
8020 unsigned char dynreloc_st_type;
8021 bfd_vma dynreloc_value;
8022 struct elf32_arm_link_hash_table * globals;
8023 struct elf32_arm_link_hash_entry *eh;
8024 union gotplt_union *root_plt;
8025 struct arm_plt_info *arm_plt;
8026 bfd_vma plt_offset;
8027 bfd_vma gotplt_offset;
8028 bfd_boolean has_iplt_entry;
8029
8030 globals = elf32_arm_hash_table (info);
8031 if (globals == NULL)
8032 return bfd_reloc_notsupported;
8033
8034 BFD_ASSERT (is_arm_elf (input_bfd));
8035
8036 /* Some relocation types map to different relocations depending on the
8037 target. We pick the right one here. */
8038 r_type = arm_real_reloc_type (globals, r_type);
8039
8040 /* It is possible to have linker relaxations on some TLS access
8041 models. Update our information here. */
8042 r_type = elf32_arm_tls_transition (info, r_type, h);
8043
8044 if (r_type != howto->type)
8045 howto = elf32_arm_howto_from_type (r_type);
8046
8047 /* If the start address has been set, then set the EF_ARM_HASENTRY
8048 flag. Setting this more than once is redundant, but the cost is
8049 not too high, and it keeps the code simple.
8050
8051 The test is done here, rather than somewhere else, because the
8052 start address is only set just before the final link commences.
8053
8054 Note - if the user deliberately sets a start address of 0, the
8055 flag will not be set. */
8056 if (bfd_get_start_address (output_bfd) != 0)
8057 elf_elfheader (output_bfd)->e_flags |= EF_ARM_HASENTRY;
8058
8059 eh = (struct elf32_arm_link_hash_entry *) h;
8060 sgot = globals->root.sgot;
8061 local_got_offsets = elf_local_got_offsets (input_bfd);
8062 local_tlsdesc_gotents = elf32_arm_local_tlsdesc_gotent (input_bfd);
8063
8064 if (globals->root.dynamic_sections_created)
8065 srelgot = globals->root.srelgot;
8066 else
8067 srelgot = NULL;
8068
8069 r_symndx = ELF32_R_SYM (rel->r_info);
8070
8071 if (globals->use_rel)
8072 {
8073 addend = bfd_get_32 (input_bfd, hit_data) & howto->src_mask;
8074
8075 if (addend & ((howto->src_mask + 1) >> 1))
8076 {
8077 signed_addend = -1;
8078 signed_addend &= ~ howto->src_mask;
8079 signed_addend |= addend;
8080 }
8081 else
8082 signed_addend = addend;
8083 }
8084 else
8085 addend = signed_addend = rel->r_addend;
8086
8087 /* Record the symbol information that should be used in dynamic
8088 relocations. */
8089 dynreloc_st_type = st_type;
8090 dynreloc_value = value;
8091 if (branch_type == ST_BRANCH_TO_THUMB)
8092 dynreloc_value |= 1;
8093
8094 /* Find out whether the symbol has a PLT. Set ST_VALUE, BRANCH_TYPE and
8095 VALUE appropriately for relocations that we resolve at link time. */
8096 has_iplt_entry = FALSE;
8097 if (elf32_arm_get_plt_info (input_bfd, eh, r_symndx, &root_plt, &arm_plt)
8098 && root_plt->offset != (bfd_vma) -1)
8099 {
8100 plt_offset = root_plt->offset;
8101 gotplt_offset = arm_plt->got_offset;
8102
8103 if (h == NULL || eh->is_iplt)
8104 {
8105 has_iplt_entry = TRUE;
8106 splt = globals->root.iplt;
8107
8108 /* Populate .iplt entries here, because not all of them will
8109 be seen by finish_dynamic_symbol. The lower bit is set if
8110 we have already populated the entry. */
8111 if (plt_offset & 1)
8112 plt_offset--;
8113 else
8114 {
8115 elf32_arm_populate_plt_entry (output_bfd, info, root_plt, arm_plt,
8116 -1, dynreloc_value);
8117 root_plt->offset |= 1;
8118 }
8119
8120 /* Static relocations always resolve to the .iplt entry. */
8121 st_type = STT_FUNC;
8122 value = (splt->output_section->vma
8123 + splt->output_offset
8124 + plt_offset);
8125 branch_type = ST_BRANCH_TO_ARM;
8126
8127 /* If there are non-call relocations that resolve to the .iplt
8128 entry, then all dynamic ones must too. */
8129 if (arm_plt->noncall_refcount != 0)
8130 {
8131 dynreloc_st_type = st_type;
8132 dynreloc_value = value;
8133 }
8134 }
8135 else
8136 /* We populate the .plt entry in finish_dynamic_symbol. */
8137 splt = globals->root.splt;
8138 }
8139 else
8140 {
8141 splt = NULL;
8142 plt_offset = (bfd_vma) -1;
8143 gotplt_offset = (bfd_vma) -1;
8144 }
8145
8146 switch (r_type)
8147 {
8148 case R_ARM_NONE:
8149 /* We don't need to find a value for this symbol. It's just a
8150 marker. */
8151 *unresolved_reloc_p = FALSE;
8152 return bfd_reloc_ok;
8153
8154 case R_ARM_ABS12:
8155 if (!globals->vxworks_p)
8156 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
8157
8158 case R_ARM_PC24:
8159 case R_ARM_ABS32:
8160 case R_ARM_ABS32_NOI:
8161 case R_ARM_REL32:
8162 case R_ARM_REL32_NOI:
8163 case R_ARM_CALL:
8164 case R_ARM_JUMP24:
8165 case R_ARM_XPC25:
8166 case R_ARM_PREL31:
8167 case R_ARM_PLT32:
8168 /* Handle relocations which should use the PLT entry. ABS32/REL32
8169 will use the symbol's value, which may point to a PLT entry, but we
8170 don't need to handle that here. If we created a PLT entry, all
8171 branches in this object should go to it, except if the PLT is too
8172 far away, in which case a long branch stub should be inserted. */
8173 if ((r_type != R_ARM_ABS32 && r_type != R_ARM_REL32
8174 && r_type != R_ARM_ABS32_NOI && r_type != R_ARM_REL32_NOI
8175 && r_type != R_ARM_CALL
8176 && r_type != R_ARM_JUMP24
8177 && r_type != R_ARM_PLT32)
8178 && plt_offset != (bfd_vma) -1)
8179 {
8180 /* If we've created a .plt section, and assigned a PLT entry
8181 to this function, it must either be a STT_GNU_IFUNC reference
8182 or not be known to bind locally. In other cases, we should
8183 have cleared the PLT entry by now. */
8184 BFD_ASSERT (has_iplt_entry || !SYMBOL_CALLS_LOCAL (info, h));
8185
8186 value = (splt->output_section->vma
8187 + splt->output_offset
8188 + plt_offset);
8189 *unresolved_reloc_p = FALSE;
8190 return _bfd_final_link_relocate (howto, input_bfd, input_section,
8191 contents, rel->r_offset, value,
8192 rel->r_addend);
8193 }
8194
8195 /* When generating a shared object or relocatable executable, these
8196 relocations are copied into the output file to be resolved at
8197 run time. */
8198 if ((info->shared || globals->root.is_relocatable_executable)
8199 && (input_section->flags & SEC_ALLOC)
8200 && !(globals->vxworks_p
8201 && strcmp (input_section->output_section->name,
8202 ".tls_vars") == 0)
8203 && ((r_type != R_ARM_REL32 && r_type != R_ARM_REL32_NOI)
8204 || !SYMBOL_CALLS_LOCAL (info, h))
8205 && !(input_bfd == globals->stub_bfd
8206 && strstr (input_section->name, STUB_SUFFIX))
8207 && (h == NULL
8208 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
8209 || h->root.type != bfd_link_hash_undefweak)
8210 && r_type != R_ARM_PC24
8211 && r_type != R_ARM_CALL
8212 && r_type != R_ARM_JUMP24
8213 && r_type != R_ARM_PREL31
8214 && r_type != R_ARM_PLT32)
8215 {
8216 Elf_Internal_Rela outrel;
8217 bfd_boolean skip, relocate;
8218
8219 *unresolved_reloc_p = FALSE;
8220
8221 if (sreloc == NULL && globals->root.dynamic_sections_created)
8222 {
8223 sreloc = _bfd_elf_get_dynamic_reloc_section (input_bfd, input_section,
8224 ! globals->use_rel);
8225
8226 if (sreloc == NULL)
8227 return bfd_reloc_notsupported;
8228 }
8229
8230 skip = FALSE;
8231 relocate = FALSE;
8232
8233 outrel.r_addend = addend;
8234 outrel.r_offset =
8235 _bfd_elf_section_offset (output_bfd, info, input_section,
8236 rel->r_offset);
8237 if (outrel.r_offset == (bfd_vma) -1)
8238 skip = TRUE;
8239 else if (outrel.r_offset == (bfd_vma) -2)
8240 skip = TRUE, relocate = TRUE;
8241 outrel.r_offset += (input_section->output_section->vma
8242 + input_section->output_offset);
8243
8244 if (skip)
8245 memset (&outrel, 0, sizeof outrel);
8246 else if (h != NULL
8247 && h->dynindx != -1
8248 && (!info->shared
8249 || !info->symbolic
8250 || !h->def_regular))
8251 outrel.r_info = ELF32_R_INFO (h->dynindx, r_type);
8252 else
8253 {
8254 int symbol;
8255
8256 /* This symbol is local, or marked to become local. */
8257 BFD_ASSERT (r_type == R_ARM_ABS32 || r_type == R_ARM_ABS32_NOI);
8258 if (globals->symbian_p)
8259 {
8260 asection *osec;
8261
8262 /* On Symbian OS, the data segment and text segment
8263 can be relocated independently. Therefore, we
8264 must indicate the segment to which this
8265 relocation is relative. The BPABI allows us to
8266 use any symbol in the right segment; we just use
8267 the section symbol as it is convenient. (We
8268 cannot use the symbol given by "h" directly as it
8269 will not appear in the dynamic symbol table.)
8270
8271 Note that the dynamic linker ignores the section
8272 symbol value, so we don't subtract osec->vma
8273 from the emitted reloc addend. */
8274 if (sym_sec)
8275 osec = sym_sec->output_section;
8276 else
8277 osec = input_section->output_section;
8278 symbol = elf_section_data (osec)->dynindx;
8279 if (symbol == 0)
8280 {
8281 struct elf_link_hash_table *htab = elf_hash_table (info);
8282
8283 if ((osec->flags & SEC_READONLY) == 0
8284 && htab->data_index_section != NULL)
8285 osec = htab->data_index_section;
8286 else
8287 osec = htab->text_index_section;
8288 symbol = elf_section_data (osec)->dynindx;
8289 }
8290 BFD_ASSERT (symbol != 0);
8291 }
8292 else
8293 /* On SVR4-ish systems, the dynamic loader cannot
8294 relocate the text and data segments independently,
8295 so the symbol does not matter. */
8296 symbol = 0;
8297 if (dynreloc_st_type == STT_GNU_IFUNC)
8298 /* We have an STT_GNU_IFUNC symbol that doesn't resolve
8299 to the .iplt entry. Instead, every non-call reference
8300 must use an R_ARM_IRELATIVE relocation to obtain the
8301 correct run-time address. */
8302 outrel.r_info = ELF32_R_INFO (symbol, R_ARM_IRELATIVE);
8303 else
8304 outrel.r_info = ELF32_R_INFO (symbol, R_ARM_RELATIVE);
8305 if (globals->use_rel)
8306 relocate = TRUE;
8307 else
8308 outrel.r_addend += dynreloc_value;
8309 }
8310
8311 elf32_arm_add_dynreloc (output_bfd, info, sreloc, &outrel);
8312
8313 /* If this reloc is against an external symbol, we do not want to
8314 fiddle with the addend. Otherwise, we need to include the symbol
8315 value so that it becomes an addend for the dynamic reloc. */
8316 if (! relocate)
8317 return bfd_reloc_ok;
8318
8319 return _bfd_final_link_relocate (howto, input_bfd, input_section,
8320 contents, rel->r_offset,
8321 dynreloc_value, (bfd_vma) 0);
8322 }
8323 else switch (r_type)
8324 {
8325 case R_ARM_ABS12:
8326 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
8327
8328 case R_ARM_XPC25: /* Arm BLX instruction. */
8329 case R_ARM_CALL:
8330 case R_ARM_JUMP24:
8331 case R_ARM_PC24: /* Arm B/BL instruction. */
8332 case R_ARM_PLT32:
8333 {
8334 struct elf32_arm_stub_hash_entry *stub_entry = NULL;
8335
8336 if (r_type == R_ARM_XPC25)
8337 {
8338 /* Check for Arm calling Arm function. */
8339 /* FIXME: Should we translate the instruction into a BL
8340 instruction instead ? */
8341 if (branch_type != ST_BRANCH_TO_THUMB)
8342 (*_bfd_error_handler)
8343 (_("\%B: Warning: Arm BLX instruction targets Arm function '%s'."),
8344 input_bfd,
8345 h ? h->root.root.string : "(local)");
8346 }
8347 else if (r_type == R_ARM_PC24)
8348 {
8349 /* Check for Arm calling Thumb function. */
8350 if (branch_type == ST_BRANCH_TO_THUMB)
8351 {
8352 if (elf32_arm_to_thumb_stub (info, sym_name, input_bfd,
8353 output_bfd, input_section,
8354 hit_data, sym_sec, rel->r_offset,
8355 signed_addend, value,
8356 error_message))
8357 return bfd_reloc_ok;
8358 else
8359 return bfd_reloc_dangerous;
8360 }
8361 }
8362
8363 /* Check if a stub has to be inserted because the
8364 destination is too far or we are changing mode. */
8365 if ( r_type == R_ARM_CALL
8366 || r_type == R_ARM_JUMP24
8367 || r_type == R_ARM_PLT32)
8368 {
8369 enum elf32_arm_stub_type stub_type = arm_stub_none;
8370 struct elf32_arm_link_hash_entry *hash;
8371
8372 hash = (struct elf32_arm_link_hash_entry *) h;
8373 stub_type = arm_type_of_stub (info, input_section, rel,
8374 st_type, &branch_type,
8375 hash, value, sym_sec,
8376 input_bfd, sym_name);
8377
8378 if (stub_type != arm_stub_none)
8379 {
8380 /* The target is out of reach, so redirect the
8381 branch to the local stub for this function. */
8382 stub_entry = elf32_arm_get_stub_entry (input_section,
8383 sym_sec, h,
8384 rel, globals,
8385 stub_type);
8386 {
8387 if (stub_entry != NULL)
8388 value = (stub_entry->stub_offset
8389 + stub_entry->stub_sec->output_offset
8390 + stub_entry->stub_sec->output_section->vma);
8391
8392 if (plt_offset != (bfd_vma) -1)
8393 *unresolved_reloc_p = FALSE;
8394 }
8395 }
8396 else
8397 {
8398 /* If the call goes through a PLT entry, make sure to
8399 check distance to the right destination address. */
8400 if (plt_offset != (bfd_vma) -1)
8401 {
8402 value = (splt->output_section->vma
8403 + splt->output_offset
8404 + plt_offset);
8405 *unresolved_reloc_p = FALSE;
8406 /* The PLT entry is in ARM mode, regardless of the
8407 target function. */
8408 branch_type = ST_BRANCH_TO_ARM;
8409 }
8410 }
8411 }
8412
8413 /* The ARM ELF ABI says that this reloc is computed as: S - P + A
8414 where:
8415 S is the address of the symbol in the relocation.
8416 P is address of the instruction being relocated.
8417 A is the addend (extracted from the instruction) in bytes.
8418
8419 S is held in 'value'.
8420 P is the base address of the section containing the
8421 instruction plus the offset of the reloc into that
8422 section, ie:
8423 (input_section->output_section->vma +
8424 input_section->output_offset +
8425 rel->r_offset).
8426 A is the addend, converted into bytes, ie:
8427 (signed_addend * 4)
8428
8429 Note: None of these operations have knowledge of the pipeline
8430 size of the processor, thus it is up to the assembler to
8431 encode this information into the addend. */
8432 value -= (input_section->output_section->vma
8433 + input_section->output_offset);
8434 value -= rel->r_offset;
8435 if (globals->use_rel)
8436 value += (signed_addend << howto->size);
8437 else
8438 /* RELA addends do not have to be adjusted by howto->size. */
8439 value += signed_addend;
8440
8441 signed_addend = value;
8442 signed_addend >>= howto->rightshift;
8443
8444 /* A branch to an undefined weak symbol is turned into a jump to
8445 the next instruction unless a PLT entry will be created.
8446 Do the same for local undefined symbols (but not for STN_UNDEF).
8447 The jump to the next instruction is optimized as a NOP depending
8448 on the architecture. */
8449 if (h ? (h->root.type == bfd_link_hash_undefweak
8450 && plt_offset == (bfd_vma) -1)
8451 : r_symndx != STN_UNDEF && bfd_is_und_section (sym_sec))
8452 {
8453 value = (bfd_get_32 (input_bfd, hit_data) & 0xf0000000);
8454
8455 if (arch_has_arm_nop (globals))
8456 value |= 0x0320f000;
8457 else
8458 value |= 0x01a00000; /* Using pre-UAL nop: mov r0, r0. */
8459 }
8460 else
8461 {
8462 /* Perform a signed range check. */
8463 if ( signed_addend > ((bfd_signed_vma) (howto->dst_mask >> 1))
8464 || signed_addend < - ((bfd_signed_vma) ((howto->dst_mask + 1) >> 1)))
8465 return bfd_reloc_overflow;
8466
8467 addend = (value & 2);
8468
8469 value = (signed_addend & howto->dst_mask)
8470 | (bfd_get_32 (input_bfd, hit_data) & (~ howto->dst_mask));
8471
8472 if (r_type == R_ARM_CALL)
8473 {
8474 /* Set the H bit in the BLX instruction. */
8475 if (branch_type == ST_BRANCH_TO_THUMB)
8476 {
8477 if (addend)
8478 value |= (1 << 24);
8479 else
8480 value &= ~(bfd_vma)(1 << 24);
8481 }
8482
8483 /* Select the correct instruction (BL or BLX). */
8484 /* Only if we are not handling a BL to a stub. In this
8485 case, mode switching is performed by the stub. */
8486 if (branch_type == ST_BRANCH_TO_THUMB && !stub_entry)
8487 value |= (1 << 28);
8488 else if (stub_entry || branch_type != ST_BRANCH_UNKNOWN)
8489 {
8490 value &= ~(bfd_vma)(1 << 28);
8491 value |= (1 << 24);
8492 }
8493 }
8494 }
8495 }
8496 break;
8497
8498 case R_ARM_ABS32:
8499 value += addend;
8500 if (branch_type == ST_BRANCH_TO_THUMB)
8501 value |= 1;
8502 break;
8503
8504 case R_ARM_ABS32_NOI:
8505 value += addend;
8506 break;
8507
8508 case R_ARM_REL32:
8509 value += addend;
8510 if (branch_type == ST_BRANCH_TO_THUMB)
8511 value |= 1;
8512 value -= (input_section->output_section->vma
8513 + input_section->output_offset + rel->r_offset);
8514 break;
8515
8516 case R_ARM_REL32_NOI:
8517 value += addend;
8518 value -= (input_section->output_section->vma
8519 + input_section->output_offset + rel->r_offset);
8520 break;
8521
8522 case R_ARM_PREL31:
8523 value -= (input_section->output_section->vma
8524 + input_section->output_offset + rel->r_offset);
8525 value += signed_addend;
8526 if (! h || h->root.type != bfd_link_hash_undefweak)
8527 {
8528 /* Check for overflow. */
8529 if ((value ^ (value >> 1)) & (1 << 30))
8530 return bfd_reloc_overflow;
8531 }
8532 value &= 0x7fffffff;
8533 value |= (bfd_get_32 (input_bfd, hit_data) & 0x80000000);
8534 if (branch_type == ST_BRANCH_TO_THUMB)
8535 value |= 1;
8536 break;
8537 }
8538
8539 bfd_put_32 (input_bfd, value, hit_data);
8540 return bfd_reloc_ok;
8541
8542 case R_ARM_ABS8:
8543 value += addend;
8544
8545 /* There is no way to tell whether the user intended to use a signed or
8546 unsigned addend. When checking for overflow we accept either,
8547 as specified by the AAELF. */
8548 if ((long) value > 0xff || (long) value < -0x80)
8549 return bfd_reloc_overflow;
8550
8551 bfd_put_8 (input_bfd, value, hit_data);
8552 return bfd_reloc_ok;
8553
8554 case R_ARM_ABS16:
8555 value += addend;
8556
8557 /* See comment for R_ARM_ABS8. */
8558 if ((long) value > 0xffff || (long) value < -0x8000)
8559 return bfd_reloc_overflow;
8560
8561 bfd_put_16 (input_bfd, value, hit_data);
8562 return bfd_reloc_ok;
8563
8564 case R_ARM_THM_ABS5:
8565 /* Support ldr and str instructions for the thumb. */
8566 if (globals->use_rel)
8567 {
8568 /* Need to refetch addend. */
8569 addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
8570 /* ??? Need to determine shift amount from operand size. */
8571 addend >>= howto->rightshift;
8572 }
8573 value += addend;
8574
8575 /* ??? Isn't value unsigned? */
8576 if ((long) value > 0x1f || (long) value < -0x10)
8577 return bfd_reloc_overflow;
8578
8579 /* ??? Value needs to be properly shifted into place first. */
8580 value |= bfd_get_16 (input_bfd, hit_data) & 0xf83f;
8581 bfd_put_16 (input_bfd, value, hit_data);
8582 return bfd_reloc_ok;
8583
8584 case R_ARM_THM_ALU_PREL_11_0:
8585 /* Corresponds to: addw.w reg, pc, #offset (and similarly for subw). */
8586 {
8587 bfd_vma insn;
8588 bfd_signed_vma relocation;
8589
8590 insn = (bfd_get_16 (input_bfd, hit_data) << 16)
8591 | bfd_get_16 (input_bfd, hit_data + 2);
8592
8593 if (globals->use_rel)
8594 {
8595 signed_addend = (insn & 0xff) | ((insn & 0x7000) >> 4)
8596 | ((insn & (1 << 26)) >> 15);
8597 if (insn & 0xf00000)
8598 signed_addend = -signed_addend;
8599 }
8600
8601 relocation = value + signed_addend;
8602 relocation -= Pa (input_section->output_section->vma
8603 + input_section->output_offset
8604 + rel->r_offset);
8605
8606 value = abs (relocation);
8607
8608 if (value >= 0x1000)
8609 return bfd_reloc_overflow;
8610
8611 insn = (insn & 0xfb0f8f00) | (value & 0xff)
8612 | ((value & 0x700) << 4)
8613 | ((value & 0x800) << 15);
8614 if (relocation < 0)
8615 insn |= 0xa00000;
8616
8617 bfd_put_16 (input_bfd, insn >> 16, hit_data);
8618 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
8619
8620 return bfd_reloc_ok;
8621 }
8622
8623 case R_ARM_THM_PC8:
8624 /* PR 10073: This reloc is not generated by the GNU toolchain,
8625 but it is supported for compatibility with third party libraries
8626 generated by other compilers, specifically the ARM/IAR. */
8627 {
8628 bfd_vma insn;
8629 bfd_signed_vma relocation;
8630
8631 insn = bfd_get_16 (input_bfd, hit_data);
8632
8633 if (globals->use_rel)
8634 addend = ((((insn & 0x00ff) << 2) + 4) & 0x3ff) -4;
8635
8636 relocation = value + addend;
8637 relocation -= Pa (input_section->output_section->vma
8638 + input_section->output_offset
8639 + rel->r_offset);
8640
8641 value = abs (relocation);
8642
8643 /* We do not check for overflow of this reloc. Although strictly
8644 speaking this is incorrect, it appears to be necessary in order
8645 to work with IAR generated relocs. Since GCC and GAS do not
8646 generate R_ARM_THM_PC8 relocs, the lack of a check should not be
8647 a problem for them. */
8648 value &= 0x3fc;
8649
8650 insn = (insn & 0xff00) | (value >> 2);
8651
8652 bfd_put_16 (input_bfd, insn, hit_data);
8653
8654 return bfd_reloc_ok;
8655 }
8656
8657 case R_ARM_THM_PC12:
8658 /* Corresponds to: ldr.w reg, [pc, #offset]. */
8659 {
8660 bfd_vma insn;
8661 bfd_signed_vma relocation;
8662
8663 insn = (bfd_get_16 (input_bfd, hit_data) << 16)
8664 | bfd_get_16 (input_bfd, hit_data + 2);
8665
8666 if (globals->use_rel)
8667 {
8668 signed_addend = insn & 0xfff;
8669 if (!(insn & (1 << 23)))
8670 signed_addend = -signed_addend;
8671 }
8672
8673 relocation = value + signed_addend;
8674 relocation -= Pa (input_section->output_section->vma
8675 + input_section->output_offset
8676 + rel->r_offset);
8677
8678 value = abs (relocation);
8679
8680 if (value >= 0x1000)
8681 return bfd_reloc_overflow;
8682
8683 insn = (insn & 0xff7ff000) | value;
8684 if (relocation >= 0)
8685 insn |= (1 << 23);
8686
8687 bfd_put_16 (input_bfd, insn >> 16, hit_data);
8688 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
8689
8690 return bfd_reloc_ok;
8691 }
8692
8693 case R_ARM_THM_XPC22:
8694 case R_ARM_THM_CALL:
8695 case R_ARM_THM_JUMP24:
8696 /* Thumb BL (branch long instruction). */
8697 {
8698 bfd_vma relocation;
8699 bfd_vma reloc_sign;
8700 bfd_boolean overflow = FALSE;
8701 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
8702 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
8703 bfd_signed_vma reloc_signed_max;
8704 bfd_signed_vma reloc_signed_min;
8705 bfd_vma check;
8706 bfd_signed_vma signed_check;
8707 int bitsize;
8708 const int thumb2 = using_thumb2 (globals);
8709
8710 /* A branch to an undefined weak symbol is turned into a jump to
8711 the next instruction unless a PLT entry will be created.
8712 The jump to the next instruction is optimized as a NOP.W for
8713 Thumb-2 enabled architectures. */
8714 if (h && h->root.type == bfd_link_hash_undefweak
8715 && plt_offset == (bfd_vma) -1)
8716 {
8717 if (arch_has_thumb2_nop (globals))
8718 {
8719 bfd_put_16 (input_bfd, 0xf3af, hit_data);
8720 bfd_put_16 (input_bfd, 0x8000, hit_data + 2);
8721 }
8722 else
8723 {
8724 bfd_put_16 (input_bfd, 0xe000, hit_data);
8725 bfd_put_16 (input_bfd, 0xbf00, hit_data + 2);
8726 }
8727 return bfd_reloc_ok;
8728 }
8729
8730 /* Fetch the addend. We use the Thumb-2 encoding (backwards compatible
8731 with Thumb-1) involving the J1 and J2 bits. */
8732 if (globals->use_rel)
8733 {
8734 bfd_vma s = (upper_insn & (1 << 10)) >> 10;
8735 bfd_vma upper = upper_insn & 0x3ff;
8736 bfd_vma lower = lower_insn & 0x7ff;
8737 bfd_vma j1 = (lower_insn & (1 << 13)) >> 13;
8738 bfd_vma j2 = (lower_insn & (1 << 11)) >> 11;
8739 bfd_vma i1 = j1 ^ s ? 0 : 1;
8740 bfd_vma i2 = j2 ^ s ? 0 : 1;
8741
8742 addend = (i1 << 23) | (i2 << 22) | (upper << 12) | (lower << 1);
8743 /* Sign extend. */
8744 addend = (addend | ((s ? 0 : 1) << 24)) - (1 << 24);
8745
8746 signed_addend = addend;
8747 }
8748
8749 if (r_type == R_ARM_THM_XPC22)
8750 {
8751 /* Check for Thumb to Thumb call. */
8752 /* FIXME: Should we translate the instruction into a BL
8753 instruction instead ? */
8754 if (branch_type == ST_BRANCH_TO_THUMB)
8755 (*_bfd_error_handler)
8756 (_("%B: Warning: Thumb BLX instruction targets thumb function '%s'."),
8757 input_bfd,
8758 h ? h->root.root.string : "(local)");
8759 }
8760 else
8761 {
8762 /* If it is not a call to Thumb, assume call to Arm.
8763 If it is a call relative to a section name, then it is not a
8764 function call at all, but rather a long jump. Calls through
8765 the PLT do not require stubs. */
8766 if (branch_type == ST_BRANCH_TO_ARM && plt_offset == (bfd_vma) -1)
8767 {
8768 if (globals->use_blx && r_type == R_ARM_THM_CALL)
8769 {
8770 /* Convert BL to BLX. */
8771 lower_insn = (lower_insn & ~0x1000) | 0x0800;
8772 }
8773 else if (( r_type != R_ARM_THM_CALL)
8774 && (r_type != R_ARM_THM_JUMP24))
8775 {
8776 if (elf32_thumb_to_arm_stub
8777 (info, sym_name, input_bfd, output_bfd, input_section,
8778 hit_data, sym_sec, rel->r_offset, signed_addend, value,
8779 error_message))
8780 return bfd_reloc_ok;
8781 else
8782 return bfd_reloc_dangerous;
8783 }
8784 }
8785 else if (branch_type == ST_BRANCH_TO_THUMB
8786 && globals->use_blx
8787 && r_type == R_ARM_THM_CALL)
8788 {
8789 /* Make sure this is a BL. */
8790 lower_insn |= 0x1800;
8791 }
8792 }
8793
8794 enum elf32_arm_stub_type stub_type = arm_stub_none;
8795 if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24)
8796 {
8797 /* Check if a stub has to be inserted because the destination
8798 is too far. */
8799 struct elf32_arm_stub_hash_entry *stub_entry;
8800 struct elf32_arm_link_hash_entry *hash;
8801
8802 hash = (struct elf32_arm_link_hash_entry *) h;
8803
8804 stub_type = arm_type_of_stub (info, input_section, rel,
8805 st_type, &branch_type,
8806 hash, value, sym_sec,
8807 input_bfd, sym_name);
8808
8809 if (stub_type != arm_stub_none)
8810 {
8811 /* The target is out of reach or we are changing modes, so
8812 redirect the branch to the local stub for this
8813 function. */
8814 stub_entry = elf32_arm_get_stub_entry (input_section,
8815 sym_sec, h,
8816 rel, globals,
8817 stub_type);
8818 if (stub_entry != NULL)
8819 {
8820 value = (stub_entry->stub_offset
8821 + stub_entry->stub_sec->output_offset
8822 + stub_entry->stub_sec->output_section->vma);
8823
8824 if (plt_offset != (bfd_vma) -1)
8825 *unresolved_reloc_p = FALSE;
8826 }
8827
8828 /* If this call becomes a call to Arm, force BLX. */
8829 if (globals->use_blx && (r_type == R_ARM_THM_CALL))
8830 {
8831 if ((stub_entry
8832 && !arm_stub_is_thumb (stub_entry->stub_type))
8833 || branch_type != ST_BRANCH_TO_THUMB)
8834 lower_insn = (lower_insn & ~0x1000) | 0x0800;
8835 }
8836 }
8837 }
8838
8839 /* Handle calls via the PLT. */
8840 if (stub_type == arm_stub_none && plt_offset != (bfd_vma) -1)
8841 {
8842 value = (splt->output_section->vma
8843 + splt->output_offset
8844 + plt_offset);
8845
8846 if (globals->use_blx && r_type == R_ARM_THM_CALL)
8847 {
8848 /* If the Thumb BLX instruction is available, convert
8849 the BL to a BLX instruction to call the ARM-mode
8850 PLT entry. */
8851 lower_insn = (lower_insn & ~0x1000) | 0x0800;
8852 branch_type = ST_BRANCH_TO_ARM;
8853 }
8854 else
8855 {
8856 /* Target the Thumb stub before the ARM PLT entry. */
8857 value -= PLT_THUMB_STUB_SIZE;
8858 branch_type = ST_BRANCH_TO_THUMB;
8859 }
8860 *unresolved_reloc_p = FALSE;
8861 }
8862
8863 relocation = value + signed_addend;
8864
8865 relocation -= (input_section->output_section->vma
8866 + input_section->output_offset
8867 + rel->r_offset);
8868
8869 check = relocation >> howto->rightshift;
8870
8871 /* If this is a signed value, the rightshift just dropped
8872 leading 1 bits (assuming twos complement). */
8873 if ((bfd_signed_vma) relocation >= 0)
8874 signed_check = check;
8875 else
8876 signed_check = check | ~((bfd_vma) -1 >> howto->rightshift);
8877
8878 /* Calculate the permissible maximum and minimum values for
8879 this relocation according to whether we're relocating for
8880 Thumb-2 or not. */
8881 bitsize = howto->bitsize;
8882 if (!thumb2)
8883 bitsize -= 2;
8884 reloc_signed_max = (1 << (bitsize - 1)) - 1;
8885 reloc_signed_min = ~reloc_signed_max;
8886
8887 /* Assumes two's complement. */
8888 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
8889 overflow = TRUE;
8890
8891 if ((lower_insn & 0x5000) == 0x4000)
8892 /* For a BLX instruction, make sure that the relocation is rounded up
8893 to a word boundary. This follows the semantics of the instruction
8894 which specifies that bit 1 of the target address will come from bit
8895 1 of the base address. */
8896 relocation = (relocation + 2) & ~ 3;
8897
8898 /* Put RELOCATION back into the insn. Assumes two's complement.
8899 We use the Thumb-2 encoding, which is safe even if dealing with
8900 a Thumb-1 instruction by virtue of our overflow check above. */
8901 reloc_sign = (signed_check < 0) ? 1 : 0;
8902 upper_insn = (upper_insn & ~(bfd_vma) 0x7ff)
8903 | ((relocation >> 12) & 0x3ff)
8904 | (reloc_sign << 10);
8905 lower_insn = (lower_insn & ~(bfd_vma) 0x2fff)
8906 | (((!((relocation >> 23) & 1)) ^ reloc_sign) << 13)
8907 | (((!((relocation >> 22) & 1)) ^ reloc_sign) << 11)
8908 | ((relocation >> 1) & 0x7ff);
8909
8910 /* Put the relocated value back in the object file: */
8911 bfd_put_16 (input_bfd, upper_insn, hit_data);
8912 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
8913
8914 return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
8915 }
8916 break;
8917
8918 case R_ARM_THM_JUMP19:
8919 /* Thumb32 conditional branch instruction. */
8920 {
8921 bfd_vma relocation;
8922 bfd_boolean overflow = FALSE;
8923 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
8924 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
8925 bfd_signed_vma reloc_signed_max = 0xffffe;
8926 bfd_signed_vma reloc_signed_min = -0x100000;
8927 bfd_signed_vma signed_check;
8928
8929 /* Need to refetch the addend, reconstruct the top three bits,
8930 and squish the two 11 bit pieces together. */
8931 if (globals->use_rel)
8932 {
8933 bfd_vma S = (upper_insn & 0x0400) >> 10;
8934 bfd_vma upper = (upper_insn & 0x003f);
8935 bfd_vma J1 = (lower_insn & 0x2000) >> 13;
8936 bfd_vma J2 = (lower_insn & 0x0800) >> 11;
8937 bfd_vma lower = (lower_insn & 0x07ff);
8938
8939 upper |= J1 << 6;
8940 upper |= J2 << 7;
8941 upper |= (!S) << 8;
8942 upper -= 0x0100; /* Sign extend. */
8943
8944 addend = (upper << 12) | (lower << 1);
8945 signed_addend = addend;
8946 }
8947
8948 /* Handle calls via the PLT. */
8949 if (plt_offset != (bfd_vma) -1)
8950 {
8951 value = (splt->output_section->vma
8952 + splt->output_offset
8953 + plt_offset);
8954 /* Target the Thumb stub before the ARM PLT entry. */
8955 value -= PLT_THUMB_STUB_SIZE;
8956 *unresolved_reloc_p = FALSE;
8957 }
8958
8959 /* ??? Should handle interworking? GCC might someday try to
8960 use this for tail calls. */
8961
8962 relocation = value + signed_addend;
8963 relocation -= (input_section->output_section->vma
8964 + input_section->output_offset
8965 + rel->r_offset);
8966 signed_check = (bfd_signed_vma) relocation;
8967
8968 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
8969 overflow = TRUE;
8970
8971 /* Put RELOCATION back into the insn. */
8972 {
8973 bfd_vma S = (relocation & 0x00100000) >> 20;
8974 bfd_vma J2 = (relocation & 0x00080000) >> 19;
8975 bfd_vma J1 = (relocation & 0x00040000) >> 18;
8976 bfd_vma hi = (relocation & 0x0003f000) >> 12;
8977 bfd_vma lo = (relocation & 0x00000ffe) >> 1;
8978
8979 upper_insn = (upper_insn & 0xfbc0) | (S << 10) | hi;
8980 lower_insn = (lower_insn & 0xd000) | (J1 << 13) | (J2 << 11) | lo;
8981 }
8982
8983 /* Put the relocated value back in the object file: */
8984 bfd_put_16 (input_bfd, upper_insn, hit_data);
8985 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
8986
8987 return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
8988 }
8989
8990 case R_ARM_THM_JUMP11:
8991 case R_ARM_THM_JUMP8:
8992 case R_ARM_THM_JUMP6:
8993 /* Thumb B (branch) instruction. */
8994 {
8995 bfd_signed_vma relocation;
8996 bfd_signed_vma reloc_signed_max = (1 << (howto->bitsize - 1)) - 1;
8997 bfd_signed_vma reloc_signed_min = ~ reloc_signed_max;
8998 bfd_signed_vma signed_check;
8999
9000 /* CZB cannot jump backward. */
9001 if (r_type == R_ARM_THM_JUMP6)
9002 reloc_signed_min = 0;
9003
9004 if (globals->use_rel)
9005 {
9006 /* Need to refetch addend. */
9007 addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
9008 if (addend & ((howto->src_mask + 1) >> 1))
9009 {
9010 signed_addend = -1;
9011 signed_addend &= ~ howto->src_mask;
9012 signed_addend |= addend;
9013 }
9014 else
9015 signed_addend = addend;
9016 /* The value in the insn has been right shifted. We need to
9017 undo this, so that we can perform the address calculation
9018 in terms of bytes. */
9019 signed_addend <<= howto->rightshift;
9020 }
9021 relocation = value + signed_addend;
9022
9023 relocation -= (input_section->output_section->vma
9024 + input_section->output_offset
9025 + rel->r_offset);
9026
9027 relocation >>= howto->rightshift;
9028 signed_check = relocation;
9029
9030 if (r_type == R_ARM_THM_JUMP6)
9031 relocation = ((relocation & 0x0020) << 4) | ((relocation & 0x001f) << 3);
9032 else
9033 relocation &= howto->dst_mask;
9034 relocation |= (bfd_get_16 (input_bfd, hit_data) & (~ howto->dst_mask));
9035
9036 bfd_put_16 (input_bfd, relocation, hit_data);
9037
9038 /* Assumes two's complement. */
9039 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
9040 return bfd_reloc_overflow;
9041
9042 return bfd_reloc_ok;
9043 }
9044
9045 case R_ARM_ALU_PCREL7_0:
9046 case R_ARM_ALU_PCREL15_8:
9047 case R_ARM_ALU_PCREL23_15:
9048 {
9049 bfd_vma insn;
9050 bfd_vma relocation;
9051
9052 insn = bfd_get_32 (input_bfd, hit_data);
9053 if (globals->use_rel)
9054 {
9055 /* Extract the addend. */
9056 addend = (insn & 0xff) << ((insn & 0xf00) >> 7);
9057 signed_addend = addend;
9058 }
9059 relocation = value + signed_addend;
9060
9061 relocation -= (input_section->output_section->vma
9062 + input_section->output_offset
9063 + rel->r_offset);
9064 insn = (insn & ~0xfff)
9065 | ((howto->bitpos << 7) & 0xf00)
9066 | ((relocation >> howto->bitpos) & 0xff);
9067 bfd_put_32 (input_bfd, value, hit_data);
9068 }
9069 return bfd_reloc_ok;
9070
9071 case R_ARM_GNU_VTINHERIT:
9072 case R_ARM_GNU_VTENTRY:
9073 return bfd_reloc_ok;
9074
9075 case R_ARM_GOTOFF32:
9076 /* Relocation is relative to the start of the
9077 global offset table. */
9078
9079 BFD_ASSERT (sgot != NULL);
9080 if (sgot == NULL)
9081 return bfd_reloc_notsupported;
9082
9083 /* If we are addressing a Thumb function, we need to adjust the
9084 address by one, so that attempts to call the function pointer will
9085 correctly interpret it as Thumb code. */
9086 if (branch_type == ST_BRANCH_TO_THUMB)
9087 value += 1;
9088
9089 /* Note that sgot->output_offset is not involved in this
9090 calculation. We always want the start of .got. If we
9091 define _GLOBAL_OFFSET_TABLE in a different way, as is
9092 permitted by the ABI, we might have to change this
9093 calculation. */
9094 value -= sgot->output_section->vma;
9095 return _bfd_final_link_relocate (howto, input_bfd, input_section,
9096 contents, rel->r_offset, value,
9097 rel->r_addend);
9098
9099 case R_ARM_GOTPC:
9100 /* Use global offset table as symbol value. */
9101 BFD_ASSERT (sgot != NULL);
9102
9103 if (sgot == NULL)
9104 return bfd_reloc_notsupported;
9105
9106 *unresolved_reloc_p = FALSE;
9107 value = sgot->output_section->vma;
9108 return _bfd_final_link_relocate (howto, input_bfd, input_section,
9109 contents, rel->r_offset, value,
9110 rel->r_addend);
9111
9112 case R_ARM_GOT32:
9113 case R_ARM_GOT_PREL:
9114 /* Relocation is to the entry for this symbol in the
9115 global offset table. */
9116 if (sgot == NULL)
9117 return bfd_reloc_notsupported;
9118
9119 if (dynreloc_st_type == STT_GNU_IFUNC
9120 && plt_offset != (bfd_vma) -1
9121 && (h == NULL || SYMBOL_REFERENCES_LOCAL (info, h)))
9122 {
9123 /* We have a relocation against a locally-binding STT_GNU_IFUNC
9124 symbol, and the relocation resolves directly to the runtime
9125 target rather than to the .iplt entry. This means that any
9126 .got entry would be the same value as the .igot.plt entry,
9127 so there's no point creating both. */
9128 sgot = globals->root.igotplt;
9129 value = sgot->output_offset + gotplt_offset;
9130 }
9131 else if (h != NULL)
9132 {
9133 bfd_vma off;
9134
9135 off = h->got.offset;
9136 BFD_ASSERT (off != (bfd_vma) -1);
9137 if ((off & 1) != 0)
9138 {
9139 /* We have already processed one GOT relocation against
9140 this symbol. */
9141 off &= ~1;
9142 if (globals->root.dynamic_sections_created
9143 && !SYMBOL_REFERENCES_LOCAL (info, h))
9144 *unresolved_reloc_p = FALSE;
9145 }
9146 else
9147 {
9148 Elf_Internal_Rela outrel;
9149
9150 if (h->dynindx != -1 && !SYMBOL_REFERENCES_LOCAL (info, h))
9151 {
9152 /* If the symbol doesn't resolve locally in a static
9153 object, we have an undefined reference. If the
9154 symbol doesn't resolve locally in a dynamic object,
9155 it should be resolved by the dynamic linker. */
9156 if (globals->root.dynamic_sections_created)
9157 {
9158 outrel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_GLOB_DAT);
9159 *unresolved_reloc_p = FALSE;
9160 }
9161 else
9162 outrel.r_info = 0;
9163 outrel.r_addend = 0;
9164 }
9165 else
9166 {
9167 if (dynreloc_st_type == STT_GNU_IFUNC)
9168 outrel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
9169 else if (info->shared &&
9170 (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
9171 || h->root.type != bfd_link_hash_undefweak))
9172 outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
9173 else
9174 outrel.r_info = 0;
9175 outrel.r_addend = dynreloc_value;
9176 }
9177
9178 /* The GOT entry is initialized to zero by default.
9179 See if we should install a different value. */
9180 if (outrel.r_addend != 0
9181 && (outrel.r_info == 0 || globals->use_rel))
9182 {
9183 bfd_put_32 (output_bfd, outrel.r_addend,
9184 sgot->contents + off);
9185 outrel.r_addend = 0;
9186 }
9187
9188 if (outrel.r_info != 0)
9189 {
9190 outrel.r_offset = (sgot->output_section->vma
9191 + sgot->output_offset
9192 + off);
9193 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
9194 }
9195 h->got.offset |= 1;
9196 }
9197 value = sgot->output_offset + off;
9198 }
9199 else
9200 {
9201 bfd_vma off;
9202
9203 BFD_ASSERT (local_got_offsets != NULL &&
9204 local_got_offsets[r_symndx] != (bfd_vma) -1);
9205
9206 off = local_got_offsets[r_symndx];
9207
9208 /* The offset must always be a multiple of 4. We use the
9209 least significant bit to record whether we have already
9210 generated the necessary reloc. */
9211 if ((off & 1) != 0)
9212 off &= ~1;
9213 else
9214 {
9215 if (globals->use_rel)
9216 bfd_put_32 (output_bfd, dynreloc_value, sgot->contents + off);
9217
9218 if (info->shared || dynreloc_st_type == STT_GNU_IFUNC)
9219 {
9220 Elf_Internal_Rela outrel;
9221
9222 outrel.r_addend = addend + dynreloc_value;
9223 outrel.r_offset = (sgot->output_section->vma
9224 + sgot->output_offset
9225 + off);
9226 if (dynreloc_st_type == STT_GNU_IFUNC)
9227 outrel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
9228 else
9229 outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
9230 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
9231 }
9232
9233 local_got_offsets[r_symndx] |= 1;
9234 }
9235
9236 value = sgot->output_offset + off;
9237 }
9238 if (r_type != R_ARM_GOT32)
9239 value += sgot->output_section->vma;
9240
9241 return _bfd_final_link_relocate (howto, input_bfd, input_section,
9242 contents, rel->r_offset, value,
9243 rel->r_addend);
9244
9245 case R_ARM_TLS_LDO32:
9246 value = value - dtpoff_base (info);
9247
9248 return _bfd_final_link_relocate (howto, input_bfd, input_section,
9249 contents, rel->r_offset, value,
9250 rel->r_addend);
9251
9252 case R_ARM_TLS_LDM32:
9253 {
9254 bfd_vma off;
9255
9256 if (sgot == NULL)
9257 abort ();
9258
9259 off = globals->tls_ldm_got.offset;
9260
9261 if ((off & 1) != 0)
9262 off &= ~1;
9263 else
9264 {
9265 /* If we don't know the module number, create a relocation
9266 for it. */
9267 if (info->shared)
9268 {
9269 Elf_Internal_Rela outrel;
9270
9271 if (srelgot == NULL)
9272 abort ();
9273
9274 outrel.r_addend = 0;
9275 outrel.r_offset = (sgot->output_section->vma
9276 + sgot->output_offset + off);
9277 outrel.r_info = ELF32_R_INFO (0, R_ARM_TLS_DTPMOD32);
9278
9279 if (globals->use_rel)
9280 bfd_put_32 (output_bfd, outrel.r_addend,
9281 sgot->contents + off);
9282
9283 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
9284 }
9285 else
9286 bfd_put_32 (output_bfd, 1, sgot->contents + off);
9287
9288 globals->tls_ldm_got.offset |= 1;
9289 }
9290
9291 value = sgot->output_section->vma + sgot->output_offset + off
9292 - (input_section->output_section->vma + input_section->output_offset + rel->r_offset);
9293
9294 return _bfd_final_link_relocate (howto, input_bfd, input_section,
9295 contents, rel->r_offset, value,
9296 rel->r_addend);
9297 }
9298
9299 case R_ARM_TLS_CALL:
9300 case R_ARM_THM_TLS_CALL:
9301 case R_ARM_TLS_GD32:
9302 case R_ARM_TLS_IE32:
9303 case R_ARM_TLS_GOTDESC:
9304 case R_ARM_TLS_DESCSEQ:
9305 case R_ARM_THM_TLS_DESCSEQ:
9306 {
9307 bfd_vma off, offplt;
9308 int indx = 0;
9309 char tls_type;
9310
9311 BFD_ASSERT (sgot != NULL);
9312
9313 if (h != NULL)
9314 {
9315 bfd_boolean dyn;
9316 dyn = globals->root.dynamic_sections_created;
9317 if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
9318 && (!info->shared
9319 || !SYMBOL_REFERENCES_LOCAL (info, h)))
9320 {
9321 *unresolved_reloc_p = FALSE;
9322 indx = h->dynindx;
9323 }
9324 off = h->got.offset;
9325 offplt = elf32_arm_hash_entry (h)->tlsdesc_got;
9326 tls_type = ((struct elf32_arm_link_hash_entry *) h)->tls_type;
9327 }
9328 else
9329 {
9330 BFD_ASSERT (local_got_offsets != NULL);
9331 off = local_got_offsets[r_symndx];
9332 offplt = local_tlsdesc_gotents[r_symndx];
9333 tls_type = elf32_arm_local_got_tls_type (input_bfd)[r_symndx];
9334 }
9335
9336 /* Linker relaxations happens from one of the
9337 R_ARM_{GOTDESC,CALL,DESCSEQ} relocations to IE or LE. */
9338 if (ELF32_R_TYPE(rel->r_info) != r_type)
9339 tls_type = GOT_TLS_IE;
9340
9341 BFD_ASSERT (tls_type != GOT_UNKNOWN);
9342
9343 if ((off & 1) != 0)
9344 off &= ~1;
9345 else
9346 {
9347 bfd_boolean need_relocs = FALSE;
9348 Elf_Internal_Rela outrel;
9349 int cur_off = off;
9350
9351 /* The GOT entries have not been initialized yet. Do it
9352 now, and emit any relocations. If both an IE GOT and a
9353 GD GOT are necessary, we emit the GD first. */
9354
9355 if ((info->shared || indx != 0)
9356 && (h == NULL
9357 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
9358 || h->root.type != bfd_link_hash_undefweak))
9359 {
9360 need_relocs = TRUE;
9361 BFD_ASSERT (srelgot != NULL);
9362 }
9363
9364 if (tls_type & GOT_TLS_GDESC)
9365 {
9366 bfd_byte *loc;
9367
9368 /* We should have relaxed, unless this is an undefined
9369 weak symbol. */
9370 BFD_ASSERT ((h && (h->root.type == bfd_link_hash_undefweak))
9371 || info->shared);
9372 BFD_ASSERT (globals->sgotplt_jump_table_size + offplt + 8
9373 <= globals->root.sgotplt->size);
9374
9375 outrel.r_addend = 0;
9376 outrel.r_offset = (globals->root.sgotplt->output_section->vma
9377 + globals->root.sgotplt->output_offset
9378 + offplt
9379 + globals->sgotplt_jump_table_size);
9380
9381 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_DESC);
9382 sreloc = globals->root.srelplt;
9383 loc = sreloc->contents;
9384 loc += globals->next_tls_desc_index++ * RELOC_SIZE (globals);
9385 BFD_ASSERT (loc + RELOC_SIZE (globals)
9386 <= sreloc->contents + sreloc->size);
9387
9388 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
9389
9390 /* For globals, the first word in the relocation gets
9391 the relocation index and the top bit set, or zero,
9392 if we're binding now. For locals, it gets the
9393 symbol's offset in the tls section. */
9394 bfd_put_32 (output_bfd,
9395 !h ? value - elf_hash_table (info)->tls_sec->vma
9396 : info->flags & DF_BIND_NOW ? 0
9397 : 0x80000000 | ELF32_R_SYM (outrel.r_info),
9398 globals->root.sgotplt->contents + offplt
9399 + globals->sgotplt_jump_table_size);
9400
9401 /* Second word in the relocation is always zero. */
9402 bfd_put_32 (output_bfd, 0,
9403 globals->root.sgotplt->contents + offplt
9404 + globals->sgotplt_jump_table_size + 4);
9405 }
9406 if (tls_type & GOT_TLS_GD)
9407 {
9408 if (need_relocs)
9409 {
9410 outrel.r_addend = 0;
9411 outrel.r_offset = (sgot->output_section->vma
9412 + sgot->output_offset
9413 + cur_off);
9414 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_DTPMOD32);
9415
9416 if (globals->use_rel)
9417 bfd_put_32 (output_bfd, outrel.r_addend,
9418 sgot->contents + cur_off);
9419
9420 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
9421
9422 if (indx == 0)
9423 bfd_put_32 (output_bfd, value - dtpoff_base (info),
9424 sgot->contents + cur_off + 4);
9425 else
9426 {
9427 outrel.r_addend = 0;
9428 outrel.r_info = ELF32_R_INFO (indx,
9429 R_ARM_TLS_DTPOFF32);
9430 outrel.r_offset += 4;
9431
9432 if (globals->use_rel)
9433 bfd_put_32 (output_bfd, outrel.r_addend,
9434 sgot->contents + cur_off + 4);
9435
9436 elf32_arm_add_dynreloc (output_bfd, info,
9437 srelgot, &outrel);
9438 }
9439 }
9440 else
9441 {
9442 /* If we are not emitting relocations for a
9443 general dynamic reference, then we must be in a
9444 static link or an executable link with the
9445 symbol binding locally. Mark it as belonging
9446 to module 1, the executable. */
9447 bfd_put_32 (output_bfd, 1,
9448 sgot->contents + cur_off);
9449 bfd_put_32 (output_bfd, value - dtpoff_base (info),
9450 sgot->contents + cur_off + 4);
9451 }
9452
9453 cur_off += 8;
9454 }
9455
9456 if (tls_type & GOT_TLS_IE)
9457 {
9458 if (need_relocs)
9459 {
9460 if (indx == 0)
9461 outrel.r_addend = value - dtpoff_base (info);
9462 else
9463 outrel.r_addend = 0;
9464 outrel.r_offset = (sgot->output_section->vma
9465 + sgot->output_offset
9466 + cur_off);
9467 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_TPOFF32);
9468
9469 if (globals->use_rel)
9470 bfd_put_32 (output_bfd, outrel.r_addend,
9471 sgot->contents + cur_off);
9472
9473 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
9474 }
9475 else
9476 bfd_put_32 (output_bfd, tpoff (info, value),
9477 sgot->contents + cur_off);
9478 cur_off += 4;
9479 }
9480
9481 if (h != NULL)
9482 h->got.offset |= 1;
9483 else
9484 local_got_offsets[r_symndx] |= 1;
9485 }
9486
9487 if ((tls_type & GOT_TLS_GD) && r_type != R_ARM_TLS_GD32)
9488 off += 8;
9489 else if (tls_type & GOT_TLS_GDESC)
9490 off = offplt;
9491
9492 if (ELF32_R_TYPE(rel->r_info) == R_ARM_TLS_CALL
9493 || ELF32_R_TYPE(rel->r_info) == R_ARM_THM_TLS_CALL)
9494 {
9495 bfd_signed_vma offset;
9496 /* TLS stubs are arm mode. The original symbol is a
9497 data object, so branch_type is bogus. */
9498 branch_type = ST_BRANCH_TO_ARM;
9499 enum elf32_arm_stub_type stub_type
9500 = arm_type_of_stub (info, input_section, rel,
9501 st_type, &branch_type,
9502 (struct elf32_arm_link_hash_entry *)h,
9503 globals->tls_trampoline, globals->root.splt,
9504 input_bfd, sym_name);
9505
9506 if (stub_type != arm_stub_none)
9507 {
9508 struct elf32_arm_stub_hash_entry *stub_entry
9509 = elf32_arm_get_stub_entry
9510 (input_section, globals->root.splt, 0, rel,
9511 globals, stub_type);
9512 offset = (stub_entry->stub_offset
9513 + stub_entry->stub_sec->output_offset
9514 + stub_entry->stub_sec->output_section->vma);
9515 }
9516 else
9517 offset = (globals->root.splt->output_section->vma
9518 + globals->root.splt->output_offset
9519 + globals->tls_trampoline);
9520
9521 if (ELF32_R_TYPE(rel->r_info) == R_ARM_TLS_CALL)
9522 {
9523 unsigned long inst;
9524
9525 offset -= (input_section->output_section->vma
9526 + input_section->output_offset
9527 + rel->r_offset + 8);
9528
9529 inst = offset >> 2;
9530 inst &= 0x00ffffff;
9531 value = inst | (globals->use_blx ? 0xfa000000 : 0xeb000000);
9532 }
9533 else
9534 {
9535 /* Thumb blx encodes the offset in a complicated
9536 fashion. */
9537 unsigned upper_insn, lower_insn;
9538 unsigned neg;
9539
9540 offset -= (input_section->output_section->vma
9541 + input_section->output_offset
9542 + rel->r_offset + 4);
9543
9544 if (stub_type != arm_stub_none
9545 && arm_stub_is_thumb (stub_type))
9546 {
9547 lower_insn = 0xd000;
9548 }
9549 else
9550 {
9551 lower_insn = 0xc000;
9552 /* Round up the offset to a word boundary */
9553 offset = (offset + 2) & ~2;
9554 }
9555
9556 neg = offset < 0;
9557 upper_insn = (0xf000
9558 | ((offset >> 12) & 0x3ff)
9559 | (neg << 10));
9560 lower_insn |= (((!((offset >> 23) & 1)) ^ neg) << 13)
9561 | (((!((offset >> 22) & 1)) ^ neg) << 11)
9562 | ((offset >> 1) & 0x7ff);
9563 bfd_put_16 (input_bfd, upper_insn, hit_data);
9564 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
9565 return bfd_reloc_ok;
9566 }
9567 }
9568 /* These relocations needs special care, as besides the fact
9569 they point somewhere in .gotplt, the addend must be
9570 adjusted accordingly depending on the type of instruction
9571 we refer to */
9572 else if ((r_type == R_ARM_TLS_GOTDESC) && (tls_type & GOT_TLS_GDESC))
9573 {
9574 unsigned long data, insn;
9575 unsigned thumb;
9576
9577 data = bfd_get_32 (input_bfd, hit_data);
9578 thumb = data & 1;
9579 data &= ~1u;
9580
9581 if (thumb)
9582 {
9583 insn = bfd_get_16 (input_bfd, contents + rel->r_offset - data);
9584 if ((insn & 0xf000) == 0xf000 || (insn & 0xf800) == 0xe800)
9585 insn = (insn << 16)
9586 | bfd_get_16 (input_bfd,
9587 contents + rel->r_offset - data + 2);
9588 if ((insn & 0xf800c000) == 0xf000c000)
9589 /* bl/blx */
9590 value = -6;
9591 else if ((insn & 0xffffff00) == 0x4400)
9592 /* add */
9593 value = -5;
9594 else
9595 {
9596 (*_bfd_error_handler)
9597 (_("%B(%A+0x%lx):unexpected Thumb instruction '0x%x' referenced by TLS_GOTDESC"),
9598 input_bfd, input_section,
9599 (unsigned long)rel->r_offset, insn);
9600 return bfd_reloc_notsupported;
9601 }
9602 }
9603 else
9604 {
9605 insn = bfd_get_32 (input_bfd, contents + rel->r_offset - data);
9606
9607 switch (insn >> 24)
9608 {
9609 case 0xeb: /* bl */
9610 case 0xfa: /* blx */
9611 value = -4;
9612 break;
9613
9614 case 0xe0: /* add */
9615 value = -8;
9616 break;
9617
9618 default:
9619 (*_bfd_error_handler)
9620 (_("%B(%A+0x%lx):unexpected ARM instruction '0x%x' referenced by TLS_GOTDESC"),
9621 input_bfd, input_section,
9622 (unsigned long)rel->r_offset, insn);
9623 return bfd_reloc_notsupported;
9624 }
9625 }
9626
9627 value += ((globals->root.sgotplt->output_section->vma
9628 + globals->root.sgotplt->output_offset + off)
9629 - (input_section->output_section->vma
9630 + input_section->output_offset
9631 + rel->r_offset)
9632 + globals->sgotplt_jump_table_size);
9633 }
9634 else
9635 value = ((globals->root.sgot->output_section->vma
9636 + globals->root.sgot->output_offset + off)
9637 - (input_section->output_section->vma
9638 + input_section->output_offset + rel->r_offset));
9639
9640 return _bfd_final_link_relocate (howto, input_bfd, input_section,
9641 contents, rel->r_offset, value,
9642 rel->r_addend);
9643 }
9644
9645 case R_ARM_TLS_LE32:
9646 if (info->shared && !info->pie)
9647 {
9648 (*_bfd_error_handler)
9649 (_("%B(%A+0x%lx): R_ARM_TLS_LE32 relocation not permitted in shared object"),
9650 input_bfd, input_section,
9651 (long) rel->r_offset, howto->name);
9652 return bfd_reloc_notsupported;
9653 }
9654 else
9655 value = tpoff (info, value);
9656
9657 return _bfd_final_link_relocate (howto, input_bfd, input_section,
9658 contents, rel->r_offset, value,
9659 rel->r_addend);
9660
9661 case R_ARM_V4BX:
9662 if (globals->fix_v4bx)
9663 {
9664 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
9665
9666 /* Ensure that we have a BX instruction. */
9667 BFD_ASSERT ((insn & 0x0ffffff0) == 0x012fff10);
9668
9669 if (globals->fix_v4bx == 2 && (insn & 0xf) != 0xf)
9670 {
9671 /* Branch to veneer. */
9672 bfd_vma glue_addr;
9673 glue_addr = elf32_arm_bx_glue (info, insn & 0xf);
9674 glue_addr -= input_section->output_section->vma
9675 + input_section->output_offset
9676 + rel->r_offset + 8;
9677 insn = (insn & 0xf0000000) | 0x0a000000
9678 | ((glue_addr >> 2) & 0x00ffffff);
9679 }
9680 else
9681 {
9682 /* Preserve Rm (lowest four bits) and the condition code
9683 (highest four bits). Other bits encode MOV PC,Rm. */
9684 insn = (insn & 0xf000000f) | 0x01a0f000;
9685 }
9686
9687 bfd_put_32 (input_bfd, insn, hit_data);
9688 }
9689 return bfd_reloc_ok;
9690
9691 case R_ARM_MOVW_ABS_NC:
9692 case R_ARM_MOVT_ABS:
9693 case R_ARM_MOVW_PREL_NC:
9694 case R_ARM_MOVT_PREL:
9695 /* Until we properly support segment-base-relative addressing then
9696 we assume the segment base to be zero, as for the group relocations.
9697 Thus R_ARM_MOVW_BREL_NC has the same semantics as R_ARM_MOVW_ABS_NC
9698 and R_ARM_MOVT_BREL has the same semantics as R_ARM_MOVT_ABS. */
9699 case R_ARM_MOVW_BREL_NC:
9700 case R_ARM_MOVW_BREL:
9701 case R_ARM_MOVT_BREL:
9702 {
9703 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
9704
9705 if (globals->use_rel)
9706 {
9707 addend = ((insn >> 4) & 0xf000) | (insn & 0xfff);
9708 signed_addend = (addend ^ 0x8000) - 0x8000;
9709 }
9710
9711 value += signed_addend;
9712
9713 if (r_type == R_ARM_MOVW_PREL_NC || r_type == R_ARM_MOVT_PREL)
9714 value -= (input_section->output_section->vma
9715 + input_section->output_offset + rel->r_offset);
9716
9717 if (r_type == R_ARM_MOVW_BREL && value >= 0x10000)
9718 return bfd_reloc_overflow;
9719
9720 if (branch_type == ST_BRANCH_TO_THUMB)
9721 value |= 1;
9722
9723 if (r_type == R_ARM_MOVT_ABS || r_type == R_ARM_MOVT_PREL
9724 || r_type == R_ARM_MOVT_BREL)
9725 value >>= 16;
9726
9727 insn &= 0xfff0f000;
9728 insn |= value & 0xfff;
9729 insn |= (value & 0xf000) << 4;
9730 bfd_put_32 (input_bfd, insn, hit_data);
9731 }
9732 return bfd_reloc_ok;
9733
9734 case R_ARM_THM_MOVW_ABS_NC:
9735 case R_ARM_THM_MOVT_ABS:
9736 case R_ARM_THM_MOVW_PREL_NC:
9737 case R_ARM_THM_MOVT_PREL:
9738 /* Until we properly support segment-base-relative addressing then
9739 we assume the segment base to be zero, as for the above relocations.
9740 Thus R_ARM_THM_MOVW_BREL_NC has the same semantics as
9741 R_ARM_THM_MOVW_ABS_NC and R_ARM_THM_MOVT_BREL has the same semantics
9742 as R_ARM_THM_MOVT_ABS. */
9743 case R_ARM_THM_MOVW_BREL_NC:
9744 case R_ARM_THM_MOVW_BREL:
9745 case R_ARM_THM_MOVT_BREL:
9746 {
9747 bfd_vma insn;
9748
9749 insn = bfd_get_16 (input_bfd, hit_data) << 16;
9750 insn |= bfd_get_16 (input_bfd, hit_data + 2);
9751
9752 if (globals->use_rel)
9753 {
9754 addend = ((insn >> 4) & 0xf000)
9755 | ((insn >> 15) & 0x0800)
9756 | ((insn >> 4) & 0x0700)
9757 | (insn & 0x00ff);
9758 signed_addend = (addend ^ 0x8000) - 0x8000;
9759 }
9760
9761 value += signed_addend;
9762
9763 if (r_type == R_ARM_THM_MOVW_PREL_NC || r_type == R_ARM_THM_MOVT_PREL)
9764 value -= (input_section->output_section->vma
9765 + input_section->output_offset + rel->r_offset);
9766
9767 if (r_type == R_ARM_THM_MOVW_BREL && value >= 0x10000)
9768 return bfd_reloc_overflow;
9769
9770 if (branch_type == ST_BRANCH_TO_THUMB)
9771 value |= 1;
9772
9773 if (r_type == R_ARM_THM_MOVT_ABS || r_type == R_ARM_THM_MOVT_PREL
9774 || r_type == R_ARM_THM_MOVT_BREL)
9775 value >>= 16;
9776
9777 insn &= 0xfbf08f00;
9778 insn |= (value & 0xf000) << 4;
9779 insn |= (value & 0x0800) << 15;
9780 insn |= (value & 0x0700) << 4;
9781 insn |= (value & 0x00ff);
9782
9783 bfd_put_16 (input_bfd, insn >> 16, hit_data);
9784 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
9785 }
9786 return bfd_reloc_ok;
9787
9788 case R_ARM_ALU_PC_G0_NC:
9789 case R_ARM_ALU_PC_G1_NC:
9790 case R_ARM_ALU_PC_G0:
9791 case R_ARM_ALU_PC_G1:
9792 case R_ARM_ALU_PC_G2:
9793 case R_ARM_ALU_SB_G0_NC:
9794 case R_ARM_ALU_SB_G1_NC:
9795 case R_ARM_ALU_SB_G0:
9796 case R_ARM_ALU_SB_G1:
9797 case R_ARM_ALU_SB_G2:
9798 {
9799 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
9800 bfd_vma pc = input_section->output_section->vma
9801 + input_section->output_offset + rel->r_offset;
9802 /* sb should be the origin of the *segment* containing the symbol.
9803 It is not clear how to obtain this OS-dependent value, so we
9804 make an arbitrary choice of zero. */
9805 bfd_vma sb = 0;
9806 bfd_vma residual;
9807 bfd_vma g_n;
9808 bfd_signed_vma signed_value;
9809 int group = 0;
9810
9811 /* Determine which group of bits to select. */
9812 switch (r_type)
9813 {
9814 case R_ARM_ALU_PC_G0_NC:
9815 case R_ARM_ALU_PC_G0:
9816 case R_ARM_ALU_SB_G0_NC:
9817 case R_ARM_ALU_SB_G0:
9818 group = 0;
9819 break;
9820
9821 case R_ARM_ALU_PC_G1_NC:
9822 case R_ARM_ALU_PC_G1:
9823 case R_ARM_ALU_SB_G1_NC:
9824 case R_ARM_ALU_SB_G1:
9825 group = 1;
9826 break;
9827
9828 case R_ARM_ALU_PC_G2:
9829 case R_ARM_ALU_SB_G2:
9830 group = 2;
9831 break;
9832
9833 default:
9834 abort ();
9835 }
9836
9837 /* If REL, extract the addend from the insn. If RELA, it will
9838 have already been fetched for us. */
9839 if (globals->use_rel)
9840 {
9841 int negative;
9842 bfd_vma constant = insn & 0xff;
9843 bfd_vma rotation = (insn & 0xf00) >> 8;
9844
9845 if (rotation == 0)
9846 signed_addend = constant;
9847 else
9848 {
9849 /* Compensate for the fact that in the instruction, the
9850 rotation is stored in multiples of 2 bits. */
9851 rotation *= 2;
9852
9853 /* Rotate "constant" right by "rotation" bits. */
9854 signed_addend = (constant >> rotation) |
9855 (constant << (8 * sizeof (bfd_vma) - rotation));
9856 }
9857
9858 /* Determine if the instruction is an ADD or a SUB.
9859 (For REL, this determines the sign of the addend.) */
9860 negative = identify_add_or_sub (insn);
9861 if (negative == 0)
9862 {
9863 (*_bfd_error_handler)
9864 (_("%B(%A+0x%lx): Only ADD or SUB instructions are allowed for ALU group relocations"),
9865 input_bfd, input_section,
9866 (long) rel->r_offset, howto->name);
9867 return bfd_reloc_overflow;
9868 }
9869
9870 signed_addend *= negative;
9871 }
9872
9873 /* Compute the value (X) to go in the place. */
9874 if (r_type == R_ARM_ALU_PC_G0_NC
9875 || r_type == R_ARM_ALU_PC_G1_NC
9876 || r_type == R_ARM_ALU_PC_G0
9877 || r_type == R_ARM_ALU_PC_G1
9878 || r_type == R_ARM_ALU_PC_G2)
9879 /* PC relative. */
9880 signed_value = value - pc + signed_addend;
9881 else
9882 /* Section base relative. */
9883 signed_value = value - sb + signed_addend;
9884
9885 /* If the target symbol is a Thumb function, then set the
9886 Thumb bit in the address. */
9887 if (branch_type == ST_BRANCH_TO_THUMB)
9888 signed_value |= 1;
9889
9890 /* Calculate the value of the relevant G_n, in encoded
9891 constant-with-rotation format. */
9892 g_n = calculate_group_reloc_mask (abs (signed_value), group,
9893 &residual);
9894
9895 /* Check for overflow if required. */
9896 if ((r_type == R_ARM_ALU_PC_G0
9897 || r_type == R_ARM_ALU_PC_G1
9898 || r_type == R_ARM_ALU_PC_G2
9899 || r_type == R_ARM_ALU_SB_G0
9900 || r_type == R_ARM_ALU_SB_G1
9901 || r_type == R_ARM_ALU_SB_G2) && residual != 0)
9902 {
9903 (*_bfd_error_handler)
9904 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
9905 input_bfd, input_section,
9906 (long) rel->r_offset, abs (signed_value), howto->name);
9907 return bfd_reloc_overflow;
9908 }
9909
9910 /* Mask out the value and the ADD/SUB part of the opcode; take care
9911 not to destroy the S bit. */
9912 insn &= 0xff1ff000;
9913
9914 /* Set the opcode according to whether the value to go in the
9915 place is negative. */
9916 if (signed_value < 0)
9917 insn |= 1 << 22;
9918 else
9919 insn |= 1 << 23;
9920
9921 /* Encode the offset. */
9922 insn |= g_n;
9923
9924 bfd_put_32 (input_bfd, insn, hit_data);
9925 }
9926 return bfd_reloc_ok;
9927
9928 case R_ARM_LDR_PC_G0:
9929 case R_ARM_LDR_PC_G1:
9930 case R_ARM_LDR_PC_G2:
9931 case R_ARM_LDR_SB_G0:
9932 case R_ARM_LDR_SB_G1:
9933 case R_ARM_LDR_SB_G2:
9934 {
9935 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
9936 bfd_vma pc = input_section->output_section->vma
9937 + input_section->output_offset + rel->r_offset;
9938 bfd_vma sb = 0; /* See note above. */
9939 bfd_vma residual;
9940 bfd_signed_vma signed_value;
9941 int group = 0;
9942
9943 /* Determine which groups of bits to calculate. */
9944 switch (r_type)
9945 {
9946 case R_ARM_LDR_PC_G0:
9947 case R_ARM_LDR_SB_G0:
9948 group = 0;
9949 break;
9950
9951 case R_ARM_LDR_PC_G1:
9952 case R_ARM_LDR_SB_G1:
9953 group = 1;
9954 break;
9955
9956 case R_ARM_LDR_PC_G2:
9957 case R_ARM_LDR_SB_G2:
9958 group = 2;
9959 break;
9960
9961 default:
9962 abort ();
9963 }
9964
9965 /* If REL, extract the addend from the insn. If RELA, it will
9966 have already been fetched for us. */
9967 if (globals->use_rel)
9968 {
9969 int negative = (insn & (1 << 23)) ? 1 : -1;
9970 signed_addend = negative * (insn & 0xfff);
9971 }
9972
9973 /* Compute the value (X) to go in the place. */
9974 if (r_type == R_ARM_LDR_PC_G0
9975 || r_type == R_ARM_LDR_PC_G1
9976 || r_type == R_ARM_LDR_PC_G2)
9977 /* PC relative. */
9978 signed_value = value - pc + signed_addend;
9979 else
9980 /* Section base relative. */
9981 signed_value = value - sb + signed_addend;
9982
9983 /* Calculate the value of the relevant G_{n-1} to obtain
9984 the residual at that stage. */
9985 calculate_group_reloc_mask (abs (signed_value), group - 1, &residual);
9986
9987 /* Check for overflow. */
9988 if (residual >= 0x1000)
9989 {
9990 (*_bfd_error_handler)
9991 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
9992 input_bfd, input_section,
9993 (long) rel->r_offset, abs (signed_value), howto->name);
9994 return bfd_reloc_overflow;
9995 }
9996
9997 /* Mask out the value and U bit. */
9998 insn &= 0xff7ff000;
9999
10000 /* Set the U bit if the value to go in the place is non-negative. */
10001 if (signed_value >= 0)
10002 insn |= 1 << 23;
10003
10004 /* Encode the offset. */
10005 insn |= residual;
10006
10007 bfd_put_32 (input_bfd, insn, hit_data);
10008 }
10009 return bfd_reloc_ok;
10010
10011 case R_ARM_LDRS_PC_G0:
10012 case R_ARM_LDRS_PC_G1:
10013 case R_ARM_LDRS_PC_G2:
10014 case R_ARM_LDRS_SB_G0:
10015 case R_ARM_LDRS_SB_G1:
10016 case R_ARM_LDRS_SB_G2:
10017 {
10018 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
10019 bfd_vma pc = input_section->output_section->vma
10020 + input_section->output_offset + rel->r_offset;
10021 bfd_vma sb = 0; /* See note above. */
10022 bfd_vma residual;
10023 bfd_signed_vma signed_value;
10024 int group = 0;
10025
10026 /* Determine which groups of bits to calculate. */
10027 switch (r_type)
10028 {
10029 case R_ARM_LDRS_PC_G0:
10030 case R_ARM_LDRS_SB_G0:
10031 group = 0;
10032 break;
10033
10034 case R_ARM_LDRS_PC_G1:
10035 case R_ARM_LDRS_SB_G1:
10036 group = 1;
10037 break;
10038
10039 case R_ARM_LDRS_PC_G2:
10040 case R_ARM_LDRS_SB_G2:
10041 group = 2;
10042 break;
10043
10044 default:
10045 abort ();
10046 }
10047
10048 /* If REL, extract the addend from the insn. If RELA, it will
10049 have already been fetched for us. */
10050 if (globals->use_rel)
10051 {
10052 int negative = (insn & (1 << 23)) ? 1 : -1;
10053 signed_addend = negative * (((insn & 0xf00) >> 4) + (insn & 0xf));
10054 }
10055
10056 /* Compute the value (X) to go in the place. */
10057 if (r_type == R_ARM_LDRS_PC_G0
10058 || r_type == R_ARM_LDRS_PC_G1
10059 || r_type == R_ARM_LDRS_PC_G2)
10060 /* PC relative. */
10061 signed_value = value - pc + signed_addend;
10062 else
10063 /* Section base relative. */
10064 signed_value = value - sb + signed_addend;
10065
10066 /* Calculate the value of the relevant G_{n-1} to obtain
10067 the residual at that stage. */
10068 calculate_group_reloc_mask (abs (signed_value), group - 1, &residual);
10069
10070 /* Check for overflow. */
10071 if (residual >= 0x100)
10072 {
10073 (*_bfd_error_handler)
10074 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
10075 input_bfd, input_section,
10076 (long) rel->r_offset, abs (signed_value), howto->name);
10077 return bfd_reloc_overflow;
10078 }
10079
10080 /* Mask out the value and U bit. */
10081 insn &= 0xff7ff0f0;
10082
10083 /* Set the U bit if the value to go in the place is non-negative. */
10084 if (signed_value >= 0)
10085 insn |= 1 << 23;
10086
10087 /* Encode the offset. */
10088 insn |= ((residual & 0xf0) << 4) | (residual & 0xf);
10089
10090 bfd_put_32 (input_bfd, insn, hit_data);
10091 }
10092 return bfd_reloc_ok;
10093
10094 case R_ARM_LDC_PC_G0:
10095 case R_ARM_LDC_PC_G1:
10096 case R_ARM_LDC_PC_G2:
10097 case R_ARM_LDC_SB_G0:
10098 case R_ARM_LDC_SB_G1:
10099 case R_ARM_LDC_SB_G2:
10100 {
10101 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
10102 bfd_vma pc = input_section->output_section->vma
10103 + input_section->output_offset + rel->r_offset;
10104 bfd_vma sb = 0; /* See note above. */
10105 bfd_vma residual;
10106 bfd_signed_vma signed_value;
10107 int group = 0;
10108
10109 /* Determine which groups of bits to calculate. */
10110 switch (r_type)
10111 {
10112 case R_ARM_LDC_PC_G0:
10113 case R_ARM_LDC_SB_G0:
10114 group = 0;
10115 break;
10116
10117 case R_ARM_LDC_PC_G1:
10118 case R_ARM_LDC_SB_G1:
10119 group = 1;
10120 break;
10121
10122 case R_ARM_LDC_PC_G2:
10123 case R_ARM_LDC_SB_G2:
10124 group = 2;
10125 break;
10126
10127 default:
10128 abort ();
10129 }
10130
10131 /* If REL, extract the addend from the insn. If RELA, it will
10132 have already been fetched for us. */
10133 if (globals->use_rel)
10134 {
10135 int negative = (insn & (1 << 23)) ? 1 : -1;
10136 signed_addend = negative * ((insn & 0xff) << 2);
10137 }
10138
10139 /* Compute the value (X) to go in the place. */
10140 if (r_type == R_ARM_LDC_PC_G0
10141 || r_type == R_ARM_LDC_PC_G1
10142 || r_type == R_ARM_LDC_PC_G2)
10143 /* PC relative. */
10144 signed_value = value - pc + signed_addend;
10145 else
10146 /* Section base relative. */
10147 signed_value = value - sb + signed_addend;
10148
10149 /* Calculate the value of the relevant G_{n-1} to obtain
10150 the residual at that stage. */
10151 calculate_group_reloc_mask (abs (signed_value), group - 1, &residual);
10152
10153 /* Check for overflow. (The absolute value to go in the place must be
10154 divisible by four and, after having been divided by four, must
10155 fit in eight bits.) */
10156 if ((residual & 0x3) != 0 || residual >= 0x400)
10157 {
10158 (*_bfd_error_handler)
10159 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
10160 input_bfd, input_section,
10161 (long) rel->r_offset, abs (signed_value), howto->name);
10162 return bfd_reloc_overflow;
10163 }
10164
10165 /* Mask out the value and U bit. */
10166 insn &= 0xff7fff00;
10167
10168 /* Set the U bit if the value to go in the place is non-negative. */
10169 if (signed_value >= 0)
10170 insn |= 1 << 23;
10171
10172 /* Encode the offset. */
10173 insn |= residual >> 2;
10174
10175 bfd_put_32 (input_bfd, insn, hit_data);
10176 }
10177 return bfd_reloc_ok;
10178
10179 default:
10180 return bfd_reloc_notsupported;
10181 }
10182 }
10183
10184 /* Add INCREMENT to the reloc (of type HOWTO) at ADDRESS. */
10185 static void
10186 arm_add_to_rel (bfd * abfd,
10187 bfd_byte * address,
10188 reloc_howto_type * howto,
10189 bfd_signed_vma increment)
10190 {
10191 bfd_signed_vma addend;
10192
10193 if (howto->type == R_ARM_THM_CALL
10194 || howto->type == R_ARM_THM_JUMP24)
10195 {
10196 int upper_insn, lower_insn;
10197 int upper, lower;
10198
10199 upper_insn = bfd_get_16 (abfd, address);
10200 lower_insn = bfd_get_16 (abfd, address + 2);
10201 upper = upper_insn & 0x7ff;
10202 lower = lower_insn & 0x7ff;
10203
10204 addend = (upper << 12) | (lower << 1);
10205 addend += increment;
10206 addend >>= 1;
10207
10208 upper_insn = (upper_insn & 0xf800) | ((addend >> 11) & 0x7ff);
10209 lower_insn = (lower_insn & 0xf800) | (addend & 0x7ff);
10210
10211 bfd_put_16 (abfd, (bfd_vma) upper_insn, address);
10212 bfd_put_16 (abfd, (bfd_vma) lower_insn, address + 2);
10213 }
10214 else
10215 {
10216 bfd_vma contents;
10217
10218 contents = bfd_get_32 (abfd, address);
10219
10220 /* Get the (signed) value from the instruction. */
10221 addend = contents & howto->src_mask;
10222 if (addend & ((howto->src_mask + 1) >> 1))
10223 {
10224 bfd_signed_vma mask;
10225
10226 mask = -1;
10227 mask &= ~ howto->src_mask;
10228 addend |= mask;
10229 }
10230
10231 /* Add in the increment, (which is a byte value). */
10232 switch (howto->type)
10233 {
10234 default:
10235 addend += increment;
10236 break;
10237
10238 case R_ARM_PC24:
10239 case R_ARM_PLT32:
10240 case R_ARM_CALL:
10241 case R_ARM_JUMP24:
10242 addend <<= howto->size;
10243 addend += increment;
10244
10245 /* Should we check for overflow here ? */
10246
10247 /* Drop any undesired bits. */
10248 addend >>= howto->rightshift;
10249 break;
10250 }
10251
10252 contents = (contents & ~ howto->dst_mask) | (addend & howto->dst_mask);
10253
10254 bfd_put_32 (abfd, contents, address);
10255 }
10256 }
10257
/* Return non-zero if R_TYPE is any ARM TLS relocation: the generic
   ABI TLS relocations listed here, plus the GNU TLS descriptor
   dialect covered by IS_ARM_TLS_GNU_RELOC.  */
#define IS_ARM_TLS_RELOC(R_TYPE)	\
  ((R_TYPE) == R_ARM_TLS_GD32		\
   || (R_TYPE) == R_ARM_TLS_LDO32	\
   || (R_TYPE) == R_ARM_TLS_LDM32	\
   || (R_TYPE) == R_ARM_TLS_DTPOFF32	\
   || (R_TYPE) == R_ARM_TLS_DTPMOD32	\
   || (R_TYPE) == R_ARM_TLS_TPOFF32	\
   || (R_TYPE) == R_ARM_TLS_LE32	\
   || (R_TYPE) == R_ARM_TLS_IE32	\
   || IS_ARM_TLS_GNU_RELOC (R_TYPE))
10268
10269 /* Specific set of relocations for the gnu tls dialect. */
10270 #define IS_ARM_TLS_GNU_RELOC(R_TYPE) \
10271 ((R_TYPE) == R_ARM_TLS_GOTDESC \
10272 || (R_TYPE) == R_ARM_TLS_CALL \
10273 || (R_TYPE) == R_ARM_THM_TLS_CALL \
10274 || (R_TYPE) == R_ARM_TLS_DESCSEQ \
10275 || (R_TYPE) == R_ARM_THM_TLS_DESCSEQ)
10276
/* Relocate an ARM ELF section.

   Walk the relocations applying to INPUT_SECTION, resolve each one
   against the local or global symbol it targets, and patch CONTENTS
   accordingly.  Errors (overflow, undefined symbols, unresolvable
   relocations) are reported through the callbacks in INFO.  Returns
   FALSE on a hard error, TRUE otherwise.  */

static bfd_boolean
elf32_arm_relocate_section (bfd * output_bfd,
			    struct bfd_link_info * info,
			    bfd * input_bfd,
			    asection * input_section,
			    bfd_byte * contents,
			    Elf_Internal_Rela * relocs,
			    Elf_Internal_Sym * local_syms,
			    asection ** local_sections)
{
  Elf_Internal_Shdr *symtab_hdr;
  struct elf_link_hash_entry **sym_hashes;
  Elf_Internal_Rela *rel;
  Elf_Internal_Rela *relend;
  const char *name;
  struct elf32_arm_link_hash_table * globals;

  globals = elf32_arm_hash_table (info);
  if (globals == NULL)
    return FALSE;

  symtab_hdr = & elf_symtab_hdr (input_bfd);
  sym_hashes = elf_sym_hashes (input_bfd);

  rel = relocs;
  relend = relocs + input_section->reloc_count;
  for (; rel < relend; rel++)
    {
      int r_type;
      reloc_howto_type * howto;
      unsigned long r_symndx;
      Elf_Internal_Sym * sym;
      asection * sec;
      struct elf_link_hash_entry * h;
      bfd_vma relocation;
      bfd_reloc_status_type r;
      arelent bfd_reloc;
      char sym_type;
      bfd_boolean unresolved_reloc = FALSE;
      char *error_message = NULL;

      r_symndx = ELF32_R_SYM (rel->r_info);
      r_type = ELF32_R_TYPE (rel->r_info);
      /* NOTE(review): arm_real_reloc_type appears to map target-specific
	 relocation numbers onto concrete types — confirm against its
	 definition.  */
      r_type = arm_real_reloc_type (globals, r_type);

      /* VTENTRY/VTINHERIT relocations carry garbage-collection
	 information only; they never modify section contents.  */
      if ( r_type == R_ARM_GNU_VTENTRY
	   || r_type == R_ARM_GNU_VTINHERIT)
	continue;

      bfd_reloc.howto = elf32_arm_howto_from_type (r_type);
      howto = bfd_reloc.howto;

      h = NULL;
      sym = NULL;
      sec = NULL;

      if (r_symndx < symtab_hdr->sh_info)
	{
	  /* A local symbol.  */
	  sym = local_syms + r_symndx;
	  sym_type = ELF32_ST_TYPE (sym->st_info);
	  sec = local_sections[r_symndx];

	  /* An object file might have a reference to a local
	     undefined symbol.  This is a daft object file, but we
	     should at least do something about it.  V4BX & NONE
	     relocations do not use the symbol and are explicitly
	     allowed to use the undefined symbol, so allow those.
	     Likewise for relocations against STN_UNDEF.  */
	  if (r_type != R_ARM_V4BX
	      && r_type != R_ARM_NONE
	      && r_symndx != STN_UNDEF
	      && bfd_is_und_section (sec)
	      && ELF_ST_BIND (sym->st_info) != STB_WEAK)
	    {
	      if (!info->callbacks->undefined_symbol
		  (info, bfd_elf_string_from_elf_section
		   (input_bfd, symtab_hdr->sh_link, sym->st_name),
		   input_bfd, input_section,
		   rel->r_offset, TRUE))
		return FALSE;
	    }

	  if (globals->use_rel)
	    {
	      /* REL format: the addend is stored in the section
		 contents rather than in the relocation entry.  */
	      relocation = (sec->output_section->vma
			    + sec->output_offset
			    + sym->st_value);
	      if (!info->relocatable
		  && (sec->flags & SEC_MERGE)
		  && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
		{
		  /* The target section may have been merged; extract
		     the stored addend from the instruction, let the
		     merge code map it to the post-merge location, and
		     write the adjusted addend back.  */
		  asection *msec;
		  bfd_vma addend, value;

		  switch (r_type)
		    {
		    case R_ARM_MOVW_ABS_NC:
		    case R_ARM_MOVT_ABS:
		      value = bfd_get_32 (input_bfd, contents + rel->r_offset);
		      addend = ((value & 0xf0000) >> 4) | (value & 0xfff);
		      /* Sign-extend from 16 bits.  */
		      addend = (addend ^ 0x8000) - 0x8000;
		      break;

		    case R_ARM_THM_MOVW_ABS_NC:
		    case R_ARM_THM_MOVT_ABS:
		      value = bfd_get_16 (input_bfd, contents + rel->r_offset)
			<< 16;
		      value |= bfd_get_16 (input_bfd,
					   contents + rel->r_offset + 2);
		      addend = ((value & 0xf7000) >> 4) | (value & 0xff)
			| ((value & 0x04000000) >> 15);
		      /* Sign-extend from 16 bits.  */
		      addend = (addend ^ 0x8000) - 0x8000;
		      break;

		    default:
		      /* Only simple, unshifted contiguous-mask addends
			 can be recovered generically.  */
		      if (howto->rightshift
			  || (howto->src_mask & (howto->src_mask + 1)))
			{
			  (*_bfd_error_handler)
			    (_("%B(%A+0x%lx): %s relocation against SEC_MERGE section"),
			     input_bfd, input_section,
			     (long) rel->r_offset, howto->name);
			  return FALSE;
			}

		      value = bfd_get_32 (input_bfd, contents + rel->r_offset);

		      /* Get the (signed) value from the instruction.  */
		      addend = value & howto->src_mask;
		      if (addend & ((howto->src_mask + 1) >> 1))
			{
			  bfd_signed_vma mask;

			  mask = -1;
			  mask &= ~ howto->src_mask;
			  addend |= mask;
			}
		      break;
		    }

		  msec = sec;
		  addend =
		    _bfd_elf_rel_local_sym (output_bfd, sym, &msec, addend)
		    - relocation;
		  addend += msec->output_section->vma + msec->output_offset;

		  /* Cases here must match those in the preceding
		     switch statement.  */
		  switch (r_type)
		    {
		    case R_ARM_MOVW_ABS_NC:
		    case R_ARM_MOVT_ABS:
		      value = (value & 0xfff0f000) | ((addend & 0xf000) << 4)
			| (addend & 0xfff);
		      bfd_put_32 (input_bfd, value, contents + rel->r_offset);
		      break;

		    case R_ARM_THM_MOVW_ABS_NC:
		    case R_ARM_THM_MOVT_ABS:
		      value = (value & 0xfbf08f00) | ((addend & 0xf700) << 4)
			| (addend & 0xff) | ((addend & 0x0800) << 15);
		      bfd_put_16 (input_bfd, value >> 16,
				  contents + rel->r_offset);
		      bfd_put_16 (input_bfd, value,
				  contents + rel->r_offset + 2);
		      break;

		    default:
		      value = (value & ~ howto->dst_mask)
			| (addend & howto->dst_mask);
		      bfd_put_32 (input_bfd, value, contents + rel->r_offset);
		      break;
		    }
		}
	    }
	  else
	    relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
	}
      else
	{
	  /* A global symbol: resolve it through the hash table.  */
	  bfd_boolean warned;

	  RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
				   r_symndx, symtab_hdr, sym_hashes,
				   h, sec, relocation,
				   unresolved_reloc, warned);

	  sym_type = h->type;
	}

      if (sec != NULL && discarded_section (sec))
	RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
					 rel, 1, relend, howto, 0, contents);

      if (info->relocatable)
	{
	  /* This is a relocatable link.  We don't have to change
	     anything, unless the reloc is against a section symbol,
	     in which case we have to adjust according to where the
	     section symbol winds up in the output section.  */
	  if (sym != NULL && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
	    {
	      if (globals->use_rel)
		arm_add_to_rel (input_bfd, contents + rel->r_offset,
				howto, (bfd_signed_vma) sec->output_offset);
	      else
		rel->r_addend += sec->output_offset;
	    }
	  continue;
	}

      if (h != NULL)
	name = h->root.root.string;
      else
	{
	  name = (bfd_elf_string_from_elf_section
		  (input_bfd, symtab_hdr->sh_link, sym->st_name));
	  if (name == NULL || *name == '\0')
	    name = bfd_section_name (input_bfd, sec);
	}

      /* Diagnose TLS relocations applied to non-TLS symbols and
	 vice versa.  */
      if (r_symndx != STN_UNDEF
	  && r_type != R_ARM_NONE
	  && (h == NULL
	      || h->root.type == bfd_link_hash_defined
	      || h->root.type == bfd_link_hash_defweak)
	  && IS_ARM_TLS_RELOC (r_type) != (sym_type == STT_TLS))
	{
	  (*_bfd_error_handler)
	    ((sym_type == STT_TLS
	      ? _("%B(%A+0x%lx): %s used with TLS symbol %s")
	      : _("%B(%A+0x%lx): %s used with non-TLS symbol %s")),
	     input_bfd,
	     input_section,
	     (long) rel->r_offset,
	     howto->name,
	     name);
	}

      /* We call elf32_arm_final_link_relocate unless we're completely
	 done, i.e., the relaxation produced the final output we want,
	 and we won't let anybody mess with it.  Also, we have to do
	 addend adjustments in case of a R_ARM_TLS_GOTDESC relocation
	 both in relaxed and non-relaxed cases.  */
      if ((elf32_arm_tls_transition (info, r_type, h) != (unsigned)r_type)
	  || (IS_ARM_TLS_GNU_RELOC (r_type)
	      && !((h ? elf32_arm_hash_entry (h)->tls_type :
		    elf32_arm_local_got_tls_type (input_bfd)[r_symndx])
		   & GOT_TLS_GDESC)))
	{
	  r = elf32_arm_tls_relax (globals, input_bfd, input_section,
				   contents, rel, h == NULL);
	  /* This may have been marked unresolved because it came from
	     a shared library.  But we've just dealt with that.  */
	  unresolved_reloc = 0;
	}
      else
	r = bfd_reloc_continue;

      if (r == bfd_reloc_continue)
	r = elf32_arm_final_link_relocate (howto, input_bfd, output_bfd,
					   input_section, contents, rel,
					   relocation, info, sec, name, sym_type,
					   (h ? h->target_internal
					    : ARM_SYM_BRANCH_TYPE (sym)), h,
					   &unresolved_reloc, &error_message);

      /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
	 because such sections are not SEC_ALLOC and thus ld.so will
	 not process them.  */
      if (unresolved_reloc
	  && !((input_section->flags & SEC_DEBUGGING) != 0
	       && h->def_dynamic)
	  && _bfd_elf_section_offset (output_bfd, info, input_section,
				      rel->r_offset) != (bfd_vma) -1)
	{
	  (*_bfd_error_handler)
	    (_("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"),
	     input_bfd,
	     input_section,
	     (long) rel->r_offset,
	     howto->name,
	     h->root.root.string);
	  return FALSE;
	}

      if (r != bfd_reloc_ok)
	{
	  switch (r)
	    {
	    case bfd_reloc_overflow:
	      /* If the overflowing reloc was to an undefined symbol,
		 we have already printed one error message and there
		 is no point complaining again.  */
	      if ((! h ||
		   h->root.type != bfd_link_hash_undefined)
		  && (!((*info->callbacks->reloc_overflow)
			(info, (h ? &h->root : NULL), name, howto->name,
			 (bfd_vma) 0, input_bfd, input_section,
			 rel->r_offset))))
		return FALSE;
	      break;

	    case bfd_reloc_undefined:
	      if (!((*info->callbacks->undefined_symbol)
		    (info, name, input_bfd, input_section,
		     rel->r_offset, TRUE)))
		return FALSE;
	      break;

	    case bfd_reloc_outofrange:
	      error_message = _("out of range");
	      goto common_error;

	    case bfd_reloc_notsupported:
	      error_message = _("unsupported relocation");
	      goto common_error;

	    case bfd_reloc_dangerous:
	      /* error_message should already be set.  */
	      goto common_error;

	    default:
	      error_message = _("unknown error");
	      /* Fall through.  */

	    common_error:
	      BFD_ASSERT (error_message != NULL);
	      if (!((*info->callbacks->reloc_dangerous)
		    (info, error_message, input_bfd, input_section,
		     rel->r_offset)))
		return FALSE;
	      break;
	    }
	}
    }

  return TRUE;
}
10618
10619 /* Add a new unwind edit to the list described by HEAD, TAIL. If TINDEX is zero,
10620 adds the edit to the start of the list. (The list must be built in order of
10621 ascending TINDEX: the function's callers are primarily responsible for
10622 maintaining that condition). */
10623
10624 static void
10625 add_unwind_table_edit (arm_unwind_table_edit **head,
10626 arm_unwind_table_edit **tail,
10627 arm_unwind_edit_type type,
10628 asection *linked_section,
10629 unsigned int tindex)
10630 {
10631 arm_unwind_table_edit *new_edit = (arm_unwind_table_edit *)
10632 xmalloc (sizeof (arm_unwind_table_edit));
10633
10634 new_edit->type = type;
10635 new_edit->linked_section = linked_section;
10636 new_edit->index = tindex;
10637
10638 if (tindex > 0)
10639 {
10640 new_edit->next = NULL;
10641
10642 if (*tail)
10643 (*tail)->next = new_edit;
10644
10645 (*tail) = new_edit;
10646
10647 if (!*head)
10648 (*head) = new_edit;
10649 }
10650 else
10651 {
10652 new_edit->next = *head;
10653
10654 if (!*tail)
10655 *tail = new_edit;
10656
10657 *head = new_edit;
10658 }
10659 }
10660
10661 static _arm_elf_section_data *get_arm_elf_section_data (asection *);
10662
/* Increase the size of EXIDX_SEC by ADJUST bytes.  ADJUST may be negative.  */
static void
adjust_exidx_size(asection *exidx_sec, int adjust)
{
  asection *out_sec;

  /* Record the pre-edit size once, so later passes can tell that the
     section has been resized.  */
  if (!exidx_sec->rawsize)
    exidx_sec->rawsize = exidx_sec->size;

  bfd_set_section_size (exidx_sec->owner, exidx_sec, exidx_sec->size + adjust);
  out_sec = exidx_sec->output_section;
  /* Adjust size of output section.  */
  bfd_set_section_size (out_sec->owner, out_sec, out_sec->size +adjust);
}
10677
10678 /* Insert an EXIDX_CANTUNWIND marker at the end of a section. */
10679 static void
10680 insert_cantunwind_after(asection *text_sec, asection *exidx_sec)
10681 {
10682 struct _arm_elf_section_data *exidx_arm_data;
10683
10684 exidx_arm_data = get_arm_elf_section_data (exidx_sec);
10685 add_unwind_table_edit (
10686 &exidx_arm_data->u.exidx.unwind_edit_list,
10687 &exidx_arm_data->u.exidx.unwind_edit_tail,
10688 INSERT_EXIDX_CANTUNWIND_AT_END, text_sec, UINT_MAX);
10689
10690 adjust_exidx_size(exidx_sec, 8);
10691 }
10692
/* Scan .ARM.exidx tables, and create a list describing edits which should be
   made to those tables, such that:

   1. Regions without unwind data are marked with EXIDX_CANTUNWIND entries.
   2. Duplicate entries are merged together (EXIDX_CANTUNWIND, or unwind
      codes which have been inlined into the index).

   If MERGE_EXIDX_ENTRIES is false, duplicate entries are not merged.

   The edits are applied when the tables are written
   (in elf32_arm_write_section).  */

bfd_boolean
elf32_arm_fix_exidx_coverage (asection **text_section_order,
			      unsigned int num_text_sections,
			      struct bfd_link_info *info,
			      bfd_boolean merge_exidx_entries)
{
  bfd *inp;
  unsigned int last_second_word = 0, i;
  asection *last_exidx_sec = NULL;
  asection *last_text_sec = NULL;
  int last_unwind_type = -1;

  /* Walk over all EXIDX sections, and create backlinks from the corresponding
     text sections.  */
  for (inp = info->input_bfds; inp != NULL; inp = inp->link_next)
    {
      asection *sec;

      for (sec = inp->sections; sec != NULL; sec = sec->next)
	{
	  struct bfd_elf_section_data *elf_sec = elf_section_data (sec);
	  Elf_Internal_Shdr *hdr = &elf_sec->this_hdr;

	  if (!hdr || hdr->sh_type != SHT_ARM_EXIDX)
	    continue;

	  if (elf_sec->linked_to)
	    {
	      Elf_Internal_Shdr *linked_hdr
		= &elf_section_data (elf_sec->linked_to)->this_hdr;
	      struct _arm_elf_section_data *linked_sec_arm_data
		= get_arm_elf_section_data (linked_hdr->bfd_section);

	      if (linked_sec_arm_data == NULL)
		continue;

	      /* Link this .ARM.exidx section back from the text section it
		 describes.  */
	      linked_sec_arm_data->u.text.arm_exidx_sec = sec;
	    }
	}
    }

  /* Walk all text sections in order of increasing VMA.  Eliminate duplicate
     index table entries (EXIDX_CANTUNWIND and inlined unwind opcodes),
     and add EXIDX_CANTUNWIND entries for sections with no unwind table data.  */

  for (i = 0; i < num_text_sections; i++)
    {
      asection *sec = text_section_order[i];
      asection *exidx_sec;
      struct _arm_elf_section_data *arm_data = get_arm_elf_section_data (sec);
      struct _arm_elf_section_data *exidx_arm_data;
      bfd_byte *contents = NULL;
      int deleted_exidx_bytes = 0;
      bfd_vma j;
      arm_unwind_table_edit *unwind_edit_head = NULL;
      arm_unwind_table_edit *unwind_edit_tail = NULL;
      Elf_Internal_Shdr *hdr;
      bfd *ibfd;

      if (arm_data == NULL)
	continue;

      exidx_sec = arm_data->u.text.arm_exidx_sec;
      if (exidx_sec == NULL)
	{
	  /* Section has no unwind data.  */
	  if (last_unwind_type == 0 || !last_exidx_sec)
	    continue;

	  /* Ignore zero sized sections.  */
	  if (sec->size == 0)
	    continue;

	  insert_cantunwind_after(last_text_sec, last_exidx_sec);
	  last_unwind_type = 0;
	  continue;
	}

      /* Skip /DISCARD/ sections.  */
      if (bfd_is_abs_section (exidx_sec->output_section))
	continue;

      hdr = &elf_section_data (exidx_sec)->this_hdr;
      if (hdr->sh_type != SHT_ARM_EXIDX)
	continue;

      exidx_arm_data = get_arm_elf_section_data (exidx_sec);
      if (exidx_arm_data == NULL)
	continue;

      ibfd = exidx_sec->owner;

      if (hdr->contents != NULL)
	contents = hdr->contents;
      else if (! bfd_malloc_and_get_section (ibfd, exidx_sec, &contents))
	/* An error?  */
	continue;

      /* Each EXIDX entry is a pair of words; the second word
	 classifies the entry.  */
      for (j = 0; j < hdr->sh_size; j += 8)
	{
	  unsigned int second_word = bfd_get_32 (ibfd, contents + j + 4);
	  int unwind_type;
	  int elide = 0;

	  /* An EXIDX_CANTUNWIND entry.  */
	  if (second_word == 1)
	    {
	      if (last_unwind_type == 0)
		elide = 1;
	      unwind_type = 0;
	    }
	  /* Inlined unwinding data.  Merge if equal to previous.  */
	  else if ((second_word & 0x80000000) != 0)
	    {
	      if (merge_exidx_entries
		  && last_second_word == second_word && last_unwind_type == 1)
		elide = 1;
	      unwind_type = 1;
	      last_second_word = second_word;
	    }
	  /* Normal table entry.  In theory we could merge these too,
	     but duplicate entries are likely to be much less common.  */
	  else
	    unwind_type = 2;

	  if (elide)
	    {
	      add_unwind_table_edit (&unwind_edit_head, &unwind_edit_tail,
				     DELETE_EXIDX_ENTRY, NULL, j / 8);

	      deleted_exidx_bytes += 8;
	    }

	  last_unwind_type = unwind_type;
	}

      /* Free contents if we allocated it ourselves.  */
      if (contents != hdr->contents)
	free (contents);

      /* Record edits to be applied later (in elf32_arm_write_section).  */
      exidx_arm_data->u.exidx.unwind_edit_list = unwind_edit_head;
      exidx_arm_data->u.exidx.unwind_edit_tail = unwind_edit_tail;

      if (deleted_exidx_bytes > 0)
	adjust_exidx_size(exidx_sec, -deleted_exidx_bytes);

      last_exidx_sec = exidx_sec;
      last_text_sec = sec;
    }

  /* Add terminating CANTUNWIND entry.  */
  if (last_exidx_sec && last_unwind_type != 0)
    insert_cantunwind_after(last_text_sec, last_exidx_sec);

  return TRUE;
}
10864
10865 static bfd_boolean
10866 elf32_arm_output_glue_section (struct bfd_link_info *info, bfd *obfd,
10867 bfd *ibfd, const char *name)
10868 {
10869 asection *sec, *osec;
10870
10871 sec = bfd_get_linker_section (ibfd, name);
10872 if (sec == NULL || (sec->flags & SEC_EXCLUDE) != 0)
10873 return TRUE;
10874
10875 osec = sec->output_section;
10876 if (elf32_arm_write_section (obfd, info, sec, sec->contents))
10877 return TRUE;
10878
10879 if (! bfd_set_section_contents (obfd, osec, sec->contents,
10880 sec->output_offset, sec->size))
10881 return FALSE;
10882
10883 return TRUE;
10884 }
10885
10886 static bfd_boolean
10887 elf32_arm_final_link (bfd *abfd, struct bfd_link_info *info)
10888 {
10889 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
10890 asection *sec, *osec;
10891
10892 if (globals == NULL)
10893 return FALSE;
10894
10895 /* Invoke the regular ELF backend linker to do all the work. */
10896 if (!bfd_elf_final_link (abfd, info))
10897 return FALSE;
10898
10899 /* Process stub sections (eg BE8 encoding, ...). */
10900 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
10901 int i;
10902 for (i=0; i<htab->top_id; i++)
10903 {
10904 sec = htab->stub_group[i].stub_sec;
10905 /* Only process it once, in its link_sec slot. */
10906 if (sec && i == htab->stub_group[i].link_sec->id)
10907 {
10908 osec = sec->output_section;
10909 elf32_arm_write_section (abfd, info, sec, sec->contents);
10910 if (! bfd_set_section_contents (abfd, osec, sec->contents,
10911 sec->output_offset, sec->size))
10912 return FALSE;
10913 }
10914 }
10915
10916 /* Write out any glue sections now that we have created all the
10917 stubs. */
10918 if (globals->bfd_of_glue_owner != NULL)
10919 {
10920 if (! elf32_arm_output_glue_section (info, abfd,
10921 globals->bfd_of_glue_owner,
10922 ARM2THUMB_GLUE_SECTION_NAME))
10923 return FALSE;
10924
10925 if (! elf32_arm_output_glue_section (info, abfd,
10926 globals->bfd_of_glue_owner,
10927 THUMB2ARM_GLUE_SECTION_NAME))
10928 return FALSE;
10929
10930 if (! elf32_arm_output_glue_section (info, abfd,
10931 globals->bfd_of_glue_owner,
10932 VFP11_ERRATUM_VENEER_SECTION_NAME))
10933 return FALSE;
10934
10935 if (! elf32_arm_output_glue_section (info, abfd,
10936 globals->bfd_of_glue_owner,
10937 ARM_BX_GLUE_SECTION_NAME))
10938 return FALSE;
10939 }
10940
10941 return TRUE;
10942 }
10943
10944 /* Return a best guess for the machine number based on the attributes. */
10945
10946 static unsigned int
10947 bfd_arm_get_mach_from_attributes (bfd * abfd)
10948 {
10949 int arch = bfd_elf_get_obj_attr_int (abfd, OBJ_ATTR_PROC, Tag_CPU_arch);
10950
10951 switch (arch)
10952 {
10953 case TAG_CPU_ARCH_V4: return bfd_mach_arm_4;
10954 case TAG_CPU_ARCH_V4T: return bfd_mach_arm_4T;
10955 case TAG_CPU_ARCH_V5T: return bfd_mach_arm_5T;
10956
10957 case TAG_CPU_ARCH_V5TE:
10958 {
10959 char * name;
10960
10961 BFD_ASSERT (Tag_CPU_name < NUM_KNOWN_OBJ_ATTRIBUTES);
10962 name = elf_known_obj_attributes (abfd) [OBJ_ATTR_PROC][Tag_CPU_name].s;
10963
10964 if (name)
10965 {
10966 if (strcmp (name, "IWMMXT2") == 0)
10967 return bfd_mach_arm_iWMMXt2;
10968
10969 if (strcmp (name, "IWMMXT") == 0)
10970 return bfd_mach_arm_iWMMXt;
10971
10972 if (strcmp (name, "XSCALE") == 0)
10973 {
10974 int wmmx;
10975
10976 BFD_ASSERT (Tag_WMMX_arch < NUM_KNOWN_OBJ_ATTRIBUTES);
10977 wmmx = elf_known_obj_attributes (abfd) [OBJ_ATTR_PROC][Tag_WMMX_arch].i;
10978 switch (wmmx)
10979 {
10980 case 1: return bfd_mach_arm_iWMMXt;
10981 case 2: return bfd_mach_arm_iWMMXt2;
10982 default: return bfd_mach_arm_XScale;
10983 }
10984 }
10985 }
10986
10987 return bfd_mach_arm_5TE;
10988 }
10989
10990 default:
10991 return bfd_mach_arm_unknown;
10992 }
10993 }
10994
10995 /* Set the right machine number. */
10996
10997 static bfd_boolean
10998 elf32_arm_object_p (bfd *abfd)
10999 {
11000 unsigned int mach;
11001
11002 mach = bfd_arm_get_mach_from_notes (abfd, ARM_NOTE_SECTION);
11003
11004 if (mach == bfd_mach_arm_unknown)
11005 {
11006 if (elf_elfheader (abfd)->e_flags & EF_ARM_MAVERICK_FLOAT)
11007 mach = bfd_mach_arm_ep9312;
11008 else
11009 mach = bfd_arm_get_mach_from_attributes (abfd);
11010 }
11011
11012 bfd_default_set_arch_mach (abfd, bfd_arch_arm, mach);
11013 return TRUE;
11014 }
11015
11016 /* Function to keep ARM specific flags in the ELF header. */
11017
11018 static bfd_boolean
11019 elf32_arm_set_private_flags (bfd *abfd, flagword flags)
11020 {
11021 if (elf_flags_init (abfd)
11022 && elf_elfheader (abfd)->e_flags != flags)
11023 {
11024 if (EF_ARM_EABI_VERSION (flags) == EF_ARM_EABI_UNKNOWN)
11025 {
11026 if (flags & EF_ARM_INTERWORK)
11027 (*_bfd_error_handler)
11028 (_("Warning: Not setting interworking flag of %B since it has already been specified as non-interworking"),
11029 abfd);
11030 else
11031 _bfd_error_handler
11032 (_("Warning: Clearing the interworking flag of %B due to outside request"),
11033 abfd);
11034 }
11035 }
11036 else
11037 {
11038 elf_elfheader (abfd)->e_flags = flags;
11039 elf_flags_init (abfd) = TRUE;
11040 }
11041
11042 return TRUE;
11043 }
11044
11045 /* Copy backend specific data from one object module to another. */
11046
11047 static bfd_boolean
11048 elf32_arm_copy_private_bfd_data (bfd *ibfd, bfd *obfd)
11049 {
11050 flagword in_flags;
11051 flagword out_flags;
11052
11053 if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
11054 return TRUE;
11055
11056 in_flags = elf_elfheader (ibfd)->e_flags;
11057 out_flags = elf_elfheader (obfd)->e_flags;
11058
11059 if (elf_flags_init (obfd)
11060 && EF_ARM_EABI_VERSION (out_flags) == EF_ARM_EABI_UNKNOWN
11061 && in_flags != out_flags)
11062 {
11063 /* Cannot mix APCS26 and APCS32 code. */
11064 if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
11065 return FALSE;
11066
11067 /* Cannot mix float APCS and non-float APCS code. */
11068 if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
11069 return FALSE;
11070
11071 /* If the src and dest have different interworking flags
11072 then turn off the interworking bit. */
11073 if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
11074 {
11075 if (out_flags & EF_ARM_INTERWORK)
11076 _bfd_error_handler
11077 (_("Warning: Clearing the interworking flag of %B because non-interworking code in %B has been linked with it"),
11078 obfd, ibfd);
11079
11080 in_flags &= ~EF_ARM_INTERWORK;
11081 }
11082
11083 /* Likewise for PIC, though don't warn for this case. */
11084 if ((in_flags & EF_ARM_PIC) != (out_flags & EF_ARM_PIC))
11085 in_flags &= ~EF_ARM_PIC;
11086 }
11087
11088 elf_elfheader (obfd)->e_flags = in_flags;
11089 elf_flags_init (obfd) = TRUE;
11090
11091 /* Also copy the EI_OSABI field. */
11092 elf_elfheader (obfd)->e_ident[EI_OSABI] =
11093 elf_elfheader (ibfd)->e_ident[EI_OSABI];
11094
11095 /* Copy object attributes. */
11096 _bfd_elf_copy_obj_attributes (ibfd, obfd);
11097
11098 return TRUE;
11099 }
11100
/* Values for Tag_ABI_PCS_R9_use.  Encodings follow the ARM EABI build
   attributes specification (aeabi addenda).  */
enum
{
  AEABI_R9_V6,
  AEABI_R9_SB,
  AEABI_R9_TLS,
  AEABI_R9_unused
};

/* Values for Tag_ABI_PCS_RW_data.  */
enum
{
  AEABI_PCS_RW_data_absolute,
  AEABI_PCS_RW_data_PCrel,
  AEABI_PCS_RW_data_SBrel,
  AEABI_PCS_RW_data_unused
};

/* Values for Tag_ABI_enum_size.  */
enum
{
  AEABI_enum_unused,
  AEABI_enum_short,
  AEABI_enum_wide,
  AEABI_enum_forced_wide
};
11127
11128 /* Determine whether an object attribute tag takes an integer, a
11129 string or both. */
11130
11131 static int
11132 elf32_arm_obj_attrs_arg_type (int tag)
11133 {
11134 if (tag == Tag_compatibility)
11135 return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_STR_VAL;
11136 else if (tag == Tag_nodefaults)
11137 return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_NO_DEFAULT;
11138 else if (tag == Tag_CPU_raw_name || tag == Tag_CPU_name)
11139 return ATTR_TYPE_FLAG_STR_VAL;
11140 else if (tag < 32)
11141 return ATTR_TYPE_FLAG_INT_VAL;
11142 else
11143 return (tag & 1) != 0 ? ATTR_TYPE_FLAG_STR_VAL : ATTR_TYPE_FLAG_INT_VAL;
11144 }
11145
11146 /* The ABI defines that Tag_conformance should be emitted first, and that
11147 Tag_nodefaults should be second (if either is defined). This sets those
11148 two positions, and bumps up the position of all the remaining tags to
11149 compensate. */
11150 static int
11151 elf32_arm_obj_attrs_order (int num)
11152 {
11153 if (num == LEAST_KNOWN_OBJ_ATTRIBUTE)
11154 return Tag_conformance;
11155 if (num == LEAST_KNOWN_OBJ_ATTRIBUTE + 1)
11156 return Tag_nodefaults;
11157 if ((num - 2) < Tag_nodefaults)
11158 return num - 2;
11159 if ((num - 1) < Tag_conformance)
11160 return num - 1;
11161 return num;
11162 }
11163
11164 /* Attribute numbers >=64 (mod 128) can be safely ignored. */
11165 static bfd_boolean
11166 elf32_arm_obj_attrs_handle_unknown (bfd *abfd, int tag)
11167 {
11168 if ((tag & 127) < 64)
11169 {
11170 _bfd_error_handler
11171 (_("%B: Unknown mandatory EABI object attribute %d"),
11172 abfd, tag);
11173 bfd_set_error (bfd_error_bad_value);
11174 return FALSE;
11175 }
11176 else
11177 {
11178 _bfd_error_handler
11179 (_("Warning: %B: Unknown EABI object attribute %d"),
11180 abfd, tag);
11181 return TRUE;
11182 }
11183 }
11184
11185 /* Read the architecture from the Tag_also_compatible_with attribute, if any.
11186 Returns -1 if no architecture could be read. */
11187
11188 static int
11189 get_secondary_compatible_arch (bfd *abfd)
11190 {
11191 obj_attribute *attr =
11192 &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
11193
11194 /* Note: the tag and its argument below are uleb128 values, though
11195 currently-defined values fit in one byte for each. */
11196 if (attr->s
11197 && attr->s[0] == Tag_CPU_arch
11198 && (attr->s[1] & 128) != 128
11199 && attr->s[2] == 0)
11200 return attr->s[1];
11201
11202 /* This tag is "safely ignorable", so don't complain if it looks funny. */
11203 return -1;
11204 }
11205
11206 /* Set, or unset, the architecture of the Tag_also_compatible_with attribute.
11207 The tag is removed if ARCH is -1. */
11208
11209 static void
11210 set_secondary_compatible_arch (bfd *abfd, int arch)
11211 {
11212 obj_attribute *attr =
11213 &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
11214
11215 if (arch == -1)
11216 {
11217 attr->s = NULL;
11218 return;
11219 }
11220
11221 /* Note: the tag and its argument below are uleb128 values, though
11222 currently-defined values fit in one byte for each. */
11223 if (!attr->s)
11224 attr->s = (char *) bfd_alloc (abfd, 3);
11225 attr->s[0] = Tag_CPU_arch;
11226 attr->s[1] = arch;
11227 attr->s[2] = '\0';
11228 }
11229
11230 /* Combine two values for Tag_CPU_arch, taking secondary compatibility tags
11231 into account. */
11232
11233 static int
11234 tag_cpu_arch_combine (bfd *ibfd, int oldtag, int *secondary_compat_out,
11235 int newtag, int secondary_compat)
11236 {
11237 #define T(X) TAG_CPU_ARCH_##X
11238 int tagl, tagh, result;
11239 const int v6t2[] =
11240 {
11241 T(V6T2), /* PRE_V4. */
11242 T(V6T2), /* V4. */
11243 T(V6T2), /* V4T. */
11244 T(V6T2), /* V5T. */
11245 T(V6T2), /* V5TE. */
11246 T(V6T2), /* V5TEJ. */
11247 T(V6T2), /* V6. */
11248 T(V7), /* V6KZ. */
11249 T(V6T2) /* V6T2. */
11250 };
11251 const int v6k[] =
11252 {
11253 T(V6K), /* PRE_V4. */
11254 T(V6K), /* V4. */
11255 T(V6K), /* V4T. */
11256 T(V6K), /* V5T. */
11257 T(V6K), /* V5TE. */
11258 T(V6K), /* V5TEJ. */
11259 T(V6K), /* V6. */
11260 T(V6KZ), /* V6KZ. */
11261 T(V7), /* V6T2. */
11262 T(V6K) /* V6K. */
11263 };
11264 const int v7[] =
11265 {
11266 T(V7), /* PRE_V4. */
11267 T(V7), /* V4. */
11268 T(V7), /* V4T. */
11269 T(V7), /* V5T. */
11270 T(V7), /* V5TE. */
11271 T(V7), /* V5TEJ. */
11272 T(V7), /* V6. */
11273 T(V7), /* V6KZ. */
11274 T(V7), /* V6T2. */
11275 T(V7), /* V6K. */
11276 T(V7) /* V7. */
11277 };
11278 const int v6_m[] =
11279 {
11280 -1, /* PRE_V4. */
11281 -1, /* V4. */
11282 T(V6K), /* V4T. */
11283 T(V6K), /* V5T. */
11284 T(V6K), /* V5TE. */
11285 T(V6K), /* V5TEJ. */
11286 T(V6K), /* V6. */
11287 T(V6KZ), /* V6KZ. */
11288 T(V7), /* V6T2. */
11289 T(V6K), /* V6K. */
11290 T(V7), /* V7. */
11291 T(V6_M) /* V6_M. */
11292 };
11293 const int v6s_m[] =
11294 {
11295 -1, /* PRE_V4. */
11296 -1, /* V4. */
11297 T(V6K), /* V4T. */
11298 T(V6K), /* V5T. */
11299 T(V6K), /* V5TE. */
11300 T(V6K), /* V5TEJ. */
11301 T(V6K), /* V6. */
11302 T(V6KZ), /* V6KZ. */
11303 T(V7), /* V6T2. */
11304 T(V6K), /* V6K. */
11305 T(V7), /* V7. */
11306 T(V6S_M), /* V6_M. */
11307 T(V6S_M) /* V6S_M. */
11308 };
11309 const int v7e_m[] =
11310 {
11311 -1, /* PRE_V4. */
11312 -1, /* V4. */
11313 T(V7E_M), /* V4T. */
11314 T(V7E_M), /* V5T. */
11315 T(V7E_M), /* V5TE. */
11316 T(V7E_M), /* V5TEJ. */
11317 T(V7E_M), /* V6. */
11318 T(V7E_M), /* V6KZ. */
11319 T(V7E_M), /* V6T2. */
11320 T(V7E_M), /* V6K. */
11321 T(V7E_M), /* V7. */
11322 T(V7E_M), /* V6_M. */
11323 T(V7E_M), /* V6S_M. */
11324 T(V7E_M) /* V7E_M. */
11325 };
11326 const int v8[] =
11327 {
11328 T(V8), /* PRE_V4. */
11329 T(V8), /* V4. */
11330 T(V8), /* V4T. */
11331 T(V8), /* V5T. */
11332 T(V8), /* V5TE. */
11333 T(V8), /* V5TEJ. */
11334 T(V8), /* V6. */
11335 T(V8), /* V6KZ. */
11336 T(V8), /* V6T2. */
11337 T(V8), /* V6K. */
11338 T(V8), /* V7. */
11339 T(V8), /* V6_M. */
11340 T(V8), /* V6S_M. */
11341 T(V8), /* V7E_M. */
11342 T(V8) /* V8. */
11343 };
11344 const int v4t_plus_v6_m[] =
11345 {
11346 -1, /* PRE_V4. */
11347 -1, /* V4. */
11348 T(V4T), /* V4T. */
11349 T(V5T), /* V5T. */
11350 T(V5TE), /* V5TE. */
11351 T(V5TEJ), /* V5TEJ. */
11352 T(V6), /* V6. */
11353 T(V6KZ), /* V6KZ. */
11354 T(V6T2), /* V6T2. */
11355 T(V6K), /* V6K. */
11356 T(V7), /* V7. */
11357 T(V6_M), /* V6_M. */
11358 T(V6S_M), /* V6S_M. */
11359 T(V7E_M), /* V7E_M. */
11360 T(V8), /* V8. */
11361 T(V4T_PLUS_V6_M) /* V4T plus V6_M. */
11362 };
11363 const int *comb[] =
11364 {
11365 v6t2,
11366 v6k,
11367 v7,
11368 v6_m,
11369 v6s_m,
11370 v7e_m,
11371 v8,
11372 /* Pseudo-architecture. */
11373 v4t_plus_v6_m
11374 };
11375
11376 /* Check we've not got a higher architecture than we know about. */
11377
11378 if (oldtag > MAX_TAG_CPU_ARCH || newtag > MAX_TAG_CPU_ARCH)
11379 {
11380 _bfd_error_handler (_("error: %B: Unknown CPU architecture"), ibfd);
11381 return -1;
11382 }
11383
11384 /* Override old tag if we have a Tag_also_compatible_with on the output. */
11385
11386 if ((oldtag == T(V6_M) && *secondary_compat_out == T(V4T))
11387 || (oldtag == T(V4T) && *secondary_compat_out == T(V6_M)))
11388 oldtag = T(V4T_PLUS_V6_M);
11389
11390 /* And override the new tag if we have a Tag_also_compatible_with on the
11391 input. */
11392
11393 if ((newtag == T(V6_M) && secondary_compat == T(V4T))
11394 || (newtag == T(V4T) && secondary_compat == T(V6_M)))
11395 newtag = T(V4T_PLUS_V6_M);
11396
11397 tagl = (oldtag < newtag) ? oldtag : newtag;
11398 result = tagh = (oldtag > newtag) ? oldtag : newtag;
11399
11400 /* Architectures before V6KZ add features monotonically. */
11401 if (tagh <= TAG_CPU_ARCH_V6KZ)
11402 return result;
11403
11404 result = comb[tagh - T(V6T2)][tagl];
11405
11406 /* Use Tag_CPU_arch == V4T and Tag_also_compatible_with (Tag_CPU_arch V6_M)
11407 as the canonical version. */
11408 if (result == T(V4T_PLUS_V6_M))
11409 {
11410 result = T(V4T);
11411 *secondary_compat_out = T(V6_M);
11412 }
11413 else
11414 *secondary_compat_out = -1;
11415
11416 if (result == -1)
11417 {
11418 _bfd_error_handler (_("error: %B: Conflicting CPU architectures %d/%d"),
11419 ibfd, oldtag, newtag);
11420 return -1;
11421 }
11422
11423 return result;
11424 #undef T
11425 }
11426
11427 /* Query attributes object to see if integer divide instructions may be
11428 present in an object. */
11429 static bfd_boolean
11430 elf32_arm_attributes_accept_div (const obj_attribute *attr)
11431 {
11432 int arch = attr[Tag_CPU_arch].i;
11433 int profile = attr[Tag_CPU_arch_profile].i;
11434
11435 switch (attr[Tag_DIV_use].i)
11436 {
11437 case 0:
11438 /* Integer divide allowed if instruction contained in archetecture. */
11439 if (arch == TAG_CPU_ARCH_V7 && (profile == 'R' || profile == 'M'))
11440 return TRUE;
11441 else if (arch >= TAG_CPU_ARCH_V7E_M)
11442 return TRUE;
11443 else
11444 return FALSE;
11445
11446 case 1:
11447 /* Integer divide explicitly prohibited. */
11448 return FALSE;
11449
11450 default:
11451 /* Unrecognised case - treat as allowing divide everywhere. */
11452 case 2:
11453 /* Integer divide allowed in ARM state. */
11454 return TRUE;
11455 }
11456 }
11457
11458 /* Query attributes object to see if integer divide instructions are
11459 forbidden to be in the object. This is not the inverse of
11460 elf32_arm_attributes_accept_div. */
11461 static bfd_boolean
11462 elf32_arm_attributes_forbid_div (const obj_attribute *attr)
11463 {
11464 return attr[Tag_DIV_use].i == 1;
11465 }
11466
11467 /* Merge EABI object attributes from IBFD into OBFD. Raise an error if there
11468 are conflicting attributes. */
11469
11470 static bfd_boolean
11471 elf32_arm_merge_eabi_attributes (bfd *ibfd, bfd *obfd)
11472 {
11473 obj_attribute *in_attr;
11474 obj_attribute *out_attr;
11475 /* Some tags have 0 = don't care, 1 = strong requirement,
11476 2 = weak requirement. */
11477 static const int order_021[3] = {0, 2, 1};
11478 int i;
11479 bfd_boolean result = TRUE;
11480
11481 /* Skip the linker stubs file. This preserves previous behavior
11482 of accepting unknown attributes in the first input file - but
11483 is that a bug? */
11484 if (ibfd->flags & BFD_LINKER_CREATED)
11485 return TRUE;
11486
11487 if (!elf_known_obj_attributes_proc (obfd)[0].i)
11488 {
11489 /* This is the first object. Copy the attributes. */
11490 _bfd_elf_copy_obj_attributes (ibfd, obfd);
11491
11492 out_attr = elf_known_obj_attributes_proc (obfd);
11493
11494 /* Use the Tag_null value to indicate the attributes have been
11495 initialized. */
11496 out_attr[0].i = 1;
11497
11498 /* We do not output objects with Tag_MPextension_use_legacy - we move
11499 the attribute's value to Tag_MPextension_use. */
11500 if (out_attr[Tag_MPextension_use_legacy].i != 0)
11501 {
11502 if (out_attr[Tag_MPextension_use].i != 0
11503 && out_attr[Tag_MPextension_use_legacy].i
11504 != out_attr[Tag_MPextension_use].i)
11505 {
11506 _bfd_error_handler
11507 (_("Error: %B has both the current and legacy "
11508 "Tag_MPextension_use attributes"), ibfd);
11509 result = FALSE;
11510 }
11511
11512 out_attr[Tag_MPextension_use] =
11513 out_attr[Tag_MPextension_use_legacy];
11514 out_attr[Tag_MPextension_use_legacy].type = 0;
11515 out_attr[Tag_MPextension_use_legacy].i = 0;
11516 }
11517
11518 return result;
11519 }
11520
11521 in_attr = elf_known_obj_attributes_proc (ibfd);
11522 out_attr = elf_known_obj_attributes_proc (obfd);
11523 /* This needs to happen before Tag_ABI_FP_number_model is merged. */
11524 if (in_attr[Tag_ABI_VFP_args].i != out_attr[Tag_ABI_VFP_args].i)
11525 {
11526 /* Ignore mismatches if the object doesn't use floating point. */
11527 if (out_attr[Tag_ABI_FP_number_model].i == 0)
11528 out_attr[Tag_ABI_VFP_args].i = in_attr[Tag_ABI_VFP_args].i;
11529 else if (in_attr[Tag_ABI_FP_number_model].i != 0)
11530 {
11531 _bfd_error_handler
11532 (_("error: %B uses VFP register arguments, %B does not"),
11533 in_attr[Tag_ABI_VFP_args].i ? ibfd : obfd,
11534 in_attr[Tag_ABI_VFP_args].i ? obfd : ibfd);
11535 result = FALSE;
11536 }
11537 }
11538
11539 for (i = LEAST_KNOWN_OBJ_ATTRIBUTE; i < NUM_KNOWN_OBJ_ATTRIBUTES; i++)
11540 {
11541 /* Merge this attribute with existing attributes. */
11542 switch (i)
11543 {
11544 case Tag_CPU_raw_name:
11545 case Tag_CPU_name:
11546 /* These are merged after Tag_CPU_arch. */
11547 break;
11548
11549 case Tag_ABI_optimization_goals:
11550 case Tag_ABI_FP_optimization_goals:
11551 /* Use the first value seen. */
11552 break;
11553
11554 case Tag_CPU_arch:
11555 {
11556 int secondary_compat = -1, secondary_compat_out = -1;
11557 unsigned int saved_out_attr = out_attr[i].i;
11558 static const char *name_table[] = {
11559 /* These aren't real CPU names, but we can't guess
11560 that from the architecture version alone. */
11561 "Pre v4",
11562 "ARM v4",
11563 "ARM v4T",
11564 "ARM v5T",
11565 "ARM v5TE",
11566 "ARM v5TEJ",
11567 "ARM v6",
11568 "ARM v6KZ",
11569 "ARM v6T2",
11570 "ARM v6K",
11571 "ARM v7",
11572 "ARM v6-M",
11573 "ARM v6S-M",
11574 "ARM v8"
11575 };
11576
11577 /* Merge Tag_CPU_arch and Tag_also_compatible_with. */
11578 secondary_compat = get_secondary_compatible_arch (ibfd);
11579 secondary_compat_out = get_secondary_compatible_arch (obfd);
11580 out_attr[i].i = tag_cpu_arch_combine (ibfd, out_attr[i].i,
11581 &secondary_compat_out,
11582 in_attr[i].i,
11583 secondary_compat);
11584 set_secondary_compatible_arch (obfd, secondary_compat_out);
11585
11586 /* Merge Tag_CPU_name and Tag_CPU_raw_name. */
11587 if (out_attr[i].i == saved_out_attr)
11588 ; /* Leave the names alone. */
11589 else if (out_attr[i].i == in_attr[i].i)
11590 {
11591 /* The output architecture has been changed to match the
11592 input architecture. Use the input names. */
11593 out_attr[Tag_CPU_name].s = in_attr[Tag_CPU_name].s
11594 ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_name].s)
11595 : NULL;
11596 out_attr[Tag_CPU_raw_name].s = in_attr[Tag_CPU_raw_name].s
11597 ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_raw_name].s)
11598 : NULL;
11599 }
11600 else
11601 {
11602 out_attr[Tag_CPU_name].s = NULL;
11603 out_attr[Tag_CPU_raw_name].s = NULL;
11604 }
11605
11606 /* If we still don't have a value for Tag_CPU_name,
11607 make one up now. Tag_CPU_raw_name remains blank. */
11608 if (out_attr[Tag_CPU_name].s == NULL
11609 && out_attr[i].i < ARRAY_SIZE (name_table))
11610 out_attr[Tag_CPU_name].s =
11611 _bfd_elf_attr_strdup (obfd, name_table[out_attr[i].i]);
11612 }
11613 break;
11614
11615 case Tag_ARM_ISA_use:
11616 case Tag_THUMB_ISA_use:
11617 case Tag_WMMX_arch:
11618 case Tag_Advanced_SIMD_arch:
11619 /* ??? Do Advanced_SIMD (NEON) and WMMX conflict? */
11620 case Tag_ABI_FP_rounding:
11621 case Tag_ABI_FP_exceptions:
11622 case Tag_ABI_FP_user_exceptions:
11623 case Tag_ABI_FP_number_model:
11624 case Tag_FP_HP_extension:
11625 case Tag_CPU_unaligned_access:
11626 case Tag_T2EE_use:
11627 case Tag_MPextension_use:
11628 /* Use the largest value specified. */
11629 if (in_attr[i].i > out_attr[i].i)
11630 out_attr[i].i = in_attr[i].i;
11631 break;
11632
11633 case Tag_ABI_align_preserved:
11634 case Tag_ABI_PCS_RO_data:
11635 /* Use the smallest value specified. */
11636 if (in_attr[i].i < out_attr[i].i)
11637 out_attr[i].i = in_attr[i].i;
11638 break;
11639
11640 case Tag_ABI_align_needed:
11641 if ((in_attr[i].i > 0 || out_attr[i].i > 0)
11642 && (in_attr[Tag_ABI_align_preserved].i == 0
11643 || out_attr[Tag_ABI_align_preserved].i == 0))
11644 {
11645 /* This error message should be enabled once all non-conformant
11646 binaries in the toolchain have had the attributes set
11647 properly.
11648 _bfd_error_handler
11649 (_("error: %B: 8-byte data alignment conflicts with %B"),
11650 obfd, ibfd);
11651 result = FALSE; */
11652 }
11653 /* Fall through. */
11654 case Tag_ABI_FP_denormal:
11655 case Tag_ABI_PCS_GOT_use:
11656 /* Use the "greatest" from the sequence 0, 2, 1, or the largest
11657 value if greater than 2 (for future-proofing). */
11658 if ((in_attr[i].i > 2 && in_attr[i].i > out_attr[i].i)
11659 || (in_attr[i].i <= 2 && out_attr[i].i <= 2
11660 && order_021[in_attr[i].i] > order_021[out_attr[i].i]))
11661 out_attr[i].i = in_attr[i].i;
11662 break;
11663
11664 case Tag_Virtualization_use:
11665 /* The virtualization tag effectively stores two bits of
11666 information: the intended use of TrustZone (in bit 0), and the
11667 intended use of Virtualization (in bit 1). */
11668 if (out_attr[i].i == 0)
11669 out_attr[i].i = in_attr[i].i;
11670 else if (in_attr[i].i != 0
11671 && in_attr[i].i != out_attr[i].i)
11672 {
11673 if (in_attr[i].i <= 3 && out_attr[i].i <= 3)
11674 out_attr[i].i = 3;
11675 else
11676 {
11677 _bfd_error_handler
11678 (_("error: %B: unable to merge virtualization attributes "
11679 "with %B"),
11680 obfd, ibfd);
11681 result = FALSE;
11682 }
11683 }
11684 break;
11685
11686 case Tag_CPU_arch_profile:
11687 if (out_attr[i].i != in_attr[i].i)
11688 {
11689 /* 0 will merge with anything.
11690 'A' and 'S' merge to 'A'.
11691 'R' and 'S' merge to 'R'.
11692 'M' and 'A|R|S' is an error. */
11693 if (out_attr[i].i == 0
11694 || (out_attr[i].i == 'S'
11695 && (in_attr[i].i == 'A' || in_attr[i].i == 'R')))
11696 out_attr[i].i = in_attr[i].i;
11697 else if (in_attr[i].i == 0
11698 || (in_attr[i].i == 'S'
11699 && (out_attr[i].i == 'A' || out_attr[i].i == 'R')))
11700 ; /* Do nothing. */
11701 else
11702 {
11703 _bfd_error_handler
11704 (_("error: %B: Conflicting architecture profiles %c/%c"),
11705 ibfd,
11706 in_attr[i].i ? in_attr[i].i : '0',
11707 out_attr[i].i ? out_attr[i].i : '0');
11708 result = FALSE;
11709 }
11710 }
11711 break;
11712 case Tag_FP_arch:
11713 {
11714 /* Tag_ABI_HardFP_use is handled along with Tag_FP_arch since
11715 the meaning of Tag_ABI_HardFP_use depends on Tag_FP_arch
11716 when it's 0. It might mean absence of FP hardware if
11717 Tag_FP_arch is zero, otherwise it is effectively SP + DP. */
11718
11719 #define VFP_VERSION_COUNT 8
11720 static const struct
11721 {
11722 int ver;
11723 int regs;
11724 } vfp_versions[VFP_VERSION_COUNT] =
11725 {
11726 {0, 0},
11727 {1, 16},
11728 {2, 16},
11729 {3, 32},
11730 {3, 16},
11731 {4, 32},
11732 {4, 16},
11733 {8, 32}
11734 };
11735 int ver;
11736 int regs;
11737 int newval;
11738
11739 /* If the output has no requirement about FP hardware,
11740 follow the requirement of the input. */
11741 if (out_attr[i].i == 0)
11742 {
11743 BFD_ASSERT (out_attr[Tag_ABI_HardFP_use].i == 0);
11744 out_attr[i].i = in_attr[i].i;
11745 out_attr[Tag_ABI_HardFP_use].i
11746 = in_attr[Tag_ABI_HardFP_use].i;
11747 break;
11748 }
11749 /* If the input has no requirement about FP hardware, do
11750 nothing. */
11751 else if (in_attr[i].i == 0)
11752 {
11753 BFD_ASSERT (in_attr[Tag_ABI_HardFP_use].i == 0);
11754 break;
11755 }
11756
11757 /* Both the input and the output have nonzero Tag_FP_arch.
11758 So Tag_ABI_HardFP_use is (SP & DP) when it's zero. */
11759
11760 /* If both the input and the output have zero Tag_ABI_HardFP_use,
11761 do nothing. */
11762 if (in_attr[Tag_ABI_HardFP_use].i == 0
11763 && out_attr[Tag_ABI_HardFP_use].i == 0)
11764 ;
11765 /* If the input and the output have different Tag_ABI_HardFP_use,
11766 the combination of them is 3 (SP & DP). */
11767 else if (in_attr[Tag_ABI_HardFP_use].i
11768 != out_attr[Tag_ABI_HardFP_use].i)
11769 out_attr[Tag_ABI_HardFP_use].i = 3;
11770
11771 /* Now we can handle Tag_FP_arch. */
11772
11773 /* Values of VFP_VERSION_COUNT or more aren't defined, so just
11774 pick the biggest. */
11775 if (in_attr[i].i >= VFP_VERSION_COUNT
11776 && in_attr[i].i > out_attr[i].i)
11777 {
11778 out_attr[i] = in_attr[i];
11779 break;
11780 }
11781 /* The output uses the superset of input features
11782 (ISA version) and registers. */
11783 ver = vfp_versions[in_attr[i].i].ver;
11784 if (ver < vfp_versions[out_attr[i].i].ver)
11785 ver = vfp_versions[out_attr[i].i].ver;
11786 regs = vfp_versions[in_attr[i].i].regs;
11787 if (regs < vfp_versions[out_attr[i].i].regs)
11788 regs = vfp_versions[out_attr[i].i].regs;
11789 /* This assumes all possible supersets are also a valid
11790 options. */
11791 for (newval = VFP_VERSION_COUNT - 1; newval > 0; newval--)
11792 {
11793 if (regs == vfp_versions[newval].regs
11794 && ver == vfp_versions[newval].ver)
11795 break;
11796 }
11797 out_attr[i].i = newval;
11798 }
11799 break;
11800 case Tag_PCS_config:
11801 if (out_attr[i].i == 0)
11802 out_attr[i].i = in_attr[i].i;
11803 else if (in_attr[i].i != 0 && out_attr[i].i != in_attr[i].i)
11804 {
11805 /* It's sometimes ok to mix different configs, so this is only
11806 a warning. */
11807 _bfd_error_handler
11808 (_("Warning: %B: Conflicting platform configuration"), ibfd);
11809 }
11810 break;
11811 case Tag_ABI_PCS_R9_use:
11812 if (in_attr[i].i != out_attr[i].i
11813 && out_attr[i].i != AEABI_R9_unused
11814 && in_attr[i].i != AEABI_R9_unused)
11815 {
11816 _bfd_error_handler
11817 (_("error: %B: Conflicting use of R9"), ibfd);
11818 result = FALSE;
11819 }
11820 if (out_attr[i].i == AEABI_R9_unused)
11821 out_attr[i].i = in_attr[i].i;
11822 break;
11823 case Tag_ABI_PCS_RW_data:
11824 if (in_attr[i].i == AEABI_PCS_RW_data_SBrel
11825 && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_SB
11826 && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_unused)
11827 {
11828 _bfd_error_handler
11829 (_("error: %B: SB relative addressing conflicts with use of R9"),
11830 ibfd);
11831 result = FALSE;
11832 }
11833 /* Use the smallest value specified. */
11834 if (in_attr[i].i < out_attr[i].i)
11835 out_attr[i].i = in_attr[i].i;
11836 break;
11837 case Tag_ABI_PCS_wchar_t:
11838 if (out_attr[i].i && in_attr[i].i && out_attr[i].i != in_attr[i].i
11839 && !elf_arm_tdata (obfd)->no_wchar_size_warning)
11840 {
11841 _bfd_error_handler
11842 (_("warning: %B uses %u-byte wchar_t yet the output is to use %u-byte wchar_t; use of wchar_t values across objects may fail"),
11843 ibfd, in_attr[i].i, out_attr[i].i);
11844 }
11845 else if (in_attr[i].i && !out_attr[i].i)
11846 out_attr[i].i = in_attr[i].i;
11847 break;
11848 case Tag_ABI_enum_size:
11849 if (in_attr[i].i != AEABI_enum_unused)
11850 {
11851 if (out_attr[i].i == AEABI_enum_unused
11852 || out_attr[i].i == AEABI_enum_forced_wide)
11853 {
11854 /* The existing object is compatible with anything.
11855 Use whatever requirements the new object has. */
11856 out_attr[i].i = in_attr[i].i;
11857 }
11858 else if (in_attr[i].i != AEABI_enum_forced_wide
11859 && out_attr[i].i != in_attr[i].i
11860 && !elf_arm_tdata (obfd)->no_enum_size_warning)
11861 {
11862 static const char *aeabi_enum_names[] =
11863 { "", "variable-size", "32-bit", "" };
11864 const char *in_name =
11865 in_attr[i].i < ARRAY_SIZE(aeabi_enum_names)
11866 ? aeabi_enum_names[in_attr[i].i]
11867 : "<unknown>";
11868 const char *out_name =
11869 out_attr[i].i < ARRAY_SIZE(aeabi_enum_names)
11870 ? aeabi_enum_names[out_attr[i].i]
11871 : "<unknown>";
11872 _bfd_error_handler
11873 (_("warning: %B uses %s enums yet the output is to use %s enums; use of enum values across objects may fail"),
11874 ibfd, in_name, out_name);
11875 }
11876 }
11877 break;
11878 case Tag_ABI_VFP_args:
11879 /* Aready done. */
11880 break;
11881 case Tag_ABI_WMMX_args:
11882 if (in_attr[i].i != out_attr[i].i)
11883 {
11884 _bfd_error_handler
11885 (_("error: %B uses iWMMXt register arguments, %B does not"),
11886 ibfd, obfd);
11887 result = FALSE;
11888 }
11889 break;
11890 case Tag_compatibility:
11891 /* Merged in target-independent code. */
11892 break;
11893 case Tag_ABI_HardFP_use:
11894 /* This is handled along with Tag_FP_arch. */
11895 break;
11896 case Tag_ABI_FP_16bit_format:
11897 if (in_attr[i].i != 0 && out_attr[i].i != 0)
11898 {
11899 if (in_attr[i].i != out_attr[i].i)
11900 {
11901 _bfd_error_handler
11902 (_("error: fp16 format mismatch between %B and %B"),
11903 ibfd, obfd);
11904 result = FALSE;
11905 }
11906 }
11907 if (in_attr[i].i != 0)
11908 out_attr[i].i = in_attr[i].i;
11909 break;
11910
11911 case Tag_DIV_use:
11912 /* A value of zero on input means that the divide instruction may
11913 be used if available in the base architecture as specified via
11914 Tag_CPU_arch and Tag_CPU_arch_profile. A value of 1 means that
11915 the user did not want divide instructions. A value of 2
11916 explicitly means that divide instructions were allowed in ARM
11917 and Thumb state. */
11918 if (in_attr[i].i == out_attr[i].i)
11919 /* Do nothing. */ ;
11920 else if (elf32_arm_attributes_forbid_div (in_attr)
11921 && !elf32_arm_attributes_accept_div (out_attr))
11922 out_attr[i].i = 1;
11923 else if (elf32_arm_attributes_forbid_div (out_attr)
11924 && elf32_arm_attributes_accept_div (in_attr))
11925 out_attr[i].i = in_attr[i].i;
11926 else if (in_attr[i].i == 2)
11927 out_attr[i].i = in_attr[i].i;
11928 break;
11929
11930 case Tag_MPextension_use_legacy:
11931 /* We don't output objects with Tag_MPextension_use_legacy - we
11932 move the value to Tag_MPextension_use. */
11933 if (in_attr[i].i != 0 && in_attr[Tag_MPextension_use].i != 0)
11934 {
11935 if (in_attr[Tag_MPextension_use].i != in_attr[i].i)
11936 {
11937 _bfd_error_handler
11938 (_("%B has has both the current and legacy "
11939 "Tag_MPextension_use attributes"),
11940 ibfd);
11941 result = FALSE;
11942 }
11943 }
11944
11945 if (in_attr[i].i > out_attr[Tag_MPextension_use].i)
11946 out_attr[Tag_MPextension_use] = in_attr[i];
11947
11948 break;
11949
11950 case Tag_nodefaults:
11951 /* This tag is set if it exists, but the value is unused (and is
11952 typically zero). We don't actually need to do anything here -
11953 the merge happens automatically when the type flags are merged
11954 below. */
11955 break;
11956 case Tag_also_compatible_with:
11957 /* Already done in Tag_CPU_arch. */
11958 break;
11959 case Tag_conformance:
11960 /* Keep the attribute if it matches. Throw it away otherwise.
11961 No attribute means no claim to conform. */
11962 if (!in_attr[i].s || !out_attr[i].s
11963 || strcmp (in_attr[i].s, out_attr[i].s) != 0)
11964 out_attr[i].s = NULL;
11965 break;
11966
11967 default:
11968 result
11969 = result && _bfd_elf_merge_unknown_attribute_low (ibfd, obfd, i);
11970 }
11971
11972 /* If out_attr was copied from in_attr then it won't have a type yet. */
11973 if (in_attr[i].type && !out_attr[i].type)
11974 out_attr[i].type = in_attr[i].type;
11975 }
11976
11977 /* Merge Tag_compatibility attributes and any common GNU ones. */
11978 if (!_bfd_elf_merge_object_attributes (ibfd, obfd))
11979 return FALSE;
11980
11981 /* Check for any attributes not known on ARM. */
11982 result &= _bfd_elf_merge_unknown_attribute_list (ibfd, obfd);
11983
11984 return result;
11985 }
11986
11987
11988 /* Return TRUE if the two EABI versions are incompatible. */
11989
11990 static bfd_boolean
11991 elf32_arm_versions_compatible (unsigned iver, unsigned over)
11992 {
11993 /* v4 and v5 are the same spec before and after it was released,
11994 so allow mixing them. */
11995 if ((iver == EF_ARM_EABI_VER4 && over == EF_ARM_EABI_VER5)
11996 || (iver == EF_ARM_EABI_VER5 && over == EF_ARM_EABI_VER4))
11997 return TRUE;
11998
11999 return (iver == over);
12000 }
12001
12002 /* Merge backend specific data from an object file to the output
12003 object file when linking. */
12004
12005 static bfd_boolean
12006 elf32_arm_merge_private_bfd_data (bfd * ibfd, bfd * obfd);
12007
/* Display the flags field.  ABFD is the BFD whose ELF header e_flags are
   decoded; PTR is a FILE* cast to void*.  Prints a human-readable
   description of the ARM-specific flag bits, interpreting GNU extension
   bits only when no EABI version is set.  Always returns TRUE.  */

static bfd_boolean
elf32_arm_print_private_bfd_data (bfd *abfd, void * ptr)
{
  FILE * file = (FILE *) ptr;
  unsigned long flags;

  BFD_ASSERT (abfd != NULL && ptr != NULL);

  /* Print normal ELF private data.  */
  _bfd_elf_print_private_bfd_data (abfd, ptr);

  flags = elf_elfheader (abfd)->e_flags;
  /* Ignore init flag - it may not be set, despite the flags field
     containing valid data.  */

  /* xgettext:c-format */
  fprintf (file, _("private flags = %lx:"), elf_elfheader (abfd)->e_flags);

  /* Each case below clears the bits it has decoded from FLAGS so that
     any leftover, unrecognised bits can be reported at the end.  */
  switch (EF_ARM_EABI_VERSION (flags))
    {
    case EF_ARM_EABI_UNKNOWN:
      /* The following flag bits are GNU extensions and not part of the
	 official ARM ELF extended ABI.  Hence they are only decoded if
	 the EABI version is not set.  */
      if (flags & EF_ARM_INTERWORK)
	fprintf (file, _(" [interworking enabled]"));

      if (flags & EF_ARM_APCS_26)
	fprintf (file, " [APCS-26]");
      else
	fprintf (file, " [APCS-32]");

      if (flags & EF_ARM_VFP_FLOAT)
	fprintf (file, _(" [VFP float format]"));
      else if (flags & EF_ARM_MAVERICK_FLOAT)
	fprintf (file, _(" [Maverick float format]"));
      else
	fprintf (file, _(" [FPA float format]"));

      if (flags & EF_ARM_APCS_FLOAT)
	fprintf (file, _(" [floats passed in float registers]"));

      if (flags & EF_ARM_PIC)
	fprintf (file, _(" [position independent]"));

      if (flags & EF_ARM_NEW_ABI)
	fprintf (file, _(" [new ABI]"));

      if (flags & EF_ARM_OLD_ABI)
	fprintf (file, _(" [old ABI]"));

      if (flags & EF_ARM_SOFT_FLOAT)
	fprintf (file, _(" [software FP]"));

      flags &= ~(EF_ARM_INTERWORK | EF_ARM_APCS_26 | EF_ARM_APCS_FLOAT
		 | EF_ARM_PIC | EF_ARM_NEW_ABI | EF_ARM_OLD_ABI
		 | EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT
		 | EF_ARM_MAVERICK_FLOAT);
      break;

    case EF_ARM_EABI_VER1:
      fprintf (file, _(" [Version1 EABI]"));

      if (flags & EF_ARM_SYMSARESORTED)
	fprintf (file, _(" [sorted symbol table]"));
      else
	fprintf (file, _(" [unsorted symbol table]"));

      flags &= ~ EF_ARM_SYMSARESORTED;
      break;

    case EF_ARM_EABI_VER2:
      fprintf (file, _(" [Version2 EABI]"));

      if (flags & EF_ARM_SYMSARESORTED)
	fprintf (file, _(" [sorted symbol table]"));
      else
	fprintf (file, _(" [unsorted symbol table]"));

      if (flags & EF_ARM_DYNSYMSUSESEGIDX)
	fprintf (file, _(" [dynamic symbols use segment index]"));

      if (flags & EF_ARM_MAPSYMSFIRST)
	fprintf (file, _(" [mapping symbols precede others]"));

      flags &= ~(EF_ARM_SYMSARESORTED | EF_ARM_DYNSYMSUSESEGIDX
		 | EF_ARM_MAPSYMSFIRST);
      break;

    case EF_ARM_EABI_VER3:
      fprintf (file, _(" [Version3 EABI]"));
      break;

    case EF_ARM_EABI_VER4:
      fprintf (file, _(" [Version4 EABI]"));
      /* V4 shares the BE8/LE8 decoding with V5 below.  */
      goto eabi;

    case EF_ARM_EABI_VER5:
      fprintf (file, _(" [Version5 EABI]"));

      if (flags & EF_ARM_ABI_FLOAT_SOFT)
	fprintf (file, _(" [soft-float ABI]"));

      if (flags & EF_ARM_ABI_FLOAT_HARD)
	fprintf (file, _(" [hard-float ABI]"));

      flags &= ~(EF_ARM_ABI_FLOAT_SOFT | EF_ARM_ABI_FLOAT_HARD);

    eabi:
      if (flags & EF_ARM_BE8)
	fprintf (file, _(" [BE8]"));

      if (flags & EF_ARM_LE8)
	fprintf (file, _(" [LE8]"));

      flags &= ~(EF_ARM_LE8 | EF_ARM_BE8);
      break;

    default:
      fprintf (file, _(" <EABI version unrecognised>"));
      break;
    }

  flags &= ~ EF_ARM_EABIMASK;

  if (flags & EF_ARM_RELEXEC)
    fprintf (file, _(" [relocatable executable]"));

  if (flags & EF_ARM_HASENTRY)
    fprintf (file, _(" [has entry point]"));

  flags &= ~ (EF_ARM_RELEXEC | EF_ARM_HASENTRY);

  /* Anything still set in FLAGS was not decoded above.  */
  if (flags)
    fprintf (file, _("<Unrecognised flag bits set>"));

  fputc ('\n', file);

  return TRUE;
}
12150
12151 static int
12152 elf32_arm_get_symbol_type (Elf_Internal_Sym * elf_sym, int type)
12153 {
12154 switch (ELF_ST_TYPE (elf_sym->st_info))
12155 {
12156 case STT_ARM_TFUNC:
12157 return ELF_ST_TYPE (elf_sym->st_info);
12158
12159 case STT_ARM_16BIT:
12160 /* If the symbol is not an object, return the STT_ARM_16BIT flag.
12161 This allows us to distinguish between data used by Thumb instructions
12162 and non-data (which is probably code) inside Thumb regions of an
12163 executable. */
12164 if (type != STT_OBJECT && type != STT_TLS)
12165 return ELF_ST_TYPE (elf_sym->st_info);
12166 break;
12167
12168 default:
12169 break;
12170 }
12171
12172 return type;
12173 }
12174
12175 static asection *
12176 elf32_arm_gc_mark_hook (asection *sec,
12177 struct bfd_link_info *info,
12178 Elf_Internal_Rela *rel,
12179 struct elf_link_hash_entry *h,
12180 Elf_Internal_Sym *sym)
12181 {
12182 if (h != NULL)
12183 switch (ELF32_R_TYPE (rel->r_info))
12184 {
12185 case R_ARM_GNU_VTINHERIT:
12186 case R_ARM_GNU_VTENTRY:
12187 return NULL;
12188 }
12189
12190 return _bfd_elf_gc_mark_hook (sec, info, rel, h, sym);
12191 }
12192
/* Update the got entry reference counts for the section being removed.
   Walks RELOCS (the relocations of SEC, a section being garbage-collected)
   and undoes the reference counting performed by elf32_arm_check_relocs:
   GOT refcounts, PLT refcounts, and recorded dynamic relocations.
   Returns FALSE on a hard error (missing hash table or local symbol).  */

static bfd_boolean
elf32_arm_gc_sweep_hook (bfd * abfd,
			 struct bfd_link_info * info,
			 asection * sec,
			 const Elf_Internal_Rela * relocs)
{
  Elf_Internal_Shdr *symtab_hdr;
  struct elf_link_hash_entry **sym_hashes;
  bfd_signed_vma *local_got_refcounts;
  const Elf_Internal_Rela *rel, *relend;
  struct elf32_arm_link_hash_table * globals;

  /* Nothing to sweep in a relocatable link - no dynamic sections are
     created there.  */
  if (info->relocatable)
    return TRUE;

  globals = elf32_arm_hash_table (info);
  if (globals == NULL)
    return FALSE;

  elf_section_data (sec)->local_dynrel = NULL;

  symtab_hdr = & elf_symtab_hdr (abfd);
  sym_hashes = elf_sym_hashes (abfd);
  local_got_refcounts = elf_local_got_refcounts (abfd);

  check_use_blx (globals);

  relend = relocs + sec->reloc_count;
  for (rel = relocs; rel < relend; rel++)
    {
      unsigned long r_symndx;
      struct elf_link_hash_entry *h = NULL;
      struct elf32_arm_link_hash_entry *eh;
      int r_type;
      bfd_boolean call_reloc_p;
      bfd_boolean may_become_dynamic_p;
      bfd_boolean may_need_local_target_p;
      union gotplt_union *root_plt;
      struct arm_plt_info *arm_plt;

      r_symndx = ELF32_R_SYM (rel->r_info);
      if (r_symndx >= symtab_hdr->sh_info)
	{
	  /* Global symbol: follow indirect/warning links to the real
	     hash entry.  */
	  h = sym_hashes[r_symndx - symtab_hdr->sh_info];
	  while (h->root.type == bfd_link_hash_indirect
		 || h->root.type == bfd_link_hash_warning)
	    h = (struct elf_link_hash_entry *) h->root.u.i.link;
	}
      eh = (struct elf32_arm_link_hash_entry *) h;

      /* These flags classify the reloc the same way check_relocs did,
	 so the right counters are decremented below.  */
      call_reloc_p = FALSE;
      may_become_dynamic_p = FALSE;
      may_need_local_target_p = FALSE;

      r_type = ELF32_R_TYPE (rel->r_info);
      r_type = arm_real_reloc_type (globals, r_type);
      switch (r_type)
	{
	case R_ARM_GOT32:
	case R_ARM_GOT_PREL:
	case R_ARM_TLS_GD32:
	case R_ARM_TLS_IE32:
	  /* Drop one GOT reference, global or local as appropriate.  */
	  if (h != NULL)
	    {
	      if (h->got.refcount > 0)
		h->got.refcount -= 1;
	    }
	  else if (local_got_refcounts != NULL)
	    {
	      if (local_got_refcounts[r_symndx] > 0)
		local_got_refcounts[r_symndx] -= 1;
	    }
	  break;

	case R_ARM_TLS_LDM32:
	  globals->tls_ldm_got.refcount -= 1;
	  break;

	case R_ARM_PC24:
	case R_ARM_PLT32:
	case R_ARM_CALL:
	case R_ARM_JUMP24:
	case R_ARM_PREL31:
	case R_ARM_THM_CALL:
	case R_ARM_THM_JUMP24:
	case R_ARM_THM_JUMP19:
	  call_reloc_p = TRUE;
	  may_need_local_target_p = TRUE;
	  break;

	case R_ARM_ABS12:
	  if (!globals->vxworks_p)
	    {
	      may_need_local_target_p = TRUE;
	      break;
	    }
	  /* Fall through.  */
	case R_ARM_ABS32:
	case R_ARM_ABS32_NOI:
	case R_ARM_REL32:
	case R_ARM_REL32_NOI:
	case R_ARM_MOVW_ABS_NC:
	case R_ARM_MOVT_ABS:
	case R_ARM_MOVW_PREL_NC:
	case R_ARM_MOVT_PREL:
	case R_ARM_THM_MOVW_ABS_NC:
	case R_ARM_THM_MOVT_ABS:
	case R_ARM_THM_MOVW_PREL_NC:
	case R_ARM_THM_MOVT_PREL:
	  /* Should the interworking branches be here also?  */
	  if ((info->shared || globals->root.is_relocatable_executable)
	      && (sec->flags & SEC_ALLOC) != 0)
	    {
	      if (h == NULL
		  && (r_type == R_ARM_REL32 || r_type == R_ARM_REL32_NOI))
		{
		  call_reloc_p = TRUE;
		  may_need_local_target_p = TRUE;
		}
	      else
		may_become_dynamic_p = TRUE;
	    }
	  else
	    may_need_local_target_p = TRUE;
	  break;

	default:
	  break;
	}

      if (may_need_local_target_p
	  && elf32_arm_get_plt_info (abfd, eh, r_symndx, &root_plt, &arm_plt))
	{
	  /* If PLT refcount book-keeping is wrong and too low, we'll
	     see a zero value (going to -1) for the root PLT reference
	     count.  */
	  if (root_plt->refcount >= 0)
	    {
	      BFD_ASSERT (root_plt->refcount != 0);
	      root_plt->refcount -= 1;
	    }
	  else
	    /* A value of -1 means the symbol has become local, forced
	       or seeing a hidden definition.  Any other negative value
	       is an error.  */
	    BFD_ASSERT (root_plt->refcount == -1);

	  if (!call_reloc_p)
	    arm_plt->noncall_refcount--;

	  if (r_type == R_ARM_THM_CALL)
	    arm_plt->maybe_thumb_refcount--;

	  if (r_type == R_ARM_THM_JUMP24
	      || r_type == R_ARM_THM_JUMP19)
	    arm_plt->thumb_refcount--;
	}

      if (may_become_dynamic_p)
	{
	  struct elf_dyn_relocs **pp;
	  struct elf_dyn_relocs *p;

	  /* Find the list of dynamic relocations recorded against this
	     symbol (global) or local symbol index.  */
	  if (h != NULL)
	    pp = &(eh->dyn_relocs);
	  else
	    {
	      Elf_Internal_Sym *isym;

	      isym = bfd_sym_from_r_symndx (&globals->sym_cache,
					    abfd, r_symndx);
	      if (isym == NULL)
		return FALSE;
	      pp = elf32_arm_get_local_dynreloc_list (abfd, r_symndx, isym);
	      if (pp == NULL)
		return FALSE;
	    }
	  for (; (p = *pp) != NULL; pp = &p->next)
	    if (p->sec == sec)
	      {
		/* Everything must go for SEC.  */
		*pp = p->next;
		break;
	      }
	}
    }

  return TRUE;
}
12384
/* Look through the relocs for a section during the first phase.

   This is the elf_backend_check_relocs hook: for every relocation in
   SEC of input bfd ABFD it records the bookkeeping that later passes
   (allocate_dynrelocs_for_symbol and the section-sizing code) use to
   size the .got, .plt and dynamic relocation sections: GOT reference
   counts, TLS access kinds, PLT reference counts (including the
   Thumb/interworking sub-counts), and per-symbol lists of relocations
   that may have to be copied into the output as dynamic relocations.

   Returns FALSE on error (bad symbol index, out of memory, failure to
   create a needed section), TRUE otherwise.  */

static bfd_boolean
elf32_arm_check_relocs (bfd *abfd, struct bfd_link_info *info,
			asection *sec, const Elf_Internal_Rela *relocs)
{
  Elf_Internal_Shdr *symtab_hdr;
  struct elf_link_hash_entry **sym_hashes;
  const Elf_Internal_Rela *rel;
  const Elf_Internal_Rela *rel_end;
  bfd *dynobj;
  asection *sreloc;
  struct elf32_arm_link_hash_table *htab;
  /* Per-reloc classification flags, reset for each relocation:
     call_reloc_p: the reloc is a branch/call (or treated as one), so it
       contributes to PLT call refcounts rather than non-call refcounts.
     may_become_dynamic_p: the reloc may need to be emitted as a dynamic
       relocation in the output file.
     may_need_local_target_p: the reloc needs local-target bookkeeping
       (PLT refcounts for global symbols or local STT_GNU_IFUNC syms).  */
  bfd_boolean call_reloc_p;
  bfd_boolean may_become_dynamic_p;
  bfd_boolean may_need_local_target_p;
  unsigned long nsyms;

  /* Nothing to record for a relocatable (-r) link; relocs are copied
     through unchanged.  */
  if (info->relocatable)
    return TRUE;

  BFD_ASSERT (is_arm_elf (abfd));

  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return FALSE;

  sreloc = NULL;

  /* Create dynamic sections for relocatable executables so that we can
     copy relocations.  */
  if (htab->root.is_relocatable_executable
      && ! htab->root.dynamic_sections_created)
    {
      if (! _bfd_elf_link_create_dynamic_sections (abfd, info))
	return FALSE;
    }

  if (htab->root.dynobj == NULL)
    htab->root.dynobj = abfd;
  if (!create_ifunc_sections (info))
    return FALSE;

  dynobj = htab->root.dynobj;

  symtab_hdr = & elf_symtab_hdr (abfd);
  sym_hashes = elf_sym_hashes (abfd);
  nsyms = NUM_SHDR_ENTRIES (symtab_hdr);

  rel_end = relocs + sec->reloc_count;
  for (rel = relocs; rel < rel_end; rel++)
    {
      Elf_Internal_Sym *isym;
      struct elf_link_hash_entry *h;
      struct elf32_arm_link_hash_entry *eh;
      unsigned long r_symndx;
      int r_type;

      r_symndx = ELF32_R_SYM (rel->r_info);
      r_type = ELF32_R_TYPE (rel->r_info);
      /* Map target-vendor-specific reloc numbers back to generic ones.  */
      r_type = arm_real_reloc_type (htab, r_type);

      if (r_symndx >= nsyms
	  /* PR 9934: It is possible to have relocations that do not
	     refer to symbols, thus it is also possible to have an
	     object file containing relocations but no symbol table.  */
	  && (r_symndx > STN_UNDEF || nsyms > 0))
	{
	  (*_bfd_error_handler) (_("%B: bad symbol index: %d"), abfd,
				 r_symndx);
	  return FALSE;
	}

      h = NULL;
      isym = NULL;
      if (nsyms > 0)
	{
	  if (r_symndx < symtab_hdr->sh_info)
	    {
	      /* A local symbol.  */
	      isym = bfd_sym_from_r_symndx (&htab->sym_cache,
					    abfd, r_symndx);
	      if (isym == NULL)
		return FALSE;
	    }
	  else
	    {
	      /* A global symbol: follow indirection/warning links to
		 the real hash entry.  */
	      h = sym_hashes[r_symndx - symtab_hdr->sh_info];
	      while (h->root.type == bfd_link_hash_indirect
		     || h->root.type == bfd_link_hash_warning)
		h = (struct elf_link_hash_entry *) h->root.u.i.link;

	      /* PR15323, ref flags aren't set for references in the
		 same object.  */
	      h->root.non_ir_ref = 1;
	    }
	}

      eh = (struct elf32_arm_link_hash_entry *) h;

      call_reloc_p = FALSE;
      may_become_dynamic_p = FALSE;
      may_need_local_target_p = FALSE;

      /* Could be done earlier, if h were already available.  */
      r_type = elf32_arm_tls_transition (info, r_type, h);
      switch (r_type)
	{
	case R_ARM_GOT32:
	case R_ARM_GOT_PREL:
	case R_ARM_TLS_GD32:
	case R_ARM_TLS_IE32:
	case R_ARM_TLS_GOTDESC:
	case R_ARM_TLS_DESCSEQ:
	case R_ARM_THM_TLS_DESCSEQ:
	case R_ARM_TLS_CALL:
	case R_ARM_THM_TLS_CALL:
	  /* This symbol requires a global offset table entry.  */
	  {
	    int tls_type, old_tls_type;

	    /* Classify the kind of GOT slot this access needs.  */
	    switch (r_type)
	      {
	      case R_ARM_TLS_GD32: tls_type = GOT_TLS_GD; break;

	      case R_ARM_TLS_IE32: tls_type = GOT_TLS_IE; break;

	      case R_ARM_TLS_GOTDESC:
	      case R_ARM_TLS_CALL: case R_ARM_THM_TLS_CALL:
	      case R_ARM_TLS_DESCSEQ: case R_ARM_THM_TLS_DESCSEQ:
		tls_type = GOT_TLS_GDESC; break;

	      default: tls_type = GOT_NORMAL; break;
	      }

	    if (h != NULL)
	      {
		h->got.refcount++;
		old_tls_type = elf32_arm_hash_entry (h)->tls_type;
	      }
	    else
	      {
		/* This is a global offset table entry for a local symbol.  */
		if (!elf32_arm_allocate_local_sym_info (abfd))
		  return FALSE;
		elf_local_got_refcounts (abfd)[r_symndx] += 1;
		old_tls_type = elf32_arm_local_got_tls_type (abfd) [r_symndx];
	      }

	    /* If a variable is accessed with both tls methods, two
	       slots may be created.  */
	    if (GOT_TLS_GD_ANY_P (old_tls_type)
		&& GOT_TLS_GD_ANY_P (tls_type))
	      tls_type |= old_tls_type;

	    /* We will already have issued an error message if there
	       is a TLS/non-TLS mismatch, based on the symbol
	       type.  So just combine any TLS types needed.  */
	    if (old_tls_type != GOT_UNKNOWN && old_tls_type != GOT_NORMAL
		&& tls_type != GOT_NORMAL)
	      tls_type |= old_tls_type;

	    /* If the symbol is accessed in both IE and GDESC
	       method, we're able to relax.  Turn off the GDESC flag,
	       without messing up with any other kind of tls types
	       that may be involved.  */
	    if ((tls_type & GOT_TLS_IE) && (tls_type & GOT_TLS_GDESC))
	      tls_type &= ~GOT_TLS_GDESC;

	    if (old_tls_type != tls_type)
	      {
		if (h != NULL)
		  elf32_arm_hash_entry (h)->tls_type = tls_type;
		else
		  elf32_arm_local_got_tls_type (abfd) [r_symndx] = tls_type;
	      }
	  }
	  /* Fall through.  */

	case R_ARM_TLS_LDM32:
	  if (r_type == R_ARM_TLS_LDM32)
	    htab->tls_ldm_got.refcount++;
	  /* Fall through.  */

	case R_ARM_GOTOFF32:
	case R_ARM_GOTPC:
	  /* All GOT-relative relocs need the .got section to exist.  */
	  if (htab->root.sgot == NULL
	      && !create_got_section (htab->root.dynobj, info))
	    return FALSE;
	  break;

	case R_ARM_PC24:
	case R_ARM_PLT32:
	case R_ARM_CALL:
	case R_ARM_JUMP24:
	case R_ARM_PREL31:
	case R_ARM_THM_CALL:
	case R_ARM_THM_JUMP24:
	case R_ARM_THM_JUMP19:
	  call_reloc_p = TRUE;
	  may_need_local_target_p = TRUE;
	  break;

	case R_ARM_ABS12:
	  /* VxWorks uses dynamic R_ARM_ABS12 relocations for
	     ldr __GOTT_INDEX__ offsets.  */
	  if (!htab->vxworks_p)
	    {
	      may_need_local_target_p = TRUE;
	      break;
	    }
	  /* Fall through.  */

	case R_ARM_MOVW_ABS_NC:
	case R_ARM_MOVT_ABS:
	case R_ARM_THM_MOVW_ABS_NC:
	case R_ARM_THM_MOVT_ABS:
	  /* Absolute MOVW/MOVT pairs cannot be fixed up at load time,
	     so they are unusable in shared objects.  */
	  if (info->shared)
	    {
	      (*_bfd_error_handler)
		(_("%B: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"),
		 abfd, elf32_arm_howto_table_1[r_type].name,
		 (h) ? h->root.root.string : "a local symbol");
	      bfd_set_error (bfd_error_bad_value);
	      return FALSE;
	    }

	  /* Fall through.  */
	case R_ARM_ABS32:
	case R_ARM_ABS32_NOI:
	case R_ARM_REL32:
	case R_ARM_REL32_NOI:
	case R_ARM_MOVW_PREL_NC:
	case R_ARM_MOVT_PREL:
	case R_ARM_THM_MOVW_PREL_NC:
	case R_ARM_THM_MOVT_PREL:

	  /* Should the interworking branches be listed here?  */
	  if ((info->shared || htab->root.is_relocatable_executable)
	      && (sec->flags & SEC_ALLOC) != 0)
	    {
	      if (h == NULL
		  && (r_type == R_ARM_REL32 || r_type == R_ARM_REL32_NOI))
		{
		  /* In shared libraries and relocatable executables,
		     we treat local relative references as calls;
		     see the related SYMBOL_CALLS_LOCAL code in
		     allocate_dynrelocs.  */
		  call_reloc_p = TRUE;
		  may_need_local_target_p = TRUE;
		}
	      else
		/* We are creating a shared library or relocatable
		   executable, and this is a reloc against a global symbol,
		   or a non-PC-relative reloc against a local symbol.
		   We may need to copy the reloc into the output.  */
		may_become_dynamic_p = TRUE;
	    }
	  else
	    may_need_local_target_p = TRUE;
	  break;

	/* This relocation describes the C++ object vtable hierarchy.
	   Reconstruct it for later use during GC.  */
	case R_ARM_GNU_VTINHERIT:
	  if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
	    return FALSE;
	  break;

	/* This relocation describes which C++ vtable entries are actually
	   used.  Record for later use during GC.  */
	case R_ARM_GNU_VTENTRY:
	  BFD_ASSERT (h != NULL);
	  if (h != NULL
	      && !bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_offset))
	    return FALSE;
	  break;
	}

      if (h != NULL)
	{
	  if (call_reloc_p)
	    /* We may need a .plt entry if the function this reloc
	       refers to is in a different object, regardless of the
	       symbol's type.  We can't tell for sure yet, because
	       something later might force the symbol local.  */
	    h->needs_plt = 1;
	  else if (may_need_local_target_p)
	    /* If this reloc is in a read-only section, we might
	       need a copy reloc.  We can't check reliably at this
	       stage whether the section is read-only, as input
	       sections have not yet been mapped to output sections.
	       Tentatively set the flag for now, and correct in
	       adjust_dynamic_symbol.  */
	    h->non_got_ref = 1;
	}

      /* PLT refcounting is only needed for globals and for local
	 STT_GNU_IFUNC symbols (which get entries in .iplt).  */
      if (may_need_local_target_p
	  && (h != NULL || ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC))
	{
	  union gotplt_union *root_plt;
	  struct arm_plt_info *arm_plt;
	  struct arm_local_iplt_info *local_iplt;

	  if (h != NULL)
	    {
	      root_plt = &h->plt;
	      arm_plt = &eh->plt;
	    }
	  else
	    {
	      local_iplt = elf32_arm_create_local_iplt (abfd, r_symndx);
	      if (local_iplt == NULL)
		return FALSE;
	      root_plt = &local_iplt->root;
	      arm_plt = &local_iplt->arm;
	    }

	  /* If the symbol is a function that doesn't bind locally,
	     this relocation will need a PLT entry.  A refcount of -1
	     means the symbol has already been resolved locally.  */
	  if (root_plt->refcount != -1)
	    root_plt->refcount += 1;

	  if (!call_reloc_p)
	    arm_plt->noncall_refcount++;

	  /* It's too early to use htab->use_blx here, so we have to
	     record possible blx references separately from
	     relocs that definitely need a thumb stub.  */

	  if (r_type == R_ARM_THM_CALL)
	    arm_plt->maybe_thumb_refcount += 1;

	  if (r_type == R_ARM_THM_JUMP24
	      || r_type == R_ARM_THM_JUMP19)
	    arm_plt->thumb_refcount += 1;
	}

      if (may_become_dynamic_p)
	{
	  struct elf_dyn_relocs *p, **head;

	  /* Create a reloc section in dynobj.  */
	  if (sreloc == NULL)
	    {
	      sreloc = _bfd_elf_make_dynamic_reloc_section
		(sec, dynobj, 2, abfd, ! htab->use_rel);

	      if (sreloc == NULL)
		return FALSE;

	      /* BPABI objects never have dynamic relocations mapped.  */
	      if (htab->symbian_p)
		{
		  flagword flags;

		  flags = bfd_get_section_flags (dynobj, sreloc);
		  flags &= ~(SEC_LOAD | SEC_ALLOC);
		  bfd_set_section_flags (dynobj, sreloc, flags);
		}
	    }

	  /* If this is a global symbol, count the number of
	     relocations we need for this symbol.  */
	  if (h != NULL)
	    head = &((struct elf32_arm_link_hash_entry *) h)->dyn_relocs;
	  else
	    {
	      head = elf32_arm_get_local_dynreloc_list (abfd, r_symndx, isym);
	      if (head == NULL)
		return FALSE;
	    }

	  /* Reuse the list head if it already describes SEC (relocs
	     for one section are processed together), else prepend a
	     fresh record.  */
	  p = *head;
	  if (p == NULL || p->sec != sec)
	    {
	      bfd_size_type amt = sizeof *p;

	      p = (struct elf_dyn_relocs *) bfd_alloc (htab->root.dynobj, amt);
	      if (p == NULL)
		return FALSE;
	      p->next = *head;
	      *head = p;
	      p->sec = sec;
	      p->count = 0;
	      p->pc_count = 0;
	    }

	  if (r_type == R_ARM_REL32 || r_type == R_ARM_REL32_NOI)
	    p->pc_count += 1;
	  p->count += 1;
	}
    }

  return TRUE;
}
12781
12782 /* Unwinding tables are not referenced directly. This pass marks them as
12783 required if the corresponding code section is marked. */
12784
12785 static bfd_boolean
12786 elf32_arm_gc_mark_extra_sections (struct bfd_link_info *info,
12787 elf_gc_mark_hook_fn gc_mark_hook)
12788 {
12789 bfd *sub;
12790 Elf_Internal_Shdr **elf_shdrp;
12791 bfd_boolean again;
12792
12793 _bfd_elf_gc_mark_extra_sections (info, gc_mark_hook);
12794
12795 /* Marking EH data may cause additional code sections to be marked,
12796 requiring multiple passes. */
12797 again = TRUE;
12798 while (again)
12799 {
12800 again = FALSE;
12801 for (sub = info->input_bfds; sub != NULL; sub = sub->link_next)
12802 {
12803 asection *o;
12804
12805 if (! is_arm_elf (sub))
12806 continue;
12807
12808 elf_shdrp = elf_elfsections (sub);
12809 for (o = sub->sections; o != NULL; o = o->next)
12810 {
12811 Elf_Internal_Shdr *hdr;
12812
12813 hdr = &elf_section_data (o)->this_hdr;
12814 if (hdr->sh_type == SHT_ARM_EXIDX
12815 && hdr->sh_link
12816 && hdr->sh_link < elf_numsections (sub)
12817 && !o->gc_mark
12818 && elf_shdrp[hdr->sh_link]->bfd_section->gc_mark)
12819 {
12820 again = TRUE;
12821 if (!_bfd_elf_gc_mark (info, o, gc_mark_hook))
12822 return FALSE;
12823 }
12824 }
12825 }
12826 }
12827
12828 return TRUE;
12829 }
12830
12831 /* Treat mapping symbols as special target symbols. */
12832
12833 static bfd_boolean
12834 elf32_arm_is_target_special_symbol (bfd * abfd ATTRIBUTE_UNUSED, asymbol * sym)
12835 {
12836 return bfd_is_arm_special_symbol_name (sym->name,
12837 BFD_ARM_SPECIAL_SYM_TYPE_ANY);
12838 }
12839
12840 /* This is a copy of elf_find_function() from elf.c except that
12841 ARM mapping symbols are ignored when looking for function names
12842 and STT_ARM_TFUNC is considered to a function type. */
12843
12844 static bfd_boolean
12845 arm_elf_find_function (bfd * abfd ATTRIBUTE_UNUSED,
12846 asection * section,
12847 asymbol ** symbols,
12848 bfd_vma offset,
12849 const char ** filename_ptr,
12850 const char ** functionname_ptr)
12851 {
12852 const char * filename = NULL;
12853 asymbol * func = NULL;
12854 bfd_vma low_func = 0;
12855 asymbol ** p;
12856
12857 for (p = symbols; *p != NULL; p++)
12858 {
12859 elf_symbol_type *q;
12860
12861 q = (elf_symbol_type *) *p;
12862
12863 switch (ELF_ST_TYPE (q->internal_elf_sym.st_info))
12864 {
12865 default:
12866 break;
12867 case STT_FILE:
12868 filename = bfd_asymbol_name (&q->symbol);
12869 break;
12870 case STT_FUNC:
12871 case STT_ARM_TFUNC:
12872 case STT_NOTYPE:
12873 /* Skip mapping symbols. */
12874 if ((q->symbol.flags & BSF_LOCAL)
12875 && bfd_is_arm_special_symbol_name (q->symbol.name,
12876 BFD_ARM_SPECIAL_SYM_TYPE_ANY))
12877 continue;
12878 /* Fall through. */
12879 if (bfd_get_section (&q->symbol) == section
12880 && q->symbol.value >= low_func
12881 && q->symbol.value <= offset)
12882 {
12883 func = (asymbol *) q;
12884 low_func = q->symbol.value;
12885 }
12886 break;
12887 }
12888 }
12889
12890 if (func == NULL)
12891 return FALSE;
12892
12893 if (filename_ptr)
12894 *filename_ptr = filename;
12895 if (functionname_ptr)
12896 *functionname_ptr = bfd_asymbol_name (func);
12897
12898 return TRUE;
12899 }
12900
12901
/* Find the nearest line to a particular section and offset, for error
   reporting.  This code is a duplicate of the code in elf.c, except
   that it uses arm_elf_find_function.

   Tries DWARF2 line info first, then stabs, then falls back to a plain
   symbol-table search for the function name (with line number 0).
   Returns TRUE if anything useful was found.  */

static bfd_boolean
elf32_arm_find_nearest_line (bfd * abfd,
			     asection * section,
			     asymbol ** symbols,
			     bfd_vma offset,
			     const char ** filename_ptr,
			     const char ** functionname_ptr,
			     unsigned int * line_ptr)
{
  bfd_boolean found = FALSE;

  /* We skip _bfd_dwarf1_find_nearest_line since no known ARM toolchain uses it.  */

  if (_bfd_dwarf2_find_nearest_line (abfd, dwarf_debug_sections,
				     section, symbols, offset,
				     filename_ptr, functionname_ptr,
				     line_ptr, NULL, 0,
				     & elf_tdata (abfd)->dwarf2_find_line_info))
    {
      /* DWARF2 may yield a file/line but no function name; fill it in
	 from the symbol table, ignoring ARM mapping symbols.  */
      if (!*functionname_ptr)
	arm_elf_find_function (abfd, section, symbols, offset,
			       *filename_ptr ? NULL : filename_ptr,
			       functionname_ptr);

      return TRUE;
    }

  /* Next try the stabs debugging information.  */
  if (! _bfd_stab_section_find_nearest_line (abfd, symbols, section, offset,
					     & found, filename_ptr,
					     functionname_ptr, line_ptr,
					     & elf_tdata (abfd)->line_info))
    return FALSE;

  if (found && (*functionname_ptr || *line_ptr))
    return TRUE;

  if (symbols == NULL)
    return FALSE;

  /* Last resort: locate the enclosing function symbol only.  */
  if (! arm_elf_find_function (abfd, section, symbols, offset,
			       filename_ptr, functionname_ptr))
    return FALSE;

  *line_ptr = 0;
  return TRUE;
}
12952
12953 static bfd_boolean
12954 elf32_arm_find_inliner_info (bfd * abfd,
12955 const char ** filename_ptr,
12956 const char ** functionname_ptr,
12957 unsigned int * line_ptr)
12958 {
12959 bfd_boolean found;
12960 found = _bfd_dwarf2_find_inliner_info (abfd, filename_ptr,
12961 functionname_ptr, line_ptr,
12962 & elf_tdata (abfd)->dwarf2_find_line_info);
12963 return found;
12964 }
12965
/* Adjust a symbol defined by a dynamic object and referenced by a
   regular object.  The current definition is in some section of the
   dynamic object, but we're not including those sections.  We have to
   change the definition to something the rest of the link can
   understand.

   This is the elf_backend_adjust_dynamic_symbol hook.  It decides,
   per symbol, between a PLT entry, reuse of a weakdef's definition,
   or a copy relocation into .dynbss.  */

static bfd_boolean
elf32_arm_adjust_dynamic_symbol (struct bfd_link_info * info,
				 struct elf_link_hash_entry * h)
{
  bfd * dynobj;
  asection * s;
  struct elf32_arm_link_hash_entry * eh;
  struct elf32_arm_link_hash_table *globals;

  globals = elf32_arm_hash_table (info);
  if (globals == NULL)
    return FALSE;

  dynobj = elf_hash_table (info)->dynobj;

  /* Make sure we know what is going on here.  */
  BFD_ASSERT (dynobj != NULL
	      && (h->needs_plt
		  || h->type == STT_GNU_IFUNC
		  || h->u.weakdef != NULL
		  || (h->def_dynamic
		      && h->ref_regular
		      && !h->def_regular)));

  eh = (struct elf32_arm_link_hash_entry *) h;

  /* If this is a function, put it in the procedure linkage table.  We
     will fill in the contents of the procedure linkage table later,
     when we know the address of the .got section.  */
  if (h->type == STT_FUNC || h->type == STT_GNU_IFUNC || h->needs_plt)
    {
      /* Calls to STT_GNU_IFUNC symbols always use a PLT, even if the
	 symbol binds locally.  */
      if (h->plt.refcount <= 0
	  || (h->type != STT_GNU_IFUNC
	      && (SYMBOL_CALLS_LOCAL (info, h)
		  || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
		      && h->root.type == bfd_link_hash_undefweak))))
	{
	  /* This case can occur if we saw a PLT32 reloc in an input
	     file, but the symbol was never referred to by a dynamic
	     object, or if all references were garbage collected.  In
	     such a case, we don't actually need to build a procedure
	     linkage table, and we can just do a PC24 reloc instead.  */
	  h->plt.offset = (bfd_vma) -1;
	  /* Clear the ARM-specific Thumb/non-call sub-counts too.  */
	  eh->plt.thumb_refcount = 0;
	  eh->plt.maybe_thumb_refcount = 0;
	  eh->plt.noncall_refcount = 0;
	  h->needs_plt = 0;
	}

      return TRUE;
    }
  else
    {
      /* It's possible that we incorrectly decided a .plt reloc was
	 needed for an R_ARM_PC24 or similar reloc to a non-function sym
	 in check_relocs.  We can't decide accurately between function
	 and non-function syms in check-relocs; Objects loaded later in
	 the link may change h->type.  So fix it now.  */
      h->plt.offset = (bfd_vma) -1;
      eh->plt.thumb_refcount = 0;
      eh->plt.maybe_thumb_refcount = 0;
      eh->plt.noncall_refcount = 0;
    }

  /* If this is a weak symbol, and there is a real definition, the
     processor independent code will have arranged for us to see the
     real definition first, and we can just use the same value.  */
  if (h->u.weakdef != NULL)
    {
      BFD_ASSERT (h->u.weakdef->root.type == bfd_link_hash_defined
		  || h->u.weakdef->root.type == bfd_link_hash_defweak);
      h->root.u.def.section = h->u.weakdef->root.u.def.section;
      h->root.u.def.value = h->u.weakdef->root.u.def.value;
      return TRUE;
    }

  /* If there are no non-GOT references, we do not need a copy
     relocation.  */
  if (!h->non_got_ref)
    return TRUE;

  /* This is a reference to a symbol defined by a dynamic object which
     is not a function.  */

  /* If we are creating a shared library, we must presume that the
     only references to the symbol are via the global offset table.
     For such cases we need not do anything here; the relocations will
     be handled correctly by relocate_section.  Relocatable executables
     can reference data in shared objects directly, so we don't need to
     do anything here.  */
  if (info->shared || globals->root.is_relocatable_executable)
    return TRUE;

  /* We must allocate the symbol in our .dynbss section, which will
     become part of the .bss section of the executable.  There will be
     an entry for this symbol in the .dynsym section.  The dynamic
     object will contain position independent code, so all references
     from the dynamic object to this symbol will go through the global
     offset table.  The dynamic linker will use the .dynsym entry to
     determine the address it must put in the global offset table, so
     both the dynamic object and the regular object will refer to the
     same memory location for the variable.  */
  s = bfd_get_linker_section (dynobj, ".dynbss");
  BFD_ASSERT (s != NULL);

  /* We must generate a R_ARM_COPY reloc to tell the dynamic linker to
     copy the initial value out of the dynamic object and into the
     runtime process image.  We need to remember the offset into the
     .rel(a).bss section we are going to use.  */
  if ((h->root.u.def.section->flags & SEC_ALLOC) != 0 && h->size != 0)
    {
      asection *srel;

      srel = bfd_get_linker_section (dynobj, RELOC_SECTION (globals, ".bss"));
      elf32_arm_allocate_dynrelocs (info, srel, 1);
      h->needs_copy = 1;
    }

  return _bfd_elf_adjust_dynamic_copy (h, s);
}
13094
13095 /* Allocate space in .plt, .got and associated reloc sections for
13096 dynamic relocs. */
13097
13098 static bfd_boolean
13099 allocate_dynrelocs_for_symbol (struct elf_link_hash_entry *h, void * inf)
13100 {
13101 struct bfd_link_info *info;
13102 struct elf32_arm_link_hash_table *htab;
13103 struct elf32_arm_link_hash_entry *eh;
13104 struct elf_dyn_relocs *p;
13105
13106 if (h->root.type == bfd_link_hash_indirect)
13107 return TRUE;
13108
13109 eh = (struct elf32_arm_link_hash_entry *) h;
13110
13111 info = (struct bfd_link_info *) inf;
13112 htab = elf32_arm_hash_table (info);
13113 if (htab == NULL)
13114 return FALSE;
13115
13116 if ((htab->root.dynamic_sections_created || h->type == STT_GNU_IFUNC)
13117 && h->plt.refcount > 0)
13118 {
13119 /* Make sure this symbol is output as a dynamic symbol.
13120 Undefined weak syms won't yet be marked as dynamic. */
13121 if (h->dynindx == -1
13122 && !h->forced_local)
13123 {
13124 if (! bfd_elf_link_record_dynamic_symbol (info, h))
13125 return FALSE;
13126 }
13127
13128 /* If the call in the PLT entry binds locally, the associated
13129 GOT entry should use an R_ARM_IRELATIVE relocation instead of
13130 the usual R_ARM_JUMP_SLOT. Put it in the .iplt section rather
13131 than the .plt section. */
13132 if (h->type == STT_GNU_IFUNC && SYMBOL_CALLS_LOCAL (info, h))
13133 {
13134 eh->is_iplt = 1;
13135 if (eh->plt.noncall_refcount == 0
13136 && SYMBOL_REFERENCES_LOCAL (info, h))
13137 /* All non-call references can be resolved directly.
13138 This means that they can (and in some cases, must)
13139 resolve directly to the run-time target, rather than
13140 to the PLT. That in turns means that any .got entry
13141 would be equal to the .igot.plt entry, so there's
13142 no point having both. */
13143 h->got.refcount = 0;
13144 }
13145
13146 if (info->shared
13147 || eh->is_iplt
13148 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
13149 {
13150 elf32_arm_allocate_plt_entry (info, eh->is_iplt, &h->plt, &eh->plt);
13151
13152 /* If this symbol is not defined in a regular file, and we are
13153 not generating a shared library, then set the symbol to this
13154 location in the .plt. This is required to make function
13155 pointers compare as equal between the normal executable and
13156 the shared library. */
13157 if (! info->shared
13158 && !h->def_regular)
13159 {
13160 h->root.u.def.section = htab->root.splt;
13161 h->root.u.def.value = h->plt.offset;
13162
13163 /* Make sure the function is not marked as Thumb, in case
13164 it is the target of an ABS32 relocation, which will
13165 point to the PLT entry. */
13166 h->target_internal = ST_BRANCH_TO_ARM;
13167 }
13168
13169 htab->next_tls_desc_index++;
13170
13171 /* VxWorks executables have a second set of relocations for
13172 each PLT entry. They go in a separate relocation section,
13173 which is processed by the kernel loader. */
13174 if (htab->vxworks_p && !info->shared)
13175 {
13176 /* There is a relocation for the initial PLT entry:
13177 an R_ARM_32 relocation for _GLOBAL_OFFSET_TABLE_. */
13178 if (h->plt.offset == htab->plt_header_size)
13179 elf32_arm_allocate_dynrelocs (info, htab->srelplt2, 1);
13180
13181 /* There are two extra relocations for each subsequent
13182 PLT entry: an R_ARM_32 relocation for the GOT entry,
13183 and an R_ARM_32 relocation for the PLT entry. */
13184 elf32_arm_allocate_dynrelocs (info, htab->srelplt2, 2);
13185 }
13186 }
13187 else
13188 {
13189 h->plt.offset = (bfd_vma) -1;
13190 h->needs_plt = 0;
13191 }
13192 }
13193 else
13194 {
13195 h->plt.offset = (bfd_vma) -1;
13196 h->needs_plt = 0;
13197 }
13198
13199 eh = (struct elf32_arm_link_hash_entry *) h;
13200 eh->tlsdesc_got = (bfd_vma) -1;
13201
13202 if (h->got.refcount > 0)
13203 {
13204 asection *s;
13205 bfd_boolean dyn;
13206 int tls_type = elf32_arm_hash_entry (h)->tls_type;
13207 int indx;
13208
13209 /* Make sure this symbol is output as a dynamic symbol.
13210 Undefined weak syms won't yet be marked as dynamic. */
13211 if (h->dynindx == -1
13212 && !h->forced_local)
13213 {
13214 if (! bfd_elf_link_record_dynamic_symbol (info, h))
13215 return FALSE;
13216 }
13217
13218 if (!htab->symbian_p)
13219 {
13220 s = htab->root.sgot;
13221 h->got.offset = s->size;
13222
13223 if (tls_type == GOT_UNKNOWN)
13224 abort ();
13225
13226 if (tls_type == GOT_NORMAL)
13227 /* Non-TLS symbols need one GOT slot. */
13228 s->size += 4;
13229 else
13230 {
13231 if (tls_type & GOT_TLS_GDESC)
13232 {
13233 /* R_ARM_TLS_DESC needs 2 GOT slots. */
13234 eh->tlsdesc_got
13235 = (htab->root.sgotplt->size
13236 - elf32_arm_compute_jump_table_size (htab));
13237 htab->root.sgotplt->size += 8;
13238 h->got.offset = (bfd_vma) -2;
13239 /* plt.got_offset needs to know there's a TLS_DESC
13240 reloc in the middle of .got.plt. */
13241 htab->num_tls_desc++;
13242 }
13243
13244 if (tls_type & GOT_TLS_GD)
13245 {
13246 /* R_ARM_TLS_GD32 needs 2 consecutive GOT slots. If
13247 the symbol is both GD and GDESC, got.offset may
13248 have been overwritten. */
13249 h->got.offset = s->size;
13250 s->size += 8;
13251 }
13252
13253 if (tls_type & GOT_TLS_IE)
13254 /* R_ARM_TLS_IE32 needs one GOT slot. */
13255 s->size += 4;
13256 }
13257
13258 dyn = htab->root.dynamic_sections_created;
13259
13260 indx = 0;
13261 if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
13262 && (!info->shared
13263 || !SYMBOL_REFERENCES_LOCAL (info, h)))
13264 indx = h->dynindx;
13265
13266 if (tls_type != GOT_NORMAL
13267 && (info->shared || indx != 0)
13268 && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
13269 || h->root.type != bfd_link_hash_undefweak))
13270 {
13271 if (tls_type & GOT_TLS_IE)
13272 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
13273
13274 if (tls_type & GOT_TLS_GD)
13275 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
13276
13277 if (tls_type & GOT_TLS_GDESC)
13278 {
13279 elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1);
13280 /* GDESC needs a trampoline to jump to. */
13281 htab->tls_trampoline = -1;
13282 }
13283
13284 /* Only GD needs it. GDESC just emits one relocation per
13285 2 entries. */
13286 if ((tls_type & GOT_TLS_GD) && indx != 0)
13287 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
13288 }
13289 else if (indx != -1 && !SYMBOL_REFERENCES_LOCAL (info, h))
13290 {
13291 if (htab->root.dynamic_sections_created)
13292 /* Reserve room for the GOT entry's R_ARM_GLOB_DAT relocation. */
13293 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
13294 }
13295 else if (h->type == STT_GNU_IFUNC
13296 && eh->plt.noncall_refcount == 0)
13297 /* No non-call references resolve the STT_GNU_IFUNC's PLT entry;
13298 they all resolve dynamically instead. Reserve room for the
13299 GOT entry's R_ARM_IRELATIVE relocation. */
13300 elf32_arm_allocate_irelocs (info, htab->root.srelgot, 1);
13301 else if (info->shared && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
13302 || h->root.type != bfd_link_hash_undefweak))
13303 /* Reserve room for the GOT entry's R_ARM_RELATIVE relocation. */
13304 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
13305 }
13306 }
13307 else
13308 h->got.offset = (bfd_vma) -1;
13309
13310 /* Allocate stubs for exported Thumb functions on v4t. */
13311 if (!htab->use_blx && h->dynindx != -1
13312 && h->def_regular
13313 && h->target_internal == ST_BRANCH_TO_THUMB
13314 && ELF_ST_VISIBILITY (h->other) == STV_DEFAULT)
13315 {
13316 struct elf_link_hash_entry * th;
13317 struct bfd_link_hash_entry * bh;
13318 struct elf_link_hash_entry * myh;
13319 char name[1024];
13320 asection *s;
13321 bh = NULL;
13322 /* Create a new symbol to regist the real location of the function. */
13323 s = h->root.u.def.section;
13324 sprintf (name, "__real_%s", h->root.root.string);
13325 _bfd_generic_link_add_one_symbol (info, s->owner,
13326 name, BSF_GLOBAL, s,
13327 h->root.u.def.value,
13328 NULL, TRUE, FALSE, &bh);
13329
13330 myh = (struct elf_link_hash_entry *) bh;
13331 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
13332 myh->forced_local = 1;
13333 myh->target_internal = ST_BRANCH_TO_THUMB;
13334 eh->export_glue = myh;
13335 th = record_arm_to_thumb_glue (info, h);
13336 /* Point the symbol at the stub. */
13337 h->type = ELF_ST_INFO (ELF_ST_BIND (h->type), STT_FUNC);
13338 h->target_internal = ST_BRANCH_TO_ARM;
13339 h->root.u.def.section = th->root.u.def.section;
13340 h->root.u.def.value = th->root.u.def.value & ~1;
13341 }
13342
13343 if (eh->dyn_relocs == NULL)
13344 return TRUE;
13345
13346 /* In the shared -Bsymbolic case, discard space allocated for
13347 dynamic pc-relative relocs against symbols which turn out to be
13348 defined in regular objects. For the normal shared case, discard
13349 space for pc-relative relocs that have become local due to symbol
13350 visibility changes. */
13351
13352 if (info->shared || htab->root.is_relocatable_executable)
13353 {
13354 /* The only relocs that use pc_count are R_ARM_REL32 and
13355 R_ARM_REL32_NOI, which will appear on something like
13356 ".long foo - .". We want calls to protected symbols to resolve
13357 directly to the function rather than going via the plt. If people
13358 want function pointer comparisons to work as expected then they
13359 should avoid writing assembly like ".long foo - .". */
13360 if (SYMBOL_CALLS_LOCAL (info, h))
13361 {
13362 struct elf_dyn_relocs **pp;
13363
13364 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
13365 {
13366 p->count -= p->pc_count;
13367 p->pc_count = 0;
13368 if (p->count == 0)
13369 *pp = p->next;
13370 else
13371 pp = &p->next;
13372 }
13373 }
13374
13375 if (htab->vxworks_p)
13376 {
13377 struct elf_dyn_relocs **pp;
13378
13379 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
13380 {
13381 if (strcmp (p->sec->output_section->name, ".tls_vars") == 0)
13382 *pp = p->next;
13383 else
13384 pp = &p->next;
13385 }
13386 }
13387
13388 /* Also discard relocs on undefined weak syms with non-default
13389 visibility. */
13390 if (eh->dyn_relocs != NULL
13391 && h->root.type == bfd_link_hash_undefweak)
13392 {
13393 if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
13394 eh->dyn_relocs = NULL;
13395
13396 /* Make sure undefined weak symbols are output as a dynamic
13397 symbol in PIEs. */
13398 else if (h->dynindx == -1
13399 && !h->forced_local)
13400 {
13401 if (! bfd_elf_link_record_dynamic_symbol (info, h))
13402 return FALSE;
13403 }
13404 }
13405
13406 else if (htab->root.is_relocatable_executable && h->dynindx == -1
13407 && h->root.type == bfd_link_hash_new)
13408 {
13409 /* Output absolute symbols so that we can create relocations
13410 against them. For normal symbols we output a relocation
13411 against the section that contains them. */
13412 if (! bfd_elf_link_record_dynamic_symbol (info, h))
13413 return FALSE;
13414 }
13415
13416 }
13417 else
13418 {
13419 /* For the non-shared case, discard space for relocs against
13420 symbols which turn out to need copy relocs or are not
13421 dynamic. */
13422
13423 if (!h->non_got_ref
13424 && ((h->def_dynamic
13425 && !h->def_regular)
13426 || (htab->root.dynamic_sections_created
13427 && (h->root.type == bfd_link_hash_undefweak
13428 || h->root.type == bfd_link_hash_undefined))))
13429 {
13430 /* Make sure this symbol is output as a dynamic symbol.
13431 Undefined weak syms won't yet be marked as dynamic. */
13432 if (h->dynindx == -1
13433 && !h->forced_local)
13434 {
13435 if (! bfd_elf_link_record_dynamic_symbol (info, h))
13436 return FALSE;
13437 }
13438
13439 /* If that succeeded, we know we'll be keeping all the
13440 relocs. */
13441 if (h->dynindx != -1)
13442 goto keep;
13443 }
13444
13445 eh->dyn_relocs = NULL;
13446
13447 keep: ;
13448 }
13449
13450 /* Finally, allocate space. */
13451 for (p = eh->dyn_relocs; p != NULL; p = p->next)
13452 {
13453 asection *sreloc = elf_section_data (p->sec)->sreloc;
13454 if (h->type == STT_GNU_IFUNC
13455 && eh->plt.noncall_refcount == 0
13456 && SYMBOL_REFERENCES_LOCAL (info, h))
13457 elf32_arm_allocate_irelocs (info, sreloc, p->count);
13458 else
13459 elf32_arm_allocate_dynrelocs (info, sreloc, p->count);
13460 }
13461
13462 return TRUE;
13463 }
13464
13465 /* Find any dynamic relocs that apply to read-only sections. */
13466
13467 static bfd_boolean
13468 elf32_arm_readonly_dynrelocs (struct elf_link_hash_entry * h, void * inf)
13469 {
13470 struct elf32_arm_link_hash_entry * eh;
13471 struct elf_dyn_relocs * p;
13472
13473 eh = (struct elf32_arm_link_hash_entry *) h;
13474 for (p = eh->dyn_relocs; p != NULL; p = p->next)
13475 {
13476 asection *s = p->sec;
13477
13478 if (s != NULL && (s->flags & SEC_READONLY) != 0)
13479 {
13480 struct bfd_link_info *info = (struct bfd_link_info *) inf;
13481
13482 info->flags |= DF_TEXTREL;
13483
13484 /* Not an error, just cut short the traversal. */
13485 return FALSE;
13486 }
13487 }
13488 return TRUE;
13489 }
13490
13491 void
13492 bfd_elf32_arm_set_byteswap_code (struct bfd_link_info *info,
13493 int byteswap_code)
13494 {
13495 struct elf32_arm_link_hash_table *globals;
13496
13497 globals = elf32_arm_hash_table (info);
13498 if (globals == NULL)
13499 return;
13500
13501 globals->byteswap_code = byteswap_code;
13502 }
13503
/* Set the sizes of the dynamic sections.  Called by the generic ELF
   linker after adjust_dynamic_symbol.  Walks all input bfds to assign
   .got offsets and dynamic-reloc space for local symbols, sizes the
   global-symbol PLT/GOT via a hash traversal, sizes the ARM/Thumb
   interworking glue, and finally allocates section contents and emits
   the DT_* entries for the .dynamic section.  Returns FALSE on
   allocation failure.  */

static bfd_boolean
elf32_arm_size_dynamic_sections (bfd * output_bfd ATTRIBUTE_UNUSED,
				 struct bfd_link_info * info)
{
  bfd * dynobj;
  asection * s;
  bfd_boolean plt;
  bfd_boolean relocs;
  bfd *ibfd;
  struct elf32_arm_link_hash_table *htab;

  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return FALSE;

  dynobj = elf_hash_table (info)->dynobj;
  BFD_ASSERT (dynobj != NULL);
  /* Decide whether BLX can be used for interworking stubs.  */
  check_use_blx (htab);

  if (elf_hash_table (info)->dynamic_sections_created)
    {
      /* Set the contents of the .interp section to the interpreter.  */
      if (info->executable)
	{
	  s = bfd_get_linker_section (dynobj, ".interp");
	  BFD_ASSERT (s != NULL);
	  s->size = sizeof ELF_DYNAMIC_INTERPRETER;
	  s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER;
	}
    }

  /* Set up .got offsets for local syms, and space for local dynamic
     relocs.  */
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
    {
      bfd_signed_vma *local_got;
      bfd_signed_vma *end_local_got;
      struct arm_local_iplt_info **local_iplt_ptr, *local_iplt;
      char *local_tls_type;
      bfd_vma *local_tlsdesc_gotent;
      bfd_size_type locsymcount;
      Elf_Internal_Shdr *symtab_hdr;
      asection *srel;
      bfd_boolean is_vxworks = htab->vxworks_p;
      unsigned int symndx;

      if (! is_arm_elf (ibfd))
	continue;

      /* First reserve space for relocs recorded against local symbols
	 by check_relocs.  */
      for (s = ibfd->sections; s != NULL; s = s->next)
	{
	  struct elf_dyn_relocs *p;

	  for (p = (struct elf_dyn_relocs *)
		 elf_section_data (s)->local_dynrel; p != NULL; p = p->next)
	    {
	      if (!bfd_is_abs_section (p->sec)
		  && bfd_is_abs_section (p->sec->output_section))
		{
		  /* Input section has been discarded, either because
		     it is a copy of a linkonce section or due to
		     linker script /DISCARD/, so we'll be discarding
		     the relocs too.  */
		}
	      else if (is_vxworks
		       && strcmp (p->sec->output_section->name,
				  ".tls_vars") == 0)
		{
		  /* Relocations in vxworks .tls_vars sections are
		     handled specially by the loader.  */
		}
	      else if (p->count != 0)
		{
		  srel = elf_section_data (p->sec)->sreloc;
		  elf32_arm_allocate_dynrelocs (info, srel, p->count);
		  if ((p->sec->output_section->flags & SEC_READONLY) != 0)
		    info->flags |= DF_TEXTREL;
		}
	    }
	}

      local_got = elf_local_got_refcounts (ibfd);
      if (!local_got)
	continue;

      /* Per-local-symbol arrays run in parallel: GOT refcount, iplt
	 info, TLS type and TLSDESC GOT offset.  */
      symtab_hdr = & elf_symtab_hdr (ibfd);
      locsymcount = symtab_hdr->sh_info;
      end_local_got = local_got + locsymcount;
      local_iplt_ptr = elf32_arm_local_iplt (ibfd);
      local_tls_type = elf32_arm_local_got_tls_type (ibfd);
      local_tlsdesc_gotent = elf32_arm_local_tlsdesc_gotent (ibfd);
      symndx = 0;
      s = htab->root.sgot;
      srel = htab->root.srelgot;
      for (; local_got < end_local_got;
	   ++local_got, ++local_iplt_ptr, ++local_tls_type,
	   ++local_tlsdesc_gotent, ++symndx)
	{
	  *local_tlsdesc_gotent = (bfd_vma) -1;
	  local_iplt = *local_iplt_ptr;
	  if (local_iplt != NULL)
	    {
	      struct elf_dyn_relocs *p;

	      if (local_iplt->root.refcount > 0)
		{
		  elf32_arm_allocate_plt_entry (info, TRUE,
						&local_iplt->root,
						&local_iplt->arm);
		  if (local_iplt->arm.noncall_refcount == 0)
		    /* All references to the PLT are calls, so all
		       non-call references can resolve directly to the
		       run-time target.  This means that the .got entry
		       would be the same as the .igot.plt entry, so there's
		       no point creating both.  */
		    *local_got = 0;
		}
	      else
		{
		  BFD_ASSERT (local_iplt->arm.noncall_refcount == 0);
		  /* -1 marks "no PLT entry" for later passes.  */
		  local_iplt->root.offset = (bfd_vma) -1;
		}

	      for (p = local_iplt->dyn_relocs; p != NULL; p = p->next)
		{
		  asection *psrel;

		  psrel = elf_section_data (p->sec)->sreloc;
		  if (local_iplt->arm.noncall_refcount == 0)
		    elf32_arm_allocate_irelocs (info, psrel, p->count);
		  else
		    elf32_arm_allocate_dynrelocs (info, psrel, p->count);
		}
	    }
	  if (*local_got > 0)
	    {
	      Elf_Internal_Sym *isym;

	      /* Refcount becomes the assigned .got offset from here on.  */
	      *local_got = s->size;
	      if (*local_tls_type & GOT_TLS_GD)
		/* TLS_GD relocs need an 8-byte structure in the GOT.  */
		s->size += 8;
	      if (*local_tls_type & GOT_TLS_GDESC)
		{
		  *local_tlsdesc_gotent = htab->root.sgotplt->size
		    - elf32_arm_compute_jump_table_size (htab);
		  htab->root.sgotplt->size += 8;
		  /* -2 flags a GDESC-only entry living in .got.plt.  */
		  *local_got = (bfd_vma) -2;
		  /* plt.got_offset needs to know there's a TLS_DESC
		     reloc in the middle of .got.plt.  */
		  htab->num_tls_desc++;
		}
	      if (*local_tls_type & GOT_TLS_IE)
		s->size += 4;

	      if (*local_tls_type & GOT_NORMAL)
		{
		  /* If the symbol is both GD and GDESC, *local_got
		     may have been overwritten.  */
		  *local_got = s->size;
		  s->size += 4;
		}

	      isym = bfd_sym_from_r_symndx (&htab->sym_cache, ibfd, symndx);
	      if (isym == NULL)
		return FALSE;

	      /* If all references to an STT_GNU_IFUNC PLT are calls,
		 then all non-call references, including this GOT entry,
		 resolve directly to the run-time target.  */
	      if (ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC
		  && (local_iplt == NULL
		      || local_iplt->arm.noncall_refcount == 0))
		elf32_arm_allocate_irelocs (info, srel, 1);
	      else if (info->shared || output_bfd->flags & DYNAMIC)
		{
		  if ((info->shared && !(*local_tls_type & GOT_TLS_GDESC))
		      || *local_tls_type & GOT_TLS_GD)
		    elf32_arm_allocate_dynrelocs (info, srel, 1);

		  if (info->shared && *local_tls_type & GOT_TLS_GDESC)
		    {
		      elf32_arm_allocate_dynrelocs (info,
						    htab->root.srelplt, 1);
		      /* -1 here means "needed but not yet placed"; the
			 trampoline is laid out further down.  */
		      htab->tls_trampoline = -1;
		    }
		}
	    }
	  else
	    *local_got = (bfd_vma) -1;
	}
    }

  if (htab->tls_ldm_got.refcount > 0)
    {
      /* Allocate two GOT entries and one dynamic relocation (if necessary)
	 for R_ARM_TLS_LDM32 relocations.  */
      htab->tls_ldm_got.offset = htab->root.sgot->size;
      htab->root.sgot->size += 8;
      if (info->shared)
	elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
    }
  else
    htab->tls_ldm_got.offset = -1;

  /* Allocate global sym .plt and .got entries, and space for global
     sym dynamic relocs.  */
  elf_link_hash_traverse (& htab->root, allocate_dynrelocs_for_symbol, info);

  /* Here we rummage through the found bfds to collect glue information.  */
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
    {
      if (! is_arm_elf (ibfd))
	continue;

      /* Initialise mapping tables for code/data.  */
      bfd_elf32_arm_init_maps (ibfd);

      if (!bfd_elf32_arm_process_before_allocation (ibfd, info)
	  || !bfd_elf32_arm_vfp11_erratum_scan (ibfd, info))
	/* xgettext:c-format */
	_bfd_error_handler (_("Errors encountered processing file %s"),
			    ibfd->filename);
    }

  /* Allocate space for the glue sections now that we've sized them.  */
  bfd_elf32_arm_allocate_interworking_sections (info);

  /* For every jump slot reserved in the sgotplt, reloc_count is
     incremented.  However, when we reserve space for TLS descriptors,
     it's not incremented, so in order to compute the space reserved
     for them, it suffices to multiply the reloc count by the jump
     slot size.  */
  if (htab->root.srelplt)
    htab->sgotplt_jump_table_size = elf32_arm_compute_jump_table_size(htab);

  if (htab->tls_trampoline)
    {
      /* Reserve the PLT header first if nothing else needed a PLT.  */
      if (htab->root.splt->size == 0)
	htab->root.splt->size += htab->plt_header_size;

      htab->tls_trampoline = htab->root.splt->size;
      htab->root.splt->size += htab->plt_entry_size;

      /* If we're not using lazy TLS relocations, don't generate the
	 PLT and GOT entries they require.  */
      if (!(info->flags & DF_BIND_NOW))
	{
	  htab->dt_tlsdesc_got = htab->root.sgot->size;
	  htab->root.sgot->size += 4;

	  htab->dt_tlsdesc_plt = htab->root.splt->size;
	  htab->root.splt->size += 4 * ARRAY_SIZE (dl_tlsdesc_lazy_trampoline);
	}
    }

  /* The check_relocs and adjust_dynamic_symbol entry points have
     determined the sizes of the various dynamic sections.  Allocate
     memory for them.  */
  plt = FALSE;
  relocs = FALSE;
  for (s = dynobj->sections; s != NULL; s = s->next)
    {
      const char * name;

      if ((s->flags & SEC_LINKER_CREATED) == 0)
	continue;

      /* It's OK to base decisions on the section name, because none
	 of the dynobj section names depend upon the input files.  */
      name = bfd_get_section_name (dynobj, s);

      if (s == htab->root.splt)
	{
	  /* Remember whether there is a PLT.  */
	  plt = s->size != 0;
	}
      else if (CONST_STRNEQ (name, ".rel"))
	{
	  if (s->size != 0)
	    {
	      /* Remember whether there are any reloc sections other
		 than .rel(a).plt and .rela.plt.unloaded.  */
	      if (s != htab->root.srelplt && s != htab->srelplt2)
		relocs = TRUE;

	      /* We use the reloc_count field as a counter if we need
		 to copy relocs into the output file.  */
	      s->reloc_count = 0;
	    }
	}
      else if (s != htab->root.sgot
	       && s != htab->root.sgotplt
	       && s != htab->root.iplt
	       && s != htab->root.igotplt
	       && s != htab->sdynbss)
	{
	  /* It's not one of our sections, so don't allocate space.  */
	  continue;
	}

      if (s->size == 0)
	{
	  /* If we don't need this section, strip it from the
	     output file.  This is mostly to handle .rel(a).bss and
	     .rel(a).plt.  We must create both sections in
	     create_dynamic_sections, because they must be created
	     before the linker maps input sections to output
	     sections.  The linker does that before
	     adjust_dynamic_symbol is called, and it is that
	     function which decides whether anything needs to go
	     into these sections.  */
	  s->flags |= SEC_EXCLUDE;
	  continue;
	}

      if ((s->flags & SEC_HAS_CONTENTS) == 0)
	continue;

      /* Allocate memory for the section contents.  */
      s->contents = (unsigned char *) bfd_zalloc (dynobj, s->size);
      if (s->contents == NULL)
	return FALSE;
    }

  if (elf_hash_table (info)->dynamic_sections_created)
    {
      /* Add some entries to the .dynamic section.  We fill in the
	 values later, in elf32_arm_finish_dynamic_sections, but we
	 must add the entries now so that we get the correct size for
	 the .dynamic section.  The DT_DEBUG entry is filled in by the
	 dynamic linker and used by the debugger.  */
#define add_dynamic_entry(TAG, VAL) \
  _bfd_elf_add_dynamic_entry (info, TAG, VAL)

      if (info->executable)
	{
	  if (!add_dynamic_entry (DT_DEBUG, 0))
	    return FALSE;
	}

      if (plt)
	{
	  if (   !add_dynamic_entry (DT_PLTGOT, 0)
	      || !add_dynamic_entry (DT_PLTRELSZ, 0)
	      || !add_dynamic_entry (DT_PLTREL,
				     htab->use_rel ? DT_REL : DT_RELA)
	      || !add_dynamic_entry (DT_JMPREL, 0))
	    return FALSE;

	  if (htab->dt_tlsdesc_plt &&
	      (!add_dynamic_entry (DT_TLSDESC_PLT,0)
	       || !add_dynamic_entry (DT_TLSDESC_GOT,0)))
	    return FALSE;
	}

      if (relocs)
	{
	  if (htab->use_rel)
	    {
	      if (!add_dynamic_entry (DT_REL, 0)
		  || !add_dynamic_entry (DT_RELSZ, 0)
		  || !add_dynamic_entry (DT_RELENT, RELOC_SIZE (htab)))
		return FALSE;
	    }
	  else
	    {
	      if (!add_dynamic_entry (DT_RELA, 0)
		  || !add_dynamic_entry (DT_RELASZ, 0)
		  || !add_dynamic_entry (DT_RELAENT, RELOC_SIZE (htab)))
		return FALSE;
	    }
	}

      /* If any dynamic relocs apply to a read-only section,
	 then we need a DT_TEXTREL entry.  */
      if ((info->flags & DF_TEXTREL) == 0)
	elf_link_hash_traverse (& htab->root, elf32_arm_readonly_dynrelocs,
				info);

      if ((info->flags & DF_TEXTREL) != 0)
	{
	  if (!add_dynamic_entry (DT_TEXTREL, 0))
	    return FALSE;
	}
      if (htab->vxworks_p
	  && !elf_vxworks_add_dynamic_entries (output_bfd, info))
	return FALSE;
    }
#undef add_dynamic_entry

  return TRUE;
}
13899
/* Size sections even though they're not dynamic.  We use it to setup
   _TLS_MODULE_BASE_, if needed.  */

static bfd_boolean
elf32_arm_always_size_sections (bfd *output_bfd,
				struct bfd_link_info *info)
{
  asection *tls_sec;

  /* _TLS_MODULE_BASE_ is only meaningful in a final link.  */
  if (info->relocatable)
    return TRUE;

  tls_sec = elf_hash_table (info)->tls_sec;

  if (tls_sec)
    {
      struct elf_link_hash_entry *tlsbase;

      /* Create (or find) the hash entry for _TLS_MODULE_BASE_.  */
      tlsbase = elf_link_hash_lookup
	(elf_hash_table (info), "_TLS_MODULE_BASE_", TRUE, TRUE, FALSE);

      if (tlsbase)
	{
	  struct bfd_link_hash_entry *bh = NULL;
	  const struct elf_backend_data *bed
	    = get_elf_backend_data (output_bfd);

	  /* Define the symbol at offset 0 of the TLS segment.  */
	  if (!(_bfd_generic_link_add_one_symbol
		(info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL,
		 tls_sec, 0, NULL, FALSE,
		 bed->collect, &bh)))
	    return FALSE;

	  tlsbase->type = STT_TLS;
	  /* NOTE(review): BH presumably aliases the entry looked up
	     above; the remaining flags are set through it.  */
	  tlsbase = (struct elf_link_hash_entry *)bh;
	  tlsbase->def_regular = 1;
	  tlsbase->other = STV_HIDDEN;
	  /* Keep the symbol out of the dynamic symbol table.  */
	  (*bed->elf_backend_hide_symbol) (info, tlsbase, TRUE);
	}
    }
  return TRUE;
}
13942
/* Finish up dynamic symbol handling.  We set the contents of various
   dynamic sections here.  Fills in H's PLT entry (if any), emits the
   R_ARM_COPY reloc for copied symbols, and fixes up the special
   _DYNAMIC / _GLOBAL_OFFSET_TABLE_ symbols.  SYM is the output
   symbol about to be written.  */

static bfd_boolean
elf32_arm_finish_dynamic_symbol (bfd * output_bfd,
				 struct bfd_link_info * info,
				 struct elf_link_hash_entry * h,
				 Elf_Internal_Sym * sym)
{
  struct elf32_arm_link_hash_table *htab;
  struct elf32_arm_link_hash_entry *eh;

  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return FALSE;

  eh = (struct elf32_arm_link_hash_entry *) h;

  if (h->plt.offset != (bfd_vma) -1)
    {
      if (!eh->is_iplt)
	{
	  /* Ordinary .plt entries require a dynamic symbol index;
	     .iplt entries are populated elsewhere.  */
	  BFD_ASSERT (h->dynindx != -1);
	  elf32_arm_populate_plt_entry (output_bfd, info, &h->plt, &eh->plt,
					h->dynindx, 0);
	}

      if (!h->def_regular)
	{
	  /* Mark the symbol as undefined, rather than as defined in
	     the .plt section.  Leave the value alone.  */
	  sym->st_shndx = SHN_UNDEF;
	  /* If the symbol is weak, we do need to clear the value.
	     Otherwise, the PLT entry would provide a definition for
	     the symbol even if the symbol wasn't defined anywhere,
	     and so the symbol would never be NULL.  */
	  if (!h->ref_regular_nonweak)
	    sym->st_value = 0;
	}
      else if (eh->is_iplt && eh->plt.noncall_refcount != 0)
	{
	  /* At least one non-call relocation references this .iplt entry,
	     so the .iplt entry is the function's canonical address.  */
	  sym->st_info = ELF_ST_INFO (ELF_ST_BIND (sym->st_info), STT_FUNC);
	  sym->st_target_internal = ST_BRANCH_TO_ARM;
	  sym->st_shndx = (_bfd_elf_section_from_bfd_section
			   (output_bfd, htab->root.iplt->output_section));
	  sym->st_value = (h->plt.offset
			   + htab->root.iplt->output_section->vma
			   + htab->root.iplt->output_offset);
	}
    }

  if (h->needs_copy)
    {
      asection * s;
      Elf_Internal_Rela rel;

      /* This symbol needs a copy reloc.  Set it up.  */
      BFD_ASSERT (h->dynindx != -1
		  && (h->root.type == bfd_link_hash_defined
		      || h->root.type == bfd_link_hash_defweak));

      s = htab->srelbss;
      BFD_ASSERT (s != NULL);

      /* The copy reloc targets the symbol's final address in .dynbss.  */
      rel.r_addend = 0;
      rel.r_offset = (h->root.u.def.value
		      + h->root.u.def.section->output_section->vma
		      + h->root.u.def.section->output_offset);
      rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_COPY);
      elf32_arm_add_dynreloc (output_bfd, info, s, &rel);
    }

  /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute.  On VxWorks,
     the _GLOBAL_OFFSET_TABLE_ symbol is not absolute: it is relative
     to the ".got" section.  */
  if (h == htab->root.hdynamic
      || (!htab->vxworks_p && h == htab->root.hgot))
    sym->st_shndx = SHN_ABS;

  return TRUE;
}
14026
14027 static void
14028 arm_put_trampoline (struct elf32_arm_link_hash_table *htab, bfd *output_bfd,
14029 void *contents,
14030 const unsigned long *template, unsigned count)
14031 {
14032 unsigned ix;
14033
14034 for (ix = 0; ix != count; ix++)
14035 {
14036 unsigned long insn = template[ix];
14037
14038 /* Emit mov pc,rx if bx is not permitted. */
14039 if (htab->fix_v4bx == 1 && (insn & 0x0ffffff0) == 0x012fff10)
14040 insn = (insn & 0xf000000f) | 0x01a0f000;
14041 put_arm_insn (htab, output_bfd, insn, (char *)contents + ix*4);
14042 }
14043 }
14044
14045 /* Install the special first PLT entry for elf32-arm-nacl. Unlike
14046 other variants, NaCl needs this entry in a static executable's
14047 .iplt too. When we're handling that case, GOT_DISPLACEMENT is
14048 zero. For .iplt really only the last bundle is useful, and .iplt
14049 could have a shorter first entry, with each individual PLT entry's
14050 relative branch calculated differently so it targets the last
14051 bundle instead of the instruction before it (labelled .Lplt_tail
14052 above). But it's simpler to keep the size and layout of PLT0
14053 consistent with the dynamic case, at the cost of some dead code at
14054 the start of .iplt and the one dead store to the stack at the start
14055 of .Lplt_tail. */
14056 static void
14057 arm_nacl_put_plt0 (struct elf32_arm_link_hash_table *htab, bfd *output_bfd,
14058 asection *plt, bfd_vma got_displacement)
14059 {
14060 unsigned int i;
14061
14062 put_arm_insn (htab, output_bfd,
14063 elf32_arm_nacl_plt0_entry[0]
14064 | arm_movw_immediate (got_displacement),
14065 plt->contents + 0);
14066 put_arm_insn (htab, output_bfd,
14067 elf32_arm_nacl_plt0_entry[1]
14068 | arm_movt_immediate (got_displacement),
14069 plt->contents + 4);
14070
14071 for (i = 2; i < ARRAY_SIZE (elf32_arm_nacl_plt0_entry); ++i)
14072 put_arm_insn (htab, output_bfd,
14073 elf32_arm_nacl_plt0_entry[i],
14074 plt->contents + (i * 4));
14075 }
14076
/* Finish up the dynamic sections.  Patches the final values into the
   .dynamic entries, writes the first (header) PLT entry, the lazy
   TLSDESC trampoline and the TLS trampoline, fixes up VxWorks
   .rel(a).plt.unloaded relocs, and fills the reserved first words of
   the GOT.  Returns FALSE if a broken linker script discarded the
   dynamic sections.  */

static bfd_boolean
elf32_arm_finish_dynamic_sections (bfd * output_bfd, struct bfd_link_info * info)
{
  bfd * dynobj;
  asection * sgot;
  asection * sdyn;
  struct elf32_arm_link_hash_table *htab;

  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return FALSE;

  dynobj = elf_hash_table (info)->dynobj;

  /* Note: SGOT is .got.plt throughout this function.  */
  sgot = htab->root.sgotplt;
  /* A broken linker script might have discarded the dynamic sections.
     Catch this here so that we do not seg-fault later on.  */
  if (sgot != NULL && bfd_is_abs_section (sgot->output_section))
    return FALSE;
  sdyn = bfd_get_linker_section (dynobj, ".dynamic");

  if (elf_hash_table (info)->dynamic_sections_created)
    {
      asection *splt;
      Elf32_External_Dyn *dyncon, *dynconend;

      splt = htab->root.splt;
      BFD_ASSERT (splt != NULL && sdyn != NULL);
      BFD_ASSERT (htab->symbian_p || sgot != NULL);

      dyncon = (Elf32_External_Dyn *) sdyn->contents;
      dynconend = (Elf32_External_Dyn *) (sdyn->contents + sdyn->size);

      /* Patch each .dynamic entry that needs a final value.  */
      for (; dyncon < dynconend; dyncon++)
	{
	  Elf_Internal_Dyn dyn;
	  const char * name;
	  asection * s;

	  bfd_elf32_swap_dyn_in (dynobj, dyncon, &dyn);

	  switch (dyn.d_tag)
	    {
	      unsigned int type;

	    default:
	      if (htab->vxworks_p
		  && elf_vxworks_finish_dynamic_entry (output_bfd, &dyn))
		bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
	      break;

	    case DT_HASH:
	      name = ".hash";
	      goto get_vma_if_bpabi;
	    case DT_STRTAB:
	      name = ".dynstr";
	      goto get_vma_if_bpabi;
	    case DT_SYMTAB:
	      name = ".dynsym";
	      goto get_vma_if_bpabi;
	    case DT_VERSYM:
	      name = ".gnu.version";
	      goto get_vma_if_bpabi;
	    case DT_VERDEF:
	      name = ".gnu.version_d";
	      goto get_vma_if_bpabi;
	    case DT_VERNEED:
	      name = ".gnu.version_r";
	      goto get_vma_if_bpabi;

	    case DT_PLTGOT:
	      name = ".got";
	      goto get_vma;
	    case DT_JMPREL:
	      name = RELOC_SECTION (htab, ".plt");
	    get_vma:
	      s = bfd_get_section_by_name (output_bfd, name);
	      if (s == NULL)
		{
		  /* PR ld/14397: Issue an error message if a required section is missing.  */
		  (*_bfd_error_handler)
		    (_("error: required section '%s' not found in the linker script"), name);
		  bfd_set_error (bfd_error_invalid_operation);
		  return FALSE;
		}
	      if (!htab->symbian_p)
		dyn.d_un.d_ptr = s->vma;
	      else
		/* In the BPABI, tags in the PT_DYNAMIC section point
		   at the file offset, not the memory address, for the
		   convenience of the post linker.  */
		dyn.d_un.d_ptr = s->filepos;
	      bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
	      break;

	    get_vma_if_bpabi:
	      if (htab->symbian_p)
		goto get_vma;
	      break;

	    case DT_PLTRELSZ:
	      s = htab->root.srelplt;
	      BFD_ASSERT (s != NULL);
	      dyn.d_un.d_val = s->size;
	      bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
	      break;

	    case DT_RELSZ:
	    case DT_RELASZ:
	      if (!htab->symbian_p)
		{
		  /* My reading of the SVR4 ABI indicates that the
		     procedure linkage table relocs (DT_JMPREL) should be
		     included in the overall relocs (DT_REL).  This is
		     what Solaris does.  However, UnixWare can not handle
		     that case.  Therefore, we override the DT_RELSZ entry
		     here to make it not include the JMPREL relocs.  Since
		     the linker script arranges for .rel(a).plt to follow all
		     other relocation sections, we don't have to worry
		     about changing the DT_REL entry.  */
		  s = htab->root.srelplt;
		  if (s != NULL)
		    dyn.d_un.d_val -= s->size;
		  bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
		  break;
		}
	      /* Fall through.  */

	    case DT_REL:
	    case DT_RELA:
	      /* In the BPABI, the DT_REL tag must point at the file
		 offset, not the VMA, of the first relocation
		 section.  So, we use code similar to that in
		 elflink.c, but do not check for SHF_ALLOC on the
		 relcoation section, since relocations sections are
		 never allocated under the BPABI.  The comments above
		 about Unixware notwithstanding, we include all of the
		 relocations here.  */
	      if (htab->symbian_p)
		{
		  unsigned int i;
		  type = ((dyn.d_tag == DT_REL || dyn.d_tag == DT_RELSZ)
			  ? SHT_REL : SHT_RELA);
		  dyn.d_un.d_val = 0;
		  /* Sum the sizes (for DT_REL(A)SZ) or find the lowest
		     file offset (for DT_REL(A)) over all matching
		     reloc sections.  */
		  for (i = 1; i < elf_numsections (output_bfd); i++)
		    {
		      Elf_Internal_Shdr *hdr
			= elf_elfsections (output_bfd)[i];
		      if (hdr->sh_type == type)
			{
			  if (dyn.d_tag == DT_RELSZ
			      || dyn.d_tag == DT_RELASZ)
			    dyn.d_un.d_val += hdr->sh_size;
			  else if ((ufile_ptr) hdr->sh_offset
				   <= dyn.d_un.d_val - 1)
			    dyn.d_un.d_val = hdr->sh_offset;
			}
		    }
		  bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
		}
	      break;

	    case DT_TLSDESC_PLT:
	      s = htab->root.splt;
	      dyn.d_un.d_ptr = (s->output_section->vma + s->output_offset
				+ htab->dt_tlsdesc_plt);
	      bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
	      break;

	    case DT_TLSDESC_GOT:
	      s = htab->root.sgot;
	      dyn.d_un.d_ptr = (s->output_section->vma + s->output_offset
				+ htab->dt_tlsdesc_got);
	      bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
	      break;

	      /* Set the bottom bit of DT_INIT/FINI if the
		 corresponding function is Thumb.  */
	    case DT_INIT:
	      name = info->init_function;
	      goto get_sym;
	    case DT_FINI:
	      name = info->fini_function;
	    get_sym:
	      /* If it wasn't set by elf_bfd_final_link
		 then there is nothing to adjust.  */
	      if (dyn.d_un.d_val != 0)
		{
		  struct elf_link_hash_entry * eh;

		  eh = elf_link_hash_lookup (elf_hash_table (info), name,
					     FALSE, FALSE, TRUE);
		  if (eh != NULL && eh->target_internal == ST_BRANCH_TO_THUMB)
		    {
		      dyn.d_un.d_val |= 1;
		      bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
		    }
		}
	      break;
	    }
	}

      /* Fill in the first entry in the procedure linkage table.  */
      if (splt->size > 0 && htab->plt_header_size)
	{
	  const bfd_vma *plt0_entry;
	  bfd_vma got_address, plt_address, got_displacement;

	  /* Calculate the addresses of the GOT and PLT.  */
	  got_address = sgot->output_section->vma + sgot->output_offset;
	  plt_address = splt->output_section->vma + splt->output_offset;

	  if (htab->vxworks_p)
	    {
	      /* The VxWorks GOT is relocated by the dynamic linker.
		 Therefore, we must emit relocations rather than simply
		 computing the values now.  */
	      Elf_Internal_Rela rel;

	      plt0_entry = elf32_arm_vxworks_exec_plt0_entry;
	      put_arm_insn (htab, output_bfd, plt0_entry[0],
			    splt->contents + 0);
	      put_arm_insn (htab, output_bfd, plt0_entry[1],
			    splt->contents + 4);
	      put_arm_insn (htab, output_bfd, plt0_entry[2],
			    splt->contents + 8);
	      bfd_put_32 (output_bfd, got_address, splt->contents + 12);

	      /* Generate a relocation for _GLOBAL_OFFSET_TABLE_.  */
	      rel.r_offset = plt_address + 12;
	      rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
	      rel.r_addend = 0;
	      SWAP_RELOC_OUT (htab) (output_bfd, &rel,
				     htab->srelplt2->contents);
	    }
	  else if (htab->nacl_p)
	    arm_nacl_put_plt0 (htab, output_bfd, splt,
			       got_address + 8 - (plt_address + 16));
	  else
	    {
	      got_displacement = got_address - (plt_address + 16);

	      plt0_entry = elf32_arm_plt0_entry;
	      put_arm_insn (htab, output_bfd, plt0_entry[0],
			    splt->contents + 0);
	      put_arm_insn (htab, output_bfd, plt0_entry[1],
			    splt->contents + 4);
	      put_arm_insn (htab, output_bfd, plt0_entry[2],
			    splt->contents + 8);
	      put_arm_insn (htab, output_bfd, plt0_entry[3],
			    splt->contents + 12);

#ifdef FOUR_WORD_PLT
	      /* The displacement value goes in the otherwise-unused
		 last word of the second entry.  */
	      bfd_put_32 (output_bfd, got_displacement, splt->contents + 28);
#else
	      bfd_put_32 (output_bfd, got_displacement, splt->contents + 16);
#endif
	    }
	}

      /* UnixWare sets the entsize of .plt to 4, although that doesn't
	 really seem like the right value.  */
      if (splt->output_section->owner == output_bfd)
	elf_section_data (splt->output_section)->this_hdr.sh_entsize = 4;

      if (htab->dt_tlsdesc_plt)
	{
	  /* NOTE(review): GOT_ADDRESS is the .got.plt address (SGOT
	     aliases root.sgotplt) while GOTPLT_ADDRESS uses root.sgot;
	     the naming is historical.  */
	  bfd_vma got_address
	    = sgot->output_section->vma + sgot->output_offset;
	  bfd_vma gotplt_address = (htab->root.sgot->output_section->vma
				    + htab->root.sgot->output_offset);
	  bfd_vma plt_address
	    = splt->output_section->vma + splt->output_offset;

	  arm_put_trampoline (htab, output_bfd,
			      splt->contents + htab->dt_tlsdesc_plt,
			      dl_tlsdesc_lazy_trampoline, 6);

	  /* The trailing two words are PC-relative offsets consumed by
	     the trampoline; subtract the template's bias constants.  */
	  bfd_put_32 (output_bfd,
		      gotplt_address + htab->dt_tlsdesc_got
		      - (plt_address + htab->dt_tlsdesc_plt)
		      - dl_tlsdesc_lazy_trampoline[6],
		      splt->contents + htab->dt_tlsdesc_plt + 24);
	  bfd_put_32 (output_bfd,
		      got_address - (plt_address + htab->dt_tlsdesc_plt)
		      - dl_tlsdesc_lazy_trampoline[7],
		      splt->contents + htab->dt_tlsdesc_plt + 24 + 4);
	}

      if (htab->tls_trampoline)
	{
	  arm_put_trampoline (htab, output_bfd,
			      splt->contents + htab->tls_trampoline,
			      tls_trampoline, 3);
#ifdef FOUR_WORD_PLT
	  bfd_put_32 (output_bfd, 0x00000000,
		      splt->contents + htab->tls_trampoline + 12);
#endif
	}

      if (htab->vxworks_p && !info->shared && htab->root.splt->size > 0)
	{
	  /* Correct the .rel(a).plt.unloaded relocations.  They will have
	     incorrect symbol indexes.  */
	  int num_plts;
	  unsigned char *p;

	  num_plts = ((htab->root.splt->size - htab->plt_header_size)
		      / htab->plt_entry_size);
	  p = htab->srelplt2->contents + RELOC_SIZE (htab);

	  /* Each PLT entry contributed a pair of relocs: one against
	     _GLOBAL_OFFSET_TABLE_, one against _PROCEDURE_LINKAGE_TABLE_.  */
	  for (; num_plts; num_plts--)
	    {
	      Elf_Internal_Rela rel;

	      SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
	      rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
	      SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
	      p += RELOC_SIZE (htab);

	      SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
	      rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
	      SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
	      p += RELOC_SIZE (htab);
	    }
	}
    }

  if (htab->nacl_p && htab->root.iplt != NULL && htab->root.iplt->size > 0)
    /* NaCl uses a special first entry in .iplt too.  */
    arm_nacl_put_plt0 (htab, output_bfd, htab->root.iplt, 0);

  /* Fill in the first three entries in the global offset table.  */
  if (sgot)
    {
      if (sgot->size > 0)
	{
	  /* Word 0 holds the address of .dynamic (0 when static).  */
	  if (sdyn == NULL)
	    bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents);
	  else
	    bfd_put_32 (output_bfd,
			sdyn->output_section->vma + sdyn->output_offset,
			sgot->contents);
	  /* Words 1 and 2 are filled in by the dynamic linker.  */
	  bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 4);
	  bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 8);
	}

      elf_section_data (sgot->output_section)->this_hdr.sh_entsize = 4;
    }

  return TRUE;
}
14433
14434 static void
14435 elf32_arm_post_process_headers (bfd * abfd, struct bfd_link_info * link_info ATTRIBUTE_UNUSED)
14436 {
14437 Elf_Internal_Ehdr * i_ehdrp; /* ELF file header, internal form. */
14438 struct elf32_arm_link_hash_table *globals;
14439
14440 i_ehdrp = elf_elfheader (abfd);
14441
14442 if (EF_ARM_EABI_VERSION (i_ehdrp->e_flags) == EF_ARM_EABI_UNKNOWN)
14443 i_ehdrp->e_ident[EI_OSABI] = ELFOSABI_ARM;
14444 else
14445 i_ehdrp->e_ident[EI_OSABI] = 0;
14446 i_ehdrp->e_ident[EI_ABIVERSION] = ARM_ELF_ABI_VERSION;
14447
14448 if (link_info)
14449 {
14450 globals = elf32_arm_hash_table (link_info);
14451 if (globals != NULL && globals->byteswap_code)
14452 i_ehdrp->e_flags |= EF_ARM_BE8;
14453 }
14454
14455 if (EF_ARM_EABI_VERSION (i_ehdrp->e_flags) == EF_ARM_EABI_VER5
14456 && ((i_ehdrp->e_type == ET_DYN) || (i_ehdrp->e_type == ET_EXEC)))
14457 {
14458 int abi = bfd_elf_get_obj_attr_int (abfd, OBJ_ATTR_PROC, Tag_ABI_VFP_args);
14459 if (abi)
14460 i_ehdrp->e_flags |= EF_ARM_ABI_FLOAT_HARD;
14461 else
14462 i_ehdrp->e_flags |= EF_ARM_ABI_FLOAT_SOFT;
14463 }
14464 }
14465
14466 static enum elf_reloc_type_class
14467 elf32_arm_reloc_type_class (const struct bfd_link_info *info ATTRIBUTE_UNUSED,
14468 const asection *rel_sec ATTRIBUTE_UNUSED,
14469 const Elf_Internal_Rela *rela)
14470 {
14471 switch ((int) ELF32_R_TYPE (rela->r_info))
14472 {
14473 case R_ARM_RELATIVE:
14474 return reloc_class_relative;
14475 case R_ARM_JUMP_SLOT:
14476 return reloc_class_plt;
14477 case R_ARM_COPY:
14478 return reloc_class_copy;
14479 default:
14480 return reloc_class_normal;
14481 }
14482 }
14483
/* Hook run as ABFD is written out.  Delegates to bfd_arm_update_notes
   to refresh the contents of the ARM note section, if present.  */

static void
elf32_arm_final_write_processing (bfd *abfd, bfd_boolean linker ATTRIBUTE_UNUSED)
{
  bfd_arm_update_notes (abfd, ARM_NOTE_SECTION);
}
14489
14490 /* Return TRUE if this is an unwinding table entry. */
14491
14492 static bfd_boolean
14493 is_arm_elf_unwind_section_name (bfd * abfd ATTRIBUTE_UNUSED, const char * name)
14494 {
14495 return (CONST_STRNEQ (name, ELF_STRING_ARM_unwind)
14496 || CONST_STRNEQ (name, ELF_STRING_ARM_unwind_once));
14497 }
14498
14499
14500 /* Set the type and flags for an ARM section. We do this by
14501 the section name, which is a hack, but ought to work. */
14502
14503 static bfd_boolean
14504 elf32_arm_fake_sections (bfd * abfd, Elf_Internal_Shdr * hdr, asection * sec)
14505 {
14506 const char * name;
14507
14508 name = bfd_get_section_name (abfd, sec);
14509
14510 if (is_arm_elf_unwind_section_name (abfd, name))
14511 {
14512 hdr->sh_type = SHT_ARM_EXIDX;
14513 hdr->sh_flags |= SHF_LINK_ORDER;
14514 }
14515 return TRUE;
14516 }
14517
14518 /* Handle an ARM specific section when reading an object file. This is
14519 called when bfd_section_from_shdr finds a section with an unknown
14520 type. */
14521
14522 static bfd_boolean
14523 elf32_arm_section_from_shdr (bfd *abfd,
14524 Elf_Internal_Shdr * hdr,
14525 const char *name,
14526 int shindex)
14527 {
14528 /* There ought to be a place to keep ELF backend specific flags, but
14529 at the moment there isn't one. We just keep track of the
14530 sections by their name, instead. Fortunately, the ABI gives
14531 names for all the ARM specific sections, so we will probably get
14532 away with this. */
14533 switch (hdr->sh_type)
14534 {
14535 case SHT_ARM_EXIDX:
14536 case SHT_ARM_PREEMPTMAP:
14537 case SHT_ARM_ATTRIBUTES:
14538 break;
14539
14540 default:
14541 return FALSE;
14542 }
14543
14544 if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
14545 return FALSE;
14546
14547 return TRUE;
14548 }
14549
14550 static _arm_elf_section_data *
14551 get_arm_elf_section_data (asection * sec)
14552 {
14553 if (sec && sec->owner && is_arm_elf (sec->owner))
14554 return elf32_arm_section_data (sec);
14555 else
14556 return NULL;
14557 }
14558
/* Context threaded through the mapping-symbol output routines below.  */
typedef struct
{
  /* Opaque data forwarded as the first argument of FUNC.  */
  void *flaginfo;
  /* The link currently being performed.  */
  struct bfd_link_info *info;
  /* Section whose symbols are currently being emitted.  */
  asection *sec;
  /* Output section index corresponding to SEC.  */
  int sec_shndx;
  /* Callback used to emit each symbol.  */
  int (*func) (void *, const char *, Elf_Internal_Sym *,
	       asection *, struct elf_link_hash_entry *);
} output_arch_syminfo;
14568
/* The three kinds of ARM ELF mapping symbol; the order matches the
   names[] table in elf32_arm_output_map_sym.  */
enum map_symbol_type
{
  ARM_MAP_ARM,		/* "$a": start of a run of ARM code.  */
  ARM_MAP_THUMB,	/* "$t": start of a run of Thumb code.  */
  ARM_MAP_DATA		/* "$d": start of a run of literal data.  */
};
14575
14576
14577 /* Output a single mapping symbol. */
14578
14579 static bfd_boolean
14580 elf32_arm_output_map_sym (output_arch_syminfo *osi,
14581 enum map_symbol_type type,
14582 bfd_vma offset)
14583 {
14584 static const char *names[3] = {"$a", "$t", "$d"};
14585 Elf_Internal_Sym sym;
14586
14587 sym.st_value = osi->sec->output_section->vma
14588 + osi->sec->output_offset
14589 + offset;
14590 sym.st_size = 0;
14591 sym.st_other = 0;
14592 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
14593 sym.st_shndx = osi->sec_shndx;
14594 sym.st_target_internal = 0;
14595 elf32_arm_section_map_add (osi->sec, names[type][1], offset);
14596 return osi->func (osi->flaginfo, names[type], &sym, osi->sec, NULL) == 1;
14597 }
14598
/* Output mapping symbols for the PLT entry described by ROOT_PLT and ARM_PLT.
   IS_IPLT_ENTRY_P says whether the PLT is in .iplt rather than .plt.
   Returns FALSE only if a symbol could not be emitted; a symbol with
   no PLT entry is silently skipped.  */

static bfd_boolean
elf32_arm_output_plt_map_1 (output_arch_syminfo *osi,
			    bfd_boolean is_iplt_entry_p,
			    union gotplt_union *root_plt,
			    struct arm_plt_info *arm_plt)
{
  struct elf32_arm_link_hash_table *htab;
  bfd_vma addr, plt_header_size;

  /* No PLT entry was allocated for this symbol.  */
  if (root_plt->offset == (bfd_vma) -1)
    return TRUE;

  htab = elf32_arm_hash_table (osi->info);
  if (htab == NULL)
    return FALSE;

  /* Point OSI at the section holding the entry; .iplt has no header.  */
  if (is_iplt_entry_p)
    {
      osi->sec = htab->root.iplt;
      plt_header_size = 0;
    }
  else
    {
      osi->sec = htab->root.splt;
      plt_header_size = htab->plt_header_size;
    }
  osi->sec_shndx = (_bfd_elf_section_from_bfd_section
		    (osi->info->output_bfd, osi->sec->output_section));

  /* Mask off the low bit, which marks Thumb entries in the offset.  */
  addr = root_plt->offset & -2;
  if (htab->symbian_p)
    {
      /* Symbian entries: one ARM word of code, then data.  */
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
	return FALSE;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 4))
	return FALSE;
    }
  else if (htab->vxworks_p)
    {
      /* VxWorks entries alternate code and data twice.  */
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
	return FALSE;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 8))
	return FALSE;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr + 12))
	return FALSE;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 20))
	return FALSE;
    }
  else if (htab->nacl_p)
    {
      /* NaCl entries are pure ARM code.  */
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
	return FALSE;
    }
  else
    {
      bfd_boolean thumb_stub_p;

      thumb_stub_p = elf32_arm_plt_needs_thumb_stub_p (osi->info, arm_plt);
      if (thumb_stub_p)
	{
	  /* The Thumb entry stub sits 4 bytes before the ARM entry.  */
	  if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr - 4))
	    return FALSE;
	}
#ifdef FOUR_WORD_PLT
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
	return FALSE;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 12))
	return FALSE;
#else
      /* A three-word PLT with no Thumb thunk contains only Arm code,
	 so only need to output a mapping symbol for the first PLT entry and
	 entries with thumb thunks.  */
      if (thumb_stub_p || addr == plt_header_size)
	{
	  if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
	    return FALSE;
	}
#endif
    }

  return TRUE;
}
14684
/* Output mapping symbols for PLT entries associated with H.
   Callback for elf_link_hash_traverse.  */

static bfd_boolean
elf32_arm_output_plt_map (struct elf_link_hash_entry *h, void *inf)
{
  output_arch_syminfo *osi = (output_arch_syminfo *) inf;
  struct elf32_arm_link_hash_entry *eh;

  if (h->root.type == bfd_link_hash_indirect)
    return TRUE;

  if (h->root.type == bfd_link_hash_warning)
    /* When warning symbols are created, they **replace** the "real"
       entry in the hash table, thus we never get to see the real
       symbol in a hash traversal.  So look at it now.  */
    h = (struct elf_link_hash_entry *) h->root.u.i.link;

  /* NOTE(review): a symbol that binds locally is treated as having an
     .iplt entry (is_iplt_entry_p = SYMBOL_CALLS_LOCAL) — presumably
     covering locally-bound ifuncs; confirm against the allocation code.  */
  eh = (struct elf32_arm_link_hash_entry *) h;
  return elf32_arm_output_plt_map_1 (osi, SYMBOL_CALLS_LOCAL (osi->info, h),
				     &h->plt, &eh->plt);
}
14706
14707 /* Output a single local symbol for a generated stub. */
14708
14709 static bfd_boolean
14710 elf32_arm_output_stub_sym (output_arch_syminfo *osi, const char *name,
14711 bfd_vma offset, bfd_vma size)
14712 {
14713 Elf_Internal_Sym sym;
14714
14715 sym.st_value = osi->sec->output_section->vma
14716 + osi->sec->output_offset
14717 + offset;
14718 sym.st_size = size;
14719 sym.st_other = 0;
14720 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
14721 sym.st_shndx = osi->sec_shndx;
14722 sym.st_target_internal = 0;
14723 return osi->func (osi->flaginfo, name, &sym, osi->sec, NULL) == 1;
14724 }
14725
14726 static bfd_boolean
14727 arm_map_one_stub (struct bfd_hash_entry * gen_entry,
14728 void * in_arg)
14729 {
14730 struct elf32_arm_stub_hash_entry *stub_entry;
14731 asection *stub_sec;
14732 bfd_vma addr;
14733 char *stub_name;
14734 output_arch_syminfo *osi;
14735 const insn_sequence *template_sequence;
14736 enum stub_insn_type prev_type;
14737 int size;
14738 int i;
14739 enum map_symbol_type sym_type;
14740
14741 /* Massage our args to the form they really have. */
14742 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
14743 osi = (output_arch_syminfo *) in_arg;
14744
14745 stub_sec = stub_entry->stub_sec;
14746
14747 /* Ensure this stub is attached to the current section being
14748 processed. */
14749 if (stub_sec != osi->sec)
14750 return TRUE;
14751
14752 addr = (bfd_vma) stub_entry->stub_offset;
14753 stub_name = stub_entry->output_name;
14754
14755 template_sequence = stub_entry->stub_template;
14756 switch (template_sequence[0].type)
14757 {
14758 case ARM_TYPE:
14759 if (!elf32_arm_output_stub_sym (osi, stub_name, addr, stub_entry->stub_size))
14760 return FALSE;
14761 break;
14762 case THUMB16_TYPE:
14763 case THUMB32_TYPE:
14764 if (!elf32_arm_output_stub_sym (osi, stub_name, addr | 1,
14765 stub_entry->stub_size))
14766 return FALSE;
14767 break;
14768 default:
14769 BFD_FAIL ();
14770 return 0;
14771 }
14772
14773 prev_type = DATA_TYPE;
14774 size = 0;
14775 for (i = 0; i < stub_entry->stub_template_size; i++)
14776 {
14777 switch (template_sequence[i].type)
14778 {
14779 case ARM_TYPE:
14780 sym_type = ARM_MAP_ARM;
14781 break;
14782
14783 case THUMB16_TYPE:
14784 case THUMB32_TYPE:
14785 sym_type = ARM_MAP_THUMB;
14786 break;
14787
14788 case DATA_TYPE:
14789 sym_type = ARM_MAP_DATA;
14790 break;
14791
14792 default:
14793 BFD_FAIL ();
14794 return FALSE;
14795 }
14796
14797 if (template_sequence[i].type != prev_type)
14798 {
14799 prev_type = template_sequence[i].type;
14800 if (!elf32_arm_output_map_sym (osi, sym_type, addr + size))
14801 return FALSE;
14802 }
14803
14804 switch (template_sequence[i].type)
14805 {
14806 case ARM_TYPE:
14807 case THUMB32_TYPE:
14808 size += 4;
14809 break;
14810
14811 case THUMB16_TYPE:
14812 size += 2;
14813 break;
14814
14815 case DATA_TYPE:
14816 size += 4;
14817 break;
14818
14819 default:
14820 BFD_FAIL ();
14821 return FALSE;
14822 }
14823 }
14824
14825 return TRUE;
14826 }
14827
/* Output mapping symbols for linker generated sections,
   and for those data-only sections that do not have a
   $d.

   Walks, in order: input data-only sections missing map symbols, the
   ARM<->Thumb and BX glue sections, long-branch stub sections, the
   PLT/iPLT headers and entries, and the TLS trampolines.  FUNC is the
   symbol-output callback; FLAGINFO is passed through to it.  */

static bfd_boolean
elf32_arm_output_arch_local_syms (bfd *output_bfd,
				  struct bfd_link_info *info,
				  void *flaginfo,
				  int (*func) (void *, const char *,
					       Elf_Internal_Sym *,
					       asection *,
					       struct elf_link_hash_entry *))
{
  output_arch_syminfo osi;
  struct elf32_arm_link_hash_table *htab;
  bfd_vma offset;
  bfd_size_type size;
  bfd *input_bfd;

  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return FALSE;

  check_use_blx (htab);

  osi.flaginfo = flaginfo;
  osi.info = info;
  osi.func = func;

  /* Add a $d mapping symbol to data-only sections that
     don't have any mapping symbol.  This may result in (harmless) redundant
     mapping symbols.  */
  for (input_bfd = info->input_bfds;
       input_bfd != NULL;
       input_bfd = input_bfd->link_next)
    {
      /* Only real (non-linker-created) inputs with symbols.  */
      if ((input_bfd->flags & (BFD_LINKER_CREATED | HAS_SYMS)) == HAS_SYMS)
	for (osi.sec = input_bfd->sections;
	     osi.sec != NULL;
	     osi.sec = osi.sec->next)
	  {
	    if (osi.sec->output_section != NULL
		&& ((osi.sec->output_section->flags & (SEC_ALLOC | SEC_CODE))
		    != 0)
		&& (osi.sec->flags & (SEC_HAS_CONTENTS | SEC_LINKER_CREATED))
		   == SEC_HAS_CONTENTS
		&& get_arm_elf_section_data (osi.sec) != NULL
		&& get_arm_elf_section_data (osi.sec)->mapcount == 0
		&& osi.sec->size > 0
		&& (osi.sec->flags & SEC_EXCLUDE) == 0)
	      {
		osi.sec_shndx = _bfd_elf_section_from_bfd_section
		  (output_bfd, osi.sec->output_section);
		if (osi.sec_shndx != (int)SHN_BAD)
		  elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 0);
	      }
	  }
    }

  /* ARM->Thumb glue.  */
  if (htab->arm_glue_size > 0)
    {
      osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
					ARM2THUMB_GLUE_SECTION_NAME);

      osi.sec_shndx = _bfd_elf_section_from_bfd_section
	(output_bfd, osi.sec->output_section);
      /* The per-veneer size depends on which glue flavour was used.  */
      if (info->shared || htab->root.is_relocatable_executable
	  || htab->pic_veneer)
	size = ARM2THUMB_PIC_GLUE_SIZE;
      else if (htab->use_blx)
	size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
      else
	size = ARM2THUMB_STATIC_GLUE_SIZE;

      /* Each veneer ends with a 4-byte data word ($d).  */
      for (offset = 0; offset < htab->arm_glue_size; offset += size)
	{
	  elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset);
	  elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, offset + size - 4);
	}
    }

  /* Thumb->ARM glue.  */
  if (htab->thumb_glue_size > 0)
    {
      osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
					THUMB2ARM_GLUE_SECTION_NAME);

      osi.sec_shndx = _bfd_elf_section_from_bfd_section
	(output_bfd, osi.sec->output_section);
      size = THUMB2ARM_GLUE_SIZE;

      /* Each veneer is 4 bytes of Thumb code followed by ARM code.  */
      for (offset = 0; offset < htab->thumb_glue_size; offset += size)
	{
	  elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, offset);
	  elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset + 4);
	}
    }

  /* ARMv4 BX veneers.  */
  if (htab->bx_glue_size > 0)
    {
      osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
					ARM_BX_GLUE_SECTION_NAME);

      osi.sec_shndx = _bfd_elf_section_from_bfd_section
	(output_bfd, osi.sec->output_section);

      elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0);
    }

  /* Long calls stubs.  */
  if (htab->stub_bfd && htab->stub_bfd->sections)
    {
      asection* stub_sec;

      for (stub_sec = htab->stub_bfd->sections;
	   stub_sec != NULL;
	   stub_sec = stub_sec->next)
	{
	  /* Ignore non-stub sections.  */
	  if (!strstr (stub_sec->name, STUB_SUFFIX))
	    continue;

	  osi.sec = stub_sec;

	  osi.sec_shndx = _bfd_elf_section_from_bfd_section
	    (output_bfd, osi.sec->output_section);

	  bfd_hash_traverse (&htab->stub_hash_table, arm_map_one_stub, &osi);
	}
    }

  /* Finally, output mapping symbols for the PLT.  */
  if (htab->root.splt && htab->root.splt->size > 0)
    {
      osi.sec = htab->root.splt;
      osi.sec_shndx = (_bfd_elf_section_from_bfd_section
		       (output_bfd, osi.sec->output_section));

      /* Output mapping symbols for the plt header.  SymbianOS does not have a
	 plt header.  */
      if (htab->vxworks_p)
	{
	  /* VxWorks shared libraries have no PLT header.  */
	  if (!info->shared)
	    {
	      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
		return FALSE;
	      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 12))
		return FALSE;
	    }
	}
      else if (htab->nacl_p)
	{
	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
	    return FALSE;
	}
      else if (!htab->symbian_p)
	{
	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
	    return FALSE;
#ifndef FOUR_WORD_PLT
	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 16))
	    return FALSE;
#endif
	}
    }
  if (htab->nacl_p && htab->root.iplt && htab->root.iplt->size > 0)
    {
      /* NaCl uses a special first entry in .iplt too.  */
      osi.sec = htab->root.iplt;
      osi.sec_shndx = (_bfd_elf_section_from_bfd_section
		       (output_bfd, osi.sec->output_section));
      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
	return FALSE;
    }
  if ((htab->root.splt && htab->root.splt->size > 0)
      || (htab->root.iplt && htab->root.iplt->size > 0))
    {
      /* Global PLT entries via the hash table, then local iplt
	 entries per input bfd.  */
      elf_link_hash_traverse (&htab->root, elf32_arm_output_plt_map, &osi);
      for (input_bfd = info->input_bfds;
	   input_bfd != NULL;
	   input_bfd = input_bfd->link_next)
	{
	  struct arm_local_iplt_info **local_iplt;
	  unsigned int i, num_syms;

	  local_iplt = elf32_arm_local_iplt (input_bfd);
	  if (local_iplt != NULL)
	    {
	      num_syms = elf_symtab_hdr (input_bfd).sh_info;
	      for (i = 0; i < num_syms; i++)
		if (local_iplt[i] != NULL
		    && !elf32_arm_output_plt_map_1 (&osi, TRUE,
						    &local_iplt[i]->root,
						    &local_iplt[i]->arm))
		  return FALSE;
	    }
	}
    }
  if (htab->dt_tlsdesc_plt != 0)
    {
      /* Mapping symbols for the lazy tls trampoline.  */
      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, htab->dt_tlsdesc_plt))
	return FALSE;

      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA,
				     htab->dt_tlsdesc_plt + 24))
	return FALSE;
    }
  if (htab->tls_trampoline != 0)
    {
      /* Mapping symbols for the tls trampoline.  */
      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, htab->tls_trampoline))
	return FALSE;
#ifdef FOUR_WORD_PLT
      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA,
				     htab->tls_trampoline + 12))
	return FALSE;
#endif
    }

  return TRUE;
}
15053
15054 /* Allocate target specific section data. */
15055
15056 static bfd_boolean
15057 elf32_arm_new_section_hook (bfd *abfd, asection *sec)
15058 {
15059 if (!sec->used_by_bfd)
15060 {
15061 _arm_elf_section_data *sdata;
15062 bfd_size_type amt = sizeof (*sdata);
15063
15064 sdata = (_arm_elf_section_data *) bfd_zalloc (abfd, amt);
15065 if (sdata == NULL)
15066 return FALSE;
15067 sec->used_by_bfd = sdata;
15068 }
15069
15070 return _bfd_elf_new_section_hook (abfd, sec);
15071 }
15072
15073
15074 /* Used to order a list of mapping symbols by address. */
15075
15076 static int
15077 elf32_arm_compare_mapping (const void * a, const void * b)
15078 {
15079 const elf32_arm_section_map *amap = (const elf32_arm_section_map *) a;
15080 const elf32_arm_section_map *bmap = (const elf32_arm_section_map *) b;
15081
15082 if (amap->vma > bmap->vma)
15083 return 1;
15084 else if (amap->vma < bmap->vma)
15085 return -1;
15086 else if (amap->type > bmap->type)
15087 /* Ensure results do not depend on the host qsort for objects with
15088 multiple mapping symbols at the same address by sorting on type
15089 after vma. */
15090 return 1;
15091 else if (amap->type < bmap->type)
15092 return -1;
15093 else
15094 return 0;
15095 }
15096
15097 /* Add OFFSET to lower 31 bits of ADDR, leaving other bits unmodified. */
15098
15099 static unsigned long
15100 offset_prel31 (unsigned long addr, bfd_vma offset)
15101 {
15102 return (addr & ~0x7ffffffful) | ((addr + offset) & 0x7ffffffful);
15103 }
15104
15105 /* Copy an .ARM.exidx table entry, adding OFFSET to (applied) PREL31
15106 relocations. */
15107
15108 static void
15109 copy_exidx_entry (bfd *output_bfd, bfd_byte *to, bfd_byte *from, bfd_vma offset)
15110 {
15111 unsigned long first_word = bfd_get_32 (output_bfd, from);
15112 unsigned long second_word = bfd_get_32 (output_bfd, from + 4);
15113
15114 /* High bit of first word is supposed to be zero. */
15115 if ((first_word & 0x80000000ul) == 0)
15116 first_word = offset_prel31 (first_word, offset);
15117
15118 /* If the high bit of the first word is clear, and the bit pattern is not 0x1
15119 (EXIDX_CANTUNWIND), this is an offset to an .ARM.extab entry. */
15120 if ((second_word != 0x1) && ((second_word & 0x80000000ul) == 0))
15121 second_word = offset_prel31 (second_word, offset);
15122
15123 bfd_put_32 (output_bfd, first_word, to);
15124 bfd_put_32 (output_bfd, second_word, to + 4);
15125 }
15126
/* Data for make_branch_to_a8_stub().  */

struct a8_branch_to_stub_data
{
  /* Section whose contents are currently being patched.  */
  asection *writing_section;
  /* In-memory contents of WRITING_SECTION, modified in place.  */
  bfd_byte *contents;
};
15134
15135
15136 /* Helper to insert branches to Cortex-A8 erratum stubs in the right
15137 places for a particular section. */
15138
15139 static bfd_boolean
15140 make_branch_to_a8_stub (struct bfd_hash_entry *gen_entry,
15141 void *in_arg)
15142 {
15143 struct elf32_arm_stub_hash_entry *stub_entry;
15144 struct a8_branch_to_stub_data *data;
15145 bfd_byte *contents;
15146 unsigned long branch_insn;
15147 bfd_vma veneered_insn_loc, veneer_entry_loc;
15148 bfd_signed_vma branch_offset;
15149 bfd *abfd;
15150 unsigned int target;
15151
15152 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
15153 data = (struct a8_branch_to_stub_data *) in_arg;
15154
15155 if (stub_entry->target_section != data->writing_section
15156 || stub_entry->stub_type < arm_stub_a8_veneer_lwm)
15157 return TRUE;
15158
15159 contents = data->contents;
15160
15161 veneered_insn_loc = stub_entry->target_section->output_section->vma
15162 + stub_entry->target_section->output_offset
15163 + stub_entry->target_value;
15164
15165 veneer_entry_loc = stub_entry->stub_sec->output_section->vma
15166 + stub_entry->stub_sec->output_offset
15167 + stub_entry->stub_offset;
15168
15169 if (stub_entry->stub_type == arm_stub_a8_veneer_blx)
15170 veneered_insn_loc &= ~3u;
15171
15172 branch_offset = veneer_entry_loc - veneered_insn_loc - 4;
15173
15174 abfd = stub_entry->target_section->owner;
15175 target = stub_entry->target_value;
15176
15177 /* We attempt to avoid this condition by setting stubs_always_after_branch
15178 in elf32_arm_size_stubs if we've enabled the Cortex-A8 erratum workaround.
15179 This check is just to be on the safe side... */
15180 if ((veneered_insn_loc & ~0xfff) == (veneer_entry_loc & ~0xfff))
15181 {
15182 (*_bfd_error_handler) (_("%B: error: Cortex-A8 erratum stub is "
15183 "allocated in unsafe location"), abfd);
15184 return FALSE;
15185 }
15186
15187 switch (stub_entry->stub_type)
15188 {
15189 case arm_stub_a8_veneer_b:
15190 case arm_stub_a8_veneer_b_cond:
15191 branch_insn = 0xf0009000;
15192 goto jump24;
15193
15194 case arm_stub_a8_veneer_blx:
15195 branch_insn = 0xf000e800;
15196 goto jump24;
15197
15198 case arm_stub_a8_veneer_bl:
15199 {
15200 unsigned int i1, j1, i2, j2, s;
15201
15202 branch_insn = 0xf000d000;
15203
15204 jump24:
15205 if (branch_offset < -16777216 || branch_offset > 16777214)
15206 {
15207 /* There's not much we can do apart from complain if this
15208 happens. */
15209 (*_bfd_error_handler) (_("%B: error: Cortex-A8 erratum stub out "
15210 "of range (input file too large)"), abfd);
15211 return FALSE;
15212 }
15213
15214 /* i1 = not(j1 eor s), so:
15215 not i1 = j1 eor s
15216 j1 = (not i1) eor s. */
15217
15218 branch_insn |= (branch_offset >> 1) & 0x7ff;
15219 branch_insn |= ((branch_offset >> 12) & 0x3ff) << 16;
15220 i2 = (branch_offset >> 22) & 1;
15221 i1 = (branch_offset >> 23) & 1;
15222 s = (branch_offset >> 24) & 1;
15223 j1 = (!i1) ^ s;
15224 j2 = (!i2) ^ s;
15225 branch_insn |= j2 << 11;
15226 branch_insn |= j1 << 13;
15227 branch_insn |= s << 26;
15228 }
15229 break;
15230
15231 default:
15232 BFD_FAIL ();
15233 return FALSE;
15234 }
15235
15236 bfd_put_16 (abfd, (branch_insn >> 16) & 0xffff, &contents[target]);
15237 bfd_put_16 (abfd, branch_insn & 0xffff, &contents[target + 2]);
15238
15239 return TRUE;
15240 }
15241
/* Do code byteswapping.  Return FALSE afterwards so that the section is
   written out as normal.

   This hook also patches VFP11 erratum branches/veneers, rewrites
   edited .ARM.exidx sections (returning TRUE in that case, since the
   contents have already been written), and fixes up branches to
   Cortex-A8 erratum stubs.  */

static bfd_boolean
elf32_arm_write_section (bfd *output_bfd,
			 struct bfd_link_info *link_info,
			 asection *sec,
			 bfd_byte *contents)
{
  unsigned int mapcount, errcount;
  _arm_elf_section_data *arm_data;
  struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
  elf32_arm_section_map *map;
  elf32_vfp11_erratum_list *errnode;
  bfd_vma ptr;
  bfd_vma end;
  bfd_vma offset = sec->output_section->vma + sec->output_offset;
  bfd_byte tmp;
  unsigned int i;

  if (globals == NULL)
    return FALSE;

  /* If this section has not been allocated an _arm_elf_section_data
     structure then we cannot record anything.  */
  arm_data = get_arm_elf_section_data (sec);
  if (arm_data == NULL)
    return FALSE;

  mapcount = arm_data->mapcount;
  map = arm_data->map;
  errcount = arm_data->erratumcount;

  /* Patch recorded VFP11 erratum sites in place.  */
  if (errcount != 0)
    {
      /* XOR trick to address bytes within a big-endian word.  */
      unsigned int endianflip = bfd_big_endian (output_bfd) ? 3 : 0;

      for (errnode = arm_data->erratumlist; errnode != 0;
	   errnode = errnode->next)
	{
	  bfd_vma target = errnode->vma - offset;

	  switch (errnode->type)
	    {
	    case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
	      {
		bfd_vma branch_to_veneer;
		/* Original condition code of instruction, plus bit mask for
		   ARM B instruction.  */
		unsigned int insn = (errnode->u.b.vfp_insn & 0xf0000000)
				  | 0x0a000000;

		/* The instruction is before the label.  */
		target -= 4;

		/* Above offset included in -4 below.  */
		branch_to_veneer = errnode->u.b.veneer->vma
				   - errnode->vma - 4;

		if ((signed) branch_to_veneer < -(1 << 25)
		    || (signed) branch_to_veneer >= (1 << 25))
		  (*_bfd_error_handler) (_("%B: error: VFP11 veneer out of "
					   "range"), output_bfd);

		insn |= (branch_to_veneer >> 2) & 0xffffff;
		contents[endianflip ^ target] = insn & 0xff;
		contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
		contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
		contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;
	      }
	      break;

	    case VFP11_ERRATUM_ARM_VENEER:
	      {
		bfd_vma branch_from_veneer;
		unsigned int insn;

		/* Take size of veneer into account.  */
		branch_from_veneer = errnode->u.v.branch->vma
				     - errnode->vma - 12;

		if ((signed) branch_from_veneer < -(1 << 25)
		    || (signed) branch_from_veneer >= (1 << 25))
		  (*_bfd_error_handler) (_("%B: error: VFP11 veneer out of "
					   "range"), output_bfd);

		/* Original instruction.  */
		insn = errnode->u.v.branch->u.b.vfp_insn;
		contents[endianflip ^ target] = insn & 0xff;
		contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
		contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
		contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;

		/* Branch back to insn after original insn.  */
		insn = 0xea000000 | ((branch_from_veneer >> 2) & 0xffffff);
		contents[endianflip ^ (target + 4)] = insn & 0xff;
		contents[endianflip ^ (target + 5)] = (insn >> 8) & 0xff;
		contents[endianflip ^ (target + 6)] = (insn >> 16) & 0xff;
		contents[endianflip ^ (target + 7)] = (insn >> 24) & 0xff;
	      }
	      break;

	    default:
	      abort ();
	    }
	}
    }

  /* Edited unwind tables are rebuilt into a fresh buffer and written
     out here directly.  */
  if (arm_data->elf.this_hdr.sh_type == SHT_ARM_EXIDX)
    {
      arm_unwind_table_edit *edit_node
	= arm_data->u.exidx.unwind_edit_list;
      /* Now, sec->size is the size of the section we will write.  The original
	 size (before we merged duplicate entries and inserted EXIDX_CANTUNWIND
	 markers) was sec->rawsize.  (This isn't the case if we perform no
	 edits, then rawsize will be zero and we should use size).  */
      bfd_byte *edited_contents = (bfd_byte *) bfd_malloc (sec->size);
      unsigned int input_size = sec->rawsize ? sec->rawsize : sec->size;
      unsigned int in_index, out_index;
      bfd_vma add_to_offsets = 0;

      /* Merge the input entries with the edit list; each entry is 8
	 bytes.  */
      for (in_index = 0, out_index = 0; in_index * 8 < input_size || edit_node;)
	{
	  if (edit_node)
	    {
	      unsigned int edit_index = edit_node->index;

	      if (in_index < edit_index && in_index * 8 < input_size)
		{
		  copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
				    contents + in_index * 8, add_to_offsets);
		  out_index++;
		  in_index++;
		}
	      else if (in_index == edit_index
		       || (in_index * 8 >= input_size
			   && edit_index == UINT_MAX))
		{
		  switch (edit_node->type)
		    {
		    case DELETE_EXIDX_ENTRY:
		      in_index++;
		      add_to_offsets += 8;
		      break;

		    case INSERT_EXIDX_CANTUNWIND_AT_END:
		      {
			asection *text_sec = edit_node->linked_section;
			bfd_vma text_offset = text_sec->output_section->vma
					      + text_sec->output_offset
					      + text_sec->size;
			bfd_vma exidx_offset = offset + out_index * 8;
			unsigned long prel31_offset;

			/* Note: this is meant to be equivalent to an
			   R_ARM_PREL31 relocation.  These synthetic
			   EXIDX_CANTUNWIND markers are not relocated by the
			   usual BFD method.  */
			prel31_offset = (text_offset - exidx_offset)
					& 0x7ffffffful;

			/* First address we can't unwind.  */
			bfd_put_32 (output_bfd, prel31_offset,
				    &edited_contents[out_index * 8]);

			/* Code for EXIDX_CANTUNWIND.  */
			bfd_put_32 (output_bfd, 0x1,
				    &edited_contents[out_index * 8 + 4]);

			out_index++;
			add_to_offsets -= 8;
		      }
		      break;
		    }

		  edit_node = edit_node->next;
		}
	    }
	  else
	    {
	      /* No more edits, copy remaining entries verbatim.  */
	      copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
				contents + in_index * 8, add_to_offsets);
	      out_index++;
	      in_index++;
	    }
	}

      if (!(sec->flags & SEC_EXCLUDE) && !(sec->flags & SEC_NEVER_LOAD))
	bfd_set_section_contents (output_bfd, sec->output_section,
				  edited_contents,
				  (file_ptr) sec->output_offset, sec->size);

      /* TRUE: the section has been written; skip the generic path.  */
      return TRUE;
    }

  /* Fix code to point to Cortex-A8 erratum stubs.  */
  if (globals->fix_cortex_a8)
    {
      struct a8_branch_to_stub_data data;

      data.writing_section = sec;
      data.contents = contents;

      bfd_hash_traverse (&globals->stub_hash_table, make_branch_to_a8_stub,
			 &data);
    }

  if (mapcount == 0)
    return FALSE;

  if (globals->byteswap_code)
    {
      qsort (map, mapcount, sizeof (* map), elf32_arm_compare_mapping);

      /* Swap the bytes of each code region according to its mapping
	 symbol; data regions ($d) are left untouched.  */
      ptr = map[0].vma;
      for (i = 0; i < mapcount; i++)
	{
	  if (i == mapcount - 1)
	    end = sec->size;
	  else
	    end = map[i + 1].vma;

	  switch (map[i].type)
	    {
	    case 'a':
	      /* Byte swap code words.  */
	      while (ptr + 3 < end)
		{
		  tmp = contents[ptr];
		  contents[ptr] = contents[ptr + 3];
		  contents[ptr + 3] = tmp;
		  tmp = contents[ptr + 1];
		  contents[ptr + 1] = contents[ptr + 2];
		  contents[ptr + 2] = tmp;
		  ptr += 4;
		}
	      break;

	    case 't':
	      /* Byte swap code halfwords.  */
	      while (ptr + 1 < end)
		{
		  tmp = contents[ptr];
		  contents[ptr] = contents[ptr + 1];
		  contents[ptr + 1] = tmp;
		  ptr += 2;
		}
	      break;

	    case 'd':
	      /* Leave data alone.  */
	      break;
	    }
	  ptr = end;
	}
    }

  /* Release the map and mark it as consumed.  */
  free (map);
  arm_data->mapcount = -1;
  arm_data->mapsize = 0;
  arm_data->map = NULL;

  return FALSE;
}
15507
15508 /* Mangle thumb function symbols as we read them in. */
15509
15510 static bfd_boolean
15511 elf32_arm_swap_symbol_in (bfd * abfd,
15512 const void *psrc,
15513 const void *pshn,
15514 Elf_Internal_Sym *dst)
15515 {
15516 if (!bfd_elf32_swap_symbol_in (abfd, psrc, pshn, dst))
15517 return FALSE;
15518
15519 /* New EABI objects mark thumb function symbols by setting the low bit of
15520 the address. */
15521 if (ELF_ST_TYPE (dst->st_info) == STT_FUNC
15522 || ELF_ST_TYPE (dst->st_info) == STT_GNU_IFUNC)
15523 {
15524 if (dst->st_value & 1)
15525 {
15526 dst->st_value &= ~(bfd_vma) 1;
15527 dst->st_target_internal = ST_BRANCH_TO_THUMB;
15528 }
15529 else
15530 dst->st_target_internal = ST_BRANCH_TO_ARM;
15531 }
15532 else if (ELF_ST_TYPE (dst->st_info) == STT_ARM_TFUNC)
15533 {
15534 dst->st_info = ELF_ST_INFO (ELF_ST_BIND (dst->st_info), STT_FUNC);
15535 dst->st_target_internal = ST_BRANCH_TO_THUMB;
15536 }
15537 else if (ELF_ST_TYPE (dst->st_info) == STT_SECTION)
15538 dst->st_target_internal = ST_BRANCH_LONG;
15539 else
15540 dst->st_target_internal = ST_BRANCH_UNKNOWN;
15541
15542 return TRUE;
15543 }
15544
15545
15546 /* Mangle thumb function symbols as we write them out. */
15547
15548 static void
15549 elf32_arm_swap_symbol_out (bfd *abfd,
15550 const Elf_Internal_Sym *src,
15551 void *cdst,
15552 void *shndx)
15553 {
15554 Elf_Internal_Sym newsym;
15555
15556 /* We convert STT_ARM_TFUNC symbols into STT_FUNC with the low bit
15557 of the address set, as per the new EABI. We do this unconditionally
15558 because objcopy does not set the elf header flags until after
15559 it writes out the symbol table. */
15560 if (src->st_target_internal == ST_BRANCH_TO_THUMB)
15561 {
15562 newsym = *src;
15563 if (ELF_ST_TYPE (src->st_info) != STT_GNU_IFUNC)
15564 newsym.st_info = ELF_ST_INFO (ELF_ST_BIND (src->st_info), STT_FUNC);
15565 if (newsym.st_shndx != SHN_UNDEF)
15566 {
15567 /* Do this only for defined symbols. At link type, the static
15568 linker will simulate the work of dynamic linker of resolving
15569 symbols and will carry over the thumbness of found symbols to
15570 the output symbol table. It's not clear how it happens, but
15571 the thumbness of undefined symbols can well be different at
15572 runtime, and writing '1' for them will be confusing for users
15573 and possibly for dynamic linker itself.
15574 */
15575 newsym.st_value |= 1;
15576 }
15577
15578 src = &newsym;
15579 }
15580 bfd_elf32_swap_symbol_out (abfd, src, cdst, shndx);
15581 }
15582
15583 /* Add the PT_ARM_EXIDX program header. */
15584
15585 static bfd_boolean
15586 elf32_arm_modify_segment_map (bfd *abfd,
15587 struct bfd_link_info *info ATTRIBUTE_UNUSED)
15588 {
15589 struct elf_segment_map *m;
15590 asection *sec;
15591
15592 sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
15593 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
15594 {
15595 /* If there is already a PT_ARM_EXIDX header, then we do not
15596 want to add another one. This situation arises when running
15597 "strip"; the input binary already has the header. */
15598 m = elf_seg_map (abfd);
15599 while (m && m->p_type != PT_ARM_EXIDX)
15600 m = m->next;
15601 if (!m)
15602 {
15603 m = (struct elf_segment_map *)
15604 bfd_zalloc (abfd, sizeof (struct elf_segment_map));
15605 if (m == NULL)
15606 return FALSE;
15607 m->p_type = PT_ARM_EXIDX;
15608 m->count = 1;
15609 m->sections[0] = sec;
15610
15611 m->next = elf_seg_map (abfd);
15612 elf_seg_map (abfd) = m;
15613 }
15614 }
15615
15616 return TRUE;
15617 }
15618
15619 /* We may add a PT_ARM_EXIDX program header. */
15620
15621 static int
15622 elf32_arm_additional_program_headers (bfd *abfd,
15623 struct bfd_link_info *info ATTRIBUTE_UNUSED)
15624 {
15625 asection *sec;
15626
15627 sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
15628 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
15629 return 1;
15630 else
15631 return 0;
15632 }
15633
15634 /* Hook called by the linker routine which adds symbols from an object
15635 file. */
15636
15637 static bfd_boolean
15638 elf32_arm_add_symbol_hook (bfd *abfd, struct bfd_link_info *info,
15639 Elf_Internal_Sym *sym, const char **namep,
15640 flagword *flagsp, asection **secp, bfd_vma *valp)
15641 {
15642 if ((abfd->flags & DYNAMIC) == 0
15643 && (ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC
15644 || ELF_ST_BIND (sym->st_info) == STB_GNU_UNIQUE))
15645 elf_tdata (info->output_bfd)->has_gnu_symbols = TRUE;
15646
15647 if (elf32_arm_hash_table (info)->vxworks_p
15648 && !elf_vxworks_add_symbol_hook (abfd, info, sym, namep,
15649 flagsp, secp, valp))
15650 return FALSE;
15651
15652 return TRUE;
15653 }
15654
/* We use this to override swap_symbol_in and swap_symbol_out.
   Field order follows struct elf_size_info in elf-bfd.h -- the
   per-field comments below name the member each positional
   initializer fills.  */
const struct elf_size_info elf32_arm_size_info =
{
  sizeof (Elf32_External_Ehdr),		/* sizeof_ehdr.  */
  sizeof (Elf32_External_Phdr),		/* sizeof_phdr.  */
  sizeof (Elf32_External_Shdr),		/* sizeof_shdr.  */
  sizeof (Elf32_External_Rel),		/* sizeof_rel.  */
  sizeof (Elf32_External_Rela),		/* sizeof_rela.  */
  sizeof (Elf32_External_Sym),		/* sizeof_sym.  */
  sizeof (Elf32_External_Dyn),		/* sizeof_dyn.  */
  sizeof (Elf_External_Note),		/* sizeof_note.  */
  4,					/* sizeof_hash_entry.  */
  1,					/* int_rels_per_ext_rel.  */
  32, 2,				/* arch_size, log_file_align.  */
  ELFCLASS32, EV_CURRENT,		/* elfclass, ev_current.  */
  bfd_elf32_write_out_phdrs,
  bfd_elf32_write_shdrs_and_ehdr,
  bfd_elf32_checksum_contents,
  bfd_elf32_write_relocs,
  elf32_arm_swap_symbol_in,		/* ARM-specific: strips Thumb bit.  */
  elf32_arm_swap_symbol_out,		/* ARM-specific: restores Thumb bit.  */
  bfd_elf32_slurp_reloc_table,
  bfd_elf32_slurp_symbol_table,
  bfd_elf32_swap_dyn_in,
  bfd_elf32_swap_dyn_out,
  bfd_elf32_swap_reloc_in,
  bfd_elf32_swap_reloc_out,
  bfd_elf32_swap_reloca_in,
  bfd_elf32_swap_reloca_out
};
15685
15686 #define ELF_ARCH bfd_arch_arm
15687 #define ELF_TARGET_ID ARM_ELF_DATA
15688 #define ELF_MACHINE_CODE EM_ARM
15689 #ifdef __QNXTARGET__
15690 #define ELF_MAXPAGESIZE 0x1000
15691 #else
15692 #define ELF_MAXPAGESIZE 0x8000
15693 #endif
15694 #define ELF_MINPAGESIZE 0x1000
15695 #define ELF_COMMONPAGESIZE 0x1000
15696
15697 #define bfd_elf32_mkobject elf32_arm_mkobject
15698
15699 #define bfd_elf32_bfd_copy_private_bfd_data elf32_arm_copy_private_bfd_data
15700 #define bfd_elf32_bfd_merge_private_bfd_data elf32_arm_merge_private_bfd_data
15701 #define bfd_elf32_bfd_set_private_flags elf32_arm_set_private_flags
15702 #define bfd_elf32_bfd_print_private_bfd_data elf32_arm_print_private_bfd_data
15703 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_link_hash_table_create
15704 #define bfd_elf32_bfd_link_hash_table_free elf32_arm_hash_table_free
15705 #define bfd_elf32_bfd_reloc_type_lookup elf32_arm_reloc_type_lookup
15706 #define bfd_elf32_bfd_reloc_name_lookup elf32_arm_reloc_name_lookup
15707 #define bfd_elf32_find_nearest_line elf32_arm_find_nearest_line
15708 #define bfd_elf32_find_inliner_info elf32_arm_find_inliner_info
15709 #define bfd_elf32_new_section_hook elf32_arm_new_section_hook
15710 #define bfd_elf32_bfd_is_target_special_symbol elf32_arm_is_target_special_symbol
15711 #define bfd_elf32_bfd_final_link elf32_arm_final_link
15712
15713 #define elf_backend_get_symbol_type elf32_arm_get_symbol_type
15714 #define elf_backend_gc_mark_hook elf32_arm_gc_mark_hook
15715 #define elf_backend_gc_mark_extra_sections elf32_arm_gc_mark_extra_sections
15716 #define elf_backend_gc_sweep_hook elf32_arm_gc_sweep_hook
15717 #define elf_backend_check_relocs elf32_arm_check_relocs
15718 #define elf_backend_relocate_section elf32_arm_relocate_section
15719 #define elf_backend_write_section elf32_arm_write_section
15720 #define elf_backend_adjust_dynamic_symbol elf32_arm_adjust_dynamic_symbol
15721 #define elf_backend_create_dynamic_sections elf32_arm_create_dynamic_sections
15722 #define elf_backend_finish_dynamic_symbol elf32_arm_finish_dynamic_symbol
15723 #define elf_backend_finish_dynamic_sections elf32_arm_finish_dynamic_sections
15724 #define elf_backend_size_dynamic_sections elf32_arm_size_dynamic_sections
15725 #define elf_backend_always_size_sections elf32_arm_always_size_sections
15726 #define elf_backend_init_index_section _bfd_elf_init_2_index_sections
15727 #define elf_backend_post_process_headers elf32_arm_post_process_headers
15728 #define elf_backend_reloc_type_class elf32_arm_reloc_type_class
15729 #define elf_backend_object_p elf32_arm_object_p
15730 #define elf_backend_fake_sections elf32_arm_fake_sections
15731 #define elf_backend_section_from_shdr elf32_arm_section_from_shdr
15732 #define elf_backend_final_write_processing elf32_arm_final_write_processing
15733 #define elf_backend_copy_indirect_symbol elf32_arm_copy_indirect_symbol
15734 #define elf_backend_size_info elf32_arm_size_info
15735 #define elf_backend_modify_segment_map elf32_arm_modify_segment_map
15736 #define elf_backend_additional_program_headers elf32_arm_additional_program_headers
15737 #define elf_backend_output_arch_local_syms elf32_arm_output_arch_local_syms
15738 #define elf_backend_begin_write_processing elf32_arm_begin_write_processing
15739 #define elf_backend_add_symbol_hook elf32_arm_add_symbol_hook
15740
15741 #define elf_backend_can_refcount 1
15742 #define elf_backend_can_gc_sections 1
15743 #define elf_backend_plt_readonly 1
15744 #define elf_backend_want_got_plt 1
15745 #define elf_backend_want_plt_sym 0
15746 #define elf_backend_may_use_rel_p 1
15747 #define elf_backend_may_use_rela_p 0
15748 #define elf_backend_default_use_rela_p 0
15749
15750 #define elf_backend_got_header_size 12
15751
15752 #undef elf_backend_obj_attrs_vendor
15753 #define elf_backend_obj_attrs_vendor "aeabi"
15754 #undef elf_backend_obj_attrs_section
15755 #define elf_backend_obj_attrs_section ".ARM.attributes"
15756 #undef elf_backend_obj_attrs_arg_type
15757 #define elf_backend_obj_attrs_arg_type elf32_arm_obj_attrs_arg_type
15758 #undef elf_backend_obj_attrs_section_type
15759 #define elf_backend_obj_attrs_section_type SHT_ARM_ATTRIBUTES
15760 #define elf_backend_obj_attrs_order elf32_arm_obj_attrs_order
15761 #define elf_backend_obj_attrs_handle_unknown elf32_arm_obj_attrs_handle_unknown
15762
15763 #include "elf32-target.h"
15764
15765 /* Native Client targets. */
15766
15767 #undef TARGET_LITTLE_SYM
15768 #define TARGET_LITTLE_SYM bfd_elf32_littlearm_nacl_vec
15769 #undef TARGET_LITTLE_NAME
15770 #define TARGET_LITTLE_NAME "elf32-littlearm-nacl"
15771 #undef TARGET_BIG_SYM
15772 #define TARGET_BIG_SYM bfd_elf32_bigarm_nacl_vec
15773 #undef TARGET_BIG_NAME
15774 #define TARGET_BIG_NAME "elf32-bigarm-nacl"
15775
15776 /* Like elf32_arm_link_hash_table_create -- but overrides
15777 appropriately for NaCl. */
15778
15779 static struct bfd_link_hash_table *
15780 elf32_arm_nacl_link_hash_table_create (bfd *abfd)
15781 {
15782 struct bfd_link_hash_table *ret;
15783
15784 ret = elf32_arm_link_hash_table_create (abfd);
15785 if (ret)
15786 {
15787 struct elf32_arm_link_hash_table *htab
15788 = (struct elf32_arm_link_hash_table *) ret;
15789
15790 htab->nacl_p = 1;
15791
15792 htab->plt_header_size = 4 * ARRAY_SIZE (elf32_arm_nacl_plt0_entry);
15793 htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_arm_nacl_plt_entry);
15794 }
15795 return ret;
15796 }
15797
15798 /* Since NaCl doesn't use the ARM-specific unwind format, we don't
15799 really need to use elf32_arm_modify_segment_map. But we do it
15800 anyway just to reduce gratuitous differences with the stock ARM backend. */
15801
15802 static bfd_boolean
15803 elf32_arm_nacl_modify_segment_map (bfd *abfd, struct bfd_link_info *info)
15804 {
15805 return (elf32_arm_modify_segment_map (abfd, info)
15806 && nacl_modify_segment_map (abfd, info));
15807 }
15808
15809 #undef elf32_bed
15810 #define elf32_bed elf32_arm_nacl_bed
15811 #undef bfd_elf32_bfd_link_hash_table_create
15812 #define bfd_elf32_bfd_link_hash_table_create \
15813 elf32_arm_nacl_link_hash_table_create
15814 #undef elf_backend_plt_alignment
15815 #define elf_backend_plt_alignment 4
15816 #undef elf_backend_modify_segment_map
15817 #define elf_backend_modify_segment_map elf32_arm_nacl_modify_segment_map
15818 #undef elf_backend_modify_program_headers
15819 #define elf_backend_modify_program_headers nacl_modify_program_headers
15820
15821 #undef ELF_MAXPAGESIZE
15822 #define ELF_MAXPAGESIZE 0x10000
15823
15824 #include "elf32-target.h"
15825
15826 /* Reset to defaults. */
15827 #undef elf_backend_plt_alignment
15828 #undef elf_backend_modify_segment_map
15829 #define elf_backend_modify_segment_map elf32_arm_modify_segment_map
15830 #undef elf_backend_modify_program_headers
15831
15832 /* VxWorks Targets. */
15833
15834 #undef TARGET_LITTLE_SYM
15835 #define TARGET_LITTLE_SYM bfd_elf32_littlearm_vxworks_vec
15836 #undef TARGET_LITTLE_NAME
15837 #define TARGET_LITTLE_NAME "elf32-littlearm-vxworks"
15838 #undef TARGET_BIG_SYM
15839 #define TARGET_BIG_SYM bfd_elf32_bigarm_vxworks_vec
15840 #undef TARGET_BIG_NAME
15841 #define TARGET_BIG_NAME "elf32-bigarm-vxworks"
15842
15843 /* Like elf32_arm_link_hash_table_create -- but overrides
15844 appropriately for VxWorks. */
15845
15846 static struct bfd_link_hash_table *
15847 elf32_arm_vxworks_link_hash_table_create (bfd *abfd)
15848 {
15849 struct bfd_link_hash_table *ret;
15850
15851 ret = elf32_arm_link_hash_table_create (abfd);
15852 if (ret)
15853 {
15854 struct elf32_arm_link_hash_table *htab
15855 = (struct elf32_arm_link_hash_table *) ret;
15856 htab->use_rel = 0;
15857 htab->vxworks_p = 1;
15858 }
15859 return ret;
15860 }
15861
/* Final-write hook for VxWorks targets: run the generic ARM
   processing first, then the VxWorks-specific processing.  */

static void
elf32_arm_vxworks_final_write_processing (bfd *abfd, bfd_boolean linker)
{
  elf32_arm_final_write_processing (abfd, linker);
  elf_vxworks_final_write_processing (abfd, linker);
}
15868
15869 #undef elf32_bed
15870 #define elf32_bed elf32_arm_vxworks_bed
15871
15872 #undef bfd_elf32_bfd_link_hash_table_create
15873 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_vxworks_link_hash_table_create
15874 #undef elf_backend_final_write_processing
15875 #define elf_backend_final_write_processing elf32_arm_vxworks_final_write_processing
15876 #undef elf_backend_emit_relocs
15877 #define elf_backend_emit_relocs elf_vxworks_emit_relocs
15878
15879 #undef elf_backend_may_use_rel_p
15880 #define elf_backend_may_use_rel_p 0
15881 #undef elf_backend_may_use_rela_p
15882 #define elf_backend_may_use_rela_p 1
15883 #undef elf_backend_default_use_rela_p
15884 #define elf_backend_default_use_rela_p 1
15885 #undef elf_backend_want_plt_sym
15886 #define elf_backend_want_plt_sym 1
15887 #undef ELF_MAXPAGESIZE
15888 #define ELF_MAXPAGESIZE 0x1000
15889
15890 #include "elf32-target.h"
15891
15892
/* Merge backend specific data from an object file to the output
   object file when linking.  Returns FALSE on a hard error
   (endianness mismatch, attribute merge failure, BE8 input, EABI
   version mismatch) or when any of the APCS/FP flag checks below
   find an incompatibility; interworking differences only produce a
   warning.  */

static bfd_boolean
elf32_arm_merge_private_bfd_data (bfd * ibfd, bfd * obfd)
{
  flagword out_flags;
  flagword in_flags;
  bfd_boolean flags_compatible = TRUE;
  asection *sec;

  /* Check if we have the same endianness.  */
  if (! _bfd_generic_verify_endian_match (ibfd, obfd))
    return FALSE;

  /* Nothing to merge unless both sides are ARM ELF.  */
  if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
    return TRUE;

  /* Merge the EABI build attributes (.ARM.attributes) first.  */
  if (!elf32_arm_merge_eabi_attributes (ibfd, obfd))
    return FALSE;

  /* The input BFD must have had its flags initialised.  */
  /* The following seems bogus to me -- The flags are initialized in
     the assembler but I don't think an elf_flags_init field is
     written into the object.  */
  /* BFD_ASSERT (elf_flags_init (ibfd)); */

  in_flags = elf_elfheader (ibfd)->e_flags;
  out_flags = elf_elfheader (obfd)->e_flags;

  /* In theory there is no reason why we couldn't handle this.  However
     in practice it isn't even close to working and there is no real
     reason to want it.  */
  if (EF_ARM_EABI_VERSION (in_flags) >= EF_ARM_EABI_VER4
      && !(ibfd->flags & DYNAMIC)
      && (in_flags & EF_ARM_BE8))
    {
      _bfd_error_handler (_("error: %B is already in final BE8 format"),
			  ibfd);
      return FALSE;
    }

  if (!elf_flags_init (obfd))
    {
      /* If the input is the default architecture and had the default
	 flags then do not bother setting the flags for the output
	 architecture, instead allow future merges to do this.  If no
	 future merges ever set these flags then they will retain their
	 uninitialised values, which surprise surprise, correspond
	 to the default values.  */
      if (bfd_get_arch_info (ibfd)->the_default
	  && elf_elfheader (ibfd)->e_flags == 0)
	return TRUE;

      elf_flags_init (obfd) = TRUE;
      elf_elfheader (obfd)->e_flags = in_flags;

      if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
	  && bfd_get_arch_info (obfd)->the_default)
	return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd), bfd_get_mach (ibfd));

      return TRUE;
    }

  /* Determine what should happen if the input ARM architecture
     does not match the output ARM architecture.  */
  if (! bfd_arm_merge_machines (ibfd, obfd))
    return FALSE;

  /* Identical flags must be compatible.  */
  if (in_flags == out_flags)
    return TRUE;

  /* Check to see if the input BFD actually contains any sections.  If
     not, its flags may not have been initialised either, but it
     cannot actually cause any incompatibility.  Do not short-circuit
     dynamic objects; their section list may be emptied by
     elf_link_add_object_symbols.

     Also check to see if there are no code sections in the input.
     In this case there is no need to check for code specific flags.
     XXX - do we need to worry about floating-point format compatibility
     in data sections ?  */
  if (!(ibfd->flags & DYNAMIC))
    {
      bfd_boolean null_input_bfd = TRUE;
      bfd_boolean only_data_sections = TRUE;

      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	{
	  /* Ignore synthetic glue sections.  */
	  if (strcmp (sec->name, ".glue_7")
	      && strcmp (sec->name, ".glue_7t"))
	    {
	      if ((bfd_get_section_flags (ibfd, sec)
		   & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
		  == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
		only_data_sections = FALSE;

	      null_input_bfd = FALSE;
	      break;
	    }
	}
      /* NOTE(review): the loop above breaks at the first non-glue
	 section, so only_data_sections reflects that section alone --
	 confirm this first-section heuristic is intentional.  */

      if (null_input_bfd || only_data_sections)
	return TRUE;
    }

  /* Complain about various flag mismatches.  */
  if (!elf32_arm_versions_compatible (EF_ARM_EABI_VERSION (in_flags),
				      EF_ARM_EABI_VERSION (out_flags)))
    {
      _bfd_error_handler
	(_("error: Source object %B has EABI version %d, but target %B has EABI version %d"),
	 ibfd, obfd,
	 (in_flags & EF_ARM_EABIMASK) >> 24,
	 (out_flags & EF_ARM_EABIMASK) >> 24);
      return FALSE;
    }

  /* Not sure what needs to be checked for EABI versions >= 1.  */
  /* VxWorks libraries do not use these flags.  */
  if (get_elf_backend_data (obfd) != &elf32_arm_vxworks_bed
      && get_elf_backend_data (ibfd) != &elf32_arm_vxworks_bed
      && EF_ARM_EABI_VERSION (in_flags) == EF_ARM_EABI_UNKNOWN)
    {
      /* APCS-26 versus APCS-32 program-counter size.  */
      if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
	{
	  _bfd_error_handler
	    (_("error: %B is compiled for APCS-%d, whereas target %B uses APCS-%d"),
	     ibfd, obfd,
	     in_flags & EF_ARM_APCS_26 ? 26 : 32,
	     out_flags & EF_ARM_APCS_26 ? 26 : 32);
	  flags_compatible = FALSE;
	}

      /* Float arguments must travel the same way on both sides.  */
      if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
	{
	  if (in_flags & EF_ARM_APCS_FLOAT)
	    _bfd_error_handler
	      (_("error: %B passes floats in float registers, whereas %B passes them in integer registers"),
	       ibfd, obfd);
	  else
	    _bfd_error_handler
	      (_("error: %B passes floats in integer registers, whereas %B passes them in float registers"),
	       ibfd, obfd);

	  flags_compatible = FALSE;
	}

      /* VFP versus FPA floating-point instruction sets.  */
      if ((in_flags & EF_ARM_VFP_FLOAT) != (out_flags & EF_ARM_VFP_FLOAT))
	{
	  if (in_flags & EF_ARM_VFP_FLOAT)
	    _bfd_error_handler
	      (_("error: %B uses VFP instructions, whereas %B does not"),
	       ibfd, obfd);
	  else
	    _bfd_error_handler
	      (_("error: %B uses FPA instructions, whereas %B does not"),
	       ibfd, obfd);

	  flags_compatible = FALSE;
	}

      /* Maverick (Cirrus) co-processor usage must agree.  */
      if ((in_flags & EF_ARM_MAVERICK_FLOAT) != (out_flags & EF_ARM_MAVERICK_FLOAT))
	{
	  if (in_flags & EF_ARM_MAVERICK_FLOAT)
	    _bfd_error_handler
	      (_("error: %B uses Maverick instructions, whereas %B does not"),
	       ibfd, obfd);
	  else
	    _bfd_error_handler
	      (_("error: %B does not use Maverick instructions, whereas %B does"),
	       ibfd, obfd);

	  flags_compatible = FALSE;
	}

#ifdef EF_ARM_SOFT_FLOAT
      if ((in_flags & EF_ARM_SOFT_FLOAT) != (out_flags & EF_ARM_SOFT_FLOAT))
	{
	  /* We can allow interworking between code that is VFP format
	     layout, and uses either soft float or integer regs for
	     passing floating point arguments and results.  We already
	     know that the APCS_FLOAT flags match; similarly for VFP
	     flags.  */
	  if ((in_flags & EF_ARM_APCS_FLOAT) != 0
	      || (in_flags & EF_ARM_VFP_FLOAT) == 0)
	    {
	      if (in_flags & EF_ARM_SOFT_FLOAT)
		_bfd_error_handler
		  (_("error: %B uses software FP, whereas %B uses hardware FP"),
		   ibfd, obfd);
	      else
		_bfd_error_handler
		  (_("error: %B uses hardware FP, whereas %B uses software FP"),
		   ibfd, obfd);

	      flags_compatible = FALSE;
	    }
	}
#endif

      /* Interworking mismatch is only a warning.  */
      if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
	{
	  if (in_flags & EF_ARM_INTERWORK)
	    {
	      _bfd_error_handler
		(_("Warning: %B supports interworking, whereas %B does not"),
		 ibfd, obfd);
	    }
	  else
	    {
	      _bfd_error_handler
		(_("Warning: %B does not support interworking, whereas %B does"),
		 ibfd, obfd);
	    }
	}
    }

  return flags_compatible;
}
16116
16117
16118 /* Symbian OS Targets. */
16119
16120 #undef TARGET_LITTLE_SYM
16121 #define TARGET_LITTLE_SYM bfd_elf32_littlearm_symbian_vec
16122 #undef TARGET_LITTLE_NAME
16123 #define TARGET_LITTLE_NAME "elf32-littlearm-symbian"
16124 #undef TARGET_BIG_SYM
16125 #define TARGET_BIG_SYM bfd_elf32_bigarm_symbian_vec
16126 #undef TARGET_BIG_NAME
16127 #define TARGET_BIG_NAME "elf32-bigarm-symbian"
16128
16129 /* Like elf32_arm_link_hash_table_create -- but overrides
16130 appropriately for Symbian OS. */
16131
16132 static struct bfd_link_hash_table *
16133 elf32_arm_symbian_link_hash_table_create (bfd *abfd)
16134 {
16135 struct bfd_link_hash_table *ret;
16136
16137 ret = elf32_arm_link_hash_table_create (abfd);
16138 if (ret)
16139 {
16140 struct elf32_arm_link_hash_table *htab
16141 = (struct elf32_arm_link_hash_table *)ret;
16142 /* There is no PLT header for Symbian OS. */
16143 htab->plt_header_size = 0;
16144 /* The PLT entries are each one instruction and one word. */
16145 htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry);
16146 htab->symbian_p = 1;
16147 /* Symbian uses armv5t or above, so use_blx is always true. */
16148 htab->use_blx = 1;
16149 htab->root.is_relocatable_executable = 1;
16150 }
16151 return ret;
16152 }
16153
/* Special-section table for Symbian OS; each entry gives a section
   name prefix, its required SHT_* type and SHF_* attributes.  The
   list must end with a NULL-name terminator entry.  */
static const struct bfd_elf_special_section
elf32_arm_symbian_special_sections[] =
{
  /* In a BPABI executable, the dynamic linking sections do not go in
     the loadable read-only segment.  The post-linker may wish to
     refer to these sections, but they are not part of the final
     program image.  */
  { STRING_COMMA_LEN (".dynamic"),       0, SHT_DYNAMIC,  0 },
  { STRING_COMMA_LEN (".dynstr"),        0, SHT_STRTAB,   0 },
  { STRING_COMMA_LEN (".dynsym"),        0, SHT_DYNSYM,   0 },
  { STRING_COMMA_LEN (".got"),           0, SHT_PROGBITS, 0 },
  { STRING_COMMA_LEN (".hash"),          0, SHT_HASH,     0 },
  /* These sections do not need to be writable as the SymbianOS
     postlinker will arrange things so that no dynamic relocation is
     required.  */
  { STRING_COMMA_LEN (".init_array"),    0, SHT_INIT_ARRAY,    SHF_ALLOC },
  { STRING_COMMA_LEN (".fini_array"),    0, SHT_FINI_ARRAY,    SHF_ALLOC },
  { STRING_COMMA_LEN (".preinit_array"), 0, SHT_PREINIT_ARRAY, SHF_ALLOC },
  { NULL,                             0, 0, 0,            0 }
};
16174
/* Begin-write hook for Symbian OS: clear D_PAGED before the generic
   ARM processing runs, so BFD does not page-align segments.  */

static void
elf32_arm_symbian_begin_write_processing (bfd *abfd,
					  struct bfd_link_info *link_info)
{
  /* BPABI objects are never loaded directly by an OS kernel; they are
     processed by a postlinker first, into an OS-specific format.  If
     the D_PAGED bit is set on the file, BFD will align segments on
     page boundaries, so that an OS can directly map the file.  With
     BPABI objects, that just results in wasted space.  In addition,
     because we clear the D_PAGED bit, map_sections_to_segments will
     recognize that the program headers should not be mapped into any
     loadable segment.  */
  abfd->flags &= ~D_PAGED;
  elf32_arm_begin_write_processing (abfd, link_info);
}
16190
16191 static bfd_boolean
16192 elf32_arm_symbian_modify_segment_map (bfd *abfd,
16193 struct bfd_link_info *info)
16194 {
16195 struct elf_segment_map *m;
16196 asection *dynsec;
16197
16198 /* BPABI shared libraries and executables should have a PT_DYNAMIC
16199 segment. However, because the .dynamic section is not marked
16200 with SEC_LOAD, the generic ELF code will not create such a
16201 segment. */
16202 dynsec = bfd_get_section_by_name (abfd, ".dynamic");
16203 if (dynsec)
16204 {
16205 for (m = elf_seg_map (abfd); m != NULL; m = m->next)
16206 if (m->p_type == PT_DYNAMIC)
16207 break;
16208
16209 if (m == NULL)
16210 {
16211 m = _bfd_elf_make_dynamic_segment (abfd, dynsec);
16212 m->next = elf_seg_map (abfd);
16213 elf_seg_map (abfd) = m;
16214 }
16215 }
16216
16217 /* Also call the generic arm routine. */
16218 return elf32_arm_modify_segment_map (abfd, info);
16219 }
16220
16221 /* Return address for Ith PLT stub in section PLT, for relocation REL
16222 or (bfd_vma) -1 if it should not be included. */
16223
16224 static bfd_vma
16225 elf32_arm_symbian_plt_sym_val (bfd_vma i, const asection *plt,
16226 const arelent *rel ATTRIBUTE_UNUSED)
16227 {
16228 return plt->vma + 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry) * i;
16229 }
16230
16231
16232 #undef elf32_bed
16233 #define elf32_bed elf32_arm_symbian_bed
16234
16235 /* The dynamic sections are not allocated on SymbianOS; the postlinker
16236 will process them and then discard them. */
16237 #undef ELF_DYNAMIC_SEC_FLAGS
16238 #define ELF_DYNAMIC_SEC_FLAGS \
16239 (SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_LINKER_CREATED)
16240
16241 #undef elf_backend_emit_relocs
16242
16243 #undef bfd_elf32_bfd_link_hash_table_create
16244 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_symbian_link_hash_table_create
16245 #undef elf_backend_special_sections
16246 #define elf_backend_special_sections elf32_arm_symbian_special_sections
16247 #undef elf_backend_begin_write_processing
16248 #define elf_backend_begin_write_processing elf32_arm_symbian_begin_write_processing
16249 #undef elf_backend_final_write_processing
16250 #define elf_backend_final_write_processing elf32_arm_final_write_processing
16251
16252 #undef elf_backend_modify_segment_map
16253 #define elf_backend_modify_segment_map elf32_arm_symbian_modify_segment_map
16254
16255 /* There is no .got section for BPABI objects, and hence no header. */
16256 #undef elf_backend_got_header_size
16257 #define elf_backend_got_header_size 0
16258
16259 /* Similarly, there is no .got.plt section. */
16260 #undef elf_backend_want_got_plt
16261 #define elf_backend_want_got_plt 0
16262
16263 #undef elf_backend_plt_sym_val
16264 #define elf_backend_plt_sym_val elf32_arm_symbian_plt_sym_val
16265
16266 #undef elf_backend_may_use_rel_p
16267 #define elf_backend_may_use_rel_p 1
16268 #undef elf_backend_may_use_rela_p
16269 #define elf_backend_may_use_rela_p 0
16270 #undef elf_backend_default_use_rela_p
16271 #define elf_backend_default_use_rela_p 0
16272 #undef elf_backend_want_plt_sym
16273 #define elf_backend_want_plt_sym 0
16274 #undef ELF_MAXPAGESIZE
16275 #define ELF_MAXPAGESIZE 0x8000
16276
16277 #include "elf32-target.h"