1 /* 32-bit ELF support for ARM
2 Copyright 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007,
3 2008, 2009, 2010, 2011, 2012 Free Software Foundation, Inc.
4
5 This file is part of BFD, the Binary File Descriptor library.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
20 MA 02110-1301, USA. */
21
22 #include "sysdep.h"
23 #include <limits.h>
24
25 #include "bfd.h"
26 #include "bfd_stdint.h"
27 #include "libiberty.h"
28 #include "libbfd.h"
29 #include "elf-bfd.h"
30 #include "elf-nacl.h"
31 #include "elf-vxworks.h"
32 #include "elf/arm.h"
33
34 /* Return the relocation section associated with NAME. HTAB is the
35 bfd's elf32_arm_link_hash_table. */
36 #define RELOC_SECTION(HTAB, NAME) \
37 ((HTAB)->use_rel ? ".rel" NAME : ".rela" NAME)
38
39 /* Return size of a relocation entry. HTAB is the bfd's
40 elf32_arm_link_hash_table. */
41 #define RELOC_SIZE(HTAB) \
42 ((HTAB)->use_rel \
43 ? sizeof (Elf32_External_Rel) \
44 : sizeof (Elf32_External_Rela))
45
46 /* Return function to swap relocations in. HTAB is the bfd's
47 elf32_arm_link_hash_table. */
48 #define SWAP_RELOC_IN(HTAB) \
49 ((HTAB)->use_rel \
50 ? bfd_elf32_swap_reloc_in \
51 : bfd_elf32_swap_reloca_in)
52
53 /* Return function to swap relocations out. HTAB is the bfd's
54 elf32_arm_link_hash_table. */
55 #define SWAP_RELOC_OUT(HTAB) \
56 ((HTAB)->use_rel \
57 ? bfd_elf32_swap_reloc_out \
58 : bfd_elf32_swap_reloca_out)
59
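/* Editorial note -- a minimal usage sketch, not part of the original
   source.  The four helpers above let the backend handle REL and RELA
   objects uniformly; assuming HTAB is the bfd's elf32_arm_link_hash_table
   and SRELOC a dynamic relocation section, emitting one entry looks
   roughly like:

     Elf_Internal_Rela rel;
     bfd_byte *loc = sreloc->contents
                     + sreloc->reloc_count++ * RELOC_SIZE (htab);
     SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);

   RELOC_SIZE picks the entry stride and SWAP_RELOC_OUT the matching
   byte-swapping routine for whichever relocation format is in use.  */
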
60 #define elf_info_to_howto 0
61 #define elf_info_to_howto_rel elf32_arm_info_to_howto
62
63 #define ARM_ELF_ABI_VERSION 0
64 #define ARM_ELF_OS_ABI_VERSION ELFOSABI_ARM
65
66 static bfd_boolean elf32_arm_write_section (bfd *output_bfd,
67 struct bfd_link_info *link_info,
68 asection *sec,
69 bfd_byte *contents);
70
71 /* Note: code such as elf32_arm_reloc_type_lookup expects to use e.g.
72 R_ARM_PC24 as an index into this, and find the R_ARM_PC24 HOWTO
73 in that slot. */
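/* Editorial note -- a sketch of the indexing invariant described above
   (assumed usage, not original source):

     reloc_howto_type *howto = &elf32_arm_howto_table_1[R_ARM_PC24];

   must yield the entry whose type field is R_ARM_PC24 and whose name is
   "R_ARM_PC24", so the table below has to stay dense and ordered by
   relocation number.  */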
74
75 static reloc_howto_type elf32_arm_howto_table_1[] =
76 {
77 /* No relocation. */
78 HOWTO (R_ARM_NONE, /* type */
79 0, /* rightshift */
80 0, /* size (0 = byte, 1 = short, 2 = long) */
81 0, /* bitsize */
82 FALSE, /* pc_relative */
83 0, /* bitpos */
84 complain_overflow_dont,/* complain_on_overflow */
85 bfd_elf_generic_reloc, /* special_function */
86 "R_ARM_NONE", /* name */
87 FALSE, /* partial_inplace */
88 0, /* src_mask */
89 0, /* dst_mask */
90 FALSE), /* pcrel_offset */
91
92 HOWTO (R_ARM_PC24, /* type */
93 2, /* rightshift */
94 2, /* size (0 = byte, 1 = short, 2 = long) */
95 24, /* bitsize */
96 TRUE, /* pc_relative */
97 0, /* bitpos */
98 complain_overflow_signed,/* complain_on_overflow */
99 bfd_elf_generic_reloc, /* special_function */
100 "R_ARM_PC24", /* name */
101 FALSE, /* partial_inplace */
102 0x00ffffff, /* src_mask */
103 0x00ffffff, /* dst_mask */
104 TRUE), /* pcrel_offset */
105
106 /* 32 bit absolute */
107 HOWTO (R_ARM_ABS32, /* type */
108 0, /* rightshift */
109 2, /* size (0 = byte, 1 = short, 2 = long) */
110 32, /* bitsize */
111 FALSE, /* pc_relative */
112 0, /* bitpos */
113 complain_overflow_bitfield,/* complain_on_overflow */
114 bfd_elf_generic_reloc, /* special_function */
115 "R_ARM_ABS32", /* name */
116 FALSE, /* partial_inplace */
117 0xffffffff, /* src_mask */
118 0xffffffff, /* dst_mask */
119 FALSE), /* pcrel_offset */
120
121 /* standard 32 bit pc-relative reloc */
122 HOWTO (R_ARM_REL32, /* type */
123 0, /* rightshift */
124 2, /* size (0 = byte, 1 = short, 2 = long) */
125 32, /* bitsize */
126 TRUE, /* pc_relative */
127 0, /* bitpos */
128 complain_overflow_bitfield,/* complain_on_overflow */
129 bfd_elf_generic_reloc, /* special_function */
130 "R_ARM_REL32", /* name */
131 FALSE, /* partial_inplace */
132 0xffffffff, /* src_mask */
133 0xffffffff, /* dst_mask */
134 TRUE), /* pcrel_offset */
135
136 /* PC-relative LDR offset - R_ARM_LDR_PC_G0 in AAELF (formerly R_ARM_PC13). */
137 HOWTO (R_ARM_LDR_PC_G0, /* type */
138 0, /* rightshift */
139 0, /* size (0 = byte, 1 = short, 2 = long) */
140 32, /* bitsize */
141 TRUE, /* pc_relative */
142 0, /* bitpos */
143 complain_overflow_dont,/* complain_on_overflow */
144 bfd_elf_generic_reloc, /* special_function */
145 "R_ARM_LDR_PC_G0", /* name */
146 FALSE, /* partial_inplace */
147 0xffffffff, /* src_mask */
148 0xffffffff, /* dst_mask */
149 TRUE), /* pcrel_offset */
150
151 /* 16 bit absolute */
152 HOWTO (R_ARM_ABS16, /* type */
153 0, /* rightshift */
154 1, /* size (0 = byte, 1 = short, 2 = long) */
155 16, /* bitsize */
156 FALSE, /* pc_relative */
157 0, /* bitpos */
158 complain_overflow_bitfield,/* complain_on_overflow */
159 bfd_elf_generic_reloc, /* special_function */
160 "R_ARM_ABS16", /* name */
161 FALSE, /* partial_inplace */
162 0x0000ffff, /* src_mask */
163 0x0000ffff, /* dst_mask */
164 FALSE), /* pcrel_offset */
165
166 /* 12 bit absolute */
167 HOWTO (R_ARM_ABS12, /* type */
168 0, /* rightshift */
169 2, /* size (0 = byte, 1 = short, 2 = long) */
170 12, /* bitsize */
171 FALSE, /* pc_relative */
172 0, /* bitpos */
173 complain_overflow_bitfield,/* complain_on_overflow */
174 bfd_elf_generic_reloc, /* special_function */
175 "R_ARM_ABS12", /* name */
176 FALSE, /* partial_inplace */
177 0x00000fff, /* src_mask */
178 0x00000fff, /* dst_mask */
179 FALSE), /* pcrel_offset */
180
181 HOWTO (R_ARM_THM_ABS5, /* type */
182 6, /* rightshift */
183 1, /* size (0 = byte, 1 = short, 2 = long) */
184 5, /* bitsize */
185 FALSE, /* pc_relative */
186 0, /* bitpos */
187 complain_overflow_bitfield,/* complain_on_overflow */
188 bfd_elf_generic_reloc, /* special_function */
189 "R_ARM_THM_ABS5", /* name */
190 FALSE, /* partial_inplace */
191 0x000007e0, /* src_mask */
192 0x000007e0, /* dst_mask */
193 FALSE), /* pcrel_offset */
194
195 /* 8 bit absolute */
196 HOWTO (R_ARM_ABS8, /* type */
197 0, /* rightshift */
198 0, /* size (0 = byte, 1 = short, 2 = long) */
199 8, /* bitsize */
200 FALSE, /* pc_relative */
201 0, /* bitpos */
202 complain_overflow_bitfield,/* complain_on_overflow */
203 bfd_elf_generic_reloc, /* special_function */
204 "R_ARM_ABS8", /* name */
205 FALSE, /* partial_inplace */
206 0x000000ff, /* src_mask */
207 0x000000ff, /* dst_mask */
208 FALSE), /* pcrel_offset */
209
210 HOWTO (R_ARM_SBREL32, /* type */
211 0, /* rightshift */
212 2, /* size (0 = byte, 1 = short, 2 = long) */
213 32, /* bitsize */
214 FALSE, /* pc_relative */
215 0, /* bitpos */
216 complain_overflow_dont,/* complain_on_overflow */
217 bfd_elf_generic_reloc, /* special_function */
218 "R_ARM_SBREL32", /* name */
219 FALSE, /* partial_inplace */
220 0xffffffff, /* src_mask */
221 0xffffffff, /* dst_mask */
222 FALSE), /* pcrel_offset */
223
224 HOWTO (R_ARM_THM_CALL, /* type */
225 1, /* rightshift */
226 2, /* size (0 = byte, 1 = short, 2 = long) */
227 24, /* bitsize */
228 TRUE, /* pc_relative */
229 0, /* bitpos */
230 complain_overflow_signed,/* complain_on_overflow */
231 bfd_elf_generic_reloc, /* special_function */
232 "R_ARM_THM_CALL", /* name */
233 FALSE, /* partial_inplace */
234 0x07ff2fff, /* src_mask */
235 0x07ff2fff, /* dst_mask */
236 TRUE), /* pcrel_offset */
237
238 HOWTO (R_ARM_THM_PC8, /* type */
239 1, /* rightshift */
240 1, /* size (0 = byte, 1 = short, 2 = long) */
241 8, /* bitsize */
242 TRUE, /* pc_relative */
243 0, /* bitpos */
244 complain_overflow_signed,/* complain_on_overflow */
245 bfd_elf_generic_reloc, /* special_function */
246 "R_ARM_THM_PC8", /* name */
247 FALSE, /* partial_inplace */
248 0x000000ff, /* src_mask */
249 0x000000ff, /* dst_mask */
250 TRUE), /* pcrel_offset */
251
252 HOWTO (R_ARM_BREL_ADJ, /* type */
253 1, /* rightshift */
254 1, /* size (0 = byte, 1 = short, 2 = long) */
255 32, /* bitsize */
256 FALSE, /* pc_relative */
257 0, /* bitpos */
258 complain_overflow_signed,/* complain_on_overflow */
259 bfd_elf_generic_reloc, /* special_function */
260 "R_ARM_BREL_ADJ", /* name */
261 FALSE, /* partial_inplace */
262 0xffffffff, /* src_mask */
263 0xffffffff, /* dst_mask */
264 FALSE), /* pcrel_offset */
265
266 HOWTO (R_ARM_TLS_DESC, /* type */
267 0, /* rightshift */
268 2, /* size (0 = byte, 1 = short, 2 = long) */
269 32, /* bitsize */
270 FALSE, /* pc_relative */
271 0, /* bitpos */
272 complain_overflow_bitfield,/* complain_on_overflow */
273 bfd_elf_generic_reloc, /* special_function */
274 "R_ARM_TLS_DESC", /* name */
275 FALSE, /* partial_inplace */
276 0xffffffff, /* src_mask */
277 0xffffffff, /* dst_mask */
278 FALSE), /* pcrel_offset */
279
280 HOWTO (R_ARM_THM_SWI8, /* type */
281 0, /* rightshift */
282 0, /* size (0 = byte, 1 = short, 2 = long) */
283 0, /* bitsize */
284 FALSE, /* pc_relative */
285 0, /* bitpos */
286 complain_overflow_signed,/* complain_on_overflow */
287 bfd_elf_generic_reloc, /* special_function */
288 "R_ARM_SWI8", /* name */
289 FALSE, /* partial_inplace */
290 0x00000000, /* src_mask */
291 0x00000000, /* dst_mask */
292 FALSE), /* pcrel_offset */
293
294 /* BLX instruction for the ARM. */
295 HOWTO (R_ARM_XPC25, /* type */
296 2, /* rightshift */
297 2, /* size (0 = byte, 1 = short, 2 = long) */
298 24, /* bitsize */
299 TRUE, /* pc_relative */
300 0, /* bitpos */
301 complain_overflow_signed,/* complain_on_overflow */
302 bfd_elf_generic_reloc, /* special_function */
303 "R_ARM_XPC25", /* name */
304 FALSE, /* partial_inplace */
305 0x00ffffff, /* src_mask */
306 0x00ffffff, /* dst_mask */
307 TRUE), /* pcrel_offset */
308
309 /* BLX instruction for the Thumb. */
310 HOWTO (R_ARM_THM_XPC22, /* type */
311 2, /* rightshift */
312 2, /* size (0 = byte, 1 = short, 2 = long) */
313 24, /* bitsize */
314 TRUE, /* pc_relative */
315 0, /* bitpos */
316 complain_overflow_signed,/* complain_on_overflow */
317 bfd_elf_generic_reloc, /* special_function */
318 "R_ARM_THM_XPC22", /* name */
319 FALSE, /* partial_inplace */
320 0x07ff2fff, /* src_mask */
321 0x07ff2fff, /* dst_mask */
322 TRUE), /* pcrel_offset */
323
324 /* Dynamic TLS relocations. */
325
326 HOWTO (R_ARM_TLS_DTPMOD32, /* type */
327 0, /* rightshift */
328 2, /* size (0 = byte, 1 = short, 2 = long) */
329 32, /* bitsize */
330 FALSE, /* pc_relative */
331 0, /* bitpos */
332 complain_overflow_bitfield,/* complain_on_overflow */
333 bfd_elf_generic_reloc, /* special_function */
334 "R_ARM_TLS_DTPMOD32", /* name */
335 TRUE, /* partial_inplace */
336 0xffffffff, /* src_mask */
337 0xffffffff, /* dst_mask */
338 FALSE), /* pcrel_offset */
339
340 HOWTO (R_ARM_TLS_DTPOFF32, /* type */
341 0, /* rightshift */
342 2, /* size (0 = byte, 1 = short, 2 = long) */
343 32, /* bitsize */
344 FALSE, /* pc_relative */
345 0, /* bitpos */
346 complain_overflow_bitfield,/* complain_on_overflow */
347 bfd_elf_generic_reloc, /* special_function */
348 "R_ARM_TLS_DTPOFF32", /* name */
349 TRUE, /* partial_inplace */
350 0xffffffff, /* src_mask */
351 0xffffffff, /* dst_mask */
352 FALSE), /* pcrel_offset */
353
354 HOWTO (R_ARM_TLS_TPOFF32, /* type */
355 0, /* rightshift */
356 2, /* size (0 = byte, 1 = short, 2 = long) */
357 32, /* bitsize */
358 FALSE, /* pc_relative */
359 0, /* bitpos */
360 complain_overflow_bitfield,/* complain_on_overflow */
361 bfd_elf_generic_reloc, /* special_function */
362 "R_ARM_TLS_TPOFF32", /* name */
363 TRUE, /* partial_inplace */
364 0xffffffff, /* src_mask */
365 0xffffffff, /* dst_mask */
366 FALSE), /* pcrel_offset */
367
368 /* Relocs used in ARM Linux */
369
370 HOWTO (R_ARM_COPY, /* type */
371 0, /* rightshift */
372 2, /* size (0 = byte, 1 = short, 2 = long) */
373 32, /* bitsize */
374 FALSE, /* pc_relative */
375 0, /* bitpos */
376 complain_overflow_bitfield,/* complain_on_overflow */
377 bfd_elf_generic_reloc, /* special_function */
378 "R_ARM_COPY", /* name */
379 TRUE, /* partial_inplace */
380 0xffffffff, /* src_mask */
381 0xffffffff, /* dst_mask */
382 FALSE), /* pcrel_offset */
383
384 HOWTO (R_ARM_GLOB_DAT, /* type */
385 0, /* rightshift */
386 2, /* size (0 = byte, 1 = short, 2 = long) */
387 32, /* bitsize */
388 FALSE, /* pc_relative */
389 0, /* bitpos */
390 complain_overflow_bitfield,/* complain_on_overflow */
391 bfd_elf_generic_reloc, /* special_function */
392 "R_ARM_GLOB_DAT", /* name */
393 TRUE, /* partial_inplace */
394 0xffffffff, /* src_mask */
395 0xffffffff, /* dst_mask */
396 FALSE), /* pcrel_offset */
397
398 HOWTO (R_ARM_JUMP_SLOT, /* type */
399 0, /* rightshift */
400 2, /* size (0 = byte, 1 = short, 2 = long) */
401 32, /* bitsize */
402 FALSE, /* pc_relative */
403 0, /* bitpos */
404 complain_overflow_bitfield,/* complain_on_overflow */
405 bfd_elf_generic_reloc, /* special_function */
406 "R_ARM_JUMP_SLOT", /* name */
407 TRUE, /* partial_inplace */
408 0xffffffff, /* src_mask */
409 0xffffffff, /* dst_mask */
410 FALSE), /* pcrel_offset */
411
412 HOWTO (R_ARM_RELATIVE, /* type */
413 0, /* rightshift */
414 2, /* size (0 = byte, 1 = short, 2 = long) */
415 32, /* bitsize */
416 FALSE, /* pc_relative */
417 0, /* bitpos */
418 complain_overflow_bitfield,/* complain_on_overflow */
419 bfd_elf_generic_reloc, /* special_function */
420 "R_ARM_RELATIVE", /* name */
421 TRUE, /* partial_inplace */
422 0xffffffff, /* src_mask */
423 0xffffffff, /* dst_mask */
424 FALSE), /* pcrel_offset */
425
426 HOWTO (R_ARM_GOTOFF32, /* type */
427 0, /* rightshift */
428 2, /* size (0 = byte, 1 = short, 2 = long) */
429 32, /* bitsize */
430 FALSE, /* pc_relative */
431 0, /* bitpos */
432 complain_overflow_bitfield,/* complain_on_overflow */
433 bfd_elf_generic_reloc, /* special_function */
434 "R_ARM_GOTOFF32", /* name */
435 TRUE, /* partial_inplace */
436 0xffffffff, /* src_mask */
437 0xffffffff, /* dst_mask */
438 FALSE), /* pcrel_offset */
439
440 HOWTO (R_ARM_GOTPC, /* type */
441 0, /* rightshift */
442 2, /* size (0 = byte, 1 = short, 2 = long) */
443 32, /* bitsize */
444 TRUE, /* pc_relative */
445 0, /* bitpos */
446 complain_overflow_bitfield,/* complain_on_overflow */
447 bfd_elf_generic_reloc, /* special_function */
448 "R_ARM_GOTPC", /* name */
449 TRUE, /* partial_inplace */
450 0xffffffff, /* src_mask */
451 0xffffffff, /* dst_mask */
452 TRUE), /* pcrel_offset */
453
454 HOWTO (R_ARM_GOT32, /* type */
455 0, /* rightshift */
456 2, /* size (0 = byte, 1 = short, 2 = long) */
457 32, /* bitsize */
458 FALSE, /* pc_relative */
459 0, /* bitpos */
460 complain_overflow_bitfield,/* complain_on_overflow */
461 bfd_elf_generic_reloc, /* special_function */
462 "R_ARM_GOT32", /* name */
463 TRUE, /* partial_inplace */
464 0xffffffff, /* src_mask */
465 0xffffffff, /* dst_mask */
466 FALSE), /* pcrel_offset */
467
468 HOWTO (R_ARM_PLT32, /* type */
469 2, /* rightshift */
470 2, /* size (0 = byte, 1 = short, 2 = long) */
471 24, /* bitsize */
472 TRUE, /* pc_relative */
473 0, /* bitpos */
474 complain_overflow_bitfield,/* complain_on_overflow */
475 bfd_elf_generic_reloc, /* special_function */
476 "R_ARM_PLT32", /* name */
477 FALSE, /* partial_inplace */
478 0x00ffffff, /* src_mask */
479 0x00ffffff, /* dst_mask */
480 TRUE), /* pcrel_offset */
481
482 HOWTO (R_ARM_CALL, /* type */
483 2, /* rightshift */
484 2, /* size (0 = byte, 1 = short, 2 = long) */
485 24, /* bitsize */
486 TRUE, /* pc_relative */
487 0, /* bitpos */
488 complain_overflow_signed,/* complain_on_overflow */
489 bfd_elf_generic_reloc, /* special_function */
490 "R_ARM_CALL", /* name */
491 FALSE, /* partial_inplace */
492 0x00ffffff, /* src_mask */
493 0x00ffffff, /* dst_mask */
494 TRUE), /* pcrel_offset */
495
496 HOWTO (R_ARM_JUMP24, /* type */
497 2, /* rightshift */
498 2, /* size (0 = byte, 1 = short, 2 = long) */
499 24, /* bitsize */
500 TRUE, /* pc_relative */
501 0, /* bitpos */
502 complain_overflow_signed,/* complain_on_overflow */
503 bfd_elf_generic_reloc, /* special_function */
504 "R_ARM_JUMP24", /* name */
505 FALSE, /* partial_inplace */
506 0x00ffffff, /* src_mask */
507 0x00ffffff, /* dst_mask */
508 TRUE), /* pcrel_offset */
509
510 HOWTO (R_ARM_THM_JUMP24, /* type */
511 1, /* rightshift */
512 2, /* size (0 = byte, 1 = short, 2 = long) */
513 24, /* bitsize */
514 TRUE, /* pc_relative */
515 0, /* bitpos */
516 complain_overflow_signed,/* complain_on_overflow */
517 bfd_elf_generic_reloc, /* special_function */
518 "R_ARM_THM_JUMP24", /* name */
519 FALSE, /* partial_inplace */
520 0x07ff2fff, /* src_mask */
521 0x07ff2fff, /* dst_mask */
522 TRUE), /* pcrel_offset */
523
524 HOWTO (R_ARM_BASE_ABS, /* type */
525 0, /* rightshift */
526 2, /* size (0 = byte, 1 = short, 2 = long) */
527 32, /* bitsize */
528 FALSE, /* pc_relative */
529 0, /* bitpos */
530 complain_overflow_dont,/* complain_on_overflow */
531 bfd_elf_generic_reloc, /* special_function */
532 "R_ARM_BASE_ABS", /* name */
533 FALSE, /* partial_inplace */
534 0xffffffff, /* src_mask */
535 0xffffffff, /* dst_mask */
536 FALSE), /* pcrel_offset */
537
538 HOWTO (R_ARM_ALU_PCREL7_0, /* type */
539 0, /* rightshift */
540 2, /* size (0 = byte, 1 = short, 2 = long) */
541 12, /* bitsize */
542 TRUE, /* pc_relative */
543 0, /* bitpos */
544 complain_overflow_dont,/* complain_on_overflow */
545 bfd_elf_generic_reloc, /* special_function */
546 "R_ARM_ALU_PCREL_7_0", /* name */
547 FALSE, /* partial_inplace */
548 0x00000fff, /* src_mask */
549 0x00000fff, /* dst_mask */
550 TRUE), /* pcrel_offset */
551
552 HOWTO (R_ARM_ALU_PCREL15_8, /* type */
553 0, /* rightshift */
554 2, /* size (0 = byte, 1 = short, 2 = long) */
555 12, /* bitsize */
556 TRUE, /* pc_relative */
557 8, /* bitpos */
558 complain_overflow_dont,/* complain_on_overflow */
559 bfd_elf_generic_reloc, /* special_function */
560 "R_ARM_ALU_PCREL_15_8",/* name */
561 FALSE, /* partial_inplace */
562 0x00000fff, /* src_mask */
563 0x00000fff, /* dst_mask */
564 TRUE), /* pcrel_offset */
565
566 HOWTO (R_ARM_ALU_PCREL23_15, /* type */
567 0, /* rightshift */
568 2, /* size (0 = byte, 1 = short, 2 = long) */
569 12, /* bitsize */
570 TRUE, /* pc_relative */
571 16, /* bitpos */
572 complain_overflow_dont,/* complain_on_overflow */
573 bfd_elf_generic_reloc, /* special_function */
574 "R_ARM_ALU_PCREL_23_15",/* name */
575 FALSE, /* partial_inplace */
576 0x00000fff, /* src_mask */
577 0x00000fff, /* dst_mask */
578 TRUE), /* pcrel_offset */
579
580 HOWTO (R_ARM_LDR_SBREL_11_0, /* type */
581 0, /* rightshift */
582 2, /* size (0 = byte, 1 = short, 2 = long) */
583 12, /* bitsize */
584 FALSE, /* pc_relative */
585 0, /* bitpos */
586 complain_overflow_dont,/* complain_on_overflow */
587 bfd_elf_generic_reloc, /* special_function */
588 "R_ARM_LDR_SBREL_11_0",/* name */
589 FALSE, /* partial_inplace */
590 0x00000fff, /* src_mask */
591 0x00000fff, /* dst_mask */
592 FALSE), /* pcrel_offset */
593
594 HOWTO (R_ARM_ALU_SBREL_19_12, /* type */
595 0, /* rightshift */
596 2, /* size (0 = byte, 1 = short, 2 = long) */
597 8, /* bitsize */
598 FALSE, /* pc_relative */
599 12, /* bitpos */
600 complain_overflow_dont,/* complain_on_overflow */
601 bfd_elf_generic_reloc, /* special_function */
602 "R_ARM_ALU_SBREL_19_12",/* name */
603 FALSE, /* partial_inplace */
604 0x000ff000, /* src_mask */
605 0x000ff000, /* dst_mask */
606 FALSE), /* pcrel_offset */
607
608 HOWTO (R_ARM_ALU_SBREL_27_20, /* type */
609 0, /* rightshift */
610 2, /* size (0 = byte, 1 = short, 2 = long) */
611 8, /* bitsize */
612 FALSE, /* pc_relative */
613 20, /* bitpos */
614 complain_overflow_dont,/* complain_on_overflow */
615 bfd_elf_generic_reloc, /* special_function */
616 "R_ARM_ALU_SBREL_27_20",/* name */
617 FALSE, /* partial_inplace */
618 0x0ff00000, /* src_mask */
619 0x0ff00000, /* dst_mask */
620 FALSE), /* pcrel_offset */
621
622 HOWTO (R_ARM_TARGET1, /* type */
623 0, /* rightshift */
624 2, /* size (0 = byte, 1 = short, 2 = long) */
625 32, /* bitsize */
626 FALSE, /* pc_relative */
627 0, /* bitpos */
628 complain_overflow_dont,/* complain_on_overflow */
629 bfd_elf_generic_reloc, /* special_function */
630 "R_ARM_TARGET1", /* name */
631 FALSE, /* partial_inplace */
632 0xffffffff, /* src_mask */
633 0xffffffff, /* dst_mask */
634 FALSE), /* pcrel_offset */
635
636 HOWTO (R_ARM_ROSEGREL32, /* type */
637 0, /* rightshift */
638 2, /* size (0 = byte, 1 = short, 2 = long) */
639 32, /* bitsize */
640 FALSE, /* pc_relative */
641 0, /* bitpos */
642 complain_overflow_dont,/* complain_on_overflow */
643 bfd_elf_generic_reloc, /* special_function */
644 "R_ARM_ROSEGREL32", /* name */
645 FALSE, /* partial_inplace */
646 0xffffffff, /* src_mask */
647 0xffffffff, /* dst_mask */
648 FALSE), /* pcrel_offset */
649
650 HOWTO (R_ARM_V4BX, /* type */
651 0, /* rightshift */
652 2, /* size (0 = byte, 1 = short, 2 = long) */
653 32, /* bitsize */
654 FALSE, /* pc_relative */
655 0, /* bitpos */
656 complain_overflow_dont,/* complain_on_overflow */
657 bfd_elf_generic_reloc, /* special_function */
658 "R_ARM_V4BX", /* name */
659 FALSE, /* partial_inplace */
660 0xffffffff, /* src_mask */
661 0xffffffff, /* dst_mask */
662 FALSE), /* pcrel_offset */
663
664 HOWTO (R_ARM_TARGET2, /* type */
665 0, /* rightshift */
666 2, /* size (0 = byte, 1 = short, 2 = long) */
667 32, /* bitsize */
668 FALSE, /* pc_relative */
669 0, /* bitpos */
670 complain_overflow_signed,/* complain_on_overflow */
671 bfd_elf_generic_reloc, /* special_function */
672 "R_ARM_TARGET2", /* name */
673 FALSE, /* partial_inplace */
674 0xffffffff, /* src_mask */
675 0xffffffff, /* dst_mask */
676 TRUE), /* pcrel_offset */
677
678 HOWTO (R_ARM_PREL31, /* type */
679 0, /* rightshift */
680 2, /* size (0 = byte, 1 = short, 2 = long) */
681 31, /* bitsize */
682 TRUE, /* pc_relative */
683 0, /* bitpos */
684 complain_overflow_signed,/* complain_on_overflow */
685 bfd_elf_generic_reloc, /* special_function */
686 "R_ARM_PREL31", /* name */
687 FALSE, /* partial_inplace */
688 0x7fffffff, /* src_mask */
689 0x7fffffff, /* dst_mask */
690 TRUE), /* pcrel_offset */
691
692 HOWTO (R_ARM_MOVW_ABS_NC, /* type */
693 0, /* rightshift */
694 2, /* size (0 = byte, 1 = short, 2 = long) */
695 16, /* bitsize */
696 FALSE, /* pc_relative */
697 0, /* bitpos */
698 complain_overflow_dont,/* complain_on_overflow */
699 bfd_elf_generic_reloc, /* special_function */
700 "R_ARM_MOVW_ABS_NC", /* name */
701 FALSE, /* partial_inplace */
702 0x000f0fff, /* src_mask */
703 0x000f0fff, /* dst_mask */
704 FALSE), /* pcrel_offset */
705
706 HOWTO (R_ARM_MOVT_ABS, /* type */
707 0, /* rightshift */
708 2, /* size (0 = byte, 1 = short, 2 = long) */
709 16, /* bitsize */
710 FALSE, /* pc_relative */
711 0, /* bitpos */
712 complain_overflow_bitfield,/* complain_on_overflow */
713 bfd_elf_generic_reloc, /* special_function */
714 "R_ARM_MOVT_ABS", /* name */
715 FALSE, /* partial_inplace */
716 0x000f0fff, /* src_mask */
717 0x000f0fff, /* dst_mask */
718 FALSE), /* pcrel_offset */
719
720 HOWTO (R_ARM_MOVW_PREL_NC, /* type */
721 0, /* rightshift */
722 2, /* size (0 = byte, 1 = short, 2 = long) */
723 16, /* bitsize */
724 TRUE, /* pc_relative */
725 0, /* bitpos */
726 complain_overflow_dont,/* complain_on_overflow */
727 bfd_elf_generic_reloc, /* special_function */
728 "R_ARM_MOVW_PREL_NC", /* name */
729 FALSE, /* partial_inplace */
730 0x000f0fff, /* src_mask */
731 0x000f0fff, /* dst_mask */
732 TRUE), /* pcrel_offset */
733
734 HOWTO (R_ARM_MOVT_PREL, /* type */
735 0, /* rightshift */
736 2, /* size (0 = byte, 1 = short, 2 = long) */
737 16, /* bitsize */
738 TRUE, /* pc_relative */
739 0, /* bitpos */
740 complain_overflow_bitfield,/* complain_on_overflow */
741 bfd_elf_generic_reloc, /* special_function */
742 "R_ARM_MOVT_PREL", /* name */
743 FALSE, /* partial_inplace */
744 0x000f0fff, /* src_mask */
745 0x000f0fff, /* dst_mask */
746 TRUE), /* pcrel_offset */
747
748 HOWTO (R_ARM_THM_MOVW_ABS_NC, /* type */
749 0, /* rightshift */
750 2, /* size (0 = byte, 1 = short, 2 = long) */
751 16, /* bitsize */
752 FALSE, /* pc_relative */
753 0, /* bitpos */
754 complain_overflow_dont,/* complain_on_overflow */
755 bfd_elf_generic_reloc, /* special_function */
756 "R_ARM_THM_MOVW_ABS_NC",/* name */
757 FALSE, /* partial_inplace */
758 0x040f70ff, /* src_mask */
759 0x040f70ff, /* dst_mask */
760 FALSE), /* pcrel_offset */
761
762 HOWTO (R_ARM_THM_MOVT_ABS, /* type */
763 0, /* rightshift */
764 2, /* size (0 = byte, 1 = short, 2 = long) */
765 16, /* bitsize */
766 FALSE, /* pc_relative */
767 0, /* bitpos */
768 complain_overflow_bitfield,/* complain_on_overflow */
769 bfd_elf_generic_reloc, /* special_function */
770 "R_ARM_THM_MOVT_ABS", /* name */
771 FALSE, /* partial_inplace */
772 0x040f70ff, /* src_mask */
773 0x040f70ff, /* dst_mask */
774 FALSE), /* pcrel_offset */
775
776 HOWTO (R_ARM_THM_MOVW_PREL_NC,/* type */
777 0, /* rightshift */
778 2, /* size (0 = byte, 1 = short, 2 = long) */
779 16, /* bitsize */
780 TRUE, /* pc_relative */
781 0, /* bitpos */
782 complain_overflow_dont,/* complain_on_overflow */
783 bfd_elf_generic_reloc, /* special_function */
784 "R_ARM_THM_MOVW_PREL_NC",/* name */
785 FALSE, /* partial_inplace */
786 0x040f70ff, /* src_mask */
787 0x040f70ff, /* dst_mask */
788 TRUE), /* pcrel_offset */
789
790 HOWTO (R_ARM_THM_MOVT_PREL, /* type */
791 0, /* rightshift */
792 2, /* size (0 = byte, 1 = short, 2 = long) */
793 16, /* bitsize */
794 TRUE, /* pc_relative */
795 0, /* bitpos */
796 complain_overflow_bitfield,/* complain_on_overflow */
797 bfd_elf_generic_reloc, /* special_function */
798 "R_ARM_THM_MOVT_PREL", /* name */
799 FALSE, /* partial_inplace */
800 0x040f70ff, /* src_mask */
801 0x040f70ff, /* dst_mask */
802 TRUE), /* pcrel_offset */
803
804 HOWTO (R_ARM_THM_JUMP19, /* type */
805 1, /* rightshift */
806 2, /* size (0 = byte, 1 = short, 2 = long) */
807 19, /* bitsize */
808 TRUE, /* pc_relative */
809 0, /* bitpos */
810 complain_overflow_signed,/* complain_on_overflow */
811 bfd_elf_generic_reloc, /* special_function */
812 "R_ARM_THM_JUMP19", /* name */
813 FALSE, /* partial_inplace */
814 0x043f2fff, /* src_mask */
815 0x043f2fff, /* dst_mask */
816 TRUE), /* pcrel_offset */
817
818 HOWTO (R_ARM_THM_JUMP6, /* type */
819 1, /* rightshift */
820 1, /* size (0 = byte, 1 = short, 2 = long) */
821 6, /* bitsize */
822 TRUE, /* pc_relative */
823 0, /* bitpos */
824 complain_overflow_unsigned,/* complain_on_overflow */
825 bfd_elf_generic_reloc, /* special_function */
826 "R_ARM_THM_JUMP6", /* name */
827 FALSE, /* partial_inplace */
828 0x02f8, /* src_mask */
829 0x02f8, /* dst_mask */
830 TRUE), /* pcrel_offset */
831
832 /* These are declared as 13-bit signed relocations because we can
833 address -4095 .. 4095(base) by altering ADDW to SUBW or vice
834 versa. */
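/* Editorial note -- the arithmetic behind the comment above: ADDW/SUBW
   carry a 12-bit unsigned immediate, 0 .. 4095.  Choosing the opcode by
   the sign of the addend gives

     ADDW base, imm12  ->  base + 0 .. base + 4095
     SUBW base, imm12  ->  base - 4095 .. base

   i.e. an effective range of -4095 .. +4095, which needs 13 bits when
   treated as a single signed field.  */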
835 HOWTO (R_ARM_THM_ALU_PREL_11_0,/* type */
836 0, /* rightshift */
837 2, /* size (0 = byte, 1 = short, 2 = long) */
838 13, /* bitsize */
839 TRUE, /* pc_relative */
840 0, /* bitpos */
841 complain_overflow_dont,/* complain_on_overflow */
842 bfd_elf_generic_reloc, /* special_function */
843 "R_ARM_THM_ALU_PREL_11_0",/* name */
844 FALSE, /* partial_inplace */
845 0xffffffff, /* src_mask */
846 0xffffffff, /* dst_mask */
847 TRUE), /* pcrel_offset */
848
849 HOWTO (R_ARM_THM_PC12, /* type */
850 0, /* rightshift */
851 2, /* size (0 = byte, 1 = short, 2 = long) */
852 13, /* bitsize */
853 TRUE, /* pc_relative */
854 0, /* bitpos */
855 complain_overflow_dont,/* complain_on_overflow */
856 bfd_elf_generic_reloc, /* special_function */
857 "R_ARM_THM_PC12", /* name */
858 FALSE, /* partial_inplace */
859 0xffffffff, /* src_mask */
860 0xffffffff, /* dst_mask */
861 TRUE), /* pcrel_offset */
862
863 HOWTO (R_ARM_ABS32_NOI, /* type */
864 0, /* rightshift */
865 2, /* size (0 = byte, 1 = short, 2 = long) */
866 32, /* bitsize */
867 FALSE, /* pc_relative */
868 0, /* bitpos */
869 complain_overflow_dont,/* complain_on_overflow */
870 bfd_elf_generic_reloc, /* special_function */
871 "R_ARM_ABS32_NOI", /* name */
872 FALSE, /* partial_inplace */
873 0xffffffff, /* src_mask */
874 0xffffffff, /* dst_mask */
875 FALSE), /* pcrel_offset */
876
877 HOWTO (R_ARM_REL32_NOI, /* type */
878 0, /* rightshift */
879 2, /* size (0 = byte, 1 = short, 2 = long) */
880 32, /* bitsize */
881 TRUE, /* pc_relative */
882 0, /* bitpos */
883 complain_overflow_dont,/* complain_on_overflow */
884 bfd_elf_generic_reloc, /* special_function */
885 "R_ARM_REL32_NOI", /* name */
886 FALSE, /* partial_inplace */
887 0xffffffff, /* src_mask */
888 0xffffffff, /* dst_mask */
889 FALSE), /* pcrel_offset */
890
891 /* Group relocations. */
892
893 HOWTO (R_ARM_ALU_PC_G0_NC, /* type */
894 0, /* rightshift */
895 2, /* size (0 = byte, 1 = short, 2 = long) */
896 32, /* bitsize */
897 TRUE, /* pc_relative */
898 0, /* bitpos */
899 complain_overflow_dont,/* complain_on_overflow */
900 bfd_elf_generic_reloc, /* special_function */
901 "R_ARM_ALU_PC_G0_NC", /* name */
902 FALSE, /* partial_inplace */
903 0xffffffff, /* src_mask */
904 0xffffffff, /* dst_mask */
905 TRUE), /* pcrel_offset */
906
907 HOWTO (R_ARM_ALU_PC_G0, /* type */
908 0, /* rightshift */
909 2, /* size (0 = byte, 1 = short, 2 = long) */
910 32, /* bitsize */
911 TRUE, /* pc_relative */
912 0, /* bitpos */
913 complain_overflow_dont,/* complain_on_overflow */
914 bfd_elf_generic_reloc, /* special_function */
915 "R_ARM_ALU_PC_G0", /* name */
916 FALSE, /* partial_inplace */
917 0xffffffff, /* src_mask */
918 0xffffffff, /* dst_mask */
919 TRUE), /* pcrel_offset */
920
921 HOWTO (R_ARM_ALU_PC_G1_NC, /* type */
922 0, /* rightshift */
923 2, /* size (0 = byte, 1 = short, 2 = long) */
924 32, /* bitsize */
925 TRUE, /* pc_relative */
926 0, /* bitpos */
927 complain_overflow_dont,/* complain_on_overflow */
928 bfd_elf_generic_reloc, /* special_function */
929 "R_ARM_ALU_PC_G1_NC", /* name */
930 FALSE, /* partial_inplace */
931 0xffffffff, /* src_mask */
932 0xffffffff, /* dst_mask */
933 TRUE), /* pcrel_offset */
934
935 HOWTO (R_ARM_ALU_PC_G1, /* type */
936 0, /* rightshift */
937 2, /* size (0 = byte, 1 = short, 2 = long) */
938 32, /* bitsize */
939 TRUE, /* pc_relative */
940 0, /* bitpos */
941 complain_overflow_dont,/* complain_on_overflow */
942 bfd_elf_generic_reloc, /* special_function */
943 "R_ARM_ALU_PC_G1", /* name */
944 FALSE, /* partial_inplace */
945 0xffffffff, /* src_mask */
946 0xffffffff, /* dst_mask */
947 TRUE), /* pcrel_offset */
948
949 HOWTO (R_ARM_ALU_PC_G2, /* type */
950 0, /* rightshift */
951 2, /* size (0 = byte, 1 = short, 2 = long) */
952 32, /* bitsize */
953 TRUE, /* pc_relative */
954 0, /* bitpos */
955 complain_overflow_dont,/* complain_on_overflow */
956 bfd_elf_generic_reloc, /* special_function */
957 "R_ARM_ALU_PC_G2", /* name */
958 FALSE, /* partial_inplace */
959 0xffffffff, /* src_mask */
960 0xffffffff, /* dst_mask */
961 TRUE), /* pcrel_offset */
962
963 HOWTO (R_ARM_LDR_PC_G1, /* type */
964 0, /* rightshift */
965 2, /* size (0 = byte, 1 = short, 2 = long) */
966 32, /* bitsize */
967 TRUE, /* pc_relative */
968 0, /* bitpos */
969 complain_overflow_dont,/* complain_on_overflow */
970 bfd_elf_generic_reloc, /* special_function */
971 "R_ARM_LDR_PC_G1", /* name */
972 FALSE, /* partial_inplace */
973 0xffffffff, /* src_mask */
974 0xffffffff, /* dst_mask */
975 TRUE), /* pcrel_offset */
976
977 HOWTO (R_ARM_LDR_PC_G2, /* type */
978 0, /* rightshift */
979 2, /* size (0 = byte, 1 = short, 2 = long) */
980 32, /* bitsize */
981 TRUE, /* pc_relative */
982 0, /* bitpos */
983 complain_overflow_dont,/* complain_on_overflow */
984 bfd_elf_generic_reloc, /* special_function */
985 "R_ARM_LDR_PC_G2", /* name */
986 FALSE, /* partial_inplace */
987 0xffffffff, /* src_mask */
988 0xffffffff, /* dst_mask */
989 TRUE), /* pcrel_offset */
990
991 HOWTO (R_ARM_LDRS_PC_G0, /* type */
992 0, /* rightshift */
993 2, /* size (0 = byte, 1 = short, 2 = long) */
994 32, /* bitsize */
995 TRUE, /* pc_relative */
996 0, /* bitpos */
997 complain_overflow_dont,/* complain_on_overflow */
998 bfd_elf_generic_reloc, /* special_function */
999 "R_ARM_LDRS_PC_G0", /* name */
1000 FALSE, /* partial_inplace */
1001 0xffffffff, /* src_mask */
1002 0xffffffff, /* dst_mask */
1003 TRUE), /* pcrel_offset */
1004
1005 HOWTO (R_ARM_LDRS_PC_G1, /* type */
1006 0, /* rightshift */
1007 2, /* size (0 = byte, 1 = short, 2 = long) */
1008 32, /* bitsize */
1009 TRUE, /* pc_relative */
1010 0, /* bitpos */
1011 complain_overflow_dont,/* complain_on_overflow */
1012 bfd_elf_generic_reloc, /* special_function */
1013 "R_ARM_LDRS_PC_G1", /* name */
1014 FALSE, /* partial_inplace */
1015 0xffffffff, /* src_mask */
1016 0xffffffff, /* dst_mask */
1017 TRUE), /* pcrel_offset */
1018
1019 HOWTO (R_ARM_LDRS_PC_G2, /* type */
1020 0, /* rightshift */
1021 2, /* size (0 = byte, 1 = short, 2 = long) */
1022 32, /* bitsize */
1023 TRUE, /* pc_relative */
1024 0, /* bitpos */
1025 complain_overflow_dont,/* complain_on_overflow */
1026 bfd_elf_generic_reloc, /* special_function */
1027 "R_ARM_LDRS_PC_G2", /* name */
1028 FALSE, /* partial_inplace */
1029 0xffffffff, /* src_mask */
1030 0xffffffff, /* dst_mask */
1031 TRUE), /* pcrel_offset */
1032
1033 HOWTO (R_ARM_LDC_PC_G0, /* type */
1034 0, /* rightshift */
1035 2, /* size (0 = byte, 1 = short, 2 = long) */
1036 32, /* bitsize */
1037 TRUE, /* pc_relative */
1038 0, /* bitpos */
1039 complain_overflow_dont,/* complain_on_overflow */
1040 bfd_elf_generic_reloc, /* special_function */
1041 "R_ARM_LDC_PC_G0", /* name */
1042 FALSE, /* partial_inplace */
1043 0xffffffff, /* src_mask */
1044 0xffffffff, /* dst_mask */
1045 TRUE), /* pcrel_offset */
1046
1047 HOWTO (R_ARM_LDC_PC_G1, /* type */
1048 0, /* rightshift */
1049 2, /* size (0 = byte, 1 = short, 2 = long) */
1050 32, /* bitsize */
1051 TRUE, /* pc_relative */
1052 0, /* bitpos */
1053 complain_overflow_dont,/* complain_on_overflow */
1054 bfd_elf_generic_reloc, /* special_function */
1055 "R_ARM_LDC_PC_G1", /* name */
1056 FALSE, /* partial_inplace */
1057 0xffffffff, /* src_mask */
1058 0xffffffff, /* dst_mask */
1059 TRUE), /* pcrel_offset */
1060
1061 HOWTO (R_ARM_LDC_PC_G2, /* type */
1062 0, /* rightshift */
1063 2, /* size (0 = byte, 1 = short, 2 = long) */
1064 32, /* bitsize */
1065 TRUE, /* pc_relative */
1066 0, /* bitpos */
1067 complain_overflow_dont,/* complain_on_overflow */
1068 bfd_elf_generic_reloc, /* special_function */
1069 "R_ARM_LDC_PC_G2", /* name */
1070 FALSE, /* partial_inplace */
1071 0xffffffff, /* src_mask */
1072 0xffffffff, /* dst_mask */
1073 TRUE), /* pcrel_offset */
1074
1075 HOWTO (R_ARM_ALU_SB_G0_NC, /* type */
1076 0, /* rightshift */
1077 2, /* size (0 = byte, 1 = short, 2 = long) */
1078 32, /* bitsize */
1079 TRUE, /* pc_relative */
1080 0, /* bitpos */
1081 complain_overflow_dont,/* complain_on_overflow */
1082 bfd_elf_generic_reloc, /* special_function */
1083 "R_ARM_ALU_SB_G0_NC", /* name */
1084 FALSE, /* partial_inplace */
1085 0xffffffff, /* src_mask */
1086 0xffffffff, /* dst_mask */
1087 TRUE), /* pcrel_offset */
1088
1089 HOWTO (R_ARM_ALU_SB_G0, /* type */
1090 0, /* rightshift */
1091 2, /* size (0 = byte, 1 = short, 2 = long) */
1092 32, /* bitsize */
1093 TRUE, /* pc_relative */
1094 0, /* bitpos */
1095 complain_overflow_dont,/* complain_on_overflow */
1096 bfd_elf_generic_reloc, /* special_function */
1097 "R_ARM_ALU_SB_G0", /* name */
1098 FALSE, /* partial_inplace */
1099 0xffffffff, /* src_mask */
1100 0xffffffff, /* dst_mask */
1101 TRUE), /* pcrel_offset */
1102
1103 HOWTO (R_ARM_ALU_SB_G1_NC, /* type */
1104 0, /* rightshift */
1105 2, /* size (0 = byte, 1 = short, 2 = long) */
1106 32, /* bitsize */
1107 TRUE, /* pc_relative */
1108 0, /* bitpos */
1109 complain_overflow_dont,/* complain_on_overflow */
1110 bfd_elf_generic_reloc, /* special_function */
1111 "R_ARM_ALU_SB_G1_NC", /* name */
1112 FALSE, /* partial_inplace */
1113 0xffffffff, /* src_mask */
1114 0xffffffff, /* dst_mask */
1115 TRUE), /* pcrel_offset */
1116
1117 HOWTO (R_ARM_ALU_SB_G1, /* type */
1118 0, /* rightshift */
1119 2, /* size (0 = byte, 1 = short, 2 = long) */
1120 32, /* bitsize */
1121 TRUE, /* pc_relative */
1122 0, /* bitpos */
1123 complain_overflow_dont,/* complain_on_overflow */
1124 bfd_elf_generic_reloc, /* special_function */
1125 "R_ARM_ALU_SB_G1", /* name */
1126 FALSE, /* partial_inplace */
1127 0xffffffff, /* src_mask */
1128 0xffffffff, /* dst_mask */
1129 TRUE), /* pcrel_offset */
1130
1131 HOWTO (R_ARM_ALU_SB_G2, /* type */
1132 0, /* rightshift */
1133 2, /* size (0 = byte, 1 = short, 2 = long) */
1134 32, /* bitsize */
1135 TRUE, /* pc_relative */
1136 0, /* bitpos */
1137 complain_overflow_dont,/* complain_on_overflow */
1138 bfd_elf_generic_reloc, /* special_function */
1139 "R_ARM_ALU_SB_G2", /* name */
1140 FALSE, /* partial_inplace */
1141 0xffffffff, /* src_mask */
1142 0xffffffff, /* dst_mask */
1143 TRUE), /* pcrel_offset */
1144
1145 HOWTO (R_ARM_LDR_SB_G0, /* type */
1146 0, /* rightshift */
1147 2, /* size (0 = byte, 1 = short, 2 = long) */
1148 32, /* bitsize */
1149 TRUE, /* pc_relative */
1150 0, /* bitpos */
1151 complain_overflow_dont,/* complain_on_overflow */
1152 bfd_elf_generic_reloc, /* special_function */
1153 "R_ARM_LDR_SB_G0", /* name */
1154 FALSE, /* partial_inplace */
1155 0xffffffff, /* src_mask */
1156 0xffffffff, /* dst_mask */
1157 TRUE), /* pcrel_offset */
1158
1159 HOWTO (R_ARM_LDR_SB_G1, /* type */
1160 0, /* rightshift */
1161 2, /* size (0 = byte, 1 = short, 2 = long) */
1162 32, /* bitsize */
1163 TRUE, /* pc_relative */
1164 0, /* bitpos */
1165 complain_overflow_dont,/* complain_on_overflow */
1166 bfd_elf_generic_reloc, /* special_function */
1167 "R_ARM_LDR_SB_G1", /* name */
1168 FALSE, /* partial_inplace */
1169 0xffffffff, /* src_mask */
1170 0xffffffff, /* dst_mask */
1171 TRUE), /* pcrel_offset */
1172
1173 HOWTO (R_ARM_LDR_SB_G2, /* type */
1174 0, /* rightshift */
1175 2, /* size (0 = byte, 1 = short, 2 = long) */
1176 32, /* bitsize */
1177 TRUE, /* pc_relative */
1178 0, /* bitpos */
1179 complain_overflow_dont,/* complain_on_overflow */
1180 bfd_elf_generic_reloc, /* special_function */
1181 "R_ARM_LDR_SB_G2", /* name */
1182 FALSE, /* partial_inplace */
1183 0xffffffff, /* src_mask */
1184 0xffffffff, /* dst_mask */
1185 TRUE), /* pcrel_offset */
1186
1187 HOWTO (R_ARM_LDRS_SB_G0, /* type */
1188 0, /* rightshift */
1189 2, /* size (0 = byte, 1 = short, 2 = long) */
1190 32, /* bitsize */
1191 TRUE, /* pc_relative */
1192 0, /* bitpos */
1193 complain_overflow_dont,/* complain_on_overflow */
1194 bfd_elf_generic_reloc, /* special_function */
1195 "R_ARM_LDRS_SB_G0", /* name */
1196 FALSE, /* partial_inplace */
1197 0xffffffff, /* src_mask */
1198 0xffffffff, /* dst_mask */
1199 TRUE), /* pcrel_offset */
1200
1201 HOWTO (R_ARM_LDRS_SB_G1, /* type */
1202 0, /* rightshift */
1203 2, /* size (0 = byte, 1 = short, 2 = long) */
1204 32, /* bitsize */
1205 TRUE, /* pc_relative */
1206 0, /* bitpos */
1207 complain_overflow_dont,/* complain_on_overflow */
1208 bfd_elf_generic_reloc, /* special_function */
1209 "R_ARM_LDRS_SB_G1", /* name */
1210 FALSE, /* partial_inplace */
1211 0xffffffff, /* src_mask */
1212 0xffffffff, /* dst_mask */
1213 TRUE), /* pcrel_offset */
1214
1215 HOWTO (R_ARM_LDRS_SB_G2, /* type */
1216 0, /* rightshift */
1217 2, /* size (0 = byte, 1 = short, 2 = long) */
1218 32, /* bitsize */
1219 TRUE, /* pc_relative */
1220 0, /* bitpos */
1221 complain_overflow_dont,/* complain_on_overflow */
1222 bfd_elf_generic_reloc, /* special_function */
1223 "R_ARM_LDRS_SB_G2", /* name */
1224 FALSE, /* partial_inplace */
1225 0xffffffff, /* src_mask */
1226 0xffffffff, /* dst_mask */
1227 TRUE), /* pcrel_offset */
1228
1229 HOWTO (R_ARM_LDC_SB_G0, /* type */
1230 0, /* rightshift */
1231 2, /* size (0 = byte, 1 = short, 2 = long) */
1232 32, /* bitsize */
1233 TRUE, /* pc_relative */
1234 0, /* bitpos */
1235 complain_overflow_dont,/* complain_on_overflow */
1236 bfd_elf_generic_reloc, /* special_function */
1237 "R_ARM_LDC_SB_G0", /* name */
1238 FALSE, /* partial_inplace */
1239 0xffffffff, /* src_mask */
1240 0xffffffff, /* dst_mask */
1241 TRUE), /* pcrel_offset */
1242
1243 HOWTO (R_ARM_LDC_SB_G1, /* type */
1244 0, /* rightshift */
1245 2, /* size (0 = byte, 1 = short, 2 = long) */
1246 32, /* bitsize */
1247 TRUE, /* pc_relative */
1248 0, /* bitpos */
1249 complain_overflow_dont,/* complain_on_overflow */
1250 bfd_elf_generic_reloc, /* special_function */
1251 "R_ARM_LDC_SB_G1", /* name */
1252 FALSE, /* partial_inplace */
1253 0xffffffff, /* src_mask */
1254 0xffffffff, /* dst_mask */
1255 TRUE), /* pcrel_offset */
1256
1257 HOWTO (R_ARM_LDC_SB_G2, /* type */
1258 0, /* rightshift */
1259 2, /* size (0 = byte, 1 = short, 2 = long) */
1260 32, /* bitsize */
1261 TRUE, /* pc_relative */
1262 0, /* bitpos */
1263 complain_overflow_dont,/* complain_on_overflow */
1264 bfd_elf_generic_reloc, /* special_function */
1265 "R_ARM_LDC_SB_G2", /* name */
1266 FALSE, /* partial_inplace */
1267 0xffffffff, /* src_mask */
1268 0xffffffff, /* dst_mask */
1269 TRUE), /* pcrel_offset */
1270
1271 /* End of group relocations. */
1272
1273 HOWTO (R_ARM_MOVW_BREL_NC, /* type */
1274 0, /* rightshift */
1275 2, /* size (0 = byte, 1 = short, 2 = long) */
1276 16, /* bitsize */
1277 FALSE, /* pc_relative */
1278 0, /* bitpos */
1279 complain_overflow_dont,/* complain_on_overflow */
1280 bfd_elf_generic_reloc, /* special_function */
1281 "R_ARM_MOVW_BREL_NC", /* name */
1282 FALSE, /* partial_inplace */
1283 0x0000ffff, /* src_mask */
1284 0x0000ffff, /* dst_mask */
1285 FALSE), /* pcrel_offset */
1286
1287 HOWTO (R_ARM_MOVT_BREL, /* type */
1288 0, /* rightshift */
1289 2, /* size (0 = byte, 1 = short, 2 = long) */
1290 16, /* bitsize */
1291 FALSE, /* pc_relative */
1292 0, /* bitpos */
1293 complain_overflow_bitfield,/* complain_on_overflow */
1294 bfd_elf_generic_reloc, /* special_function */
1295 "R_ARM_MOVT_BREL", /* name */
1296 FALSE, /* partial_inplace */
1297 0x0000ffff, /* src_mask */
1298 0x0000ffff, /* dst_mask */
1299 FALSE), /* pcrel_offset */
1300
1301 HOWTO (R_ARM_MOVW_BREL, /* type */
1302 0, /* rightshift */
1303 2, /* size (0 = byte, 1 = short, 2 = long) */
1304 16, /* bitsize */
1305 FALSE, /* pc_relative */
1306 0, /* bitpos */
1307 complain_overflow_dont,/* complain_on_overflow */
1308 bfd_elf_generic_reloc, /* special_function */
1309 "R_ARM_MOVW_BREL", /* name */
1310 FALSE, /* partial_inplace */
1311 0x0000ffff, /* src_mask */
1312 0x0000ffff, /* dst_mask */
1313 FALSE), /* pcrel_offset */
1314
1315 HOWTO (R_ARM_THM_MOVW_BREL_NC,/* type */
1316 0, /* rightshift */
1317 2, /* size (0 = byte, 1 = short, 2 = long) */
1318 16, /* bitsize */
1319 FALSE, /* pc_relative */
1320 0, /* bitpos */
1321 complain_overflow_dont,/* complain_on_overflow */
1322 bfd_elf_generic_reloc, /* special_function */
1323 "R_ARM_THM_MOVW_BREL_NC",/* name */
1324 FALSE, /* partial_inplace */
1325 0x040f70ff, /* src_mask */
1326 0x040f70ff, /* dst_mask */
1327 FALSE), /* pcrel_offset */
1328
1329 HOWTO (R_ARM_THM_MOVT_BREL, /* type */
1330 0, /* rightshift */
1331 2, /* size (0 = byte, 1 = short, 2 = long) */
1332 16, /* bitsize */
1333 FALSE, /* pc_relative */
1334 0, /* bitpos */
1335 complain_overflow_bitfield,/* complain_on_overflow */
1336 bfd_elf_generic_reloc, /* special_function */
1337 "R_ARM_THM_MOVT_BREL", /* name */
1338 FALSE, /* partial_inplace */
1339 0x040f70ff, /* src_mask */
1340 0x040f70ff, /* dst_mask */
1341 FALSE), /* pcrel_offset */
1342
1343 HOWTO (R_ARM_THM_MOVW_BREL, /* type */
1344 0, /* rightshift */
1345 2, /* size (0 = byte, 1 = short, 2 = long) */
1346 16, /* bitsize */
1347 FALSE, /* pc_relative */
1348 0, /* bitpos */
1349 complain_overflow_dont,/* complain_on_overflow */
1350 bfd_elf_generic_reloc, /* special_function */
1351 "R_ARM_THM_MOVW_BREL", /* name */
1352 FALSE, /* partial_inplace */
1353 0x040f70ff, /* src_mask */
1354 0x040f70ff, /* dst_mask */
1355 FALSE), /* pcrel_offset */
1356
1357 HOWTO (R_ARM_TLS_GOTDESC, /* type */
1358 0, /* rightshift */
1359 2, /* size (0 = byte, 1 = short, 2 = long) */
1360 32, /* bitsize */
1361 FALSE, /* pc_relative */
1362 0, /* bitpos */
1363 complain_overflow_bitfield,/* complain_on_overflow */
1364 NULL, /* special_function */
1365 "R_ARM_TLS_GOTDESC", /* name */
1366 TRUE, /* partial_inplace */
1367 0xffffffff, /* src_mask */
1368 0xffffffff, /* dst_mask */
1369 FALSE), /* pcrel_offset */
1370
1371 HOWTO (R_ARM_TLS_CALL, /* type */
1372 0, /* rightshift */
1373 2, /* size (0 = byte, 1 = short, 2 = long) */
1374 24, /* bitsize */
1375 FALSE, /* pc_relative */
1376 0, /* bitpos */
1377 complain_overflow_dont,/* complain_on_overflow */
1378 bfd_elf_generic_reloc, /* special_function */
1379 "R_ARM_TLS_CALL", /* name */
1380 FALSE, /* partial_inplace */
1381 0x00ffffff, /* src_mask */
1382 0x00ffffff, /* dst_mask */
1383 FALSE), /* pcrel_offset */
1384
1385 HOWTO (R_ARM_TLS_DESCSEQ, /* type */
1386 0, /* rightshift */
1387 2, /* size (0 = byte, 1 = short, 2 = long) */
1388 0, /* bitsize */
1389 FALSE, /* pc_relative */
1390 0, /* bitpos */
1391 complain_overflow_bitfield,/* complain_on_overflow */
1392 bfd_elf_generic_reloc, /* special_function */
1393 "R_ARM_TLS_DESCSEQ", /* name */
1394 FALSE, /* partial_inplace */
1395 0x00000000, /* src_mask */
1396 0x00000000, /* dst_mask */
1397 FALSE), /* pcrel_offset */
1398
1399 HOWTO (R_ARM_THM_TLS_CALL, /* type */
1400 0, /* rightshift */
1401 2, /* size (0 = byte, 1 = short, 2 = long) */
1402 24, /* bitsize */
1403 FALSE, /* pc_relative */
1404 0, /* bitpos */
1405 complain_overflow_dont,/* complain_on_overflow */
1406 bfd_elf_generic_reloc, /* special_function */
1407 "R_ARM_THM_TLS_CALL", /* name */
1408 FALSE, /* partial_inplace */
1409 0x07ff07ff, /* src_mask */
1410 0x07ff07ff, /* dst_mask */
1411 FALSE), /* pcrel_offset */
1412
1413 HOWTO (R_ARM_PLT32_ABS, /* type */
1414 0, /* rightshift */
1415 2, /* size (0 = byte, 1 = short, 2 = long) */
1416 32, /* bitsize */
1417 FALSE, /* pc_relative */
1418 0, /* bitpos */
1419 complain_overflow_dont,/* complain_on_overflow */
1420 bfd_elf_generic_reloc, /* special_function */
1421 "R_ARM_PLT32_ABS", /* name */
1422 FALSE, /* partial_inplace */
1423 0xffffffff, /* src_mask */
1424 0xffffffff, /* dst_mask */
1425 FALSE), /* pcrel_offset */
1426
1427 HOWTO (R_ARM_GOT_ABS, /* type */
1428 0, /* rightshift */
1429 2, /* size (0 = byte, 1 = short, 2 = long) */
1430 32, /* bitsize */
1431 FALSE, /* pc_relative */
1432 0, /* bitpos */
1433 complain_overflow_dont,/* complain_on_overflow */
1434 bfd_elf_generic_reloc, /* special_function */
1435 "R_ARM_GOT_ABS", /* name */
1436 FALSE, /* partial_inplace */
1437 0xffffffff, /* src_mask */
1438 0xffffffff, /* dst_mask */
1439 FALSE), /* pcrel_offset */
1440
1441 HOWTO (R_ARM_GOT_PREL, /* type */
1442 0, /* rightshift */
1443 2, /* size (0 = byte, 1 = short, 2 = long) */
1444 32, /* bitsize */
1445 TRUE, /* pc_relative */
1446 0, /* bitpos */
1447 complain_overflow_dont, /* complain_on_overflow */
1448 bfd_elf_generic_reloc, /* special_function */
1449 "R_ARM_GOT_PREL", /* name */
1450 FALSE, /* partial_inplace */
1451 0xffffffff, /* src_mask */
1452 0xffffffff, /* dst_mask */
1453 TRUE), /* pcrel_offset */
1454
1455 HOWTO (R_ARM_GOT_BREL12, /* type */
1456 0, /* rightshift */
1457 2, /* size (0 = byte, 1 = short, 2 = long) */
1458 12, /* bitsize */
1459 FALSE, /* pc_relative */
1460 0, /* bitpos */
1461 complain_overflow_bitfield,/* complain_on_overflow */
1462 bfd_elf_generic_reloc, /* special_function */
1463 "R_ARM_GOT_BREL12", /* name */
1464 FALSE, /* partial_inplace */
1465 0x00000fff, /* src_mask */
1466 0x00000fff, /* dst_mask */
1467 FALSE), /* pcrel_offset */
1468
1469 HOWTO (R_ARM_GOTOFF12, /* type */
1470 0, /* rightshift */
1471 2, /* size (0 = byte, 1 = short, 2 = long) */
1472 12, /* bitsize */
1473 FALSE, /* pc_relative */
1474 0, /* bitpos */
1475 complain_overflow_bitfield,/* complain_on_overflow */
1476 bfd_elf_generic_reloc, /* special_function */
1477 "R_ARM_GOTOFF12", /* name */
1478 FALSE, /* partial_inplace */
1479 0x00000fff, /* src_mask */
1480 0x00000fff, /* dst_mask */
1481 FALSE), /* pcrel_offset */
1482
1483 EMPTY_HOWTO (R_ARM_GOTRELAX), /* reserved for future GOT-load optimizations */
1484
1485 /* GNU extension to record C++ vtable member usage */
1486 HOWTO (R_ARM_GNU_VTENTRY, /* type */
1487 0, /* rightshift */
1488 2, /* size (0 = byte, 1 = short, 2 = long) */
1489 0, /* bitsize */
1490 FALSE, /* pc_relative */
1491 0, /* bitpos */
1492 complain_overflow_dont, /* complain_on_overflow */
1493 _bfd_elf_rel_vtable_reloc_fn, /* special_function */
1494 "R_ARM_GNU_VTENTRY", /* name */
1495 FALSE, /* partial_inplace */
1496 0, /* src_mask */
1497 0, /* dst_mask */
1498 FALSE), /* pcrel_offset */
1499
1500 /* GNU extension to record C++ vtable hierarchy */
1501 HOWTO (R_ARM_GNU_VTINHERIT, /* type */
1502 0, /* rightshift */
1503 2, /* size (0 = byte, 1 = short, 2 = long) */
1504 0, /* bitsize */
1505 FALSE, /* pc_relative */
1506 0, /* bitpos */
1507 complain_overflow_dont, /* complain_on_overflow */
1508 NULL, /* special_function */
1509 "R_ARM_GNU_VTINHERIT", /* name */
1510 FALSE, /* partial_inplace */
1511 0, /* src_mask */
1512 0, /* dst_mask */
1513 FALSE), /* pcrel_offset */
1514
1515 HOWTO (R_ARM_THM_JUMP11, /* type */
1516 1, /* rightshift */
1517 1, /* size (0 = byte, 1 = short, 2 = long) */
1518 11, /* bitsize */
1519 TRUE, /* pc_relative */
1520 0, /* bitpos */
1521 complain_overflow_signed, /* complain_on_overflow */
1522 bfd_elf_generic_reloc, /* special_function */
1523 "R_ARM_THM_JUMP11", /* name */
1524 FALSE, /* partial_inplace */
1525 0x000007ff, /* src_mask */
1526 0x000007ff, /* dst_mask */
1527 TRUE), /* pcrel_offset */
1528
1529 HOWTO (R_ARM_THM_JUMP8, /* type */
1530 1, /* rightshift */
1531 1, /* size (0 = byte, 1 = short, 2 = long) */
1532 8, /* bitsize */
1533 TRUE, /* pc_relative */
1534 0, /* bitpos */
1535 complain_overflow_signed, /* complain_on_overflow */
1536 bfd_elf_generic_reloc, /* special_function */
1537 "R_ARM_THM_JUMP8", /* name */
1538 FALSE, /* partial_inplace */
1539 0x000000ff, /* src_mask */
1540 0x000000ff, /* dst_mask */
1541 TRUE), /* pcrel_offset */
1542
1543 /* TLS relocations */
1544 HOWTO (R_ARM_TLS_GD32, /* type */
1545 0, /* rightshift */
1546 2, /* size (0 = byte, 1 = short, 2 = long) */
1547 32, /* bitsize */
1548 FALSE, /* pc_relative */
1549 0, /* bitpos */
1550 complain_overflow_bitfield,/* complain_on_overflow */
1551 NULL, /* special_function */
1552 "R_ARM_TLS_GD32", /* name */
1553 TRUE, /* partial_inplace */
1554 0xffffffff, /* src_mask */
1555 0xffffffff, /* dst_mask */
1556 FALSE), /* pcrel_offset */
1557
1558 HOWTO (R_ARM_TLS_LDM32, /* type */
1559 0, /* rightshift */
1560 2, /* size (0 = byte, 1 = short, 2 = long) */
1561 32, /* bitsize */
1562 FALSE, /* pc_relative */
1563 0, /* bitpos */
1564 complain_overflow_bitfield,/* complain_on_overflow */
1565 bfd_elf_generic_reloc, /* special_function */
1566 "R_ARM_TLS_LDM32", /* name */
1567 TRUE, /* partial_inplace */
1568 0xffffffff, /* src_mask */
1569 0xffffffff, /* dst_mask */
1570 FALSE), /* pcrel_offset */
1571
1572 HOWTO (R_ARM_TLS_LDO32, /* type */
1573 0, /* rightshift */
1574 2, /* size (0 = byte, 1 = short, 2 = long) */
1575 32, /* bitsize */
1576 FALSE, /* pc_relative */
1577 0, /* bitpos */
1578 complain_overflow_bitfield,/* complain_on_overflow */
1579 bfd_elf_generic_reloc, /* special_function */
1580 "R_ARM_TLS_LDO32", /* name */
1581 TRUE, /* partial_inplace */
1582 0xffffffff, /* src_mask */
1583 0xffffffff, /* dst_mask */
1584 FALSE), /* pcrel_offset */
1585
1586 HOWTO (R_ARM_TLS_IE32, /* type */
1587 0, /* rightshift */
1588 2, /* size (0 = byte, 1 = short, 2 = long) */
1589 32, /* bitsize */
1590 FALSE, /* pc_relative */
1591 0, /* bitpos */
1592 complain_overflow_bitfield,/* complain_on_overflow */
1593 NULL, /* special_function */
1594 "R_ARM_TLS_IE32", /* name */
1595 TRUE, /* partial_inplace */
1596 0xffffffff, /* src_mask */
1597 0xffffffff, /* dst_mask */
1598 FALSE), /* pcrel_offset */
1599
1600 HOWTO (R_ARM_TLS_LE32, /* type */
1601 0, /* rightshift */
1602 2, /* size (0 = byte, 1 = short, 2 = long) */
1603 32, /* bitsize */
1604 FALSE, /* pc_relative */
1605 0, /* bitpos */
1606 complain_overflow_bitfield,/* complain_on_overflow */
1607 bfd_elf_generic_reloc, /* special_function */
1608 "R_ARM_TLS_LE32", /* name */
1609 TRUE, /* partial_inplace */
1610 0xffffffff, /* src_mask */
1611 0xffffffff, /* dst_mask */
1612 FALSE), /* pcrel_offset */
1613
1614 HOWTO (R_ARM_TLS_LDO12, /* type */
1615 0, /* rightshift */
1616 2, /* size (0 = byte, 1 = short, 2 = long) */
1617 12, /* bitsize */
1618 FALSE, /* pc_relative */
1619 0, /* bitpos */
1620 complain_overflow_bitfield,/* complain_on_overflow */
1621 bfd_elf_generic_reloc, /* special_function */
1622 "R_ARM_TLS_LDO12", /* name */
1623 FALSE, /* partial_inplace */
1624 0x00000fff, /* src_mask */
1625 0x00000fff, /* dst_mask */
1626 FALSE), /* pcrel_offset */
1627
1628 HOWTO (R_ARM_TLS_LE12, /* type */
1629 0, /* rightshift */
1630 2, /* size (0 = byte, 1 = short, 2 = long) */
1631 12, /* bitsize */
1632 FALSE, /* pc_relative */
1633 0, /* bitpos */
1634 complain_overflow_bitfield,/* complain_on_overflow */
1635 bfd_elf_generic_reloc, /* special_function */
1636 "R_ARM_TLS_LE12", /* name */
1637 FALSE, /* partial_inplace */
1638 0x00000fff, /* src_mask */
1639 0x00000fff, /* dst_mask */
1640 FALSE), /* pcrel_offset */
1641
1642 HOWTO (R_ARM_TLS_IE12GP, /* type */
1643 0, /* rightshift */
1644 2, /* size (0 = byte, 1 = short, 2 = long) */
1645 12, /* bitsize */
1646 FALSE, /* pc_relative */
1647 0, /* bitpos */
1648 complain_overflow_bitfield,/* complain_on_overflow */
1649 bfd_elf_generic_reloc, /* special_function */
1650 "R_ARM_TLS_IE12GP", /* name */
1651 FALSE, /* partial_inplace */
1652 0x00000fff, /* src_mask */
1653 0x00000fff, /* dst_mask */
1654 FALSE), /* pcrel_offset */
1655
1656 /* 112-127 private relocations. */
1657 EMPTY_HOWTO (112),
1658 EMPTY_HOWTO (113),
1659 EMPTY_HOWTO (114),
1660 EMPTY_HOWTO (115),
1661 EMPTY_HOWTO (116),
1662 EMPTY_HOWTO (117),
1663 EMPTY_HOWTO (118),
1664 EMPTY_HOWTO (119),
1665 EMPTY_HOWTO (120),
1666 EMPTY_HOWTO (121),
1667 EMPTY_HOWTO (122),
1668 EMPTY_HOWTO (123),
1669 EMPTY_HOWTO (124),
1670 EMPTY_HOWTO (125),
1671 EMPTY_HOWTO (126),
1672 EMPTY_HOWTO (127),
1673
1674 /* R_ARM_ME_TOO, obsolete. */
1675 EMPTY_HOWTO (128),
1676
1677 HOWTO (R_ARM_THM_TLS_DESCSEQ, /* type */
1678 0, /* rightshift */
1679 1, /* size (0 = byte, 1 = short, 2 = long) */
1680 0, /* bitsize */
1681 FALSE, /* pc_relative */
1682 0, /* bitpos */
1683 complain_overflow_bitfield,/* complain_on_overflow */
1684 bfd_elf_generic_reloc, /* special_function */
1685 "R_ARM_THM_TLS_DESCSEQ",/* name */
1686 FALSE, /* partial_inplace */
1687 0x00000000, /* src_mask */
1688 0x00000000, /* dst_mask */
1689 FALSE), /* pcrel_offset */
1690 };
1691
1692 /* 160 onwards: */
1693 static reloc_howto_type elf32_arm_howto_table_2[1] =
1694 {
1695 HOWTO (R_ARM_IRELATIVE, /* type */
1696 0, /* rightshift */
1697 2, /* size (0 = byte, 1 = short, 2 = long) */
1698 32, /* bitsize */
1699 FALSE, /* pc_relative */
1700 0, /* bitpos */
1701 complain_overflow_bitfield,/* complain_on_overflow */
1702 bfd_elf_generic_reloc, /* special_function */
1703 "R_ARM_IRELATIVE", /* name */
1704 TRUE, /* partial_inplace */
1705 0xffffffff, /* src_mask */
1706 0xffffffff, /* dst_mask */
1707 FALSE) /* pcrel_offset */
1708 };
1709
1710 /* 249-255 extended, currently unused, relocations: */
1711 static reloc_howto_type elf32_arm_howto_table_3[4] =
1712 {
1713 HOWTO (R_ARM_RREL32, /* type */
1714 0, /* rightshift */
1715 0, /* size (0 = byte, 1 = short, 2 = long) */
1716 0, /* bitsize */
1717 FALSE, /* pc_relative */
1718 0, /* bitpos */
1719 complain_overflow_dont,/* complain_on_overflow */
1720 bfd_elf_generic_reloc, /* special_function */
1721 "R_ARM_RREL32", /* name */
1722 FALSE, /* partial_inplace */
1723 0, /* src_mask */
1724 0, /* dst_mask */
1725 FALSE), /* pcrel_offset */
1726
1727 HOWTO (R_ARM_RABS32, /* type */
1728 0, /* rightshift */
1729 0, /* size (0 = byte, 1 = short, 2 = long) */
1730 0, /* bitsize */
1731 FALSE, /* pc_relative */
1732 0, /* bitpos */
1733 complain_overflow_dont,/* complain_on_overflow */
1734 bfd_elf_generic_reloc, /* special_function */
1735 "R_ARM_RABS32", /* name */
1736 FALSE, /* partial_inplace */
1737 0, /* src_mask */
1738 0, /* dst_mask */
1739 FALSE), /* pcrel_offset */
1740
1741 HOWTO (R_ARM_RPC24, /* type */
1742 0, /* rightshift */
1743 0, /* size (0 = byte, 1 = short, 2 = long) */
1744 0, /* bitsize */
1745 FALSE, /* pc_relative */
1746 0, /* bitpos */
1747 complain_overflow_dont,/* complain_on_overflow */
1748 bfd_elf_generic_reloc, /* special_function */
1749 "R_ARM_RPC24", /* name */
1750 FALSE, /* partial_inplace */
1751 0, /* src_mask */
1752 0, /* dst_mask */
1753 FALSE), /* pcrel_offset */
1754
1755 HOWTO (R_ARM_RBASE, /* type */
1756 0, /* rightshift */
1757 0, /* size (0 = byte, 1 = short, 2 = long) */
1758 0, /* bitsize */
1759 FALSE, /* pc_relative */
1760 0, /* bitpos */
1761 complain_overflow_dont,/* complain_on_overflow */
1762 bfd_elf_generic_reloc, /* special_function */
1763 "R_ARM_RBASE", /* name */
1764 FALSE, /* partial_inplace */
1765 0, /* src_mask */
1766 0, /* dst_mask */
1767 FALSE) /* pcrel_offset */
1768 };
1769
1770 static reloc_howto_type *
1771 elf32_arm_howto_from_type (unsigned int r_type)
1772 {
1773 if (r_type < ARRAY_SIZE (elf32_arm_howto_table_1))
1774 return &elf32_arm_howto_table_1[r_type];
1775
1776 if (r_type == R_ARM_IRELATIVE)
1777 return &elf32_arm_howto_table_2[r_type - R_ARM_IRELATIVE];
1778
1779 if (r_type >= R_ARM_RREL32
1780 && r_type < R_ARM_RREL32 + ARRAY_SIZE (elf32_arm_howto_table_3))
1781 return &elf32_arm_howto_table_3[r_type - R_ARM_RREL32];
1782
1783 return NULL;
1784 }
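/* A usage sketch for the lookup above (illustrative only):
     elf32_arm_howto_from_type (R_ARM_IRELATIVE)
   returns &elf32_arm_howto_table_2[0], while any type outside the three
   ranges handled above returns NULL, so callers must be prepared to
   handle a null HOWTO.  */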
1785
1786 static void
1787 elf32_arm_info_to_howto (bfd * abfd ATTRIBUTE_UNUSED, arelent * bfd_reloc,
1788 Elf_Internal_Rela * elf_reloc)
1789 {
1790 unsigned int r_type;
1791
1792 r_type = ELF32_R_TYPE (elf_reloc->r_info);
1793 bfd_reloc->howto = elf32_arm_howto_from_type (r_type);
1794 }
1795
1796 struct elf32_arm_reloc_map
1797 {
1798 bfd_reloc_code_real_type bfd_reloc_val;
1799 unsigned char elf_reloc_val;
1800 };
1801
1802 /* All entries in this list must also be present in the elf32_arm_howto_table_* arrays. */
1803 static const struct elf32_arm_reloc_map elf32_arm_reloc_map[] =
1804 {
1805 {BFD_RELOC_NONE, R_ARM_NONE},
1806 {BFD_RELOC_ARM_PCREL_BRANCH, R_ARM_PC24},
1807 {BFD_RELOC_ARM_PCREL_CALL, R_ARM_CALL},
1808 {BFD_RELOC_ARM_PCREL_JUMP, R_ARM_JUMP24},
1809 {BFD_RELOC_ARM_PCREL_BLX, R_ARM_XPC25},
1810 {BFD_RELOC_THUMB_PCREL_BLX, R_ARM_THM_XPC22},
1811 {BFD_RELOC_32, R_ARM_ABS32},
1812 {BFD_RELOC_32_PCREL, R_ARM_REL32},
1813 {BFD_RELOC_8, R_ARM_ABS8},
1814 {BFD_RELOC_16, R_ARM_ABS16},
1815 {BFD_RELOC_ARM_OFFSET_IMM, R_ARM_ABS12},
1816 {BFD_RELOC_ARM_THUMB_OFFSET, R_ARM_THM_ABS5},
1817 {BFD_RELOC_THUMB_PCREL_BRANCH25, R_ARM_THM_JUMP24},
1818 {BFD_RELOC_THUMB_PCREL_BRANCH23, R_ARM_THM_CALL},
1819 {BFD_RELOC_THUMB_PCREL_BRANCH12, R_ARM_THM_JUMP11},
1820 {BFD_RELOC_THUMB_PCREL_BRANCH20, R_ARM_THM_JUMP19},
1821 {BFD_RELOC_THUMB_PCREL_BRANCH9, R_ARM_THM_JUMP8},
1822 {BFD_RELOC_THUMB_PCREL_BRANCH7, R_ARM_THM_JUMP6},
1823 {BFD_RELOC_ARM_GLOB_DAT, R_ARM_GLOB_DAT},
1824 {BFD_RELOC_ARM_JUMP_SLOT, R_ARM_JUMP_SLOT},
1825 {BFD_RELOC_ARM_RELATIVE, R_ARM_RELATIVE},
1826 {BFD_RELOC_ARM_GOTOFF, R_ARM_GOTOFF32},
1827 {BFD_RELOC_ARM_GOTPC, R_ARM_GOTPC},
1828 {BFD_RELOC_ARM_GOT_PREL, R_ARM_GOT_PREL},
1829 {BFD_RELOC_ARM_GOT32, R_ARM_GOT32},
1830 {BFD_RELOC_ARM_PLT32, R_ARM_PLT32},
1831 {BFD_RELOC_ARM_TARGET1, R_ARM_TARGET1},
1832 {BFD_RELOC_ARM_ROSEGREL32, R_ARM_ROSEGREL32},
1833 {BFD_RELOC_ARM_SBREL32, R_ARM_SBREL32},
1834 {BFD_RELOC_ARM_PREL31, R_ARM_PREL31},
1835 {BFD_RELOC_ARM_TARGET2, R_ARM_TARGET2},
1836 {BFD_RELOC_ARM_PLT32, R_ARM_PLT32},
1837 {BFD_RELOC_ARM_TLS_GOTDESC, R_ARM_TLS_GOTDESC},
1838 {BFD_RELOC_ARM_TLS_CALL, R_ARM_TLS_CALL},
1839 {BFD_RELOC_ARM_THM_TLS_CALL, R_ARM_THM_TLS_CALL},
1840 {BFD_RELOC_ARM_TLS_DESCSEQ, R_ARM_TLS_DESCSEQ},
1841 {BFD_RELOC_ARM_THM_TLS_DESCSEQ, R_ARM_THM_TLS_DESCSEQ},
1842 {BFD_RELOC_ARM_TLS_DESC, R_ARM_TLS_DESC},
1843 {BFD_RELOC_ARM_TLS_GD32, R_ARM_TLS_GD32},
1844 {BFD_RELOC_ARM_TLS_LDO32, R_ARM_TLS_LDO32},
1845 {BFD_RELOC_ARM_TLS_LDM32, R_ARM_TLS_LDM32},
1846 {BFD_RELOC_ARM_TLS_DTPMOD32, R_ARM_TLS_DTPMOD32},
1847 {BFD_RELOC_ARM_TLS_DTPOFF32, R_ARM_TLS_DTPOFF32},
1848 {BFD_RELOC_ARM_TLS_TPOFF32, R_ARM_TLS_TPOFF32},
1849 {BFD_RELOC_ARM_TLS_IE32, R_ARM_TLS_IE32},
1850 {BFD_RELOC_ARM_TLS_LE32, R_ARM_TLS_LE32},
1851 {BFD_RELOC_ARM_IRELATIVE, R_ARM_IRELATIVE},
1852 {BFD_RELOC_VTABLE_INHERIT, R_ARM_GNU_VTINHERIT},
1853 {BFD_RELOC_VTABLE_ENTRY, R_ARM_GNU_VTENTRY},
1854 {BFD_RELOC_ARM_MOVW, R_ARM_MOVW_ABS_NC},
1855 {BFD_RELOC_ARM_MOVT, R_ARM_MOVT_ABS},
1856 {BFD_RELOC_ARM_MOVW_PCREL, R_ARM_MOVW_PREL_NC},
1857 {BFD_RELOC_ARM_MOVT_PCREL, R_ARM_MOVT_PREL},
1858 {BFD_RELOC_ARM_THUMB_MOVW, R_ARM_THM_MOVW_ABS_NC},
1859 {BFD_RELOC_ARM_THUMB_MOVT, R_ARM_THM_MOVT_ABS},
1860 {BFD_RELOC_ARM_THUMB_MOVW_PCREL, R_ARM_THM_MOVW_PREL_NC},
1861 {BFD_RELOC_ARM_THUMB_MOVT_PCREL, R_ARM_THM_MOVT_PREL},
1862 {BFD_RELOC_ARM_ALU_PC_G0_NC, R_ARM_ALU_PC_G0_NC},
1863 {BFD_RELOC_ARM_ALU_PC_G0, R_ARM_ALU_PC_G0},
1864 {BFD_RELOC_ARM_ALU_PC_G1_NC, R_ARM_ALU_PC_G1_NC},
1865 {BFD_RELOC_ARM_ALU_PC_G1, R_ARM_ALU_PC_G1},
1866 {BFD_RELOC_ARM_ALU_PC_G2, R_ARM_ALU_PC_G2},
1867 {BFD_RELOC_ARM_LDR_PC_G0, R_ARM_LDR_PC_G0},
1868 {BFD_RELOC_ARM_LDR_PC_G1, R_ARM_LDR_PC_G1},
1869 {BFD_RELOC_ARM_LDR_PC_G2, R_ARM_LDR_PC_G2},
1870 {BFD_RELOC_ARM_LDRS_PC_G0, R_ARM_LDRS_PC_G0},
1871 {BFD_RELOC_ARM_LDRS_PC_G1, R_ARM_LDRS_PC_G1},
1872 {BFD_RELOC_ARM_LDRS_PC_G2, R_ARM_LDRS_PC_G2},
1873 {BFD_RELOC_ARM_LDC_PC_G0, R_ARM_LDC_PC_G0},
1874 {BFD_RELOC_ARM_LDC_PC_G1, R_ARM_LDC_PC_G1},
1875 {BFD_RELOC_ARM_LDC_PC_G2, R_ARM_LDC_PC_G2},
1876 {BFD_RELOC_ARM_ALU_SB_G0_NC, R_ARM_ALU_SB_G0_NC},
1877 {BFD_RELOC_ARM_ALU_SB_G0, R_ARM_ALU_SB_G0},
1878 {BFD_RELOC_ARM_ALU_SB_G1_NC, R_ARM_ALU_SB_G1_NC},
1879 {BFD_RELOC_ARM_ALU_SB_G1, R_ARM_ALU_SB_G1},
1880 {BFD_RELOC_ARM_ALU_SB_G2, R_ARM_ALU_SB_G2},
1881 {BFD_RELOC_ARM_LDR_SB_G0, R_ARM_LDR_SB_G0},
1882 {BFD_RELOC_ARM_LDR_SB_G1, R_ARM_LDR_SB_G1},
1883 {BFD_RELOC_ARM_LDR_SB_G2, R_ARM_LDR_SB_G2},
1884 {BFD_RELOC_ARM_LDRS_SB_G0, R_ARM_LDRS_SB_G0},
1885 {BFD_RELOC_ARM_LDRS_SB_G1, R_ARM_LDRS_SB_G1},
1886 {BFD_RELOC_ARM_LDRS_SB_G2, R_ARM_LDRS_SB_G2},
1887 {BFD_RELOC_ARM_LDC_SB_G0, R_ARM_LDC_SB_G0},
1888 {BFD_RELOC_ARM_LDC_SB_G1, R_ARM_LDC_SB_G1},
1889 {BFD_RELOC_ARM_LDC_SB_G2, R_ARM_LDC_SB_G2},
1890 {BFD_RELOC_ARM_V4BX, R_ARM_V4BX}
1891 };
1892
1893 static reloc_howto_type *
1894 elf32_arm_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1895 bfd_reloc_code_real_type code)
1896 {
1897 unsigned int i;
1898
1899 for (i = 0; i < ARRAY_SIZE (elf32_arm_reloc_map); i ++)
1900 if (elf32_arm_reloc_map[i].bfd_reloc_val == code)
1901 return elf32_arm_howto_from_type (elf32_arm_reloc_map[i].elf_reloc_val);
1902
1903 return NULL;
1904 }
1905
1906 static reloc_howto_type *
1907 elf32_arm_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1908 const char *r_name)
1909 {
1910 unsigned int i;
1911
1912 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_1); i++)
1913 if (elf32_arm_howto_table_1[i].name != NULL
1914 && strcasecmp (elf32_arm_howto_table_1[i].name, r_name) == 0)
1915 return &elf32_arm_howto_table_1[i];
1916
1917 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_2); i++)
1918 if (elf32_arm_howto_table_2[i].name != NULL
1919 && strcasecmp (elf32_arm_howto_table_2[i].name, r_name) == 0)
1920 return &elf32_arm_howto_table_2[i];
1921
1922 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_3); i++)
1923 if (elf32_arm_howto_table_3[i].name != NULL
1924 && strcasecmp (elf32_arm_howto_table_3[i].name, r_name) == 0)
1925 return &elf32_arm_howto_table_3[i];
1926
1927 return NULL;
1928 }
1929
1930 /* Support for core dump NOTE sections. */
1931
1932 static bfd_boolean
1933 elf32_arm_nabi_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
1934 {
1935 int offset;
1936 size_t size;
1937
1938 switch (note->descsz)
1939 {
1940 default:
1941 return FALSE;
1942
1943 case 148: /* Linux/ARM 32-bit. */
1944 /* pr_cursig */
1945 elf_tdata (abfd)->core_signal = bfd_get_16 (abfd, note->descdata + 12);
1946
1947 /* pr_pid */
1948 elf_tdata (abfd)->core_lwpid = bfd_get_32 (abfd, note->descdata + 24);
1949
1950 /* pr_reg */
1951 offset = 72;
1952 size = 72;
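/* The 72-byte register block presumably corresponds to the 18 Linux/ARM
   general registers (r0-r15, cpsr and the original r0) of 4 bytes each,
   located 72 bytes into the elf_prstatus note.  */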
1953
1954 break;
1955 }
1956
1957 /* Make a ".reg/999" section. */
1958 return _bfd_elfcore_make_pseudosection (abfd, ".reg",
1959 size, note->descpos + offset);
1960 }
1961
1962 static bfd_boolean
1963 elf32_arm_nabi_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
1964 {
1965 switch (note->descsz)
1966 {
1967 default:
1968 return FALSE;
1969
1970 case 124: /* Linux/ARM elf_prpsinfo. */
1971 elf_tdata (abfd)->core_pid
1972 = bfd_get_32 (abfd, note->descdata + 12);
1973 elf_tdata (abfd)->core_program
1974 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
1975 elf_tdata (abfd)->core_command
1976 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
1977 }
1978
1979 /* Note that some implementations (at least one, anyway) tack a
1980 spurious space onto the end of the args, so strip it off if it
1981 exists. */
1982 {
1983 char *command = elf_tdata (abfd)->core_command;
1984 int n = strlen (command);
1985
1986 if (0 < n && command[n - 1] == ' ')
1987 command[n - 1] = '\0';
1988 }
1989
1990 return TRUE;
1991 }
1992
1993 static char *
1994 elf32_arm_nabi_write_core_note (bfd *abfd, char *buf, int *bufsiz,
1995 int note_type, ...)
1996 {
1997 switch (note_type)
1998 {
1999 default:
2000 return NULL;
2001
2002 case NT_PRPSINFO:
2003 {
2004 char data[124];
2005 va_list ap;
2006
2007 va_start (ap, note_type);
2008 memset (data, 0, sizeof (data));
2009 strncpy (data + 28, va_arg (ap, const char *), 16);
2010 strncpy (data + 44, va_arg (ap, const char *), 80);
2011 va_end (ap);
2012
2013 return elfcore_write_note (abfd, buf, bufsiz,
2014 "CORE", note_type, data, sizeof (data));
2015 }
2016
2017 case NT_PRSTATUS:
2018 {
2019 char data[148];
2020 va_list ap;
2021 long pid;
2022 int cursig;
2023 const void *greg;
2024
2025 va_start (ap, note_type);
2026 memset (data, 0, sizeof (data));
2027 pid = va_arg (ap, long);
2028 bfd_put_32 (abfd, pid, data + 24);
2029 cursig = va_arg (ap, int);
2030 bfd_put_16 (abfd, cursig, data + 12);
2031 greg = va_arg (ap, const void *);
2032 memcpy (data + 72, greg, 72);
2033 va_end (ap);
2034
2035 return elfcore_write_note (abfd, buf, bufsiz,
2036 "CORE", note_type, data, sizeof (data));
2037 }
2038 }
2039 }
2040
2041 #define TARGET_LITTLE_SYM bfd_elf32_littlearm_vec
2042 #define TARGET_LITTLE_NAME "elf32-littlearm"
2043 #define TARGET_BIG_SYM bfd_elf32_bigarm_vec
2044 #define TARGET_BIG_NAME "elf32-bigarm"
2045
2046 #define elf_backend_grok_prstatus elf32_arm_nabi_grok_prstatus
2047 #define elf_backend_grok_psinfo elf32_arm_nabi_grok_psinfo
2048 #define elf_backend_write_core_note elf32_arm_nabi_write_core_note
2049
2050 typedef unsigned long int insn32;
2051 typedef unsigned short int insn16;
2052
2053 /* In lieu of proper flags, assume all EABIv4 or later objects are
2054 interworkable. */
2055 #define INTERWORK_FLAG(abfd) \
2056 (EF_ARM_EABI_VERSION (elf_elfheader (abfd)->e_flags) >= EF_ARM_EABI_VER4 \
2057 || (elf_elfheader (abfd)->e_flags & EF_ARM_INTERWORK) \
2058 || ((abfd)->flags & BFD_LINKER_CREATED))
2059
2060 /* The linker script knows the section names for placement.
2061 The entry_names are used to do simple name mangling on the stubs.
2062 Given a function name and its type, the stub can be found. The
2063 name can be changed. The only requirement is that the %s be present. */
2064 #define THUMB2ARM_GLUE_SECTION_NAME ".glue_7t"
2065 #define THUMB2ARM_GLUE_ENTRY_NAME "__%s_from_thumb"
2066
2067 #define ARM2THUMB_GLUE_SECTION_NAME ".glue_7"
2068 #define ARM2THUMB_GLUE_ENTRY_NAME "__%s_from_arm"
2069
2070 #define VFP11_ERRATUM_VENEER_SECTION_NAME ".vfp11_veneer"
2071 #define VFP11_ERRATUM_VENEER_ENTRY_NAME "__vfp11_veneer_%x"
2072
2073 #define ARM_BX_GLUE_SECTION_NAME ".v4_bx"
2074 #define ARM_BX_GLUE_ENTRY_NAME "__bx_r%d"
2075
2076 #define STUB_ENTRY_NAME "__%s_veneer"
2077
2078 /* The name of the dynamic interpreter. This is put in the .interp
2079 section. */
2080 #define ELF_DYNAMIC_INTERPRETER "/usr/lib/ld.so.1"
2081
2082 static const unsigned long tls_trampoline [] =
2083 {
2084 0xe08e0000, /* add r0, lr, r0 */
2085 0xe5901004, /* ldr r1, [r0,#4] */
2086 0xe12fff11, /* bx r1 */
2087 };
2088
2089 static const unsigned long dl_tlsdesc_lazy_trampoline [] =
2090 {
2091 0xe52d2004, /* push {r2} */
2092 0xe59f200c, /* ldr r2, [pc, #3f - . - 8] */
2093 0xe59f100c, /* ldr r1, [pc, #4f - . - 8] */
2094 0xe79f2002, /* 1: ldr r2, [pc, r2] */
2095 0xe081100f, /* 2: add r1, pc */
2096 0xe12fff12, /* bx r2 */
2097 0x00000014, /* 3: .word _GLOBAL_OFFSET_TABLE_ - 1b - 8
2098 + dl_tlsdesc_lazy_resolver(GOT) */
2099 0x00000018, /* 4: .word _GLOBAL_OFFSET_TABLE_ - 2b - 8 */
2100 };
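/* A rough sketch of the lazy trampoline above: after saving the caller's
   r2 on the stack, the two trailing data words (GOT-relative offsets
   patched at link time) are used to load the lazy TLS descriptor
   resolver's address from the GOT PC-relatively and to compute the GOT
   address itself in r1, before branching to the resolver with bx r2.  */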
2101
2102 #ifdef FOUR_WORD_PLT
2103
2104 /* The first entry in a procedure linkage table looks like
2105 this. It is set up so that any shared library function that is
2106 called before the relocation has been set up calls the dynamic
2107 linker first. */
2108 static const bfd_vma elf32_arm_plt0_entry [] =
2109 {
2110 0xe52de004, /* str lr, [sp, #-4]! */
2111 0xe59fe010, /* ldr lr, [pc, #16] */
2112 0xe08fe00e, /* add lr, pc, lr */
2113 0xe5bef008, /* ldr pc, [lr, #8]! */
2114 };
2115
2116 /* Subsequent entries in a procedure linkage table look like
2117 this. */
2118 static const bfd_vma elf32_arm_plt_entry [] =
2119 {
2120 0xe28fc600, /* add ip, pc, #NN */
2121 0xe28cca00, /* add ip, ip, #NN */
2122 0xe5bcf000, /* ldr pc, [ip, #NN]! */
2123 0x00000000, /* unused */
2124 };
2125
2126 #else
2127
2128 /* The first entry in a procedure linkage table looks like
2129 this. It is set up so that any shared library function that is
2130 called before the relocation has been set up calls the dynamic
2131 linker first. */
2132 static const bfd_vma elf32_arm_plt0_entry [] =
2133 {
2134 0xe52de004, /* str lr, [sp, #-4]! */
2135 0xe59fe004, /* ldr lr, [pc, #4] */
2136 0xe08fe00e, /* add lr, pc, lr */
2137 0xe5bef008, /* ldr pc, [lr, #8]! */
2138 0x00000000, /* &GOT[0] - . */
2139 };
2140
2141 /* Subsequent entries in a procedure linkage table look like
2142 this. */
2143 static const bfd_vma elf32_arm_plt_entry [] =
2144 {
2145 0xe28fc600, /* add ip, pc, #0xNN00000 */
2146 0xe28cca00, /* add ip, ip, #0xNN000 */
2147 0xe5bcf000, /* ldr pc, [ip, #0xNNN]! */
2148 };
2149
2150 #endif
2151
2152 /* The format of the first entry in the procedure linkage table
2153 for a VxWorks executable. */
2154 static const bfd_vma elf32_arm_vxworks_exec_plt0_entry[] =
2155 {
2156 0xe52dc008, /* str ip,[sp,#-8]! */
2157 0xe59fc000, /* ldr ip,[pc] */
2158 0xe59cf008, /* ldr pc,[ip,#8] */
2159 0x00000000, /* .long _GLOBAL_OFFSET_TABLE_ */
2160 };
2161
2162 /* The format of subsequent entries in a VxWorks executable. */
2163 static const bfd_vma elf32_arm_vxworks_exec_plt_entry[] =
2164 {
2165 0xe59fc000, /* ldr ip,[pc] */
2166 0xe59cf000, /* ldr pc,[ip] */
2167 0x00000000, /* .long @got */
2168 0xe59fc000, /* ldr ip,[pc] */
2169 0xea000000, /* b _PLT */
2170 0x00000000, /* .long @pltindex*sizeof(Elf32_Rela) */
2171 };
2172
2173 /* The format of entries in a VxWorks shared library. */
2174 static const bfd_vma elf32_arm_vxworks_shared_plt_entry[] =
2175 {
2176 0xe59fc000, /* ldr ip,[pc] */
2177 0xe79cf009, /* ldr pc,[ip,r9] */
2178 0x00000000, /* .long @got */
2179 0xe59fc000, /* ldr ip,[pc] */
2180 0xe599f008, /* ldr pc,[r9,#8] */
2181 0x00000000, /* .long @pltindex*sizeof(Elf32_Rela) */
2182 };
2183
2184 /* An initial stub used if the PLT entry is referenced from Thumb code. */
2185 #define PLT_THUMB_STUB_SIZE 4
2186 static const bfd_vma elf32_arm_plt_thumb_stub [] =
2187 {
2188 0x4778, /* bx pc */
2189 0x46c0 /* nop */
2190 };
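/* A note on the stub above: in Thumb state the PC reads as the address of
   the current instruction plus 4, so "bx pc" targets the word immediately
   after this 4-byte stub -- the ARM PLT entry that follows -- and, that
   address having bit zero clear, also switches to ARM state.  The nop
   merely pads the stub to a word boundary.  */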
2191
2192 /* The entries in a PLT when using a DLL-based target with multiple
2193 address spaces. */
2194 static const bfd_vma elf32_arm_symbian_plt_entry [] =
2195 {
2196 0xe51ff004, /* ldr pc, [pc, #-4] */
2197 0x00000000, /* dcd R_ARM_GLOB_DAT(X) */
2198 };
2199
2200 /* The first entry in a procedure linkage table looks like
2201 this. It is set up so that any shared library function that is
2202 called before the relocation has been set up calls the dynamic
2203 linker first. */
2204 static const bfd_vma elf32_arm_nacl_plt0_entry [] =
2205 {
2206 /* First bundle: */
2207 0xe300c000, /* movw ip, #:lower16:&GOT[2]-.+8 */
2208 0xe340c000, /* movt ip, #:upper16:&GOT[2]-.+8 */
2209 0xe08cc00f, /* add ip, ip, pc */
2210 0xe52dc008, /* str ip, [sp, #-8]! */
2211 /* Second bundle: */
2212 0xe7dfcf1f, /* bfc ip, #30, #2 */
2213 0xe59cc000, /* ldr ip, [ip] */
2214 0xe3ccc13f, /* bic ip, ip, #0xc000000f */
2215 0xe12fff1c, /* bx ip */
2216 /* Third bundle: */
2217 0xe320f000, /* nop */
2218 0xe320f000, /* nop */
2219 0xe320f000, /* nop */
2220 /* .Lplt_tail: */
2221 0xe50dc004, /* str ip, [sp, #-4] */
2222 /* Fourth bundle: */
2223 0xe7dfcf1f, /* bfc ip, #30, #2 */
2224 0xe59cc000, /* ldr ip, [ip] */
2225 0xe3ccc13f, /* bic ip, ip, #0xc000000f */
2226 0xe12fff1c, /* bx ip */
2227 };
2228 #define ARM_NACL_PLT_TAIL_OFFSET (11 * 4)
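/* The offset above is just the byte position of .Lplt_tail within
   elf32_arm_nacl_plt0_entry: the first three bundles occupy eleven
   4-byte words, so the tail code starts 11 * 4 bytes into the entry.  */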
2229
2230 /* Subsequent entries in a procedure linkage table look like this. */
2231 static const bfd_vma elf32_arm_nacl_plt_entry [] =
2232 {
2233 0xe300c000, /* movw ip, #:lower16:&GOT[n]-.+8 */
2234 0xe340c000, /* movt ip, #:upper16:&GOT[n]-.+8 */
2235 0xe08cc00f, /* add ip, ip, pc */
2236 0xea000000, /* b .Lplt_tail */
2237 };
2238
2239 #define ARM_MAX_FWD_BRANCH_OFFSET ((((1 << 23) - 1) << 2) + 8)
2240 #define ARM_MAX_BWD_BRANCH_OFFSET ((-((1 << 23) << 2)) + 8)
2241 #define THM_MAX_FWD_BRANCH_OFFSET ((1 << 22) - 2 + 4)
2242 #define THM_MAX_BWD_BRANCH_OFFSET (-(1 << 22) + 4)
2243 #define THM2_MAX_FWD_BRANCH_OFFSET (((1 << 24) - 2) + 4)
2244 #define THM2_MAX_BWD_BRANCH_OFFSET (-(1 << 24) + 4)
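/* A worked example of the limits above: an ARM B/BL instruction encodes a
   signed 24-bit immediate that is shifted left by two and applied relative
   to the instruction address plus 8, so the furthest forward branch is
   ((1 << 23) - 1) * 4 + 8 bytes away and the furthest backward branch is
   (1 << 23) * 4 - 8 bytes away.  The Thumb and Thumb-2 limits follow the
   same pattern, with 22-bit and 24-bit signed halfword offsets applied
   relative to the instruction address plus 4.  */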
2245
2246 enum stub_insn_type
2247 {
2248 THUMB16_TYPE = 1,
2249 THUMB32_TYPE,
2250 ARM_TYPE,
2251 DATA_TYPE
2252 };
2253
2254 #define THUMB16_INSN(X) {(X), THUMB16_TYPE, R_ARM_NONE, 0}
2255 /* A bit of a hack. A Thumb conditional branch, in which the proper condition
2256 is inserted in arm_build_one_stub(). */
2257 #define THUMB16_BCOND_INSN(X) {(X), THUMB16_TYPE, R_ARM_NONE, 1}
2258 #define THUMB32_INSN(X) {(X), THUMB32_TYPE, R_ARM_NONE, 0}
2259 #define THUMB32_B_INSN(X, Z) {(X), THUMB32_TYPE, R_ARM_THM_JUMP24, (Z)}
2260 #define ARM_INSN(X) {(X), ARM_TYPE, R_ARM_NONE, 0}
2261 #define ARM_REL_INSN(X, Z) {(X), ARM_TYPE, R_ARM_JUMP24, (Z)}
2262 #define DATA_WORD(X,Y,Z) {(X), DATA_TYPE, (Y), (Z)}
2263
2264 typedef struct
2265 {
2266 bfd_vma data;
2267 enum stub_insn_type type;
2268 unsigned int r_type;
2269 int reloc_addend;
2270 } insn_sequence;
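/* To illustrate how the macros above read: ARM_REL_INSN (0xea000000, -8)
   in a template below describes a 32-bit ARM unconditional branch that
   needs an R_ARM_JUMP24 relocation with an addend of -8, while
   DATA_WORD (0, R_ARM_ABS32, 0) describes a literal word to be filled in
   with the absolute address of the stub target.  */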
2271
2272 /* Arm/Thumb -> Arm/Thumb long branch stub. On V5T and above, use blx
2273 to reach the stub if necessary. */
2274 static const insn_sequence elf32_arm_stub_long_branch_any_any[] =
2275 {
2276 ARM_INSN (0xe51ff004), /* ldr pc, [pc, #-4] */
2277 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2278 };
2279
2280 /* V4T Arm -> Thumb long branch stub. Used on V4T where blx is not
2281 available. */
2282 static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb[] =
2283 {
2284 ARM_INSN (0xe59fc000), /* ldr ip, [pc, #0] */
2285 ARM_INSN (0xe12fff1c), /* bx ip */
2286 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2287 };
2288
2289 /* Thumb -> Thumb long branch stub. Used on M-profile architectures. */
2290 static const insn_sequence elf32_arm_stub_long_branch_thumb_only[] =
2291 {
2292 THUMB16_INSN (0xb401), /* push {r0} */
2293 THUMB16_INSN (0x4802), /* ldr r0, [pc, #8] */
2294 THUMB16_INSN (0x4684), /* mov ip, r0 */
2295 THUMB16_INSN (0xbc01), /* pop {r0} */
2296 THUMB16_INSN (0x4760), /* bx ip */
2297 THUMB16_INSN (0xbf00), /* nop */
2298 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2299 };
2300
2301 /* V4T Thumb -> Thumb long branch stub. Using the stack is not
2302 allowed. */
2303 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb[] =
2304 {
2305 THUMB16_INSN (0x4778), /* bx pc */
2306 THUMB16_INSN (0x46c0), /* nop */
2307 ARM_INSN (0xe59fc000), /* ldr ip, [pc, #0] */
2308 ARM_INSN (0xe12fff1c), /* bx ip */
2309 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2310 };
2311
2312 /* V4T Thumb -> ARM long branch stub. Used on V4T where blx is not
2313 available. */
2314 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm[] =
2315 {
2316 THUMB16_INSN (0x4778), /* bx pc */
2317 THUMB16_INSN (0x46c0), /* nop */
2318 ARM_INSN (0xe51ff004), /* ldr pc, [pc, #-4] */
2319 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2320 };
2321
2322 /* V4T Thumb -> ARM short branch stub. Shorter variant of the above
2323 one, when the destination is close enough. */
2324 static const insn_sequence elf32_arm_stub_short_branch_v4t_thumb_arm[] =
2325 {
2326 THUMB16_INSN (0x4778), /* bx pc */
2327 THUMB16_INSN (0x46c0), /* nop */
2328 ARM_REL_INSN (0xea000000, -8), /* b (X-8) */
2329 };
2330
2331 /* ARM/Thumb -> ARM long branch stub, PIC. On V5T and above, use
2332 blx to reach the stub if necessary. */
2333 static const insn_sequence elf32_arm_stub_long_branch_any_arm_pic[] =
2334 {
2335 ARM_INSN (0xe59fc000), /* ldr ip, [pc] */
2336 ARM_INSN (0xe08ff00c), /* add pc, pc, ip */
2337 DATA_WORD (0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */
2338 };
2339
2340 /* ARM/Thumb -> Thumb long branch stub, PIC. On V5T and above, use
2341 blx to reach the stub if necessary. We cannot add into pc;
2342 it is not guaranteed to mode switch (the behaviour differs between
2343 ARMv6 and ARMv7). */
2344 static const insn_sequence elf32_arm_stub_long_branch_any_thumb_pic[] =
2345 {
2346 ARM_INSN (0xe59fc004), /* ldr ip, [pc, #4] */
2347 ARM_INSN (0xe08fc00c), /* add ip, pc, ip */
2348 ARM_INSN (0xe12fff1c), /* bx ip */
2349 DATA_WORD (0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2350 };
2351
2352 /* V4T ARM -> Thumb long branch stub, PIC. */
2353 static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb_pic[] =
2354 {
2355 ARM_INSN (0xe59fc004), /* ldr ip, [pc, #4] */
2356 ARM_INSN (0xe08fc00c), /* add ip, pc, ip */
2357 ARM_INSN (0xe12fff1c), /* bx ip */
2358 DATA_WORD (0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2359 };
2360
2361 /* V4T Thumb -> ARM long branch stub, PIC. */
2362 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm_pic[] =
2363 {
2364 THUMB16_INSN (0x4778), /* bx pc */
2365 THUMB16_INSN (0x46c0), /* nop */
2366 ARM_INSN (0xe59fc000), /* ldr ip, [pc, #0] */
2367 ARM_INSN (0xe08cf00f), /* add pc, ip, pc */
2368 DATA_WORD (0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */
2369 };
2370
2371 /* Thumb -> Thumb long branch stub, PIC. Used on M-profile
2372 architectures. */
2373 static const insn_sequence elf32_arm_stub_long_branch_thumb_only_pic[] =
2374 {
2375 THUMB16_INSN (0xb401), /* push {r0} */
2376 THUMB16_INSN (0x4802), /* ldr r0, [pc, #8] */
2377 THUMB16_INSN (0x46fc), /* mov ip, pc */
2378 THUMB16_INSN (0x4484), /* add ip, r0 */
2379 THUMB16_INSN (0xbc01), /* pop {r0} */
2380 THUMB16_INSN (0x4760), /* bx ip */
2381 DATA_WORD (0, R_ARM_REL32, 4), /* dcd R_ARM_REL32(X+4) */
2382 };
2383
2384 /* V4T Thumb -> Thumb long branch stub, PIC. Using the stack is not
2385 allowed. */
2386 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb_pic[] =
2387 {
2388 THUMB16_INSN (0x4778), /* bx pc */
2389 THUMB16_INSN (0x46c0), /* nop */
2390 ARM_INSN (0xe59fc004), /* ldr ip, [pc, #4] */
2391 ARM_INSN (0xe08fc00c), /* add ip, pc, ip */
2392 ARM_INSN (0xe12fff1c), /* bx ip */
2393 DATA_WORD (0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2394 };
2395
2396 /* Thumb2/ARM -> TLS trampoline. Lowest common denominator, which is a
2397 long PIC stub. We can use r1 as a scratch -- and cannot use ip. */
2398 static const insn_sequence elf32_arm_stub_long_branch_any_tls_pic[] =
2399 {
2400 ARM_INSN (0xe59f1000), /* ldr r1, [pc] */
2401 ARM_INSN (0xe08ff001), /* add pc, pc, r1 */
2402 DATA_WORD (0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */
2403 };
2404
2405 /* V4T Thumb -> TLS trampoline. Lowest common denominator, which is a
2406 long PIC stub. We can use r1 as a scratch -- and cannot use ip. */
2407 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_tls_pic[] =
2408 {
2409 THUMB16_INSN (0x4778), /* bx pc */
2410 THUMB16_INSN (0x46c0), /* nop */
2411 ARM_INSN (0xe59f1000), /* ldr r1, [pc, #0] */
2412 ARM_INSN (0xe081f00f), /* add pc, r1, pc */
2413 DATA_WORD (0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */
2414 };
2415
2416 /* Cortex-A8 erratum-workaround stubs. */
2417
2418 /* Stub used for conditional branches (which may be beyond +/-1MB away, so we
2419 can't use a conditional branch to reach this stub). */
2420
2421 static const insn_sequence elf32_arm_stub_a8_veneer_b_cond[] =
2422 {
2423 THUMB16_BCOND_INSN (0xd001), /* b<cond>.n true. */
2424 THUMB32_B_INSN (0xf000b800, -4), /* b.w insn_after_original_branch. */
2425 THUMB32_B_INSN (0xf000b800, -4) /* true: b.w original_branch_dest. */
2426 };
2427
2428 /* Stub used for b.w and bl.w instructions. */
2429
2430 static const insn_sequence elf32_arm_stub_a8_veneer_b[] =
2431 {
2432 THUMB32_B_INSN (0xf000b800, -4) /* b.w original_branch_dest. */
2433 };
2434
2435 static const insn_sequence elf32_arm_stub_a8_veneer_bl[] =
2436 {
2437 THUMB32_B_INSN (0xf000b800, -4) /* b.w original_branch_dest. */
2438 };
2439
2440 /* Stub used for Thumb-2 blx.w instructions. We modified the original blx.w
2441 instruction (which switches to ARM mode) to point to this stub. Jump to the
2442 real destination using an ARM-mode branch. */
2443
2444 static const insn_sequence elf32_arm_stub_a8_veneer_blx[] =
2445 {
2446 ARM_REL_INSN (0xea000000, -8) /* b original_branch_dest. */
2447 };
2448
2449 /* For each section group there can be a specially created linker section
2450 to hold the stubs for that group. The name of the stub section is based
2451 upon the name of another section within that group with the suffix below
2452 applied.
2453
2454 PR 13049: STUB_SUFFIX used to be ".stub", but this allowed the user to
2455 create what appeared to be a linker stub section when it actually
2456 contained user code/data. For example, consider this fragment:
2457
2458 const char * stubborn_problems[] = { "np" };
2459
2460 If this is compiled with "-fPIC -fdata-sections" then gcc produces a
2461 section called:
2462
2463 .data.rel.local.stubborn_problems
2464
2465 This then causes problems in elf32_arm_build_stubs() as it triggers:
2466
2467 // Ignore non-stub sections.
2468 if (!strstr (stub_sec->name, STUB_SUFFIX))
2469 continue;
2470
2471 And so the section would be ignored instead of being processed. Hence
2472 the change in definition of STUB_SUFFIX to a name that cannot be a valid
2473 C identifier. */
2474 #define STUB_SUFFIX ".__stub"
2475
2476 /* One entry per long/short branch stub defined above. */
2477 #define DEF_STUBS \
2478 DEF_STUB(long_branch_any_any) \
2479 DEF_STUB(long_branch_v4t_arm_thumb) \
2480 DEF_STUB(long_branch_thumb_only) \
2481 DEF_STUB(long_branch_v4t_thumb_thumb) \
2482 DEF_STUB(long_branch_v4t_thumb_arm) \
2483 DEF_STUB(short_branch_v4t_thumb_arm) \
2484 DEF_STUB(long_branch_any_arm_pic) \
2485 DEF_STUB(long_branch_any_thumb_pic) \
2486 DEF_STUB(long_branch_v4t_thumb_thumb_pic) \
2487 DEF_STUB(long_branch_v4t_arm_thumb_pic) \
2488 DEF_STUB(long_branch_v4t_thumb_arm_pic) \
2489 DEF_STUB(long_branch_thumb_only_pic) \
2490 DEF_STUB(long_branch_any_tls_pic) \
2491 DEF_STUB(long_branch_v4t_thumb_tls_pic) \
2492 DEF_STUB(a8_veneer_b_cond) \
2493 DEF_STUB(a8_veneer_b) \
2494 DEF_STUB(a8_veneer_bl) \
2495 DEF_STUB(a8_veneer_blx)
2496
2497 #define DEF_STUB(x) arm_stub_##x,
2498 enum elf32_arm_stub_type
2499 {
2500 arm_stub_none,
2501 DEF_STUBS
2502 /* Note the first a8_veneer type. */
2503 arm_stub_a8_veneer_lwm = arm_stub_a8_veneer_b_cond
2504 };
2505 #undef DEF_STUB
2506
2507 typedef struct
2508 {
2509 const insn_sequence* template_sequence;
2510 int template_size;
2511 } stub_def;
2512
2513 #define DEF_STUB(x) {elf32_arm_stub_##x, ARRAY_SIZE(elf32_arm_stub_##x)},
2514 static const stub_def stub_definitions[] =
2515 {
2516 {NULL, 0},
2517 DEF_STUBS
2518 };
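/* The DEF_STUB/DEF_STUBS X-macro is expanded twice: once above (as
   arm_stub_##x) to produce the enum elf32_arm_stub_type values, and once
   here to produce a stub_definitions[] entry giving each stub's template
   and template length.  The leading {NULL, 0} entry corresponds to
   arm_stub_none, keeping both lists indexable by the same enum value.  */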
2519
2520 struct elf32_arm_stub_hash_entry
2521 {
2522 /* Base hash table entry structure. */
2523 struct bfd_hash_entry root;
2524
2525 /* The stub section. */
2526 asection *stub_sec;
2527
2528 /* Offset within stub_sec of the beginning of this stub. */
2529 bfd_vma stub_offset;
2530
2531 /* Given the symbol's value and its section we can determine its final
2532 value when building the stubs (so the stub knows where to jump). */
2533 bfd_vma target_value;
2534 asection *target_section;
2535
2536 /* Offset to apply to relocation referencing target_value. */
2537 bfd_vma target_addend;
2538
2539 /* The instruction which caused this stub to be generated (only valid for
2540 Cortex-A8 erratum workaround stubs at present). */
2541 unsigned long orig_insn;
2542
2543 /* The stub type. */
2544 enum elf32_arm_stub_type stub_type;
2545 /* Its encoding size in bytes. */
2546 int stub_size;
2547 /* Its template. */
2548 const insn_sequence *stub_template;
2549 /* The size of the template (number of entries). */
2550 int stub_template_size;
2551
2552 /* The symbol table entry, if any, that this was derived from. */
2553 struct elf32_arm_link_hash_entry *h;
2554
2555 /* Type of branch. */
2556 enum arm_st_branch_type branch_type;
2557
2558 /* Where this stub is being called from, or, in the case of combined
2559 stub sections, the first input section in the group. */
2560 asection *id_sec;
2561
2562 /* The name for the local symbol at the start of this stub. The
2563 stub name in the hash table has to be unique; this does not, so
2564 it can be friendlier. */
2565 char *output_name;
2566 };
2567
2568 /* Used to build a map of a section. This is required for mixed-endian
2569 code/data. */
2570
2571 typedef struct elf32_elf_section_map
2572 {
2573 bfd_vma vma;
2574 char type;
2575 }
2576 elf32_arm_section_map;
2577
2578 /* Information about a VFP11 erratum veneer, or a branch to such a veneer. */
2579
2580 typedef enum
2581 {
2582 VFP11_ERRATUM_BRANCH_TO_ARM_VENEER,
2583 VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER,
2584 VFP11_ERRATUM_ARM_VENEER,
2585 VFP11_ERRATUM_THUMB_VENEER
2586 }
2587 elf32_vfp11_erratum_type;
2588
2589 typedef struct elf32_vfp11_erratum_list
2590 {
2591 struct elf32_vfp11_erratum_list *next;
2592 bfd_vma vma;
2593 union
2594 {
2595 struct
2596 {
2597 struct elf32_vfp11_erratum_list *veneer;
2598 unsigned int vfp_insn;
2599 } b;
2600 struct
2601 {
2602 struct elf32_vfp11_erratum_list *branch;
2603 unsigned int id;
2604 } v;
2605 } u;
2606 elf32_vfp11_erratum_type type;
2607 }
2608 elf32_vfp11_erratum_list;
2609
2610 typedef enum
2611 {
2612 DELETE_EXIDX_ENTRY,
2613 INSERT_EXIDX_CANTUNWIND_AT_END
2614 }
2615 arm_unwind_edit_type;
2616
2617 /* A (sorted) list of edits to apply to an unwind table. */
2618 typedef struct arm_unwind_table_edit
2619 {
2620 arm_unwind_edit_type type;
2621 /* Note: we sometimes want to insert an unwind entry corresponding to a
2622 section different from the one we're currently writing out, so record the
2623 (text) section this edit relates to here. */
2624 asection *linked_section;
2625 unsigned int index;
2626 struct arm_unwind_table_edit *next;
2627 }
2628 arm_unwind_table_edit;
2629
2630 typedef struct _arm_elf_section_data
2631 {
2632 /* Information about mapping symbols. */
2633 struct bfd_elf_section_data elf;
2634 unsigned int mapcount;
2635 unsigned int mapsize;
2636 elf32_arm_section_map *map;
2637 /* Information about CPU errata. */
2638 unsigned int erratumcount;
2639 elf32_vfp11_erratum_list *erratumlist;
2640 /* Information about unwind tables. */
2641 union
2642 {
2643 /* Unwind info attached to a text section. */
2644 struct
2645 {
2646 asection *arm_exidx_sec;
2647 } text;
2648
2649 /* Unwind info attached to an .ARM.exidx section. */
2650 struct
2651 {
2652 arm_unwind_table_edit *unwind_edit_list;
2653 arm_unwind_table_edit *unwind_edit_tail;
2654 } exidx;
2655 } u;
2656 }
2657 _arm_elf_section_data;
2658
2659 #define elf32_arm_section_data(sec) \
2660 ((_arm_elf_section_data *) elf_section_data (sec))
2661
2662 /* A fix which might be required for the Cortex-A8 Thumb-2 branch/TLB erratum.
2663 These fixes are subject to a relaxation procedure (in elf32_arm_size_stubs),
2664 so they may be created multiple times: whilst relaxing we use an array of
2665 these entries, which we can refresh easily, and then create stubs for each
2666 potentially erratum-triggering instruction once we've settled on a solution. */
2667
2668 struct a8_erratum_fix
2669 {
2670 bfd *input_bfd;
2671 asection *section;
2672 bfd_vma offset;
2673 bfd_vma addend;
2674 unsigned long orig_insn;
2675 char *stub_name;
2676 enum elf32_arm_stub_type stub_type;
2677 enum arm_st_branch_type branch_type;
2678 };
2679
2680 /* A table of relocs applied to branches which might trigger the
2681 Cortex-A8 erratum. */
2682
2683 struct a8_erratum_reloc
2684 {
2685 bfd_vma from;
2686 bfd_vma destination;
2687 struct elf32_arm_link_hash_entry *hash;
2688 const char *sym_name;
2689 unsigned int r_type;
2690 enum arm_st_branch_type branch_type;
2691 bfd_boolean non_a8_stub;
2692 };
2693
2694 /* The size of the thread control block. */
2695 #define TCB_SIZE 8
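/* A brief note on the value above: ARM uses the "variant 1" TLS layout,
   where the thread pointer addresses an 8-byte thread control block that
   immediately precedes the static TLS block, so the TP-relative offsets
   used for local-exec accesses are biased by at least TCB_SIZE.  */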
2696
2697 /* ARM-specific information about a PLT entry, over and above the usual
2698 gotplt_union. */
2699 struct arm_plt_info
2700 {
2701 /* We reference count Thumb references to a PLT entry separately,
2702 so that we can emit the Thumb trampoline only if needed. */
2703 bfd_signed_vma thumb_refcount;
2704
2705 /* Some references from Thumb code may be eliminated by BL->BLX
2706 conversion, so record them separately. */
2707 bfd_signed_vma maybe_thumb_refcount;
2708
2709 /* How many of the recorded PLT accesses were from non-call relocations.
2710 This information is useful when deciding whether anything takes the
2711 address of an STT_GNU_IFUNC PLT. A value of 0 means that all
2712 non-call references to the function should resolve directly to the
2713 real runtime target. */
2714 unsigned int noncall_refcount;
2715
2716 /* Since PLT entries have variable size if the Thumb prologue is
2717 used, we need to record the index into .got.plt instead of
2718 recomputing it from the PLT offset. */
2719 bfd_signed_vma got_offset;
2720 };
2721
2722 /* Information about an .iplt entry for a local STT_GNU_IFUNC symbol. */
2723 struct arm_local_iplt_info
2724 {
2725 /* The information that is usually found in the generic ELF part of
2726 the hash table entry. */
2727 union gotplt_union root;
2728
2729 /* The information that is usually found in the ARM-specific part of
2730 the hash table entry. */
2731 struct arm_plt_info arm;
2732
2733 /* A list of all potential dynamic relocations against this symbol. */
2734 struct elf_dyn_relocs *dyn_relocs;
2735 };
2736
2737 struct elf_arm_obj_tdata
2738 {
2739 struct elf_obj_tdata root;
2740
2741 /* tls_type for each local got entry. */
2742 char *local_got_tls_type;
2743
2744 /* GOTPLT entries for TLS descriptors. */
2745 bfd_vma *local_tlsdesc_gotent;
2746
2747 /* Information for local symbols that need entries in .iplt. */
2748 struct arm_local_iplt_info **local_iplt;
2749
2750 /* Zero to warn when linking objects with incompatible enum sizes. */
2751 int no_enum_size_warning;
2752
2753 /* Zero to warn when linking objects with incompatible wchar_t sizes. */
2754 int no_wchar_size_warning;
2755 };
2756
2757 #define elf_arm_tdata(bfd) \
2758 ((struct elf_arm_obj_tdata *) (bfd)->tdata.any)
2759
2760 #define elf32_arm_local_got_tls_type(bfd) \
2761 (elf_arm_tdata (bfd)->local_got_tls_type)
2762
2763 #define elf32_arm_local_tlsdesc_gotent(bfd) \
2764 (elf_arm_tdata (bfd)->local_tlsdesc_gotent)
2765
2766 #define elf32_arm_local_iplt(bfd) \
2767 (elf_arm_tdata (bfd)->local_iplt)
2768
2769 #define is_arm_elf(bfd) \
2770 (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
2771 && elf_tdata (bfd) != NULL \
2772 && elf_object_id (bfd) == ARM_ELF_DATA)
2773
2774 static bfd_boolean
2775 elf32_arm_mkobject (bfd *abfd)
2776 {
2777 return bfd_elf_allocate_object (abfd, sizeof (struct elf_arm_obj_tdata),
2778 ARM_ELF_DATA);
2779 }
2780
2781 #define elf32_arm_hash_entry(ent) ((struct elf32_arm_link_hash_entry *)(ent))
2782
2783 /* Arm ELF linker hash entry. */
2784 struct elf32_arm_link_hash_entry
2785 {
2786 struct elf_link_hash_entry root;
2787
2788 /* Track dynamic relocs copied for this symbol. */
2789 struct elf_dyn_relocs *dyn_relocs;
2790
2791 /* ARM-specific PLT information. */
2792 struct arm_plt_info plt;
2793
2794 #define GOT_UNKNOWN 0
2795 #define GOT_NORMAL 1
2796 #define GOT_TLS_GD 2
2797 #define GOT_TLS_IE 4
2798 #define GOT_TLS_GDESC 8
2799 #define GOT_TLS_GD_ANY_P(type) ((type & GOT_TLS_GD) || (type & GOT_TLS_GDESC))
2800 unsigned int tls_type : 8;
2801
2802 /* True if the symbol's PLT entry is in .iplt rather than .plt. */
2803 unsigned int is_iplt : 1;
2804
2805 unsigned int unused : 23;
2806
2807 /* Offset of the GOTPLT entry reserved for the TLS descriptor,
2808 starting at the end of the jump table. */
2809 bfd_vma tlsdesc_got;
2810
2811 /* The symbol marking the real symbol location for exported thumb
2812 symbols with Arm stubs. */
2813 struct elf_link_hash_entry *export_glue;
2814
2815 /* A pointer to the most recently used stub hash entry against this
2816 symbol. */
2817 struct elf32_arm_stub_hash_entry *stub_cache;
2818 };
2819
2820 /* Traverse an arm ELF linker hash table. */
2821 #define elf32_arm_link_hash_traverse(table, func, info) \
2822 (elf_link_hash_traverse \
2823 (&(table)->root, \
2824 (bfd_boolean (*) (struct elf_link_hash_entry *, void *)) (func), \
2825 (info)))
2826
2827 /* Get the ARM elf linker hash table from a link_info structure. */
2828 #define elf32_arm_hash_table(info) \
2829 (elf_hash_table_id ((struct elf_link_hash_table *) ((info)->hash)) \
2830 == ARM_ELF_DATA ? ((struct elf32_arm_link_hash_table *) ((info)->hash)) : NULL)
2831
2832 #define arm_stub_hash_lookup(table, string, create, copy) \
2833 ((struct elf32_arm_stub_hash_entry *) \
2834 bfd_hash_lookup ((table), (string), (create), (copy)))
2835
2836 /* Array to keep track of which stub sections have been created, and
2837 information on stub grouping. */
2838 struct map_stub
2839 {
2840 /* This is the section to which stubs in the group will be
2841 attached. */
2842 asection *link_sec;
2843 /* The stub section. */
2844 asection *stub_sec;
2845 };
2846
2847 #define elf32_arm_compute_jump_table_size(htab) \
2848 ((htab)->next_tls_desc_index * 4)
2849
2850 /* ARM ELF linker hash table. */
2851 struct elf32_arm_link_hash_table
2852 {
2853 /* The main hash table. */
2854 struct elf_link_hash_table root;
2855
2856 /* The size in bytes of the section containing the Thumb-to-ARM glue. */
2857 bfd_size_type thumb_glue_size;
2858
2859 /* The size in bytes of the section containing the ARM-to-Thumb glue. */
2860 bfd_size_type arm_glue_size;
2861
2862 /* The size in bytes of the section containing the ARMv4 BX veneers. */
2863 bfd_size_type bx_glue_size;
2864
2865 /* Offsets of ARMv4 BX veneers. Bit1 set if present, and Bit0 set when
2866 veneer has been populated. */
2867 bfd_vma bx_glue_offset[15];
2868
2869 /* The size in bytes of the section containing glue for VFP11 erratum
2870 veneers. */
2871 bfd_size_type vfp11_erratum_glue_size;
2872
2873 /* A table of fix locations for Cortex-A8 Thumb-2 branch/TLB erratum. This
2874 holds Cortex-A8 erratum fix locations between elf32_arm_size_stubs() and
2875 elf32_arm_write_section(). */
2876 struct a8_erratum_fix *a8_erratum_fixes;
2877 unsigned int num_a8_erratum_fixes;
2878
2879 /* An arbitrary input BFD chosen to hold the glue sections. */
2880 bfd * bfd_of_glue_owner;
2881
2882 /* Nonzero to output a BE8 image. */
2883 int byteswap_code;
2884
2885 /* Zero if R_ARM_TARGET1 means R_ARM_ABS32.
2886 Nonzero if R_ARM_TARGET1 means R_ARM_REL32. */
2887 int target1_is_rel;
2888
2889 /* The relocation to use for R_ARM_TARGET2 relocations. */
2890 int target2_reloc;
2891
2892 /* 0 = Ignore R_ARM_V4BX.
2893 1 = Convert BX to MOV PC.
2894 2 = Generate v4 interworking stubs. */
2895 int fix_v4bx;
2896
2897 /* Whether we should fix the Cortex-A8 Thumb-2 branch/TLB erratum. */
2898 int fix_cortex_a8;
2899
2900 /* Whether we should fix the ARM1176 BLX immediate issue. */
2901 int fix_arm1176;
2902
2903 /* Nonzero if the ARM/Thumb BLX instructions are available for use. */
2904 int use_blx;
2905
2906 /* What sort of code sequences we should look for which may trigger the
2907 VFP11 denorm erratum. */
2908 bfd_arm_vfp11_fix vfp11_fix;
2909
2910 /* Global counter for the number of fixes we have emitted. */
2911 int num_vfp11_fixes;
2912
2913 /* Nonzero to force PIC branch veneers. */
2914 int pic_veneer;
2915
2916 /* The number of bytes in the initial entry in the PLT. */
2917 bfd_size_type plt_header_size;
2918
2919 /* The number of bytes in the subsequent PLT entries. */
2920 bfd_size_type plt_entry_size;
2921
2922 /* True if the target system is VxWorks. */
2923 int vxworks_p;
2924
2925 /* True if the target system is Symbian OS. */
2926 int symbian_p;
2927
2928 /* True if the target system is Native Client. */
2929 int nacl_p;
2930
2931 /* True if the target uses REL relocations. */
2932 int use_rel;
2933
2934 /* The index of the next unused R_ARM_TLS_DESC slot in .rel.plt. */
2935 bfd_vma next_tls_desc_index;
2936
2937 /* How many R_ARM_TLS_DESC relocations were generated so far. */
2938 bfd_vma num_tls_desc;
2939
2940 /* Short-cuts to get to dynamic linker sections. */
2941 asection *sdynbss;
2942 asection *srelbss;
2943
2944 /* The (unloaded but important) VxWorks .rela.plt.unloaded section. */
2945 asection *srelplt2;
2946
2947 /* The offset into splt of the PLT entry for the TLS descriptor
2948 resolver. Special values are 0, if not necessary (or not found
2949 to be necessary yet), and -1 if needed but not determined
2950 yet. */
2951 bfd_vma dt_tlsdesc_plt;
2952
2953 /* The offset into sgot of the GOT entry used by the PLT entry
2954 above. */
2955 bfd_vma dt_tlsdesc_got;
2956
2957 /* Offset in .plt section of tls_arm_trampoline. */
2958 bfd_vma tls_trampoline;
2959
2960 /* Data for R_ARM_TLS_LDM32 relocations. */
2961 union
2962 {
2963 bfd_signed_vma refcount;
2964 bfd_vma offset;
2965 } tls_ldm_got;
2966
2967 /* Small local sym cache. */
2968 struct sym_cache sym_cache;
2969
2970 /* For convenience in allocate_dynrelocs. */
2971 bfd * obfd;
2972
2973 /* The amount of space used by the reserved portion of the sgotplt
2974 section, plus whatever space is used by the jump slots. */
2975 bfd_vma sgotplt_jump_table_size;
2976
2977 /* The stub hash table. */
2978 struct bfd_hash_table stub_hash_table;
2979
2980 /* Linker stub bfd. */
2981 bfd *stub_bfd;
2982
2983 /* Linker call-backs. */
2984 asection * (*add_stub_section) (const char *, asection *);
2985 void (*layout_sections_again) (void);
2986
2987 /* Array to keep track of which stub sections have been created, and
2988 information on stub grouping. */
2989 struct map_stub *stub_group;
2990
2991 /* Number of elements in stub_group. */
2992 int top_id;
2993
2994 /* Assorted information used by elf32_arm_size_stubs. */
2995 unsigned int bfd_count;
2996 int top_index;
2997 asection **input_list;
2998 };
2999
3000 /* Create an entry in an ARM ELF linker hash table. */
3001
3002 static struct bfd_hash_entry *
3003 elf32_arm_link_hash_newfunc (struct bfd_hash_entry * entry,
3004 struct bfd_hash_table * table,
3005 const char * string)
3006 {
3007 struct elf32_arm_link_hash_entry * ret =
3008 (struct elf32_arm_link_hash_entry *) entry;
3009
3010 /* Allocate the structure if it has not already been allocated by a
3011 subclass. */
3012 if (ret == NULL)
3013 ret = (struct elf32_arm_link_hash_entry *)
3014 bfd_hash_allocate (table, sizeof (struct elf32_arm_link_hash_entry));
3015 if (ret == NULL)
3016 return (struct bfd_hash_entry *) ret;
3017
3018 /* Call the allocation method of the superclass. */
3019 ret = ((struct elf32_arm_link_hash_entry *)
3020 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret,
3021 table, string));
3022 if (ret != NULL)
3023 {
3024 ret->dyn_relocs = NULL;
3025 ret->tls_type = GOT_UNKNOWN;
3026 ret->tlsdesc_got = (bfd_vma) -1;
3027 ret->plt.thumb_refcount = 0;
3028 ret->plt.maybe_thumb_refcount = 0;
3029 ret->plt.noncall_refcount = 0;
3030 ret->plt.got_offset = -1;
3031 ret->is_iplt = FALSE;
3032 ret->export_glue = NULL;
3033
3034 ret->stub_cache = NULL;
3035 }
3036
3037 return (struct bfd_hash_entry *) ret;
3038 }
3039
3040 /* Ensure that we have allocated bookkeeping structures for ABFD's local
3041 symbols. */
3042
3043 static bfd_boolean
3044 elf32_arm_allocate_local_sym_info (bfd *abfd)
3045 {
3046 if (elf_local_got_refcounts (abfd) == NULL)
3047 {
3048 bfd_size_type num_syms;
3049 bfd_size_type size;
3050 char *data;
3051
3052 num_syms = elf_tdata (abfd)->symtab_hdr.sh_info;
3053 size = num_syms * (sizeof (bfd_signed_vma)
3054 + sizeof (struct arm_local_iplt_info *)
3055 + sizeof (bfd_vma)
3056 + sizeof (char));
3057 data = bfd_zalloc (abfd, size);
3058 if (data == NULL)
3059 return FALSE;
3060
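/* The single allocation above is carved into four parallel per-symbol
   arrays: the generic GOT refcounts, the .iplt information pointers, the
   TLS descriptor GOT offsets and, last, the one-byte GOT tls_type
   values.  */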
3061 elf_local_got_refcounts (abfd) = (bfd_signed_vma *) data;
3062 data += num_syms * sizeof (bfd_signed_vma);
3063
3064 elf32_arm_local_iplt (abfd) = (struct arm_local_iplt_info **) data;
3065 data += num_syms * sizeof (struct arm_local_iplt_info *);
3066
3067 elf32_arm_local_tlsdesc_gotent (abfd) = (bfd_vma *) data;
3068 data += num_syms * sizeof (bfd_vma);
3069
3070 elf32_arm_local_got_tls_type (abfd) = data;
3071 }
3072 return TRUE;
3073 }
3074
3075 /* Return the .iplt information for local symbol R_SYMNDX, which belongs
3076 to input bfd ABFD. Create the information if it doesn't already exist.
3077 Return null if an allocation fails. */
3078
3079 static struct arm_local_iplt_info *
3080 elf32_arm_create_local_iplt (bfd *abfd, unsigned long r_symndx)
3081 {
3082 struct arm_local_iplt_info **ptr;
3083
3084 if (!elf32_arm_allocate_local_sym_info (abfd))
3085 return NULL;
3086
3087 BFD_ASSERT (r_symndx < elf_tdata (abfd)->symtab_hdr.sh_info);
3088 ptr = &elf32_arm_local_iplt (abfd)[r_symndx];
3089 if (*ptr == NULL)
3090 *ptr = bfd_zalloc (abfd, sizeof (**ptr));
3091 return *ptr;
3092 }
3093
3094 /* Try to obtain PLT information for the symbol with index R_SYMNDX
3095 in ABFD's symbol table. If the symbol is global, H points to its
3096 hash table entry, otherwise H is null.
3097
3098 Return true if the symbol does have PLT information. When returning
3099 true, point *ROOT_PLT at the target-independent reference count/offset
3100 union and *ARM_PLT at the ARM-specific information. */
3101
3102 static bfd_boolean
3103 elf32_arm_get_plt_info (bfd *abfd, struct elf32_arm_link_hash_entry *h,
3104 unsigned long r_symndx, union gotplt_union **root_plt,
3105 struct arm_plt_info **arm_plt)
3106 {
3107 struct arm_local_iplt_info *local_iplt;
3108
3109 if (h != NULL)
3110 {
3111 *root_plt = &h->root.plt;
3112 *arm_plt = &h->plt;
3113 return TRUE;
3114 }
3115
3116 if (elf32_arm_local_iplt (abfd) == NULL)
3117 return FALSE;
3118
3119 local_iplt = elf32_arm_local_iplt (abfd)[r_symndx];
3120 if (local_iplt == NULL)
3121 return FALSE;
3122
3123 *root_plt = &local_iplt->root;
3124 *arm_plt = &local_iplt->arm;
3125 return TRUE;
3126 }
3127
3128 /* Return true if the PLT described by ARM_PLT requires a Thumb stub
3129 before it. */
3130
3131 static bfd_boolean
3132 elf32_arm_plt_needs_thumb_stub_p (struct bfd_link_info *info,
3133 struct arm_plt_info *arm_plt)
3134 {
3135 struct elf32_arm_link_hash_table *htab;
3136
3137 htab = elf32_arm_hash_table (info);
3138 return (arm_plt->thumb_refcount != 0
3139 || (!htab->use_blx && arm_plt->maybe_thumb_refcount != 0));
3140 }
3141
3142 /* Return a pointer to the head of the dynamic reloc list that should
3143 be used for local symbol ISYM, which is symbol number R_SYMNDX in
3144 ABFD's symbol table. Return null if an error occurs. */
3145
3146 static struct elf_dyn_relocs **
3147 elf32_arm_get_local_dynreloc_list (bfd *abfd, unsigned long r_symndx,
3148 Elf_Internal_Sym *isym)
3149 {
3150 if (ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
3151 {
3152 struct arm_local_iplt_info *local_iplt;
3153
3154 local_iplt = elf32_arm_create_local_iplt (abfd, r_symndx);
3155 if (local_iplt == NULL)
3156 return NULL;
3157 return &local_iplt->dyn_relocs;
3158 }
3159 else
3160 {
3161 /* Track dynamic relocs needed for local syms too.
3162 We really need local syms available to do this
3163 easily. Oh well. */
3164 asection *s;
3165 void *vpp;
3166
3167 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
3168 if (s == NULL)
3169 abort ();
3170
3171 vpp = &elf_section_data (s)->local_dynrel;
3172 return (struct elf_dyn_relocs **) vpp;
3173 }
3174 }
3175
3176 /* Initialize an entry in the stub hash table. */
3177
3178 static struct bfd_hash_entry *
3179 stub_hash_newfunc (struct bfd_hash_entry *entry,
3180 struct bfd_hash_table *table,
3181 const char *string)
3182 {
3183 /* Allocate the structure if it has not already been allocated by a
3184 subclass. */
3185 if (entry == NULL)
3186 {
3187 entry = (struct bfd_hash_entry *)
3188 bfd_hash_allocate (table, sizeof (struct elf32_arm_stub_hash_entry));
3189 if (entry == NULL)
3190 return entry;
3191 }
3192
3193 /* Call the allocation method of the superclass. */
3194 entry = bfd_hash_newfunc (entry, table, string);
3195 if (entry != NULL)
3196 {
3197 struct elf32_arm_stub_hash_entry *eh;
3198
3199 /* Initialize the local fields. */
3200 eh = (struct elf32_arm_stub_hash_entry *) entry;
3201 eh->stub_sec = NULL;
3202 eh->stub_offset = 0;
3203 eh->target_value = 0;
3204 eh->target_section = NULL;
3205 eh->target_addend = 0;
3206 eh->orig_insn = 0;
3207 eh->stub_type = arm_stub_none;
3208 eh->stub_size = 0;
3209 eh->stub_template = NULL;
3210 eh->stub_template_size = 0;
3211 eh->h = NULL;
3212 eh->id_sec = NULL;
3213 eh->output_name = NULL;
3214 }
3215
3216 return entry;
3217 }
3218
3219 /* Create .got, .gotplt, and .rel(a).got sections in DYNOBJ, and set up
3220 shortcuts to them in our hash table. */
3221
3222 static bfd_boolean
3223 create_got_section (bfd *dynobj, struct bfd_link_info *info)
3224 {
3225 struct elf32_arm_link_hash_table *htab;
3226
3227 htab = elf32_arm_hash_table (info);
3228 if (htab == NULL)
3229 return FALSE;
3230
3231 /* BPABI objects never have a GOT, or associated sections. */
3232 if (htab->symbian_p)
3233 return TRUE;
3234
3235 if (! _bfd_elf_create_got_section (dynobj, info))
3236 return FALSE;
3237
3238 return TRUE;
3239 }
3240
3241 /* Create the .iplt, .rel(a).iplt and .igot.plt sections. */
3242
3243 static bfd_boolean
3244 create_ifunc_sections (struct bfd_link_info *info)
3245 {
3246 struct elf32_arm_link_hash_table *htab;
3247 const struct elf_backend_data *bed;
3248 bfd *dynobj;
3249 asection *s;
3250 flagword flags;
3251
3252 htab = elf32_arm_hash_table (info);
3253 dynobj = htab->root.dynobj;
3254 bed = get_elf_backend_data (dynobj);
3255 flags = bed->dynamic_sec_flags;
3256
3257 if (htab->root.iplt == NULL)
3258 {
3259 s = bfd_make_section_with_flags (dynobj, ".iplt",
3260 flags | SEC_READONLY | SEC_CODE);
3261 if (s == NULL
3262 || !bfd_set_section_alignment (dynobj, s, bed->plt_alignment))
3263 return FALSE;
3264 htab->root.iplt = s;
3265 }
3266
3267 if (htab->root.irelplt == NULL)
3268 {
3269 s = bfd_make_section_with_flags (dynobj, RELOC_SECTION (htab, ".iplt"),
3270 flags | SEC_READONLY);
3271 if (s == NULL
3272 || !bfd_set_section_alignment (dynobj, s, bed->s->log_file_align))
3273 return FALSE;
3274 htab->root.irelplt = s;
3275 }
3276
3277 if (htab->root.igotplt == NULL)
3278 {
3279 s = bfd_make_section_with_flags (dynobj, ".igot.plt", flags);
3280 if (s == NULL
3281 || !bfd_set_section_alignment (dynobj, s, bed->s->log_file_align))
3282 return FALSE;
3283 htab->root.igotplt = s;
3284 }
3285 return TRUE;
3286 }
3287
3288 /* Create .plt, .rel(a).plt, .got, .got.plt, .rel(a).got, .dynbss, and
3289 .rel(a).bss sections in DYNOBJ, and set up shortcuts to them in our
3290 hash table. */
3291
3292 static bfd_boolean
3293 elf32_arm_create_dynamic_sections (bfd *dynobj, struct bfd_link_info *info)
3294 {
3295 struct elf32_arm_link_hash_table *htab;
3296
3297 htab = elf32_arm_hash_table (info);
3298 if (htab == NULL)
3299 return FALSE;
3300
3301 if (!htab->root.sgot && !create_got_section (dynobj, info))
3302 return FALSE;
3303
3304 if (!_bfd_elf_create_dynamic_sections (dynobj, info))
3305 return FALSE;
3306
3307 htab->sdynbss = bfd_get_section_by_name (dynobj, ".dynbss");
3308 if (!info->shared)
3309 htab->srelbss = bfd_get_section_by_name (dynobj,
3310 RELOC_SECTION (htab, ".bss"));
3311
3312 if (htab->vxworks_p)
3313 {
3314 if (!elf_vxworks_create_dynamic_sections (dynobj, info, &htab->srelplt2))
3315 return FALSE;
3316
3317 if (info->shared)
3318 {
3319 htab->plt_header_size = 0;
3320 htab->plt_entry_size
3321 = 4 * ARRAY_SIZE (elf32_arm_vxworks_shared_plt_entry);
3322 }
3323 else
3324 {
3325 htab->plt_header_size
3326 = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt0_entry);
3327 htab->plt_entry_size
3328 = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt_entry);
3329 }
3330 }
3331
3332 if (!htab->root.splt
3333 || !htab->root.srelplt
3334 || !htab->sdynbss
3335 || (!info->shared && !htab->srelbss))
3336 abort ();
3337
3338 return TRUE;
3339 }
3340
3341 /* Copy the extra info we tack onto an elf_link_hash_entry. */
3342
3343 static void
3344 elf32_arm_copy_indirect_symbol (struct bfd_link_info *info,
3345 struct elf_link_hash_entry *dir,
3346 struct elf_link_hash_entry *ind)
3347 {
3348 struct elf32_arm_link_hash_entry *edir, *eind;
3349
3350 edir = (struct elf32_arm_link_hash_entry *) dir;
3351 eind = (struct elf32_arm_link_hash_entry *) ind;
3352
3353 if (eind->dyn_relocs != NULL)
3354 {
3355 if (edir->dyn_relocs != NULL)
3356 {
3357 struct elf_dyn_relocs **pp;
3358 struct elf_dyn_relocs *p;
3359
3360 /* Add reloc counts against the indirect sym to the direct sym
3361 list. Merge any entries against the same section. */
3362 for (pp = &eind->dyn_relocs; (p = *pp) != NULL; )
3363 {
3364 struct elf_dyn_relocs *q;
3365
3366 for (q = edir->dyn_relocs; q != NULL; q = q->next)
3367 if (q->sec == p->sec)
3368 {
3369 q->pc_count += p->pc_count;
3370 q->count += p->count;
3371 *pp = p->next;
3372 break;
3373 }
3374 if (q == NULL)
3375 pp = &p->next;
3376 }
3377 *pp = edir->dyn_relocs;
3378 }
3379
3380 edir->dyn_relocs = eind->dyn_relocs;
3381 eind->dyn_relocs = NULL;
3382 }
3383
3384 if (ind->root.type == bfd_link_hash_indirect)
3385 {
3386 /* Copy over PLT info. */
3387 edir->plt.thumb_refcount += eind->plt.thumb_refcount;
3388 eind->plt.thumb_refcount = 0;
3389 edir->plt.maybe_thumb_refcount += eind->plt.maybe_thumb_refcount;
3390 eind->plt.maybe_thumb_refcount = 0;
3391 edir->plt.noncall_refcount += eind->plt.noncall_refcount;
3392 eind->plt.noncall_refcount = 0;
3393
3394 /* We should only allocate a function to .iplt once the final
3395 symbol information is known. */
3396 BFD_ASSERT (!eind->is_iplt);
3397
3398 if (dir->got.refcount <= 0)
3399 {
3400 edir->tls_type = eind->tls_type;
3401 eind->tls_type = GOT_UNKNOWN;
3402 }
3403 }
3404
3405 _bfd_elf_link_hash_copy_indirect (info, dir, ind);
3406 }
3407
3408 /* Create an ARM elf linker hash table. */
3409
3410 static struct bfd_link_hash_table *
3411 elf32_arm_link_hash_table_create (bfd *abfd)
3412 {
3413 struct elf32_arm_link_hash_table *ret;
3414 bfd_size_type amt = sizeof (struct elf32_arm_link_hash_table);
3415
3416 ret = (struct elf32_arm_link_hash_table *) bfd_malloc (amt);
3417 if (ret == NULL)
3418 return NULL;
3419
3420 if (!_bfd_elf_link_hash_table_init (& ret->root, abfd,
3421 elf32_arm_link_hash_newfunc,
3422 sizeof (struct elf32_arm_link_hash_entry),
3423 ARM_ELF_DATA))
3424 {
3425 free (ret);
3426 return NULL;
3427 }
3428
3429 ret->sdynbss = NULL;
3430 ret->srelbss = NULL;
3431 ret->srelplt2 = NULL;
3432 ret->dt_tlsdesc_plt = 0;
3433 ret->dt_tlsdesc_got = 0;
3434 ret->tls_trampoline = 0;
3435 ret->next_tls_desc_index = 0;
3436 ret->num_tls_desc = 0;
3437 ret->thumb_glue_size = 0;
3438 ret->arm_glue_size = 0;
3439 ret->bx_glue_size = 0;
3440 memset (ret->bx_glue_offset, 0, sizeof (ret->bx_glue_offset));
3441 ret->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
3442 ret->vfp11_erratum_glue_size = 0;
3443 ret->num_vfp11_fixes = 0;
3444 ret->fix_cortex_a8 = 0;
3445 ret->fix_arm1176 = 0;
3446 ret->bfd_of_glue_owner = NULL;
3447 ret->byteswap_code = 0;
3448 ret->target1_is_rel = 0;
3449 ret->target2_reloc = R_ARM_NONE;
3450 #ifdef FOUR_WORD_PLT
3451 ret->plt_header_size = 16;
3452 ret->plt_entry_size = 16;
3453 #else
3454 ret->plt_header_size = 20;
3455 ret->plt_entry_size = 12;
3456 #endif
3457 ret->fix_v4bx = 0;
3458 ret->use_blx = 0;
3459 ret->vxworks_p = 0;
3460 ret->symbian_p = 0;
3461 ret->nacl_p = 0;
3462 ret->use_rel = 1;
3463 ret->sym_cache.abfd = NULL;
3464 ret->obfd = abfd;
3465 ret->tls_ldm_got.refcount = 0;
3466 ret->stub_bfd = NULL;
3467 ret->add_stub_section = NULL;
3468 ret->layout_sections_again = NULL;
3469 ret->stub_group = NULL;
3470 ret->top_id = 0;
3471 ret->bfd_count = 0;
3472 ret->top_index = 0;
3473 ret->input_list = NULL;
3474
3475 if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc,
3476 sizeof (struct elf32_arm_stub_hash_entry)))
3477 {
3478 free (ret);
3479 return NULL;
3480 }
3481
3482 return &ret->root.root;
3483 }
3484
3485 /* Free the derived linker hash table. */
3486
3487 static void
3488 elf32_arm_hash_table_free (struct bfd_link_hash_table *hash)
3489 {
3490 struct elf32_arm_link_hash_table *ret
3491 = (struct elf32_arm_link_hash_table *) hash;
3492
3493 bfd_hash_table_free (&ret->stub_hash_table);
3494 _bfd_generic_link_hash_table_free (hash);
3495 }
3496
3497 /* Determine if we're dealing with a Thumb-only architecture. */
3498
3499 static bfd_boolean
3500 using_thumb_only (struct elf32_arm_link_hash_table *globals)
3501 {
3502 int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3503 Tag_CPU_arch);
3504 int profile;
3505
3506 if (arch == TAG_CPU_ARCH_V6_M || arch == TAG_CPU_ARCH_V6S_M)
3507 return TRUE;
3508
3509 if (arch != TAG_CPU_ARCH_V7 && arch != TAG_CPU_ARCH_V7E_M)
3510 return FALSE;
3511
3512 profile = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3513 Tag_CPU_arch_profile);
3514
3515 return profile == 'M';
3516 }
3517
3518 /* Determine if we're dealing with a Thumb-2 object. */
3519
3520 static bfd_boolean
3521 using_thumb2 (struct elf32_arm_link_hash_table *globals)
3522 {
3523 int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3524 Tag_CPU_arch);
3525 return arch == TAG_CPU_ARCH_V6T2 || arch >= TAG_CPU_ARCH_V7;
3526 }
3527
3528 /* Determine what kind of NOPs are available. */
3529
3530 static bfd_boolean
3531 arch_has_arm_nop (struct elf32_arm_link_hash_table *globals)
3532 {
3533 const int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3534 Tag_CPU_arch);
3535 return arch == TAG_CPU_ARCH_V6T2
3536 || arch == TAG_CPU_ARCH_V6K
3537 || arch == TAG_CPU_ARCH_V7
3538 || arch == TAG_CPU_ARCH_V7E_M;
3539 }
3540
3541 static bfd_boolean
3542 arch_has_thumb2_nop (struct elf32_arm_link_hash_table *globals)
3543 {
3544 const int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3545 Tag_CPU_arch);
3546 return (arch == TAG_CPU_ARCH_V6T2 || arch == TAG_CPU_ARCH_V7
3547 || arch == TAG_CPU_ARCH_V7E_M);
3548 }
3549
3550 static bfd_boolean
3551 arm_stub_is_thumb (enum elf32_arm_stub_type stub_type)
3552 {
3553 switch (stub_type)
3554 {
3555 case arm_stub_long_branch_thumb_only:
3556 case arm_stub_long_branch_v4t_thumb_arm:
3557 case arm_stub_short_branch_v4t_thumb_arm:
3558 case arm_stub_long_branch_v4t_thumb_arm_pic:
3559 case arm_stub_long_branch_v4t_thumb_tls_pic:
3560 case arm_stub_long_branch_thumb_only_pic:
3561 return TRUE;
3562 case arm_stub_none:
3563 BFD_FAIL ();
3564 return FALSE;
3565 break;
3566 default:
3567 return FALSE;
3568 }
3569 }
3570
3571 /* Determine the type of stub needed, if any, for a call. */
3572
3573 static enum elf32_arm_stub_type
3574 arm_type_of_stub (struct bfd_link_info *info,
3575 asection *input_sec,
3576 const Elf_Internal_Rela *rel,
3577 unsigned char st_type,
3578 enum arm_st_branch_type *actual_branch_type,
3579 struct elf32_arm_link_hash_entry *hash,
3580 bfd_vma destination,
3581 asection *sym_sec,
3582 bfd *input_bfd,
3583 const char *name)
3584 {
3585 bfd_vma location;
3586 bfd_signed_vma branch_offset;
3587 unsigned int r_type;
3588 struct elf32_arm_link_hash_table * globals;
3589 int thumb2;
3590 int thumb_only;
3591 enum elf32_arm_stub_type stub_type = arm_stub_none;
3592 int use_plt = 0;
3593 enum arm_st_branch_type branch_type = *actual_branch_type;
3594 union gotplt_union *root_plt;
3595 struct arm_plt_info *arm_plt;
3596
3597 if (branch_type == ST_BRANCH_LONG)
3598 return stub_type;
3599
3600 globals = elf32_arm_hash_table (info);
3601 if (globals == NULL)
3602 return stub_type;
3603
3604 thumb_only = using_thumb_only (globals);
3605
3606 thumb2 = using_thumb2 (globals);
3607
3608 /* Determine where the call point is. */
3609 location = (input_sec->output_offset
3610 + input_sec->output_section->vma
3611 + rel->r_offset);
3612
3613 r_type = ELF32_R_TYPE (rel->r_info);
3614
3615 /* For TLS call relocs, it is the caller's responsibility to provide
3616 the address of the appropriate trampoline. */
3617 if (r_type != R_ARM_TLS_CALL
3618 && r_type != R_ARM_THM_TLS_CALL
3619 && elf32_arm_get_plt_info (input_bfd, hash, ELF32_R_SYM (rel->r_info),
3620 &root_plt, &arm_plt)
3621 && root_plt->offset != (bfd_vma) -1)
3622 {
3623 asection *splt;
3624
3625 if (hash == NULL || hash->is_iplt)
3626 splt = globals->root.iplt;
3627 else
3628 splt = globals->root.splt;
3629 if (splt != NULL)
3630 {
3631 use_plt = 1;
3632
3633 /* Note when dealing with PLT entries: the main PLT stub is in
3634 ARM mode, so if the branch is in Thumb mode, another
3635 Thumb->ARM stub will be inserted later just before the ARM
3636 PLT stub. We don't take this extra distance into account
3637 here, because if a long branch stub is needed, we'll add a
3638 Thumb->ARM one and branch directly to the ARM PLT entry,
3639 since that avoids spreading offset corrections across several
3640 places. */
3641
3642 destination = (splt->output_section->vma
3643 + splt->output_offset
3644 + root_plt->offset);
3645 st_type = STT_FUNC;
3646 branch_type = ST_BRANCH_TO_ARM;
3647 }
3648 }
3649 /* Calls to STT_GNU_IFUNC symbols should go through a PLT. */
3650 BFD_ASSERT (st_type != STT_GNU_IFUNC);
3651
3652 branch_offset = (bfd_signed_vma)(destination - location);
3653
3654 if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24
3655 || r_type == R_ARM_THM_TLS_CALL)
3656 {
3657 /* Handle cases where:
3658 - this call goes too far (different Thumb/Thumb2 max
3659 distance)
3660 - it's a Thumb->Arm call and blx is not available, or it's a
3661 Thumb->Arm branch (not bl). A stub is needed in this case,
3662 but only if this call is not through a PLT entry. Indeed,
3663 PLT stubs handle mode switching already.
3664 */
3665 if ((!thumb2
3666 && (branch_offset > THM_MAX_FWD_BRANCH_OFFSET
3667 || (branch_offset < THM_MAX_BWD_BRANCH_OFFSET)))
3668 || (thumb2
3669 && (branch_offset > THM2_MAX_FWD_BRANCH_OFFSET
3670 || (branch_offset < THM2_MAX_BWD_BRANCH_OFFSET)))
3671 || (branch_type == ST_BRANCH_TO_ARM
3672 && (((r_type == R_ARM_THM_CALL
3673 || r_type == R_ARM_THM_TLS_CALL) && !globals->use_blx)
3674 || (r_type == R_ARM_THM_JUMP24))
3675 && !use_plt))
3676 {
3677 if (branch_type == ST_BRANCH_TO_THUMB)
3678 {
3679 /* Thumb to thumb. */
3680 if (!thumb_only)
3681 {
3682 stub_type = (info->shared | globals->pic_veneer)
3683 /* PIC stubs. */
3684 ? ((globals->use_blx
3685 && (r_type == R_ARM_THM_CALL))
3686 /* V5T and above. Stub starts with ARM code, so
3687 we must be able to switch mode before
3688 reaching it, which is only possible for 'bl'
3689 (ie R_ARM_THM_CALL relocation). */
3690 ? arm_stub_long_branch_any_thumb_pic
3691 /* On V4T, use Thumb code only. */
3692 : arm_stub_long_branch_v4t_thumb_thumb_pic)
3693
3694 /* non-PIC stubs. */
3695 : ((globals->use_blx
3696 && (r_type == R_ARM_THM_CALL))
3697 /* V5T and above. */
3698 ? arm_stub_long_branch_any_any
3699 /* V4T. */
3700 : arm_stub_long_branch_v4t_thumb_thumb);
3701 }
3702 else
3703 {
3704 stub_type = (info->shared | globals->pic_veneer)
3705 /* PIC stub. */
3706 ? arm_stub_long_branch_thumb_only_pic
3707 /* non-PIC stub. */
3708 : arm_stub_long_branch_thumb_only;
3709 }
3710 }
3711 else
3712 {
3713 /* Thumb to arm. */
3714 if (sym_sec != NULL
3715 && sym_sec->owner != NULL
3716 && !INTERWORK_FLAG (sym_sec->owner))
3717 {
3718 (*_bfd_error_handler)
3719 (_("%B(%s): warning: interworking not enabled.\n"
3720 " first occurrence: %B: Thumb call to ARM"),
3721 sym_sec->owner, input_bfd, name);
3722 }
3723
3724 stub_type =
3725 (info->shared | globals->pic_veneer)
3726 /* PIC stubs. */
3727 ? (r_type == R_ARM_THM_TLS_CALL
3728 /* TLS PIC stubs */
3729 ? (globals->use_blx ? arm_stub_long_branch_any_tls_pic
3730 : arm_stub_long_branch_v4t_thumb_tls_pic)
3731 : ((globals->use_blx && r_type == R_ARM_THM_CALL)
3732 /* V5T PIC and above. */
3733 ? arm_stub_long_branch_any_arm_pic
3734 /* V4T PIC stub. */
3735 : arm_stub_long_branch_v4t_thumb_arm_pic))
3736
3737 /* non-PIC stubs. */
3738 : ((globals->use_blx && r_type == R_ARM_THM_CALL)
3739 /* V5T and above. */
3740 ? arm_stub_long_branch_any_any
3741 /* V4T. */
3742 : arm_stub_long_branch_v4t_thumb_arm);
3743
3744 /* Handle v4t short branches. */
3745 if ((stub_type == arm_stub_long_branch_v4t_thumb_arm)
3746 && (branch_offset <= THM_MAX_FWD_BRANCH_OFFSET)
3747 && (branch_offset >= THM_MAX_BWD_BRANCH_OFFSET))
3748 stub_type = arm_stub_short_branch_v4t_thumb_arm;
3749 }
3750 }
3751 }
3752 else if (r_type == R_ARM_CALL
3753 || r_type == R_ARM_JUMP24
3754 || r_type == R_ARM_PLT32
3755 || r_type == R_ARM_TLS_CALL)
3756 {
3757 if (branch_type == ST_BRANCH_TO_THUMB)
3758 {
3759 /* Arm to thumb. */
3760
3761 if (sym_sec != NULL
3762 && sym_sec->owner != NULL
3763 && !INTERWORK_FLAG (sym_sec->owner))
3764 {
3765 (*_bfd_error_handler)
3766 (_("%B(%s): warning: interworking not enabled.\n"
3767 " first occurrence: %B: ARM call to Thumb"),
3768 sym_sec->owner, input_bfd, name);
3769 }
3770
3771 /* We have an extra 2 bytes of reach because of
3772 the mode change (bit 24 (H) of the BLX encoding). */
3773 if (branch_offset > (ARM_MAX_FWD_BRANCH_OFFSET + 2)
3774 || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET)
3775 || (r_type == R_ARM_CALL && !globals->use_blx)
3776 || (r_type == R_ARM_JUMP24)
3777 || (r_type == R_ARM_PLT32))
3778 {
3779 stub_type = (info->shared | globals->pic_veneer)
3780 /* PIC stubs. */
3781 ? ((globals->use_blx)
3782 /* V5T and above. */
3783 ? arm_stub_long_branch_any_thumb_pic
3784 /* V4T stub. */
3785 : arm_stub_long_branch_v4t_arm_thumb_pic)
3786
3787 /* non-PIC stubs. */
3788 : ((globals->use_blx)
3789 /* V5T and above. */
3790 ? arm_stub_long_branch_any_any
3791 /* V4T. */
3792 : arm_stub_long_branch_v4t_arm_thumb);
3793 }
3794 }
3795 else
3796 {
3797 /* Arm to arm. */
3798 if (branch_offset > ARM_MAX_FWD_BRANCH_OFFSET
3799 || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET))
3800 {
3801 stub_type =
3802 (info->shared | globals->pic_veneer)
3803 /* PIC stubs. */
3804 ? (r_type == R_ARM_TLS_CALL
3805 /* TLS PIC Stub */
3806 ? arm_stub_long_branch_any_tls_pic
3807 : arm_stub_long_branch_any_arm_pic)
3808 /* non-PIC stubs. */
3809 : arm_stub_long_branch_any_any;
3810 }
3811 }
3812 }
3813
3814 /* If a stub is needed, record the actual destination type. */
3815 if (stub_type != arm_stub_none)
3816 *actual_branch_type = branch_type;
3817
3818 return stub_type;
3819 }
3820
3821 /* Build a name for an entry in the stub hash table. */
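/* For illustration only (the values below are made up): a reloc in an
input section with id 0x25 against the global symbol "printf", with
addend 0 and a stub type whose enum value is 8, is named
"00000025_printf+0_8". For local symbols the second format below
substitutes the target section id and symbol index for the symbol
name. */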
3822
3823 static char *
3824 elf32_arm_stub_name (const asection *input_section,
3825 const asection *sym_sec,
3826 const struct elf32_arm_link_hash_entry *hash,
3827 const Elf_Internal_Rela *rel,
3828 enum elf32_arm_stub_type stub_type)
3829 {
3830 char *stub_name;
3831 bfd_size_type len;
3832
3833 if (hash)
3834 {
3835 len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 8 + 1 + 2 + 1;
3836 stub_name = (char *) bfd_malloc (len);
3837 if (stub_name != NULL)
3838 sprintf (stub_name, "%08x_%s+%x_%d",
3839 input_section->id & 0xffffffff,
3840 hash->root.root.root.string,
3841 (int) rel->r_addend & 0xffffffff,
3842 (int) stub_type);
3843 }
3844 else
3845 {
3846 len = 8 + 1 + 8 + 1 + 8 + 1 + 8 + 1 + 2 + 1;
3847 stub_name = (char *) bfd_malloc (len);
3848 if (stub_name != NULL)
3849 sprintf (stub_name, "%08x_%x:%x+%x_%d",
3850 input_section->id & 0xffffffff,
3851 sym_sec->id & 0xffffffff,
3852 ELF32_R_TYPE (rel->r_info) == R_ARM_TLS_CALL
3853 || ELF32_R_TYPE (rel->r_info) == R_ARM_THM_TLS_CALL
3854 ? 0 : (int) ELF32_R_SYM (rel->r_info) & 0xffffffff,
3855 (int) rel->r_addend & 0xffffffff,
3856 (int) stub_type);
3857 }
3858
3859 return stub_name;
3860 }
3861
3862 /* Look up an entry in the stub hash. Stub entries are cached because
3863 creating the stub name takes a bit of time. */
3864
3865 static struct elf32_arm_stub_hash_entry *
3866 elf32_arm_get_stub_entry (const asection *input_section,
3867 const asection *sym_sec,
3868 struct elf_link_hash_entry *hash,
3869 const Elf_Internal_Rela *rel,
3870 struct elf32_arm_link_hash_table *htab,
3871 enum elf32_arm_stub_type stub_type)
3872 {
3873 struct elf32_arm_stub_hash_entry *stub_entry;
3874 struct elf32_arm_link_hash_entry *h = (struct elf32_arm_link_hash_entry *) hash;
3875 const asection *id_sec;
3876
3877 if ((input_section->flags & SEC_CODE) == 0)
3878 return NULL;
3879
3880 /* If this input section is part of a group of sections sharing one
3881 stub section, then use the id of the first section in the group.
3882 Stub names need to include a section id, as there may well be
3883 more than one stub used to reach, say, printf, and we need to
3884 distinguish between them. */
3885 id_sec = htab->stub_group[input_section->id].link_sec;
3886
3887 if (h != NULL && h->stub_cache != NULL
3888 && h->stub_cache->h == h
3889 && h->stub_cache->id_sec == id_sec
3890 && h->stub_cache->stub_type == stub_type)
3891 {
3892 stub_entry = h->stub_cache;
3893 }
3894 else
3895 {
3896 char *stub_name;
3897
3898 stub_name = elf32_arm_stub_name (id_sec, sym_sec, h, rel, stub_type);
3899 if (stub_name == NULL)
3900 return NULL;
3901
3902 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table,
3903 stub_name, FALSE, FALSE);
3904 if (h != NULL)
3905 h->stub_cache = stub_entry;
3906
3907 free (stub_name);
3908 }
3909
3910 return stub_entry;
3911 }
3912
3913 /* Find or create a stub section. Returns a pointer to the stub section, and
3914 the section to which the stub section will be attached (in *LINK_SEC_P).
3915 LINK_SEC_P may be NULL. */
3916
3917 static asection *
3918 elf32_arm_create_or_find_stub_sec (asection **link_sec_p, asection *section,
3919 struct elf32_arm_link_hash_table *htab)
3920 {
3921 asection *link_sec;
3922 asection *stub_sec;
3923
3924 link_sec = htab->stub_group[section->id].link_sec;
3925 BFD_ASSERT (link_sec != NULL);
3926 stub_sec = htab->stub_group[section->id].stub_sec;
3927
3928 if (stub_sec == NULL)
3929 {
3930 stub_sec = htab->stub_group[link_sec->id].stub_sec;
3931 if (stub_sec == NULL)
3932 {
3933 size_t namelen;
3934 bfd_size_type len;
3935 char *s_name;
3936
3937 namelen = strlen (link_sec->name);
3938 len = namelen + sizeof (STUB_SUFFIX);
3939 s_name = (char *) bfd_alloc (htab->stub_bfd, len);
3940 if (s_name == NULL)
3941 return NULL;
3942
3943 memcpy (s_name, link_sec->name, namelen);
3944 memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
3945 stub_sec = (*htab->add_stub_section) (s_name, link_sec);
3946 if (stub_sec == NULL)
3947 return NULL;
3948 htab->stub_group[link_sec->id].stub_sec = stub_sec;
3949 }
3950 htab->stub_group[section->id].stub_sec = stub_sec;
3951 }
3952
3953 if (link_sec_p)
3954 *link_sec_p = link_sec;
3955
3956 return stub_sec;
3957 }
3958
3959 /* Add a new stub entry to the stub hash. Not all fields of the new
3960 stub entry are initialised. */
3961
3962 static struct elf32_arm_stub_hash_entry *
3963 elf32_arm_add_stub (const char *stub_name,
3964 asection *section,
3965 struct elf32_arm_link_hash_table *htab)
3966 {
3967 asection *link_sec;
3968 asection *stub_sec;
3969 struct elf32_arm_stub_hash_entry *stub_entry;
3970
3971 stub_sec = elf32_arm_create_or_find_stub_sec (&link_sec, section, htab);
3972 if (stub_sec == NULL)
3973 return NULL;
3974
3975 /* Enter this entry into the linker stub hash table. */
3976 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
3977 TRUE, FALSE);
3978 if (stub_entry == NULL)
3979 {
3980 (*_bfd_error_handler) (_("%B: cannot create stub entry %s"),
3981 section->owner,
3982 stub_name);
3983 return NULL;
3984 }
3985
3986 stub_entry->stub_sec = stub_sec;
3987 stub_entry->stub_offset = 0;
3988 stub_entry->id_sec = link_sec;
3989
3990 return stub_entry;
3991 }
3992
3993 /* Store an Arm insn into an output section not processed by
3994 elf32_arm_write_section. */
3995
3996 static void
3997 put_arm_insn (struct elf32_arm_link_hash_table * htab,
3998 bfd * output_bfd, bfd_vma val, void * ptr)
3999 {
4000 if (htab->byteswap_code != bfd_little_endian (output_bfd))
4001 bfd_putl32 (val, ptr);
4002 else
4003 bfd_putb32 (val, ptr);
4004 }
4005
4006 /* Store a 16-bit Thumb insn into an output section not processed by
4007 elf32_arm_write_section. */
4008
4009 static void
4010 put_thumb_insn (struct elf32_arm_link_hash_table * htab,
4011 bfd * output_bfd, bfd_vma val, void * ptr)
4012 {
4013 if (htab->byteswap_code != bfd_little_endian (output_bfd))
4014 bfd_putl16 (val, ptr);
4015 else
4016 bfd_putb16 (val, ptr);
4017 }
4018
4019 /* If it's possible to change R_TYPE to a more efficient access
4020 model, return the new reloc type. */
4021
4022 static unsigned
4023 elf32_arm_tls_transition (struct bfd_link_info *info, int r_type,
4024 struct elf_link_hash_entry *h)
4025 {
4026 int is_local = (h == NULL);
4027
4028 if (info->shared || (h && h->root.type == bfd_link_hash_undefweak))
4029 return r_type;
4030
4031 /* We do not support relaxations for Old TLS models. */
4032 switch (r_type)
4033 {
4034 case R_ARM_TLS_GOTDESC:
4035 case R_ARM_TLS_CALL:
4036 case R_ARM_THM_TLS_CALL:
4037 case R_ARM_TLS_DESCSEQ:
4038 case R_ARM_THM_TLS_DESCSEQ:
4039 return is_local ? R_ARM_TLS_LE32 : R_ARM_TLS_IE32;
4040 }
4041
4042 return r_type;
4043 }
4044
4045 static bfd_reloc_status_type elf32_arm_final_link_relocate
4046 (reloc_howto_type *, bfd *, bfd *, asection *, bfd_byte *,
4047 Elf_Internal_Rela *, bfd_vma, struct bfd_link_info *, asection *,
4048 const char *, unsigned char, enum arm_st_branch_type,
4049 struct elf_link_hash_entry *, bfd_boolean *, char **);
4050
4051 static unsigned int
4052 arm_stub_required_alignment (enum elf32_arm_stub_type stub_type)
4053 {
4054 switch (stub_type)
4055 {
4056 case arm_stub_a8_veneer_b_cond:
4057 case arm_stub_a8_veneer_b:
4058 case arm_stub_a8_veneer_bl:
4059 return 2;
4060
4061 case arm_stub_long_branch_any_any:
4062 case arm_stub_long_branch_v4t_arm_thumb:
4063 case arm_stub_long_branch_thumb_only:
4064 case arm_stub_long_branch_v4t_thumb_thumb:
4065 case arm_stub_long_branch_v4t_thumb_arm:
4066 case arm_stub_short_branch_v4t_thumb_arm:
4067 case arm_stub_long_branch_any_arm_pic:
4068 case arm_stub_long_branch_any_thumb_pic:
4069 case arm_stub_long_branch_v4t_thumb_thumb_pic:
4070 case arm_stub_long_branch_v4t_arm_thumb_pic:
4071 case arm_stub_long_branch_v4t_thumb_arm_pic:
4072 case arm_stub_long_branch_thumb_only_pic:
4073 case arm_stub_long_branch_any_tls_pic:
4074 case arm_stub_long_branch_v4t_thumb_tls_pic:
4075 case arm_stub_a8_veneer_blx:
4076 return 4;
4077
4078 default:
4079 abort (); /* Should be unreachable. */
4080 }
4081 }
4082
4083 static bfd_boolean
4084 arm_build_one_stub (struct bfd_hash_entry *gen_entry,
4085 void * in_arg)
4086 {
4087 #define MAXRELOCS 2
4088 struct elf32_arm_stub_hash_entry *stub_entry;
4089 struct elf32_arm_link_hash_table *globals;
4090 struct bfd_link_info *info;
4091 asection *stub_sec;
4092 bfd *stub_bfd;
4093 bfd_byte *loc;
4094 bfd_vma sym_value;
4095 int template_size;
4096 int size;
4097 const insn_sequence *template_sequence;
4098 int i;
4099 int stub_reloc_idx[MAXRELOCS] = {-1, -1};
4100 int stub_reloc_offset[MAXRELOCS] = {0, 0};
4101 int nrelocs = 0;
4102
4103 /* Massage our args to the form they really have. */
4104 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
4105 info = (struct bfd_link_info *) in_arg;
4106
4107 globals = elf32_arm_hash_table (info);
4108 if (globals == NULL)
4109 return FALSE;
4110
4111 stub_sec = stub_entry->stub_sec;
4112
4113 if ((globals->fix_cortex_a8 < 0)
4114 != (arm_stub_required_alignment (stub_entry->stub_type) == 2))
4115 /* We have to do less-strictly-aligned fixes last. */
4116 return TRUE;
4117
4118 /* Make a note of the offset within the stubs for this entry. */
4119 stub_entry->stub_offset = stub_sec->size;
4120 loc = stub_sec->contents + stub_entry->stub_offset;
4121
4122 stub_bfd = stub_sec->owner;
4123
4124 /* This is the address of the stub destination. */
4125 sym_value = (stub_entry->target_value
4126 + stub_entry->target_section->output_offset
4127 + stub_entry->target_section->output_section->vma);
4128
4129 template_sequence = stub_entry->stub_template;
4130 template_size = stub_entry->stub_template_size;
4131
4132 size = 0;
4133 for (i = 0; i < template_size; i++)
4134 {
4135 switch (template_sequence[i].type)
4136 {
4137 case THUMB16_TYPE:
4138 {
4139 bfd_vma data = (bfd_vma) template_sequence[i].data;
4140 if (template_sequence[i].reloc_addend != 0)
4141 {
4142 /* We've borrowed the reloc_addend field to mean we should
4143 insert a condition code into this (Thumb-1 branch)
4144 instruction. See THUMB16_BCOND_INSN. */
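/* Concretely: the condition field sits in bits 25:22 of the original
32-bit Bcc.W (encoding T3) held in orig_insn, and is copied into
bits 11:8 of the 16-bit Bcc template. */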
4145 BFD_ASSERT ((data & 0xff00) == 0xd000);
4146 data |= ((stub_entry->orig_insn >> 22) & 0xf) << 8;
4147 }
4148 bfd_put_16 (stub_bfd, data, loc + size);
4149 size += 2;
4150 }
4151 break;
4152
4153 case THUMB32_TYPE:
4154 bfd_put_16 (stub_bfd,
4155 (template_sequence[i].data >> 16) & 0xffff,
4156 loc + size);
4157 bfd_put_16 (stub_bfd, template_sequence[i].data & 0xffff,
4158 loc + size + 2);
4159 if (template_sequence[i].r_type != R_ARM_NONE)
4160 {
4161 stub_reloc_idx[nrelocs] = i;
4162 stub_reloc_offset[nrelocs++] = size;
4163 }
4164 size += 4;
4165 break;
4166
4167 case ARM_TYPE:
4168 bfd_put_32 (stub_bfd, template_sequence[i].data,
4169 loc + size);
4170 /* Handle cases where the target is encoded within the
4171 instruction. */
4172 if (template_sequence[i].r_type == R_ARM_JUMP24)
4173 {
4174 stub_reloc_idx[nrelocs] = i;
4175 stub_reloc_offset[nrelocs++] = size;
4176 }
4177 size += 4;
4178 break;
4179
4180 case DATA_TYPE:
4181 bfd_put_32 (stub_bfd, template_sequence[i].data, loc + size);
4182 stub_reloc_idx[nrelocs] = i;
4183 stub_reloc_offset[nrelocs++] = size;
4184 size += 4;
4185 break;
4186
4187 default:
4188 BFD_FAIL ();
4189 return FALSE;
4190 }
4191 }
4192
4193 stub_sec->size += size;
4194
4195 /* Stub size has already been computed in arm_size_one_stub. Check
4196 consistency. */
4197 BFD_ASSERT (size == stub_entry->stub_size);
4198
4199 /* Destination is Thumb. Force bit 0 to 1 to reflect this. */
4200 if (stub_entry->branch_type == ST_BRANCH_TO_THUMB)
4201 sym_value |= 1;
4202
4203 /* Assume each stub contains at least one and at most MAXRELOCS
4204 entries to relocate. */
4205 BFD_ASSERT (nrelocs != 0 && nrelocs <= MAXRELOCS);
4206
4207 for (i = 0; i < nrelocs; i++)
4208 if (template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_JUMP24
4209 || template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_JUMP19
4210 || template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_CALL
4211 || template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_XPC22)
4212 {
4213 Elf_Internal_Rela rel;
4214 bfd_boolean unresolved_reloc;
4215 char *error_message;
4216 enum arm_st_branch_type branch_type
4217 = (template_sequence[stub_reloc_idx[i]].r_type != R_ARM_THM_XPC22
4218 ? ST_BRANCH_TO_THUMB : ST_BRANCH_TO_ARM);
4219 bfd_vma points_to = sym_value + stub_entry->target_addend;
4220
4221 rel.r_offset = stub_entry->stub_offset + stub_reloc_offset[i];
4222 rel.r_info = ELF32_R_INFO (0,
4223 template_sequence[stub_reloc_idx[i]].r_type);
4224 rel.r_addend = template_sequence[stub_reloc_idx[i]].reloc_addend;
4225
4226 if (stub_entry->stub_type == arm_stub_a8_veneer_b_cond && i == 0)
4227 /* The first relocation in the elf32_arm_stub_a8_veneer_b_cond[]
4228 template should refer back to the instruction after the original
4229 branch. */
4230 points_to = sym_value;
4231
4232 /* There may be unintended consequences if this is not true. */
4233 BFD_ASSERT (stub_entry->h == NULL);
4234
4235 /* Note: _bfd_final_link_relocate doesn't handle these relocations
4236 properly. We should probably use this function unconditionally,
4237 rather than only for certain relocations listed in the enclosing
4238 conditional, for the sake of consistency. */
4239 elf32_arm_final_link_relocate (elf32_arm_howto_from_type
4240 (template_sequence[stub_reloc_idx[i]].r_type),
4241 stub_bfd, info->output_bfd, stub_sec, stub_sec->contents, &rel,
4242 points_to, info, stub_entry->target_section, "", STT_FUNC,
4243 branch_type, (struct elf_link_hash_entry *) stub_entry->h,
4244 &unresolved_reloc, &error_message);
4245 }
4246 else
4247 {
4248 Elf_Internal_Rela rel;
4249 bfd_boolean unresolved_reloc;
4250 char *error_message;
4251 bfd_vma points_to = sym_value + stub_entry->target_addend
4252 + template_sequence[stub_reloc_idx[i]].reloc_addend;
4253
4254 rel.r_offset = stub_entry->stub_offset + stub_reloc_offset[i];
4255 rel.r_info = ELF32_R_INFO (0,
4256 template_sequence[stub_reloc_idx[i]].r_type);
4257 rel.r_addend = 0;
4258
4259 elf32_arm_final_link_relocate (elf32_arm_howto_from_type
4260 (template_sequence[stub_reloc_idx[i]].r_type),
4261 stub_bfd, info->output_bfd, stub_sec, stub_sec->contents, &rel,
4262 points_to, info, stub_entry->target_section, "", STT_FUNC,
4263 stub_entry->branch_type,
4264 (struct elf_link_hash_entry *) stub_entry->h, &unresolved_reloc,
4265 &error_message);
4266 }
4267
4268 return TRUE;
4269 #undef MAXRELOCS
4270 }
4271
4272 /* Calculate the template, template size and instruction size for a stub.
4273 Return value is the instruction size. */
4274
4275 static unsigned int
4276 find_stub_size_and_template (enum elf32_arm_stub_type stub_type,
4277 const insn_sequence **stub_template,
4278 int *stub_template_size)
4279 {
4280 const insn_sequence *template_sequence = NULL;
4281 int template_size = 0, i;
4282 unsigned int size;
4283
4284 template_sequence = stub_definitions[stub_type].template_sequence;
4285 if (stub_template)
4286 *stub_template = template_sequence;
4287
4288 template_size = stub_definitions[stub_type].template_size;
4289 if (stub_template_size)
4290 *stub_template_size = template_size;
4291
4292 size = 0;
4293 for (i = 0; i < template_size; i++)
4294 {
4295 switch (template_sequence[i].type)
4296 {
4297 case THUMB16_TYPE:
4298 size += 2;
4299 break;
4300
4301 case ARM_TYPE:
4302 case THUMB32_TYPE:
4303 case DATA_TYPE:
4304 size += 4;
4305 break;
4306
4307 default:
4308 BFD_FAIL ();
4309 return 0;
4310 }
4311 }
4312
4313 return size;
4314 }
4315
4316 /* As above, but don't actually build the stub. Just bump offset so
4317 we know stub section sizes. */
4318
4319 static bfd_boolean
4320 arm_size_one_stub (struct bfd_hash_entry *gen_entry,
4321 void *in_arg ATTRIBUTE_UNUSED)
4322 {
4323 struct elf32_arm_stub_hash_entry *stub_entry;
4324 const insn_sequence *template_sequence;
4325 int template_size, size;
4326
4327 /* Massage our args to the form they really have. */
4328 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
4329
4330 BFD_ASSERT((stub_entry->stub_type > arm_stub_none)
4331 && stub_entry->stub_type < ARRAY_SIZE(stub_definitions));
4332
4333 size = find_stub_size_and_template (stub_entry->stub_type, &template_sequence,
4334 &template_size);
4335
4336 stub_entry->stub_size = size;
4337 stub_entry->stub_template = template_sequence;
4338 stub_entry->stub_template_size = template_size;
4339
4340 size = (size + 7) & ~7;
4341 stub_entry->stub_sec->size += size;
4342
4343 return TRUE;
4344 }
4345
4346 /* External entry points for sizing and building linker stubs. */
4347
4348 /* Set up various things so that we can make a list of input sections
4349 for each output section included in the link. Returns -1 on error,
4350 0 when no stubs will be needed, and 1 on success. */
4351
4352 int
4353 elf32_arm_setup_section_lists (bfd *output_bfd,
4354 struct bfd_link_info *info)
4355 {
4356 bfd *input_bfd;
4357 unsigned int bfd_count;
4358 int top_id, top_index;
4359 asection *section;
4360 asection **input_list, **list;
4361 bfd_size_type amt;
4362 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
4363
4364 if (htab == NULL)
4365 return 0;
4366 if (! is_elf_hash_table (htab))
4367 return 0;
4368
4369 /* Count the number of input BFDs and find the top input section id. */
4370 for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0;
4371 input_bfd != NULL;
4372 input_bfd = input_bfd->link_next)
4373 {
4374 bfd_count += 1;
4375 for (section = input_bfd->sections;
4376 section != NULL;
4377 section = section->next)
4378 {
4379 if (top_id < section->id)
4380 top_id = section->id;
4381 }
4382 }
4383 htab->bfd_count = bfd_count;
4384
4385 amt = sizeof (struct map_stub) * (top_id + 1);
4386 htab->stub_group = (struct map_stub *) bfd_zmalloc (amt);
4387 if (htab->stub_group == NULL)
4388 return -1;
4389 htab->top_id = top_id;
4390
4391 /* We can't use output_bfd->section_count here to find the top output
4392 section index as some sections may have been removed, and
4393 _bfd_strip_section_from_output doesn't renumber the indices. */
4394 for (section = output_bfd->sections, top_index = 0;
4395 section != NULL;
4396 section = section->next)
4397 {
4398 if (top_index < section->index)
4399 top_index = section->index;
4400 }
4401
4402 htab->top_index = top_index;
4403 amt = sizeof (asection *) * (top_index + 1);
4404 input_list = (asection **) bfd_malloc (amt);
4405 htab->input_list = input_list;
4406 if (input_list == NULL)
4407 return -1;
4408
4409 /* For sections we aren't interested in, mark their entries with a
4410 value we can check later. */
4411 list = input_list + top_index;
4412 do
4413 *list = bfd_abs_section_ptr;
4414 while (list-- != input_list);
4415
4416 for (section = output_bfd->sections;
4417 section != NULL;
4418 section = section->next)
4419 {
4420 if ((section->flags & SEC_CODE) != 0)
4421 input_list[section->index] = NULL;
4422 }
4423
4424 return 1;
4425 }
4426
4427 /* The linker repeatedly calls this function for each input section,
4428 in the order that input sections are linked into output sections.
4429 Build lists of input sections to determine groupings between which
4430 we may insert linker stubs. */
4431
4432 void
4433 elf32_arm_next_input_section (struct bfd_link_info *info,
4434 asection *isec)
4435 {
4436 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
4437
4438 if (htab == NULL)
4439 return;
4440
4441 if (isec->output_section->index <= htab->top_index)
4442 {
4443 asection **list = htab->input_list + isec->output_section->index;
4444
4445 if (*list != bfd_abs_section_ptr && (isec->flags & SEC_CODE) != 0)
4446 {
4447 /* Steal the link_sec pointer for our list. */
4448 #define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
4449 /* This happens to make the list in reverse order,
4450 which we reverse later. */
4451 PREV_SEC (isec) = *list;
4452 *list = isec;
4453 }
4454 }
4455 }
4456
4457 /* See whether we can group stub sections together. Grouping stub
4458 sections may result in fewer stubs. More importantly, we need to
4459 put all .init* and .fini* stubs at the end of the .init or
4460 .fini output sections respectively, because glibc splits the
4461 _init and _fini functions into multiple parts. Putting a stub in
4462 the middle of a function is not a good idea. */
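/* In outline: for each output section we walk its input sections in
layout order and record, for every member of a group, the group's last
section as its link_sec; a single stub section for the whole group is
later attached to that link_sec (see elf32_arm_create_or_find_stub_sec
and the add_stub_section callback). */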
4463
4464 static void
4465 group_sections (struct elf32_arm_link_hash_table *htab,
4466 bfd_size_type stub_group_size,
4467 bfd_boolean stubs_always_after_branch)
4468 {
4469 asection **list = htab->input_list;
4470
4471 do
4472 {
4473 asection *tail = *list;
4474 asection *head;
4475
4476 if (tail == bfd_abs_section_ptr)
4477 continue;
4478
4479 /* Reverse the list: we must avoid placing stubs at the
4480 beginning of the section because the beginning of the text
4481 section may be required for an interrupt vector in bare metal
4482 code. */
4483 #define NEXT_SEC PREV_SEC
4484 head = NULL;
4485 while (tail != NULL)
4486 {
4487 /* Pop from tail. */
4488 asection *item = tail;
4489 tail = PREV_SEC (item);
4490
4491 /* Push on head. */
4492 NEXT_SEC (item) = head;
4493 head = item;
4494 }
4495
4496 while (head != NULL)
4497 {
4498 asection *curr;
4499 asection *next;
4500 bfd_vma stub_group_start = head->output_offset;
4501 bfd_vma end_of_next;
4502
4503 curr = head;
4504 while (NEXT_SEC (curr) != NULL)
4505 {
4506 next = NEXT_SEC (curr);
4507 end_of_next = next->output_offset + next->size;
4508 if (end_of_next - stub_group_start >= stub_group_size)
4509 /* End of NEXT is too far from start, so stop. */
4510 break;
4511 /* Add NEXT to the group. */
4512 curr = next;
4513 }
4514
4515 /* OK, the size from the start to the start of CURR is less
4516 than stub_group_size and thus can be handled by one stub
4517 section. (Or the head section is itself larger than
4518 stub_group_size, in which case we may be toast.)
4519 We should really be keeping track of the total size of
4520 stubs added here, as stubs contribute to the final output
4521 section size. */
4522 do
4523 {
4524 next = NEXT_SEC (head);
4525 /* Set up this stub group. */
4526 htab->stub_group[head->id].link_sec = curr;
4527 }
4528 while (head != curr && (head = next) != NULL);
4529
4530 /* But wait, there's more! Input sections up to stub_group_size
4531 bytes after the stub section can be handled by it too. */
4532 if (!stubs_always_after_branch)
4533 {
4534 stub_group_start = curr->output_offset + curr->size;
4535
4536 while (next != NULL)
4537 {
4538 end_of_next = next->output_offset + next->size;
4539 if (end_of_next - stub_group_start >= stub_group_size)
4540 /* End of NEXT is too far from stubs, so stop. */
4541 break;
4542 /* Add NEXT to the stub group. */
4543 head = next;
4544 next = NEXT_SEC (head);
4545 htab->stub_group[head->id].link_sec = curr;
4546 }
4547 }
4548 head = next;
4549 }
4550 }
4551 while (list++ != htab->input_list + htab->top_index);
4552
4553 free (htab->input_list);
4554 #undef PREV_SEC
4555 #undef NEXT_SEC
4556 }
4557
4558 /* Comparison function for sorting/searching relocations relating to Cortex-A8
4559 erratum fix. */
4560
4561 static int
4562 a8_reloc_compare (const void *a, const void *b)
4563 {
4564 const struct a8_erratum_reloc *ra = (const struct a8_erratum_reloc *) a;
4565 const struct a8_erratum_reloc *rb = (const struct a8_erratum_reloc *) b;
4566
4567 if (ra->from < rb->from)
4568 return -1;
4569 else if (ra->from > rb->from)
4570 return 1;
4571 else
4572 return 0;
4573 }
4574
4575 static struct elf_link_hash_entry *find_thumb_glue (struct bfd_link_info *,
4576 const char *, char **);
4577
4578 /* Helper function to scan code for sequences which might trigger the Cortex-A8
4579 branch/TLB erratum. Fill in the table described by A8_FIXES_P,
4580 NUM_A8_FIXES_P, A8_FIX_TABLE_SIZE_P. Returns true if an error occurs, false
4581 otherwise. */
4582
4583 static bfd_boolean
4584 cortex_a8_erratum_scan (bfd *input_bfd,
4585 struct bfd_link_info *info,
4586 struct a8_erratum_fix **a8_fixes_p,
4587 unsigned int *num_a8_fixes_p,
4588 unsigned int *a8_fix_table_size_p,
4589 struct a8_erratum_reloc *a8_relocs,
4590 unsigned int num_a8_relocs,
4591 unsigned prev_num_a8_fixes,
4592 bfd_boolean *stub_changed_p)
4593 {
4594 asection *section;
4595 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
4596 struct a8_erratum_fix *a8_fixes = *a8_fixes_p;
4597 unsigned int num_a8_fixes = *num_a8_fixes_p;
4598 unsigned int a8_fix_table_size = *a8_fix_table_size_p;
4599
4600 if (htab == NULL)
4601 return FALSE;
4602
4603 for (section = input_bfd->sections;
4604 section != NULL;
4605 section = section->next)
4606 {
4607 bfd_byte *contents = NULL;
4608 struct _arm_elf_section_data *sec_data;
4609 unsigned int span;
4610 bfd_vma base_vma;
4611
4612 if (elf_section_type (section) != SHT_PROGBITS
4613 || (elf_section_flags (section) & SHF_EXECINSTR) == 0
4614 || (section->flags & SEC_EXCLUDE) != 0
4615 || (section->sec_info_type == SEC_INFO_TYPE_JUST_SYMS)
4616 || (section->output_section == bfd_abs_section_ptr))
4617 continue;
4618
4619 base_vma = section->output_section->vma + section->output_offset;
4620
4621 if (elf_section_data (section)->this_hdr.contents != NULL)
4622 contents = elf_section_data (section)->this_hdr.contents;
4623 else if (! bfd_malloc_and_get_section (input_bfd, section, &contents))
4624 return TRUE;
4625
4626 sec_data = elf32_arm_section_data (section);
4627
4628 for (span = 0; span < sec_data->mapcount; span++)
4629 {
4630 unsigned int span_start = sec_data->map[span].vma;
4631 unsigned int span_end = (span == sec_data->mapcount - 1)
4632 ? section->size : sec_data->map[span + 1].vma;
4633 unsigned int i;
4634 char span_type = sec_data->map[span].type;
4635 bfd_boolean last_was_32bit = FALSE, last_was_branch = FALSE;
4636
4637 if (span_type != 't')
4638 continue;
4639
4640 /* Span is entirely within a single 4KB region: skip scanning. */
4641 if (((base_vma + span_start) & ~0xfff)
4642 == ((base_vma + span_end) & ~0xfff))
4643 continue;
4644
4645 /* Scan for 32-bit Thumb-2 branches which span two 4K regions, where:
4646
4647 * The opcode is BLX.W, BL.W, B.W, Bcc.W
4648 * The branch target is in the same 4KB region as the
4649 first half of the branch.
4650 * The instruction before the branch is a 32-bit
4651 length non-branch instruction. */
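/* For example (addresses are illustrative): a BL.W whose first halfword
is at 0x1ffe straddles the 4KB regions at 0x1000 and 0x2000; if the
branch target lies back in the 0x1000 region and the previous
instruction was a 32-bit non-branch, the code below records a veneer
(an "a8 fix") to redirect the branch. */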
4652 for (i = span_start; i < span_end;)
4653 {
4654 unsigned int insn = bfd_getl16 (&contents[i]);
4655 bfd_boolean insn_32bit = FALSE, is_blx = FALSE, is_b = FALSE;
4656 bfd_boolean is_bl = FALSE, is_bcc = FALSE, is_32bit_branch;
4657
4658 if ((insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000)
4659 insn_32bit = TRUE;
4660
4661 if (insn_32bit)
4662 {
4663 /* Load the rest of the insn (in manual-friendly order). */
4664 insn = (insn << 16) | bfd_getl16 (&contents[i + 2]);
4665
4666 /* Encoding T4: B<c>.W. */
4667 is_b = (insn & 0xf800d000) == 0xf0009000;
4668 /* Encoding T1: BL<c>.W. */
4669 is_bl = (insn & 0xf800d000) == 0xf000d000;
4670 /* Encoding T2: BLX<c>.W. */
4671 is_blx = (insn & 0xf800d000) == 0xf000c000;
4672 /* Encoding T3: B<c>.W (not permitted in IT block). */
4673 is_bcc = (insn & 0xf800d000) == 0xf0008000
4674 && (insn & 0x07f00000) != 0x03800000;
4675 }
4676
4677 is_32bit_branch = is_b || is_bl || is_blx || is_bcc;
4678
4679 if (((base_vma + i) & 0xfff) == 0xffe
4680 && insn_32bit
4681 && is_32bit_branch
4682 && last_was_32bit
4683 && ! last_was_branch)
4684 {
4685 bfd_signed_vma offset = 0;
4686 bfd_boolean force_target_arm = FALSE;
4687 bfd_boolean force_target_thumb = FALSE;
4688 bfd_vma target;
4689 enum elf32_arm_stub_type stub_type = arm_stub_none;
4690 struct a8_erratum_reloc key, *found;
4691 bfd_boolean use_plt = FALSE;
4692
4693 key.from = base_vma + i;
4694 found = (struct a8_erratum_reloc *)
4695 bsearch (&key, a8_relocs, num_a8_relocs,
4696 sizeof (struct a8_erratum_reloc),
4697 &a8_reloc_compare);
4698
4699 if (found)
4700 {
4701 char *error_message = NULL;
4702 struct elf_link_hash_entry *entry;
4703
4704 /* We don't care about the error returned from this
4705 function, only whether there is glue or not. */
4706 entry = find_thumb_glue (info, found->sym_name,
4707 &error_message);
4708
4709 if (entry)
4710 found->non_a8_stub = TRUE;
4711
4712 /* Keep a simpler condition, for the sake of clarity. */
4713 if (htab->root.splt != NULL && found->hash != NULL
4714 && found->hash->root.plt.offset != (bfd_vma) -1)
4715 use_plt = TRUE;
4716
4717 if (found->r_type == R_ARM_THM_CALL)
4718 {
4719 if (found->branch_type == ST_BRANCH_TO_ARM
4720 || use_plt)
4721 force_target_arm = TRUE;
4722 else
4723 force_target_thumb = TRUE;
4724 }
4725 }
4726
4727 /* Check if we have an offending branch instruction. */
4728
4729 if (found && found->non_a8_stub)
4730 /* We've already made a stub for this instruction, e.g.
4731 it's a long branch or a Thumb->ARM stub. Assume that
4732 stub will suffice to work around the A8 erratum (see
4733 setting of always_after_branch above). */
4734 ;
4735 else if (is_bcc)
4736 {
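/* Decode the Bcc.W (encoding T3) offset:
SignExtend (S:J2:J1:imm6:imm11:'0'). */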
4737 offset = (insn & 0x7ff) << 1;
4738 offset |= (insn & 0x3f0000) >> 4;
4739 offset |= (insn & 0x2000) ? 0x40000 : 0;
4740 offset |= (insn & 0x800) ? 0x80000 : 0;
4741 offset |= (insn & 0x4000000) ? 0x100000 : 0;
4742 if (offset & 0x100000)
4743 offset |= ~ ((bfd_signed_vma) 0xfffff);
4744 stub_type = arm_stub_a8_veneer_b_cond;
4745 }
4746 else if (is_b || is_bl || is_blx)
4747 {
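/* Decode the B.W/BL/BLX offset (encodings T4/T1/T2):
SignExtend (S:I1:I2:imm10:imm11:'0'), with I1 = NOT (J1 EOR S)
and I2 = NOT (J2 EOR S). */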
4748 int s = (insn & 0x4000000) != 0;
4749 int j1 = (insn & 0x2000) != 0;
4750 int j2 = (insn & 0x800) != 0;
4751 int i1 = !(j1 ^ s);
4752 int i2 = !(j2 ^ s);
4753
4754 offset = (insn & 0x7ff) << 1;
4755 offset |= (insn & 0x3ff0000) >> 4;
4756 offset |= i2 << 22;
4757 offset |= i1 << 23;
4758 offset |= s << 24;
4759 if (offset & 0x1000000)
4760 offset |= ~ ((bfd_signed_vma) 0xffffff);
4761
4762 if (is_blx)
4763 offset &= ~ ((bfd_signed_vma) 3);
4764
4765 stub_type = is_blx ? arm_stub_a8_veneer_blx :
4766 is_bl ? arm_stub_a8_veneer_bl : arm_stub_a8_veneer_b;
4767 }
4768
4769 if (stub_type != arm_stub_none)
4770 {
4771 bfd_vma pc_for_insn = base_vma + i + 4;
4772
4773 /* The original instruction is a BL, but the target is
4774 an ARM instruction. If we were not making a stub,
4775 the BL would have been converted to a BLX. Use the
4776 BLX stub instead in that case. */
4777 if (htab->use_blx && force_target_arm
4778 && stub_type == arm_stub_a8_veneer_bl)
4779 {
4780 stub_type = arm_stub_a8_veneer_blx;
4781 is_blx = TRUE;
4782 is_bl = FALSE;
4783 }
4784 /* Conversely, if the original instruction was
4785 BLX but the target is Thumb mode, use the BL
4786 stub. */
4787 else if (force_target_thumb
4788 && stub_type == arm_stub_a8_veneer_blx)
4789 {
4790 stub_type = arm_stub_a8_veneer_bl;
4791 is_blx = FALSE;
4792 is_bl = TRUE;
4793 }
4794
4795 if (is_blx)
4796 pc_for_insn &= ~ ((bfd_vma) 3);
4797
4798 /* If we found a relocation, use the proper destination,
4799 not the offset in the (unrelocated) instruction.
4800 Note this is always done if we switched the stub type
4801 above. */
4802 if (found)
4803 offset =
4804 (bfd_signed_vma) (found->destination - pc_for_insn);
4805
4806 /* If the stub will use a Thumb-mode branch to a
4807 PLT target, redirect it to the preceding Thumb
4808 entry point. */
4809 if (stub_type != arm_stub_a8_veneer_blx && use_plt)
4810 offset -= PLT_THUMB_STUB_SIZE;
4811
4812 target = pc_for_insn + offset;
4813
4814 /* The BLX stub is ARM-mode code. Adjust the offset to
4815 take the different PC value (+8 instead of +4) into
4816 account. */
4817 if (stub_type == arm_stub_a8_veneer_blx)
4818 offset += 4;
4819
4820 if (((base_vma + i) & ~0xfff) == (target & ~0xfff))
4821 {
4822 char *stub_name = NULL;
4823
4824 if (num_a8_fixes == a8_fix_table_size)
4825 {
4826 a8_fix_table_size *= 2;
4827 a8_fixes = (struct a8_erratum_fix *)
4828 bfd_realloc (a8_fixes,
4829 sizeof (struct a8_erratum_fix)
4830 * a8_fix_table_size);
4831 }
4832
4833 if (num_a8_fixes < prev_num_a8_fixes)
4834 {
4835 /* If we're doing a subsequent scan,
4836 check if we've found the same fix as
4837 before, and try and reuse the stub
4838 name. */
4839 stub_name = a8_fixes[num_a8_fixes].stub_name;
4840 if ((a8_fixes[num_a8_fixes].section != section)
4841 || (a8_fixes[num_a8_fixes].offset != i))
4842 {
4843 free (stub_name);
4844 stub_name = NULL;
4845 *stub_changed_p = TRUE;
4846 }
4847 }
4848
4849 if (!stub_name)
4850 {
4851 stub_name = (char *) bfd_malloc (8 + 1 + 8 + 1);
4852 if (stub_name != NULL)
4853 sprintf (stub_name, "%x:%x", section->id, i);
4854 }
4855
4856 a8_fixes[num_a8_fixes].input_bfd = input_bfd;
4857 a8_fixes[num_a8_fixes].section = section;
4858 a8_fixes[num_a8_fixes].offset = i;
4859 a8_fixes[num_a8_fixes].addend = offset;
4860 a8_fixes[num_a8_fixes].orig_insn = insn;
4861 a8_fixes[num_a8_fixes].stub_name = stub_name;
4862 a8_fixes[num_a8_fixes].stub_type = stub_type;
4863 a8_fixes[num_a8_fixes].branch_type =
4864 is_blx ? ST_BRANCH_TO_ARM : ST_BRANCH_TO_THUMB;
4865
4866 num_a8_fixes++;
4867 }
4868 }
4869 }
4870
4871 i += insn_32bit ? 4 : 2;
4872 last_was_32bit = insn_32bit;
4873 last_was_branch = is_32bit_branch;
4874 }
4875 }
4876
4877 if (elf_section_data (section)->this_hdr.contents == NULL)
4878 free (contents);
4879 }
4880
4881 *a8_fixes_p = a8_fixes;
4882 *num_a8_fixes_p = num_a8_fixes;
4883 *a8_fix_table_size_p = a8_fix_table_size;
4884
4885 return FALSE;
4886 }
4887
4888 /* Determine and set the size of the stub section for a final link.
4889
4890 The basic idea here is to examine all the relocations looking for
4891 PC-relative calls to a target that is unreachable with a "bl"
4892 instruction. */
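/* Roughly, the function below iterates to a fixed point: scan every
relocation, create any stubs (and Cortex-A8 fixes) that are needed,
recompute the stub section sizes, ask the linker to lay sections out
again, and repeat until no new stubs or fixes appear. */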
4893
4894 bfd_boolean
4895 elf32_arm_size_stubs (bfd *output_bfd,
4896 bfd *stub_bfd,
4897 struct bfd_link_info *info,
4898 bfd_signed_vma group_size,
4899 asection * (*add_stub_section) (const char *, asection *),
4900 void (*layout_sections_again) (void))
4901 {
4902 bfd_size_type stub_group_size;
4903 bfd_boolean stubs_always_after_branch;
4904 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
4905 struct a8_erratum_fix *a8_fixes = NULL;
4906 unsigned int num_a8_fixes = 0, a8_fix_table_size = 10;
4907 struct a8_erratum_reloc *a8_relocs = NULL;
4908 unsigned int num_a8_relocs = 0, a8_reloc_table_size = 10, i;
4909
4910 if (htab == NULL)
4911 return FALSE;
4912
4913 if (htab->fix_cortex_a8)
4914 {
4915 a8_fixes = (struct a8_erratum_fix *)
4916 bfd_zmalloc (sizeof (struct a8_erratum_fix) * a8_fix_table_size);
4917 a8_relocs = (struct a8_erratum_reloc *)
4918 bfd_zmalloc (sizeof (struct a8_erratum_reloc) * a8_reloc_table_size);
4919 }
4920
4921 /* Propagate mach to stub bfd, because it may not have been
4922 finalized when we created stub_bfd. */
4923 bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
4924 bfd_get_mach (output_bfd));
4925
4926 /* Stash our params away. */
4927 htab->stub_bfd = stub_bfd;
4928 htab->add_stub_section = add_stub_section;
4929 htab->layout_sections_again = layout_sections_again;
4930 stubs_always_after_branch = group_size < 0;
4931
4932 /* The Cortex-A8 erratum fix depends on stubs not being in the same 4K page
4933 as the first half of a 32-bit branch straddling two 4K pages. This is a
4934 crude way of enforcing that. */
4935 if (htab->fix_cortex_a8)
4936 stubs_always_after_branch = 1;
4937
4938 if (group_size < 0)
4939 stub_group_size = -group_size;
4940 else
4941 stub_group_size = group_size;
4942
4943 if (stub_group_size == 1)
4944 {
4945 /* Default values. */
4946 /* The Thumb branch range of +-4MB has to be used as the default
4947 maximum group size (a given section can contain both ARM and Thumb
4948 code, so the worst case has to be taken into account).
4949
4950 This value is 24K less than that, which allows for 2025
4951 12-byte stubs. If we exceed that, then we will fail to link.
4952 The user will have to relink with an explicit group size
4953 option. */
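/* As a sanity check on the numbers above: 4MB is 4194304 bytes,
4194304 - 4170000 = 24304 (roughly 24K), and 24304 / 12 leaves room
for 2025 twelve-byte stubs. */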
4954 stub_group_size = 4170000;
4955 }
4956
4957 group_sections (htab, stub_group_size, stubs_always_after_branch);
4958
4959 /* If we're applying the Cortex-A8 fix, we need to determine the
4960 program header size now, because we cannot change it later --
4961 that could alter section placements. Notice the A8 erratum fix
4962 ends up requiring the section addresses to remain unchanged
4963 modulo the page size. That's something we cannot represent
4964 inside BFD, and we don't want to force the section alignment to
4965 be the page size. */
4966 if (htab->fix_cortex_a8)
4967 (*htab->layout_sections_again) ();
4968
4969 while (1)
4970 {
4971 bfd *input_bfd;
4972 unsigned int bfd_indx;
4973 asection *stub_sec;
4974 bfd_boolean stub_changed = FALSE;
4975 unsigned prev_num_a8_fixes = num_a8_fixes;
4976
4977 num_a8_fixes = 0;
4978 for (input_bfd = info->input_bfds, bfd_indx = 0;
4979 input_bfd != NULL;
4980 input_bfd = input_bfd->link_next, bfd_indx++)
4981 {
4982 Elf_Internal_Shdr *symtab_hdr;
4983 asection *section;
4984 Elf_Internal_Sym *local_syms = NULL;
4985
4986 num_a8_relocs = 0;
4987
4988 /* We'll need the symbol table in a second. */
4989 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
4990 if (symtab_hdr->sh_info == 0)
4991 continue;
4992
4993 /* Walk over each section attached to the input bfd. */
4994 for (section = input_bfd->sections;
4995 section != NULL;
4996 section = section->next)
4997 {
4998 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
4999
5000 /* If there aren't any relocs, then there's nothing more
5001 to do. */
5002 if ((section->flags & SEC_RELOC) == 0
5003 || section->reloc_count == 0
5004 || (section->flags & SEC_CODE) == 0)
5005 continue;
5006
5007 /* If this section is a link-once section that will be
5008 discarded, then don't create any stubs. */
5009 if (section->output_section == NULL
5010 || section->output_section->owner != output_bfd)
5011 continue;
5012
5013 /* Get the relocs. */
5014 internal_relocs
5015 = _bfd_elf_link_read_relocs (input_bfd, section, NULL,
5016 NULL, info->keep_memory);
5017 if (internal_relocs == NULL)
5018 goto error_ret_free_local;
5019
5020 /* Now examine each relocation. */
5021 irela = internal_relocs;
5022 irelaend = irela + section->reloc_count;
5023 for (; irela < irelaend; irela++)
5024 {
5025 unsigned int r_type, r_indx;
5026 enum elf32_arm_stub_type stub_type;
5027 struct elf32_arm_stub_hash_entry *stub_entry;
5028 asection *sym_sec;
5029 bfd_vma sym_value;
5030 bfd_vma destination;
5031 struct elf32_arm_link_hash_entry *hash;
5032 const char *sym_name;
5033 char *stub_name;
5034 const asection *id_sec;
5035 unsigned char st_type;
5036 enum arm_st_branch_type branch_type;
5037 bfd_boolean created_stub = FALSE;
5038
5039 r_type = ELF32_R_TYPE (irela->r_info);
5040 r_indx = ELF32_R_SYM (irela->r_info);
5041
5042 if (r_type >= (unsigned int) R_ARM_max)
5043 {
5044 bfd_set_error (bfd_error_bad_value);
5045 error_ret_free_internal:
5046 if (elf_section_data (section)->relocs == NULL)
5047 free (internal_relocs);
5048 goto error_ret_free_local;
5049 }
5050
5051 hash = NULL;
5052 if (r_indx >= symtab_hdr->sh_info)
5053 hash = elf32_arm_hash_entry
5054 (elf_sym_hashes (input_bfd)
5055 [r_indx - symtab_hdr->sh_info]);
5056
5057 /* Only look for stubs on branch instructions, or
5058 non-relaxed TLS call relocations. */
5059 if ((r_type != (unsigned int) R_ARM_CALL)
5060 && (r_type != (unsigned int) R_ARM_THM_CALL)
5061 && (r_type != (unsigned int) R_ARM_JUMP24)
5062 && (r_type != (unsigned int) R_ARM_THM_JUMP19)
5063 && (r_type != (unsigned int) R_ARM_THM_XPC22)
5064 && (r_type != (unsigned int) R_ARM_THM_JUMP24)
5065 && (r_type != (unsigned int) R_ARM_PLT32)
5066 && !((r_type == (unsigned int) R_ARM_TLS_CALL
5067 || r_type == (unsigned int) R_ARM_THM_TLS_CALL)
5068 && r_type == elf32_arm_tls_transition
5069 (info, r_type, &hash->root)
5070 && ((hash ? hash->tls_type
5071 : (elf32_arm_local_got_tls_type
5072 (input_bfd)[r_indx]))
5073 & GOT_TLS_GDESC) != 0))
5074 continue;
5075
5076 /* Now determine the call target, its name, value,
5077 section. */
5078 sym_sec = NULL;
5079 sym_value = 0;
5080 destination = 0;
5081 sym_name = NULL;
5082
5083 if (r_type == (unsigned int) R_ARM_TLS_CALL
5084 || r_type == (unsigned int) R_ARM_THM_TLS_CALL)
5085 {
5086 /* A non-relaxed TLS call. The target is the
5087 PLT-resident trampoline, which has nothing to do
5088 with the symbol itself. */
5089 BFD_ASSERT (htab->tls_trampoline > 0);
5090 sym_sec = htab->root.splt;
5091 sym_value = htab->tls_trampoline;
5092 hash = 0;
5093 st_type = STT_FUNC;
5094 branch_type = ST_BRANCH_TO_ARM;
5095 }
5096 else if (!hash)
5097 {
5098 /* It's a local symbol. */
5099 Elf_Internal_Sym *sym;
5100
5101 if (local_syms == NULL)
5102 {
5103 local_syms
5104 = (Elf_Internal_Sym *) symtab_hdr->contents;
5105 if (local_syms == NULL)
5106 local_syms
5107 = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
5108 symtab_hdr->sh_info, 0,
5109 NULL, NULL, NULL);
5110 if (local_syms == NULL)
5111 goto error_ret_free_internal;
5112 }
5113
5114 sym = local_syms + r_indx;
5115 if (sym->st_shndx == SHN_UNDEF)
5116 sym_sec = bfd_und_section_ptr;
5117 else if (sym->st_shndx == SHN_ABS)
5118 sym_sec = bfd_abs_section_ptr;
5119 else if (sym->st_shndx == SHN_COMMON)
5120 sym_sec = bfd_com_section_ptr;
5121 else
5122 sym_sec =
5123 bfd_section_from_elf_index (input_bfd, sym->st_shndx);
5124
5125 if (!sym_sec)
5126 /* This is an undefined symbol. It can never
5127 be resolved. */
5128 continue;
5129
5130 if (ELF_ST_TYPE (sym->st_info) != STT_SECTION)
5131 sym_value = sym->st_value;
5132 destination = (sym_value + irela->r_addend
5133 + sym_sec->output_offset
5134 + sym_sec->output_section->vma);
5135 st_type = ELF_ST_TYPE (sym->st_info);
5136 branch_type = ARM_SYM_BRANCH_TYPE (sym);
5137 sym_name
5138 = bfd_elf_string_from_elf_section (input_bfd,
5139 symtab_hdr->sh_link,
5140 sym->st_name);
5141 }
5142 else
5143 {
5144 /* It's an external symbol. */
5145 while (hash->root.root.type == bfd_link_hash_indirect
5146 || hash->root.root.type == bfd_link_hash_warning)
5147 hash = ((struct elf32_arm_link_hash_entry *)
5148 hash->root.root.u.i.link);
5149
5150 if (hash->root.root.type == bfd_link_hash_defined
5151 || hash->root.root.type == bfd_link_hash_defweak)
5152 {
5153 struct elf32_arm_link_hash_table *globals =
5154 elf32_arm_hash_table (info);
5155 
5156 sym_sec = hash->root.root.u.def.section;
5157 sym_value = hash->root.root.u.def.value;
5158
5159 /* For a destination in a shared library,
5160 use the PLT stub as target address to
5161 decide whether a branch stub is
5162 needed. */
5163 if (globals != NULL
5164 && globals->root.splt != NULL
5165 && hash != NULL
5166 && hash->root.plt.offset != (bfd_vma) -1)
5167 {
5168 sym_sec = globals->root.splt;
5169 sym_value = hash->root.plt.offset;
5170 if (sym_sec->output_section != NULL)
5171 destination = (sym_value
5172 + sym_sec->output_offset
5173 + sym_sec->output_section->vma);
5174 }
5175 else if (sym_sec->output_section != NULL)
5176 destination = (sym_value + irela->r_addend
5177 + sym_sec->output_offset
5178 + sym_sec->output_section->vma);
5179 }
5180 else if ((hash->root.root.type == bfd_link_hash_undefined)
5181 || (hash->root.root.type == bfd_link_hash_undefweak))
5182 {
5183 /* For a shared library, use the PLT stub as
5184 the target address to decide whether a long
5185 branch stub is needed.
5186 Undefined symbols without a PLT entry cannot be handled, so skip them. */
5187 struct elf32_arm_link_hash_table *globals =
5188 elf32_arm_hash_table (info);
5189
5190 if (globals != NULL
5191 && globals->root.splt != NULL
5192 && hash != NULL
5193 && hash->root.plt.offset != (bfd_vma) -1)
5194 {
5195 sym_sec = globals->root.splt;
5196 sym_value = hash->root.plt.offset;
5197 if (sym_sec->output_section != NULL)
5198 destination = (sym_value
5199 + sym_sec->output_offset
5200 + sym_sec->output_section->vma);
5201 }
5202 else
5203 continue;
5204 }
5205 else
5206 {
5207 bfd_set_error (bfd_error_bad_value);
5208 goto error_ret_free_internal;
5209 }
5210 st_type = hash->root.type;
5211 branch_type = hash->root.target_internal;
5212 sym_name = hash->root.root.root.string;
5213 }
5214
5215 do
5216 {
5217 /* Determine what (if any) linker stub is needed. */
5218 stub_type = arm_type_of_stub (info, section, irela,
5219 st_type, &branch_type,
5220 hash, destination, sym_sec,
5221 input_bfd, sym_name);
5222 if (stub_type == arm_stub_none)
5223 break;
5224
5225 /* Support for grouping stub sections. */
5226 id_sec = htab->stub_group[section->id].link_sec;
5227
5228 /* Get the name of this stub. */
5229 stub_name = elf32_arm_stub_name (id_sec, sym_sec, hash,
5230 irela, stub_type);
5231 if (!stub_name)
5232 goto error_ret_free_internal;
5233
5234 /* We've either created a stub for this reloc already,
5235 or we are about to. */
5236 created_stub = TRUE;
5237
5238 stub_entry = arm_stub_hash_lookup
5239 (&htab->stub_hash_table, stub_name,
5240 FALSE, FALSE);
5241 if (stub_entry != NULL)
5242 {
5243 /* The proper stub has already been created. */
5244 free (stub_name);
5245 stub_entry->target_value = sym_value;
5246 break;
5247 }
5248
5249 stub_entry = elf32_arm_add_stub (stub_name, section,
5250 htab);
5251 if (stub_entry == NULL)
5252 {
5253 free (stub_name);
5254 goto error_ret_free_internal;
5255 }
5256
5257 stub_entry->target_value = sym_value;
5258 stub_entry->target_section = sym_sec;
5259 stub_entry->stub_type = stub_type;
5260 stub_entry->h = hash;
5261 stub_entry->branch_type = branch_type;
5262
5263 if (sym_name == NULL)
5264 sym_name = "unnamed";
5265 stub_entry->output_name = (char *)
5266 bfd_alloc (htab->stub_bfd,
5267 sizeof (THUMB2ARM_GLUE_ENTRY_NAME)
5268 + strlen (sym_name));
5269 if (stub_entry->output_name == NULL)
5270 {
5271 free (stub_name);
5272 goto error_ret_free_internal;
5273 }
5274
5275 /* For historical reasons, use the existing names for
5276 ARM-to-Thumb and Thumb-to-ARM stubs. */
5277 if ((r_type == (unsigned int) R_ARM_THM_CALL
5278 || r_type == (unsigned int) R_ARM_THM_JUMP24)
5279 && branch_type == ST_BRANCH_TO_ARM)
5280 sprintf (stub_entry->output_name,
5281 THUMB2ARM_GLUE_ENTRY_NAME, sym_name);
5282 else if ((r_type == (unsigned int) R_ARM_CALL
5283 || r_type == (unsigned int) R_ARM_JUMP24)
5284 && branch_type == ST_BRANCH_TO_THUMB)
5285 sprintf (stub_entry->output_name,
5286 ARM2THUMB_GLUE_ENTRY_NAME, sym_name);
5287 else
5288 sprintf (stub_entry->output_name, STUB_ENTRY_NAME,
5289 sym_name);
5290
5291 stub_changed = TRUE;
5292 }
5293 while (0);
5294
5295 /* Look for relocations which might trigger Cortex-A8
5296 erratum. */
5297 if (htab->fix_cortex_a8
5298 && (r_type == (unsigned int) R_ARM_THM_JUMP24
5299 || r_type == (unsigned int) R_ARM_THM_JUMP19
5300 || r_type == (unsigned int) R_ARM_THM_CALL
5301 || r_type == (unsigned int) R_ARM_THM_XPC22))
5302 {
5303 bfd_vma from = section->output_section->vma
5304 + section->output_offset
5305 + irela->r_offset;
5306
5307 if ((from & 0xfff) == 0xffe)
5308 {
5309 /* Found a candidate: the 32-bit Thumb branch starts in
5310 the last two bytes of a 4KB page. Note we have not
5311 checked that the destination is within 4K here: if we
5312 did (and did not create an entry in a8_relocs) we could
5313 not tell, when scanning later, that a branch should have been relocated. */
5314 if (num_a8_relocs == a8_reloc_table_size)
5315 {
5316 a8_reloc_table_size *= 2;
5317 a8_relocs = (struct a8_erratum_reloc *)
5318 bfd_realloc (a8_relocs,
5319 sizeof (struct a8_erratum_reloc)
5320 * a8_reloc_table_size);
5321 }
5322
5323 a8_relocs[num_a8_relocs].from = from;
5324 a8_relocs[num_a8_relocs].destination = destination;
5325 a8_relocs[num_a8_relocs].r_type = r_type;
5326 a8_relocs[num_a8_relocs].branch_type = branch_type;
5327 a8_relocs[num_a8_relocs].sym_name = sym_name;
5328 a8_relocs[num_a8_relocs].non_a8_stub = created_stub;
5329 a8_relocs[num_a8_relocs].hash = hash;
5330
5331 num_a8_relocs++;
5332 }
5333 }
5334 }
5335
5336 /* We're done with the internal relocs, free them. */
5337 if (elf_section_data (section)->relocs == NULL)
5338 free (internal_relocs);
5339 }
5340
5341 if (htab->fix_cortex_a8)
5342 {
5343 /* Sort relocs which might apply to Cortex-A8 erratum. */
5344 qsort (a8_relocs, num_a8_relocs,
5345 sizeof (struct a8_erratum_reloc),
5346 &a8_reloc_compare);
5347
5348 /* Scan for branches which might trigger Cortex-A8 erratum. */
5349 if (cortex_a8_erratum_scan (input_bfd, info, &a8_fixes,
5350 &num_a8_fixes, &a8_fix_table_size,
5351 a8_relocs, num_a8_relocs,
5352 prev_num_a8_fixes, &stub_changed)
5353 != 0)
5354 goto error_ret_free_local;
5355 }
5356 }
5357
5358 if (prev_num_a8_fixes != num_a8_fixes)
5359 stub_changed = TRUE;
5360
5361 if (!stub_changed)
5362 break;
5363
5364 /* OK, we've added some stubs. Find out the new size of the
5365 stub sections. */
5366 for (stub_sec = htab->stub_bfd->sections;
5367 stub_sec != NULL;
5368 stub_sec = stub_sec->next)
5369 {
5370 /* Ignore non-stub sections. */
5371 if (!strstr (stub_sec->name, STUB_SUFFIX))
5372 continue;
5373
5374 stub_sec->size = 0;
5375 }
5376
5377 bfd_hash_traverse (&htab->stub_hash_table, arm_size_one_stub, htab);
5378
5379 /* Add Cortex-A8 erratum veneers to stub section sizes too. */
5380 if (htab->fix_cortex_a8)
5381 for (i = 0; i < num_a8_fixes; i++)
5382 {
5383 stub_sec = elf32_arm_create_or_find_stub_sec (NULL,
5384 a8_fixes[i].section, htab);
5385
5386 if (stub_sec == NULL)
5387 goto error_ret_free_local;
5388
5389 stub_sec->size
5390 += find_stub_size_and_template (a8_fixes[i].stub_type, NULL,
5391 NULL);
5392 }
5393
5394
5395 /* Ask the linker to lay the sections out again. */
5396 (*htab->layout_sections_again) ();
5397 }
5398
5399 /* Add stubs for Cortex-A8 erratum fixes now. */
5400 if (htab->fix_cortex_a8)
5401 {
5402 for (i = 0; i < num_a8_fixes; i++)
5403 {
5404 struct elf32_arm_stub_hash_entry *stub_entry;
5405 char *stub_name = a8_fixes[i].stub_name;
5406 asection *section = a8_fixes[i].section;
5407 unsigned int section_id = a8_fixes[i].section->id;
5408 asection *link_sec = htab->stub_group[section_id].link_sec;
5409 asection *stub_sec = htab->stub_group[section_id].stub_sec;
5410 const insn_sequence *template_sequence;
5411 int template_size, size = 0;
5412
5413 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
5414 TRUE, FALSE);
5415 if (stub_entry == NULL)
5416 {
5417 (*_bfd_error_handler) (_("%B: cannot create stub entry %s"),
5418 section->owner,
5419 stub_name);
5420 return FALSE;
5421 }
5422
5423 stub_entry->stub_sec = stub_sec;
5424 stub_entry->stub_offset = 0;
5425 stub_entry->id_sec = link_sec;
5426 stub_entry->stub_type = a8_fixes[i].stub_type;
5427 stub_entry->target_section = a8_fixes[i].section;
5428 stub_entry->target_value = a8_fixes[i].offset;
5429 stub_entry->target_addend = a8_fixes[i].addend;
5430 stub_entry->orig_insn = a8_fixes[i].orig_insn;
5431 stub_entry->branch_type = a8_fixes[i].branch_type;
5432
5433 size = find_stub_size_and_template (a8_fixes[i].stub_type,
5434 &template_sequence,
5435 &template_size);
5436
5437 stub_entry->stub_size = size;
5438 stub_entry->stub_template = template_sequence;
5439 stub_entry->stub_template_size = template_size;
5440 }
5441
5442 /* Stash the Cortex-A8 erratum fix array for use later in
5443 elf32_arm_write_section(). */
5444 htab->a8_erratum_fixes = a8_fixes;
5445 htab->num_a8_erratum_fixes = num_a8_fixes;
5446 }
5447 else
5448 {
5449 htab->a8_erratum_fixes = NULL;
5450 htab->num_a8_erratum_fixes = 0;
5451 }
5452 return TRUE;
5453
5454 error_ret_free_local:
5455 return FALSE;
5456 }
5457
5458 /* Build all the stubs associated with the current output file. The
5459 stubs are kept in a hash table attached to the main linker hash
5460 table. We also set up the .plt entries for statically linked PIC
5461 functions here. This function is called via arm_elf_finish in the
5462 linker. */
5463
5464 bfd_boolean
5465 elf32_arm_build_stubs (struct bfd_link_info *info)
5466 {
5467 asection *stub_sec;
5468 struct bfd_hash_table *table;
5469 struct elf32_arm_link_hash_table *htab;
5470
5471 htab = elf32_arm_hash_table (info);
5472 if (htab == NULL)
5473 return FALSE;
5474
5475 for (stub_sec = htab->stub_bfd->sections;
5476 stub_sec != NULL;
5477 stub_sec = stub_sec->next)
5478 {
5479 bfd_size_type size;
5480
5481 /* Ignore non-stub sections. */
5482 if (!strstr (stub_sec->name, STUB_SUFFIX))
5483 continue;
5484
5485 /* Allocate memory to hold the linker stubs. */
5486 size = stub_sec->size;
5487 stub_sec->contents = (unsigned char *) bfd_zalloc (htab->stub_bfd, size);
5488 if (stub_sec->contents == NULL && size != 0)
5489 return FALSE;
5490 stub_sec->size = 0;
5491 }
5492
5493 /* Build the stubs as directed by the stub hash table. */
5494 table = &htab->stub_hash_table;
5495 bfd_hash_traverse (table, arm_build_one_stub, info);
5496 if (htab->fix_cortex_a8)
5497 {
5498 /* Place the Cortex-A8 stubs last. */
5499 htab->fix_cortex_a8 = -1;
5500 bfd_hash_traverse (table, arm_build_one_stub, info);
5501 }
5502
5503 return TRUE;
5504 }
5505
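/* Illustrative sketch only, not part of this file: elf32_arm_build_stubs
   is expected to be driven from the ARM ld emulation's finish hook, once
   stub sizing and final layout are done, along these lines:

       if (! elf32_arm_build_stubs (&link_info))
         einfo (_("%X%P: can not build stubs: %E\n"));

   The einfo message text here is a guess; see ld/emultempl/armelf.em for
   the real call site.  */
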
5506 /* Locate the Thumb encoded calling stub for NAME. */
5507
5508 static struct elf_link_hash_entry *
5509 find_thumb_glue (struct bfd_link_info *link_info,
5510 const char *name,
5511 char **error_message)
5512 {
5513 char *tmp_name;
5514 struct elf_link_hash_entry *hash;
5515 struct elf32_arm_link_hash_table *hash_table;
5516
5517 /* We need a pointer to the armelf specific hash table. */
5518 hash_table = elf32_arm_hash_table (link_info);
5519 if (hash_table == NULL)
5520 return NULL;
5521
5522 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
5523 + strlen (THUMB2ARM_GLUE_ENTRY_NAME) + 1);
5524
5525 BFD_ASSERT (tmp_name);
5526
5527 sprintf (tmp_name, THUMB2ARM_GLUE_ENTRY_NAME, name);
5528
5529 hash = elf_link_hash_lookup
5530 (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
5531
5532 if (hash == NULL
5533 && asprintf (error_message, _("unable to find THUMB glue '%s' for '%s'"),
5534 tmp_name, name) == -1)
5535 *error_message = (char *) bfd_errmsg (bfd_error_system_call);
5536
5537 free (tmp_name);
5538
5539 return hash;
5540 }
5541
5542 /* Locate the ARM encoded calling stub for NAME. */
5543
5544 static struct elf_link_hash_entry *
5545 find_arm_glue (struct bfd_link_info *link_info,
5546 const char *name,
5547 char **error_message)
5548 {
5549 char *tmp_name;
5550 struct elf_link_hash_entry *myh;
5551 struct elf32_arm_link_hash_table *hash_table;
5552
5553 /* We need a pointer to the elfarm specific hash table. */
5554 hash_table = elf32_arm_hash_table (link_info);
5555 if (hash_table == NULL)
5556 return NULL;
5557
5558 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
5559 + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
5560
5561 BFD_ASSERT (tmp_name);
5562
5563 sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);
5564
5565 myh = elf_link_hash_lookup
5566 (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
5567
5568 if (myh == NULL
5569 && asprintf (error_message, _("unable to find ARM glue '%s' for '%s'"),
5570 tmp_name, name) == -1)
5571 *error_message = (char *) bfd_errmsg (bfd_error_system_call);
5572
5573 free (tmp_name);
5574
5575 return myh;
5576 }
5577
5578 /* ARM->Thumb glue (static images):
5579
5580 .arm
5581 __func_from_arm:
5582 ldr r12, __func_addr
5583 bx r12
5584 __func_addr:
5585 .word func @ behave as if you saw an ARM_32 reloc.
5586
5587 (v5t static images)
5588 .arm
5589 __func_from_arm:
5590 ldr pc, __func_addr
5591 __func_addr:
5592 .word func @ behave as if you saw an ARM_32 reloc.
5593
5594 (relocatable images)
5595 .arm
5596 __func_from_arm:
5597 ldr r12, __func_offset
5598 add r12, r12, pc
5599 bx r12
5600 __func_offset:
5601 .word func - . */
5602
5603 #define ARM2THUMB_STATIC_GLUE_SIZE 12
5604 static const insn32 a2t1_ldr_insn = 0xe59fc000;
5605 static const insn32 a2t2_bx_r12_insn = 0xe12fff1c;
5606 static const insn32 a2t3_func_addr_insn = 0x00000001;
5607
5608 #define ARM2THUMB_V5_STATIC_GLUE_SIZE 8
5609 static const insn32 a2t1v5_ldr_insn = 0xe51ff004;
5610 static const insn32 a2t2v5_func_addr_insn = 0x00000001;
5611
5612 #define ARM2THUMB_PIC_GLUE_SIZE 16
5613 static const insn32 a2t1p_ldr_insn = 0xe59fc004;
5614 static const insn32 a2t2p_add_pc_insn = 0xe08cc00f;
5615 static const insn32 a2t3p_bx_r12_insn = 0xe12fff1c;
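
/* Reading aid (standard ARM encodings, listed here only for reference):

     a2t1_ldr_insn      0xe59fc000   ldr  r12, [pc, #0]    @ loads __func_addr
     a2t2_bx_r12_insn   0xe12fff1c   bx   r12
     a2t1v5_ldr_insn    0xe51ff004   ldr  pc, [pc, #-4]    @ loads __func_addr
     a2t1p_ldr_insn     0xe59fc004   ldr  r12, [pc, #4]    @ loads __func_offset
     a2t2p_add_pc_insn  0xe08cc00f   add  r12, r12, pc
     a2t3p_bx_r12_insn  0xe12fff1c   bx   r12

   The *_func_addr_insn values are not instructions: they are the low-order
   bit ORed into the stored target address so that a BX to it enters Thumb
   state.  */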
5616
5617 /* Thumb->ARM:                            Thumb->(non-interworking aware) ARM
5618
5619        .thumb                                 .thumb
5620        .align 2                               .align 2
5621    __func_from_thumb:                     __func_from_thumb:
5622        bx   pc                                push {r6, lr}
5623        nop                                    ldr  r6, __func_addr
5624        .arm                                   mov  lr, pc
5625        b    func                              bx   r6
5626                                               .arm
5627                                           ;; back_to_thumb
5628                                               ldmia r13! {r6, lr}
5629                                               bx    lr
5630                                           __func_addr:
5631                                               .word func  */
5632
5633 #define THUMB2ARM_GLUE_SIZE 8
5634 static const insn16 t2a1_bx_pc_insn = 0x4778;
5635 static const insn16 t2a2_noop_insn = 0x46c0;
5636 static const insn32 t2a3_b_insn = 0xea000000;
5637
5638 #define VFP11_ERRATUM_VENEER_SIZE 8
5639
5640 #define ARM_BX_VENEER_SIZE 12
5641 static const insn32 armbx1_tst_insn = 0xe3100001;
5642 static const insn32 armbx2_moveq_insn = 0x01a0f000;
5643 static const insn32 armbx3_bx_insn = 0xe12fff10;
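
/* Reading aid: with the register number N folded in as done by
   elf32_arm_bx_glue () below, one ARMv4 BX veneer is the 12-byte sequence

       tst   rN, #1        @ armbx1_tst_insn   + (N << 16)
       moveq pc, rN        @ armbx2_moveq_insn + N
       bx    rN            @ armbx3_bx_insn    + N

   i.e. a plain ARM-mode jump when bit 0 of rN is clear, and an
   interworking BX otherwise.  */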
5644
5645 #ifndef ELFARM_NABI_C_INCLUDED
5646 static void
5647 arm_allocate_glue_section_space (bfd * abfd, bfd_size_type size, const char * name)
5648 {
5649 asection * s;
5650 bfd_byte * contents;
5651
5652 if (size == 0)
5653 {
5654 /* Do not include empty glue sections in the output. */
5655 if (abfd != NULL)
5656 {
5657 s = bfd_get_section_by_name (abfd, name);
5658 if (s != NULL)
5659 s->flags |= SEC_EXCLUDE;
5660 }
5661 return;
5662 }
5663
5664 BFD_ASSERT (abfd != NULL);
5665
5666 s = bfd_get_section_by_name (abfd, name);
5667 BFD_ASSERT (s != NULL);
5668
5669 contents = (bfd_byte *) bfd_alloc (abfd, size);
5670
5671 BFD_ASSERT (s->size == size);
5672 s->contents = contents;
5673 }
5674
5675 bfd_boolean
5676 bfd_elf32_arm_allocate_interworking_sections (struct bfd_link_info * info)
5677 {
5678 struct elf32_arm_link_hash_table * globals;
5679
5680 globals = elf32_arm_hash_table (info);
5681 BFD_ASSERT (globals != NULL);
5682
5683 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
5684 globals->arm_glue_size,
5685 ARM2THUMB_GLUE_SECTION_NAME);
5686
5687 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
5688 globals->thumb_glue_size,
5689 THUMB2ARM_GLUE_SECTION_NAME);
5690
5691 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
5692 globals->vfp11_erratum_glue_size,
5693 VFP11_ERRATUM_VENEER_SECTION_NAME);
5694
5695 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
5696 globals->bx_glue_size,
5697 ARM_BX_GLUE_SECTION_NAME);
5698
5699 return TRUE;
5700 }
5701
5702 /* Allocate space and symbols for calling a Thumb function from Arm mode.
5703 Returns the symbol identifying the stub. */
5704
5705 static struct elf_link_hash_entry *
5706 record_arm_to_thumb_glue (struct bfd_link_info * link_info,
5707 struct elf_link_hash_entry * h)
5708 {
5709 const char * name = h->root.root.string;
5710 asection * s;
5711 char * tmp_name;
5712 struct elf_link_hash_entry * myh;
5713 struct bfd_link_hash_entry * bh;
5714 struct elf32_arm_link_hash_table * globals;
5715 bfd_vma val;
5716 bfd_size_type size;
5717
5718 globals = elf32_arm_hash_table (link_info);
5719 BFD_ASSERT (globals != NULL);
5720 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
5721
5722 s = bfd_get_section_by_name
5723 (globals->bfd_of_glue_owner, ARM2THUMB_GLUE_SECTION_NAME);
5724
5725 BFD_ASSERT (s != NULL);
5726
5727 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
5728 + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
5729
5730 BFD_ASSERT (tmp_name);
5731
5732 sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);
5733
5734 myh = elf_link_hash_lookup
5735 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
5736
5737 if (myh != NULL)
5738 {
5739 /* We've already seen this guy. */
5740 free (tmp_name);
5741 return myh;
5742 }
5743
5744 /* The only trick here is using globals->arm_glue_size as the value.
5745 Even though the section isn't allocated yet, this is where we will be
5746 putting it. The +1 on the value marks that the stub has not been
5747 output yet - not that it is a Thumb function. */
5748 bh = NULL;
5749 val = globals->arm_glue_size + 1;
5750 _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
5751 tmp_name, BSF_GLOBAL, s, val,
5752 NULL, TRUE, FALSE, &bh);
5753
5754 myh = (struct elf_link_hash_entry *) bh;
5755 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5756 myh->forced_local = 1;
5757
5758 free (tmp_name);
5759
5760 if (link_info->shared || globals->root.is_relocatable_executable
5761 || globals->pic_veneer)
5762 size = ARM2THUMB_PIC_GLUE_SIZE;
5763 else if (globals->use_blx)
5764 size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
5765 else
5766 size = ARM2THUMB_STATIC_GLUE_SIZE;
5767
5768 s->size += size;
5769 globals->arm_glue_size += size;
5770
5771 return myh;
5772 }
5773
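/* A minimal sketch of the low-bit convention set up above: the glue
   symbol's value is created as <offset within the glue section> + 1, and
   the code that actually emits the stub (elf32_arm_create_thumb_stub,
   below) clears that bit the first time through:

       my_offset = myh->root.u.def.value;
       if ((my_offset & 0x01) == 0x01)
         {
           --my_offset;
           myh->root.u.def.value = my_offset;
           ... emit the glue instructions at s->contents + my_offset ...
         }

   so a set low bit simply means "not emitted yet", not "Thumb".  */
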
5774 /* Allocate space for ARMv4 BX veneers. */
5775
5776 static void
5777 record_arm_bx_glue (struct bfd_link_info * link_info, int reg)
5778 {
5779 asection * s;
5780 struct elf32_arm_link_hash_table *globals;
5781 char *tmp_name;
5782 struct elf_link_hash_entry *myh;
5783 struct bfd_link_hash_entry *bh;
5784 bfd_vma val;
5785
5786 /* BX PC does not need a veneer. */
5787 if (reg == 15)
5788 return;
5789
5790 globals = elf32_arm_hash_table (link_info);
5791 BFD_ASSERT (globals != NULL);
5792 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
5793
5794 /* Check if this veneer has already been allocated. */
5795 if (globals->bx_glue_offset[reg])
5796 return;
5797
5798 s = bfd_get_section_by_name
5799 (globals->bfd_of_glue_owner, ARM_BX_GLUE_SECTION_NAME);
5800
5801 BFD_ASSERT (s != NULL);
5802
5803 /* Add symbol for veneer. */
5804 tmp_name = (char *)
5805 bfd_malloc ((bfd_size_type) strlen (ARM_BX_GLUE_ENTRY_NAME) + 1);
5806
5807 BFD_ASSERT (tmp_name);
5808
5809 sprintf (tmp_name, ARM_BX_GLUE_ENTRY_NAME, reg);
5810
5811 myh = elf_link_hash_lookup
5812 (&(globals)->root, tmp_name, FALSE, FALSE, FALSE);
5813
5814 BFD_ASSERT (myh == NULL);
5815
5816 bh = NULL;
5817 val = globals->bx_glue_size;
5818 _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
5819 tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
5820 NULL, TRUE, FALSE, &bh);
5821
5822 myh = (struct elf_link_hash_entry *) bh;
5823 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5824 myh->forced_local = 1;
5825
5826 s->size += ARM_BX_VENEER_SIZE;
5827 globals->bx_glue_offset[reg] = globals->bx_glue_size | 2;
5828 globals->bx_glue_size += ARM_BX_VENEER_SIZE;
5829 }
5830
5831
5832 /* Add an entry to the code/data map for section SEC. */
5833
5834 static void
5835 elf32_arm_section_map_add (asection *sec, char type, bfd_vma vma)
5836 {
5837 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
5838 unsigned int newidx;
5839
5840 if (sec_data->map == NULL)
5841 {
5842 sec_data->map = (elf32_arm_section_map *)
5843 bfd_malloc (sizeof (elf32_arm_section_map));
5844 sec_data->mapcount = 0;
5845 sec_data->mapsize = 1;
5846 }
5847
5848 newidx = sec_data->mapcount++;
5849
5850 if (sec_data->mapcount > sec_data->mapsize)
5851 {
5852 sec_data->mapsize *= 2;
5853 sec_data->map = (elf32_arm_section_map *)
5854 bfd_realloc_or_free (sec_data->map, sec_data->mapsize
5855 * sizeof (elf32_arm_section_map));
5856 }
5857
5858 if (sec_data->map)
5859 {
5860 sec_data->map[newidx].vma = vma;
5861 sec_data->map[newidx].type = type;
5862 }
5863 }
5864
5865
5866 /* Record information about a VFP11 denorm-erratum veneer. Only ARM-mode
5867 veneers are handled for now. */
5868
5869 static bfd_vma
5870 record_vfp11_erratum_veneer (struct bfd_link_info *link_info,
5871 elf32_vfp11_erratum_list *branch,
5872 bfd *branch_bfd,
5873 asection *branch_sec,
5874 unsigned int offset)
5875 {
5876 asection *s;
5877 struct elf32_arm_link_hash_table *hash_table;
5878 char *tmp_name;
5879 struct elf_link_hash_entry *myh;
5880 struct bfd_link_hash_entry *bh;
5881 bfd_vma val;
5882 struct _arm_elf_section_data *sec_data;
5883 elf32_vfp11_erratum_list *newerr;
5884
5885 hash_table = elf32_arm_hash_table (link_info);
5886 BFD_ASSERT (hash_table != NULL);
5887 BFD_ASSERT (hash_table->bfd_of_glue_owner != NULL);
5888
5889 s = bfd_get_section_by_name
5890 (hash_table->bfd_of_glue_owner, VFP11_ERRATUM_VENEER_SECTION_NAME);
5891
5892 BFD_ASSERT (s != NULL);
5893
5894 sec_data = elf32_arm_section_data (s);
5895
5896 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
5897 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
5898
5899 BFD_ASSERT (tmp_name);
5900
5901 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
5902 hash_table->num_vfp11_fixes);
5903
5904 myh = elf_link_hash_lookup
5905 (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
5906
5907 BFD_ASSERT (myh == NULL);
5908
5909 bh = NULL;
5910 val = hash_table->vfp11_erratum_glue_size;
5911 _bfd_generic_link_add_one_symbol (link_info, hash_table->bfd_of_glue_owner,
5912 tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
5913 NULL, TRUE, FALSE, &bh);
5914
5915 myh = (struct elf_link_hash_entry *) bh;
5916 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5917 myh->forced_local = 1;
5918
5919 /* Link veneer back to calling location. */
5920 sec_data->erratumcount += 1;
5921 newerr = (elf32_vfp11_erratum_list *)
5922 bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
5923
5924 newerr->type = VFP11_ERRATUM_ARM_VENEER;
5925 newerr->vma = -1;
5926 newerr->u.v.branch = branch;
5927 newerr->u.v.id = hash_table->num_vfp11_fixes;
5928 branch->u.b.veneer = newerr;
5929
5930 newerr->next = sec_data->erratumlist;
5931 sec_data->erratumlist = newerr;
5932
5933 /* A symbol for the return from the veneer. */
5934 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
5935 hash_table->num_vfp11_fixes);
5936
5937 myh = elf_link_hash_lookup
5938 (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
5939
5940 if (myh != NULL)
5941 abort ();
5942
5943 bh = NULL;
5944 val = offset + 4;
5945 _bfd_generic_link_add_one_symbol (link_info, branch_bfd, tmp_name, BSF_LOCAL,
5946 branch_sec, val, NULL, TRUE, FALSE, &bh);
5947
5948 myh = (struct elf_link_hash_entry *) bh;
5949 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5950 myh->forced_local = 1;
5951
5952 free (tmp_name);
5953
5954 /* Generate a mapping symbol for the veneer section, and explicitly add an
5955 entry for that symbol to the code/data map for the section. */
5956 if (hash_table->vfp11_erratum_glue_size == 0)
5957 {
5958 bh = NULL;
5959 /* FIXME: Creates an ARM symbol. Thumb mode will need attention if it
5960 ever requires this erratum fix. */
5961 _bfd_generic_link_add_one_symbol (link_info,
5962 hash_table->bfd_of_glue_owner, "$a",
5963 BSF_LOCAL, s, 0, NULL,
5964 TRUE, FALSE, &bh);
5965
5966 myh = (struct elf_link_hash_entry *) bh;
5967 myh->type = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
5968 myh->forced_local = 1;
5969
5970 /* The elf32_arm_init_maps function only cares about symbols from input
5971 BFDs. We must make a note of this generated mapping symbol
5972 ourselves so that code byteswapping works properly in
5973 elf32_arm_write_section. */
5974 elf32_arm_section_map_add (s, 'a', 0);
5975 }
5976
5977 s->size += VFP11_ERRATUM_VENEER_SIZE;
5978 hash_table->vfp11_erratum_glue_size += VFP11_ERRATUM_VENEER_SIZE;
5979 hash_table->num_vfp11_fixes++;
5980
5981 /* The offset of the veneer. */
5982 return val;
5983 }
5984
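/* Summary of the naming convention established above (indicative names
   only; the real prefix is whatever VFP11_ERRATUM_VENEER_ENTRY_NAME
   expands to): for fix number N two local symbols are created,

       __vfp11_veneer_N     in the veneer glue section, at the veneer,
       __vfp11_veneer_N_r   in the section containing the offending
                            instruction, immediately after it (offset + 4).

   bfd_elf32_arm_vfp11_fix_veneer_locations () looks both of them up again
   once the final layout is known.  */
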
5985 #define ARM_GLUE_SECTION_FLAGS \
5986 (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_CODE \
5987 | SEC_READONLY | SEC_LINKER_CREATED)
5988
5989 /* Create a fake section for use by the ARM backend of the linker. */
5990
5991 static bfd_boolean
5992 arm_make_glue_section (bfd * abfd, const char * name)
5993 {
5994 asection * sec;
5995
5996 sec = bfd_get_section_by_name (abfd, name);
5997 if (sec != NULL)
5998 /* Already made. */
5999 return TRUE;
6000
6001 sec = bfd_make_section_with_flags (abfd, name, ARM_GLUE_SECTION_FLAGS);
6002
6003 if (sec == NULL
6004 || !bfd_set_section_alignment (abfd, sec, 2))
6005 return FALSE;
6006
6007 /* Set the gc mark to prevent the section from being removed by garbage
6008 collection, despite the fact that no relocs refer to this section. */
6009 sec->gc_mark = 1;
6010
6011 return TRUE;
6012 }
6013
6014 /* Add the glue sections to ABFD. This function is called from the
6015 linker scripts in ld/emultempl/{armelf}.em. */
6016
6017 bfd_boolean
6018 bfd_elf32_arm_add_glue_sections_to_bfd (bfd *abfd,
6019 struct bfd_link_info *info)
6020 {
6021 /* If we are only performing a partial
6022 link do not bother adding the glue. */
6023 if (info->relocatable)
6024 return TRUE;
6025
6026 return arm_make_glue_section (abfd, ARM2THUMB_GLUE_SECTION_NAME)
6027 && arm_make_glue_section (abfd, THUMB2ARM_GLUE_SECTION_NAME)
6028 && arm_make_glue_section (abfd, VFP11_ERRATUM_VENEER_SECTION_NAME)
6029 && arm_make_glue_section (abfd, ARM_BX_GLUE_SECTION_NAME);
6030 }
6031
6032 /* Select a BFD to be used to hold the sections used by the glue code.
6033 This function is called from the linker scripts in ld/emultempl/
6034 {armelf/pe}.em. */
6035
6036 bfd_boolean
6037 bfd_elf32_arm_get_bfd_for_interworking (bfd *abfd, struct bfd_link_info *info)
6038 {
6039 struct elf32_arm_link_hash_table *globals;
6040
6041 /* If we are only performing a partial link
6042 do not bother getting a bfd to hold the glue. */
6043 if (info->relocatable)
6044 return TRUE;
6045
6046 /* Make sure we don't attach the glue sections to a dynamic object. */
6047 BFD_ASSERT (!(abfd->flags & DYNAMIC));
6048
6049 globals = elf32_arm_hash_table (info);
6050 BFD_ASSERT (globals != NULL);
6051
6052 if (globals->bfd_of_glue_owner != NULL)
6053 return TRUE;
6054
6055 /* Save the bfd for later use. */
6056 globals->bfd_of_glue_owner = abfd;
6057
6058 return TRUE;
6059 }
6060
6061 static void
6062 check_use_blx (struct elf32_arm_link_hash_table *globals)
6063 {
6064 int cpu_arch;
6065
6066 cpu_arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
6067 Tag_CPU_arch);
6068
6069 if (globals->fix_arm1176)
6070 {
6071 if (cpu_arch == TAG_CPU_ARCH_V6T2 || cpu_arch > TAG_CPU_ARCH_V6K)
6072 globals->use_blx = 1;
6073 }
6074 else
6075 {
6076 if (cpu_arch > TAG_CPU_ARCH_V4T)
6077 globals->use_blx = 1;
6078 }
6079 }
6080
6081 bfd_boolean
6082 bfd_elf32_arm_process_before_allocation (bfd *abfd,
6083 struct bfd_link_info *link_info)
6084 {
6085 Elf_Internal_Shdr *symtab_hdr;
6086 Elf_Internal_Rela *internal_relocs = NULL;
6087 Elf_Internal_Rela *irel, *irelend;
6088 bfd_byte *contents = NULL;
6089
6090 asection *sec;
6091 struct elf32_arm_link_hash_table *globals;
6092
6093 /* If we are only performing a partial link do not bother
6094 to construct any glue. */
6095 if (link_info->relocatable)
6096 return TRUE;
6097
6098 /* Here we have a bfd that is to be included in the link. We have a
6099 hook to do reloc rummaging, before section sizes are nailed down. */
6100 globals = elf32_arm_hash_table (link_info);
6101 BFD_ASSERT (globals != NULL);
6102
6103 check_use_blx (globals);
6104
6105 if (globals->byteswap_code && !bfd_big_endian (abfd))
6106 {
6107 _bfd_error_handler (_("%B: BE8 images are only valid in big-endian mode."),
6108 abfd);
6109 return FALSE;
6110 }
6111
6112 /* PR 5398: If we have not decided to include any loadable sections in
6113 the output then we will not have a glue owner bfd. This is OK, it
6114 just means that there is nothing else for us to do here. */
6115 if (globals->bfd_of_glue_owner == NULL)
6116 return TRUE;
6117
6118 /* Rummage around all the relocs and map the glue vectors. */
6119 sec = abfd->sections;
6120
6121 if (sec == NULL)
6122 return TRUE;
6123
6124 for (; sec != NULL; sec = sec->next)
6125 {
6126 if (sec->reloc_count == 0)
6127 continue;
6128
6129 if ((sec->flags & SEC_EXCLUDE) != 0)
6130 continue;
6131
6132 symtab_hdr = & elf_symtab_hdr (abfd);
6133
6134 /* Load the relocs. */
6135 internal_relocs
6136 = _bfd_elf_link_read_relocs (abfd, sec, NULL, NULL, FALSE);
6137
6138 if (internal_relocs == NULL)
6139 goto error_return;
6140
6141 irelend = internal_relocs + sec->reloc_count;
6142 for (irel = internal_relocs; irel < irelend; irel++)
6143 {
6144 long r_type;
6145 unsigned long r_index;
6146
6147 struct elf_link_hash_entry *h;
6148
6149 r_type = ELF32_R_TYPE (irel->r_info);
6150 r_index = ELF32_R_SYM (irel->r_info);
6151
6152 /* These are the only relocation types we care about. */
6153 if ( r_type != R_ARM_PC24
6154 && (r_type != R_ARM_V4BX || globals->fix_v4bx < 2))
6155 continue;
6156
6157 /* Get the section contents if we haven't done so already. */
6158 if (contents == NULL)
6159 {
6160 /* Get cached copy if it exists. */
6161 if (elf_section_data (sec)->this_hdr.contents != NULL)
6162 contents = elf_section_data (sec)->this_hdr.contents;
6163 else
6164 {
6165 /* Go get them off disk. */
6166 if (! bfd_malloc_and_get_section (abfd, sec, &contents))
6167 goto error_return;
6168 }
6169 }
6170
6171 if (r_type == R_ARM_V4BX)
6172 {
6173 int reg;
6174
6175 reg = bfd_get_32 (abfd, contents + irel->r_offset) & 0xf;
6176 record_arm_bx_glue (link_info, reg);
6177 continue;
6178 }
6179
6180 /* If the relocation is not against a symbol it cannot concern us. */
6181 h = NULL;
6182
6183 /* We don't care about local symbols. */
6184 if (r_index < symtab_hdr->sh_info)
6185 continue;
6186
6187 /* This is an external symbol. */
6188 r_index -= symtab_hdr->sh_info;
6189 h = (struct elf_link_hash_entry *)
6190 elf_sym_hashes (abfd)[r_index];
6191
6192 /* If the relocation is against a static symbol it must be within
6193 the current section and so cannot be a cross ARM/Thumb relocation. */
6194 if (h == NULL)
6195 continue;
6196
6197 /* If the call will go through a PLT entry then we do not need
6198 glue. */
6199 if (globals->root.splt != NULL && h->plt.offset != (bfd_vma) -1)
6200 continue;
6201
6202 switch (r_type)
6203 {
6204 case R_ARM_PC24:
6205 /* This one is a call from arm code. We need to look up
6206 the target of the call. If it is a thumb target, we
6207 insert glue. */
6208 if (h->target_internal == ST_BRANCH_TO_THUMB)
6209 record_arm_to_thumb_glue (link_info, h);
6210 break;
6211
6212 default:
6213 abort ();
6214 }
6215 }
6216
6217 if (contents != NULL
6218 && elf_section_data (sec)->this_hdr.contents != contents)
6219 free (contents);
6220 contents = NULL;
6221
6222 if (internal_relocs != NULL
6223 && elf_section_data (sec)->relocs != internal_relocs)
6224 free (internal_relocs);
6225 internal_relocs = NULL;
6226 }
6227
6228 return TRUE;
6229
6230 error_return:
6231 if (contents != NULL
6232 && elf_section_data (sec)->this_hdr.contents != contents)
6233 free (contents);
6234 if (internal_relocs != NULL
6235 && elf_section_data (sec)->relocs != internal_relocs)
6236 free (internal_relocs);
6237
6238 return FALSE;
6239 }
6240 #endif
6241
6242
6243 /* Initialise maps of ARM/Thumb/data for input BFDs. */
6244
6245 void
6246 bfd_elf32_arm_init_maps (bfd *abfd)
6247 {
6248 Elf_Internal_Sym *isymbuf;
6249 Elf_Internal_Shdr *hdr;
6250 unsigned int i, localsyms;
6251
6252 /* PR 7093: Make sure that we are dealing with an arm elf binary. */
6253 if (! is_arm_elf (abfd))
6254 return;
6255
6256 if ((abfd->flags & DYNAMIC) != 0)
6257 return;
6258
6259 hdr = & elf_symtab_hdr (abfd);
6260 localsyms = hdr->sh_info;
6261
6262 /* Obtain a buffer full of symbols for this BFD. The hdr->sh_info field
6263 should contain the number of local symbols, which should come before any
6264 global symbols. Mapping symbols are always local. */
6265 isymbuf = bfd_elf_get_elf_syms (abfd, hdr, localsyms, 0, NULL, NULL,
6266 NULL);
6267
6268 /* No internal symbols read? Skip this BFD. */
6269 if (isymbuf == NULL)
6270 return;
6271
6272 for (i = 0; i < localsyms; i++)
6273 {
6274 Elf_Internal_Sym *isym = &isymbuf[i];
6275 asection *sec = bfd_section_from_elf_index (abfd, isym->st_shndx);
6276 const char *name;
6277
6278 if (sec != NULL
6279 && ELF_ST_BIND (isym->st_info) == STB_LOCAL)
6280 {
6281 name = bfd_elf_string_from_elf_section (abfd,
6282 hdr->sh_link, isym->st_name);
6283
6284 if (bfd_is_arm_special_symbol_name (name,
6285 BFD_ARM_SPECIAL_SYM_TYPE_MAP))
6286 elf32_arm_section_map_add (sec, name[1], isym->st_value);
6287 }
6288 }
6289 }
6290
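/* Reading aid for the function above: ARM ELF mapping symbols are local
   symbols named $a, $t and $d (possibly with a suffix), marking the start
   of ARM code, Thumb code and literal data respectively, e.g.

         .text
       $a:                     @ ARM instructions follow
         add   r0, r0, r1
       $d:                     @ literal data follows
         .word 0x12345678
       $t:                     @ Thumb instructions follow
         bx    lr

   bfd_is_arm_special_symbol_name () with BFD_ARM_SPECIAL_SYM_TYPE_MAP
   matches these names, and name[1] ('a', 't' or 'd') becomes the type
   recorded by elf32_arm_section_map_add ().  */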
6291
6292 /* Auto-select enabling of Cortex-A8 erratum fix if the user didn't explicitly
6293 say what they wanted. */
6294
6295 void
6296 bfd_elf32_arm_set_cortex_a8_fix (bfd *obfd, struct bfd_link_info *link_info)
6297 {
6298 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
6299 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
6300
6301 if (globals == NULL)
6302 return;
6303
6304 if (globals->fix_cortex_a8 == -1)
6305 {
6306 /* Turn on Cortex-A8 erratum workaround for ARMv7-A. */
6307 if (out_attr[Tag_CPU_arch].i == TAG_CPU_ARCH_V7
6308 && (out_attr[Tag_CPU_arch_profile].i == 'A'
6309 || out_attr[Tag_CPU_arch_profile].i == 0))
6310 globals->fix_cortex_a8 = 1;
6311 else
6312 globals->fix_cortex_a8 = 0;
6313 }
6314 }
6315
6316
6317 void
6318 bfd_elf32_arm_set_vfp11_fix (bfd *obfd, struct bfd_link_info *link_info)
6319 {
6320 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
6321 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
6322
6323 if (globals == NULL)
6324 return;
6325 /* We assume that ARMv7+ does not need the VFP11 denorm erratum fix. */
6326 if (out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V7)
6327 {
6328 switch (globals->vfp11_fix)
6329 {
6330 case BFD_ARM_VFP11_FIX_DEFAULT:
6331 case BFD_ARM_VFP11_FIX_NONE:
6332 globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
6333 break;
6334
6335 default:
6336 /* Give a warning, but do as the user requests anyway. */
6337 (*_bfd_error_handler) (_("%B: warning: selected VFP11 erratum "
6338 "workaround is not necessary for target architecture"), obfd);
6339 }
6340 }
6341 else if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_DEFAULT)
6342 /* For earlier architectures, we might need the workaround, but do not
6343 enable it by default. If users are running with broken hardware, they
6344 must enable the erratum fix explicitly. */
6345 globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
6346 }
6347
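/* For orientation only (a summary, not a definitive option reference):
   the bfd_arm_vfp11_fix value handled above normally comes from ld's
   --vfp11-denorm-fix= option, roughly

       --vfp11-denorm-fix=none     ->  BFD_ARM_VFP11_FIX_NONE
       --vfp11-denorm-fix=scalar   ->  BFD_ARM_VFP11_FIX_SCALAR
       --vfp11-denorm-fix=vector   ->  BFD_ARM_VFP11_FIX_VECTOR

   with BFD_ARM_VFP11_FIX_DEFAULT meaning "not specified", which is what
   the auto-selection above resolves.  */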
6348
6349 enum bfd_arm_vfp11_pipe
6350 {
6351 VFP11_FMAC,
6352 VFP11_LS,
6353 VFP11_DS,
6354 VFP11_BAD
6355 };
6356
6357 /* Return a VFP register number. This is encoded as RX:X for single-precision
6358 registers, or X:RX for double-precision registers, where RX is the group of
6359 four bits in the instruction encoding and X is the single extension bit.
6360 RX and X fields are specified using their lowest (starting) bit. The return
6361 value is:
6362
6363 0...31: single-precision registers s0...s31
6364 32...63: double-precision registers d0...d31.
6365
6366 Although X should be zero for VFP11 (encoding d0...d15 only), we might
6367 encounter VFP3 instructions, so we allow the full range for DP registers. */
6368
6369 static unsigned int
6370 bfd_arm_vfp11_regno (unsigned int insn, bfd_boolean is_double, unsigned int rx,
6371 unsigned int x)
6372 {
6373 if (is_double)
6374 return (((insn >> rx) & 0xf) | (((insn >> x) & 1) << 4)) + 32;
6375 else
6376 return (((insn >> rx) & 0xf) << 1) | ((insn >> x) & 1);
6377 }
6378
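/* Worked example of the encoding above, for orientation: with RX bits
   0011 and X bit 1, a single-precision operand decodes to (3 << 1) | 1
   = 7, i.e. s7, while a double-precision operand with the same fields
   gives (3 | (1 << 4)) + 32 = 51, i.e. d19 in the 32...63 range.  */
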
6379 /* Set bits in *WMASK according to a register number REG as encoded by
6380 bfd_arm_vfp11_regno(). Ignore d16-d31. */
6381
6382 static void
6383 bfd_arm_vfp11_write_mask (unsigned int *wmask, unsigned int reg)
6384 {
6385 if (reg < 32)
6386 *wmask |= 1 << reg;
6387 else if (reg < 48)
6388 *wmask |= 3 << ((reg - 32) * 2);
6389 }
6390
6391 /* Return TRUE if WMASK overwrites anything in REGS. */
6392
6393 static bfd_boolean
6394 bfd_arm_vfp11_antidependency (unsigned int wmask, int *regs, int numregs)
6395 {
6396 int i;
6397
6398 for (i = 0; i < numregs; i++)
6399 {
6400 unsigned int reg = regs[i];
6401
6402 if (reg < 32 && (wmask & (1 << reg)) != 0)
6403 return TRUE;
6404
6405 reg -= 32;
6406
6407 if (reg >= 16)
6408 continue;
6409
6410 if ((wmask & (3 << (reg * 2))) != 0)
6411 return TRUE;
6412 }
6413
6414 return FALSE;
6415 }
6416
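/* Worked example for the two helpers above: writing d3 (register number
   32 + 3 = 35) sets the bits of its two overlapping singles, s6 and s7,
   i.e. *wmask |= 3 << 6.  If a preceding FMAC-pipeline instruction had
   s6 (or d3 itself) among its input registers, the antidependency check
   returns TRUE and a veneer will be generated for it.  */
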
6417 /* In this function, we're interested in two things: finding input registers
6418 for VFP data-processing instructions, and finding the set of registers which
6419 arbitrary VFP instructions may write to. We use a 32-bit unsigned int to
6420 hold the written set, so FLDM etc. are easy to deal with (we're only
6421 interested in 32 SP registers or 16 DP registers, due to the VFP version
6422 implemented by the chip in question). DP registers are marked by setting
6423 both SP registers in the write mask. */
6424
6425 static enum bfd_arm_vfp11_pipe
6426 bfd_arm_vfp11_insn_decode (unsigned int insn, unsigned int *destmask, int *regs,
6427 int *numregs)
6428 {
6429 enum bfd_arm_vfp11_pipe vpipe = VFP11_BAD;
6430 bfd_boolean is_double = ((insn & 0xf00) == 0xb00) ? 1 : 0;
6431
6432 if ((insn & 0x0f000e10) == 0x0e000a00) /* A data-processing insn. */
6433 {
6434 unsigned int pqrs;
6435 unsigned int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
6436 unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);
6437
6438 pqrs = ((insn & 0x00800000) >> 20)
6439 | ((insn & 0x00300000) >> 19)
6440 | ((insn & 0x00000040) >> 6);
6441
6442 switch (pqrs)
6443 {
6444 case 0: /* fmac[sd]. */
6445 case 1: /* fnmac[sd]. */
6446 case 2: /* fmsc[sd]. */
6447 case 3: /* fnmsc[sd]. */
6448 vpipe = VFP11_FMAC;
6449 bfd_arm_vfp11_write_mask (destmask, fd);
6450 regs[0] = fd;
6451 regs[1] = bfd_arm_vfp11_regno (insn, is_double, 16, 7); /* Fn. */
6452 regs[2] = fm;
6453 *numregs = 3;
6454 break;
6455
6456 case 4: /* fmul[sd]. */
6457 case 5: /* fnmul[sd]. */
6458 case 6: /* fadd[sd]. */
6459 case 7: /* fsub[sd]. */
6460 vpipe = VFP11_FMAC;
6461 goto vfp_binop;
6462
6463 case 8: /* fdiv[sd]. */
6464 vpipe = VFP11_DS;
6465 vfp_binop:
6466 bfd_arm_vfp11_write_mask (destmask, fd);
6467 regs[0] = bfd_arm_vfp11_regno (insn, is_double, 16, 7); /* Fn. */
6468 regs[1] = fm;
6469 *numregs = 2;
6470 break;
6471
6472 case 15: /* extended opcode. */
6473 {
6474 unsigned int extn = ((insn >> 15) & 0x1e)
6475 | ((insn >> 7) & 1);
6476
6477 switch (extn)
6478 {
6479 case 0: /* fcpy[sd]. */
6480 case 1: /* fabs[sd]. */
6481 case 2: /* fneg[sd]. */
6482 case 8: /* fcmp[sd]. */
6483 case 9: /* fcmpe[sd]. */
6484 case 10: /* fcmpz[sd]. */
6485 case 11: /* fcmpez[sd]. */
6486 case 16: /* fuito[sd]. */
6487 case 17: /* fsito[sd]. */
6488 case 24: /* ftoui[sd]. */
6489 case 25: /* ftouiz[sd]. */
6490 case 26: /* ftosi[sd]. */
6491 case 27: /* ftosiz[sd]. */
6492 /* These instructions will not bounce due to underflow. */
6493 *numregs = 0;
6494 vpipe = VFP11_FMAC;
6495 break;
6496
6497 case 3: /* fsqrt[sd]. */
6498 /* fsqrt cannot underflow, but it can (perhaps) overwrite
6499 registers to cause the erratum in previous instructions. */
6500 bfd_arm_vfp11_write_mask (destmask, fd);
6501 vpipe = VFP11_DS;
6502 break;
6503
6504 case 15: /* fcvt{ds,sd}. */
6505 {
6506 int rnum = 0;
6507
6508 bfd_arm_vfp11_write_mask (destmask, fd);
6509
6510 /* Only FCVTSD can underflow. */
6511 if ((insn & 0x100) != 0)
6512 regs[rnum++] = fm;
6513
6514 *numregs = rnum;
6515
6516 vpipe = VFP11_FMAC;
6517 }
6518 break;
6519
6520 default:
6521 return VFP11_BAD;
6522 }
6523 }
6524 break;
6525
6526 default:
6527 return VFP11_BAD;
6528 }
6529 }
6530 /* Two-register transfer. */
6531 else if ((insn & 0x0fe00ed0) == 0x0c400a10)
6532 {
6533 unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);
6534
6535 if ((insn & 0x100000) == 0)
6536 {
6537 if (is_double)
6538 bfd_arm_vfp11_write_mask (destmask, fm);
6539 else
6540 {
6541 bfd_arm_vfp11_write_mask (destmask, fm);
6542 bfd_arm_vfp11_write_mask (destmask, fm + 1);
6543 }
6544 }
6545
6546 vpipe = VFP11_LS;
6547 }
6548 else if ((insn & 0x0e100e00) == 0x0c100a00) /* A load insn. */
6549 {
6550 int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
6551 unsigned int puw = ((insn >> 21) & 0x1) | (((insn >> 23) & 3) << 1);
6552
6553 switch (puw)
6554 {
6555 case 0: /* Two-reg transfer. We should catch these above. */
6556 abort ();
6557
6558 case 2: /* fldm[sdx]. */
6559 case 3:
6560 case 5:
6561 {
6562 unsigned int i, offset = insn & 0xff;
6563
6564 if (is_double)
6565 offset >>= 1;
6566
6567 for (i = fd; i < fd + offset; i++)
6568 bfd_arm_vfp11_write_mask (destmask, i);
6569 }
6570 break;
6571
6572 case 4: /* fld[sd]. */
6573 case 6:
6574 bfd_arm_vfp11_write_mask (destmask, fd);
6575 break;
6576
6577 default:
6578 return VFP11_BAD;
6579 }
6580
6581 vpipe = VFP11_LS;
6582 }
6583 /* Single-register transfer. Note L==0. */
6584 else if ((insn & 0x0f100e10) == 0x0e000a10)
6585 {
6586 unsigned int opcode = (insn >> 21) & 7;
6587 unsigned int fn = bfd_arm_vfp11_regno (insn, is_double, 16, 7);
6588
6589 switch (opcode)
6590 {
6591 case 0: /* fmsr/fmdlr. */
6592 case 1: /* fmdhr. */
6593 /* Mark fmdhr and fmdlr as writing to the whole of the DP
6594 destination register. I don't know if this is exactly right,
6595 but it is the conservative choice. */
6596 bfd_arm_vfp11_write_mask (destmask, fn);
6597 break;
6598
6599 case 7: /* fmxr. */
6600 break;
6601 }
6602
6603 vpipe = VFP11_LS;
6604 }
6605
6606 return vpipe;
6607 }
6608
6609
6610 static int elf32_arm_compare_mapping (const void * a, const void * b);
6611
6612
6613 /* Look for potentially-troublesome code sequences which might trigger the
6614 VFP11 denormal/antidependency erratum. See, e.g., the ARM1136 errata sheet
6615 (available from ARM) for details of the erratum. A short version is
6616 described in ld.texinfo. */
6617
6618 bfd_boolean
6619 bfd_elf32_arm_vfp11_erratum_scan (bfd *abfd, struct bfd_link_info *link_info)
6620 {
6621 asection *sec;
6622 bfd_byte *contents = NULL;
6623 int state = 0;
6624 int regs[3], numregs = 0;
6625 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
6626 int use_vector;
6627
6628 if (globals == NULL)
6629 return FALSE;
6630 use_vector = (globals->vfp11_fix == BFD_ARM_VFP11_FIX_VECTOR);
6631 /* We use a simple FSM to match troublesome VFP11 instruction sequences.
6632 The states transition as follows:
6633
6634 0 -> 1 (vector) or 0 -> 2 (scalar)
6635 A VFP FMAC-pipeline instruction has been seen. Fill
6636 regs[0]..regs[numregs-1] with its input operands. Remember this
6637 instruction in 'first_fmac'.
6638
6639 1 -> 2
6640 Any instruction, except for a VFP instruction which overwrites
6641 regs[*].
6642
6643 1 -> 3 [ -> 0 ] or
6644 2 -> 3 [ -> 0 ]
6645 A VFP instruction has been seen which overwrites any of regs[*].
6646 We must make a veneer! Reset state to 0 before examining next
6647 instruction.
6648
6649 2 -> 0
6650 If we fail to match anything in state 2, reset to state 0 and reset
6651 the instruction pointer to the instruction after 'first_fmac'.
6652
6653 If the VFP11 vector mode is in use, there must be at least two unrelated
6654 instructions between anti-dependent VFP11 instructions to properly avoid
6655 triggering the erratum, hence the use of the extra state 1. */
6656
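  /* A concrete, purely illustrative scalar-mode sequence that the FSM
     above flags:

         fmuld  d1, d2, d3    @ FMAC-pipeline op; inputs d2, d3 recorded
         fldd   d2, [r0]      @ next VFP insn overwrites an input (d2)

     the second instruction creates the antidependency, so state 3 is
     reached and a veneer is recorded for the fmuld.  Any non-VFP or
     non-overwriting instruction in between would instead reset the scan
     (state 2 -> 0).  */
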
6657 /* If we are only performing a partial link do not bother
6658 to construct any glue. */
6659 if (link_info->relocatable)
6660 return TRUE;
6661
6662 /* Skip if this bfd does not correspond to an ELF image. */
6663 if (! is_arm_elf (abfd))
6664 return TRUE;
6665
6666 /* We should have chosen a fix type by the time we get here. */
6667 BFD_ASSERT (globals->vfp11_fix != BFD_ARM_VFP11_FIX_DEFAULT);
6668
6669 if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_NONE)
6670 return TRUE;
6671
6672 /* Skip this BFD if it corresponds to an executable or dynamic object. */
6673 if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
6674 return TRUE;
6675
6676 for (sec = abfd->sections; sec != NULL; sec = sec->next)
6677 {
6678 unsigned int i, span, first_fmac = 0, veneer_of_insn = 0;
6679 struct _arm_elf_section_data *sec_data;
6680
6681 /* If we don't have executable progbits, we're not interested in this
6682 section. Also skip if section is to be excluded. */
6683 if (elf_section_type (sec) != SHT_PROGBITS
6684 || (elf_section_flags (sec) & SHF_EXECINSTR) == 0
6685 || (sec->flags & SEC_EXCLUDE) != 0
6686 || sec->sec_info_type == SEC_INFO_TYPE_JUST_SYMS
6687 || sec->output_section == bfd_abs_section_ptr
6688 || strcmp (sec->name, VFP11_ERRATUM_VENEER_SECTION_NAME) == 0)
6689 continue;
6690
6691 sec_data = elf32_arm_section_data (sec);
6692
6693 if (sec_data->mapcount == 0)
6694 continue;
6695
6696 if (elf_section_data (sec)->this_hdr.contents != NULL)
6697 contents = elf_section_data (sec)->this_hdr.contents;
6698 else if (! bfd_malloc_and_get_section (abfd, sec, &contents))
6699 goto error_return;
6700
6701 qsort (sec_data->map, sec_data->mapcount, sizeof (elf32_arm_section_map),
6702 elf32_arm_compare_mapping);
6703
6704 for (span = 0; span < sec_data->mapcount; span++)
6705 {
6706 unsigned int span_start = sec_data->map[span].vma;
6707 unsigned int span_end = (span == sec_data->mapcount - 1)
6708 ? sec->size : sec_data->map[span + 1].vma;
6709 char span_type = sec_data->map[span].type;
6710
6711 /* FIXME: Only ARM mode is supported at present. We may need to
6712 support Thumb-2 mode also at some point. */
6713 if (span_type != 'a')
6714 continue;
6715
6716 for (i = span_start; i < span_end;)
6717 {
6718 unsigned int next_i = i + 4;
6719 unsigned int insn = bfd_big_endian (abfd)
6720 ? (contents[i] << 24)
6721 | (contents[i + 1] << 16)
6722 | (contents[i + 2] << 8)
6723 | contents[i + 3]
6724 : (contents[i + 3] << 24)
6725 | (contents[i + 2] << 16)
6726 | (contents[i + 1] << 8)
6727 | contents[i];
6728 unsigned int writemask = 0;
6729 enum bfd_arm_vfp11_pipe vpipe;
6730
6731 switch (state)
6732 {
6733 case 0:
6734 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask, regs,
6735 &numregs);
6736 /* I'm assuming the VFP11 erratum can trigger with denorm
6737 operands on either the FMAC or the DS pipeline. This might
6738 lead to slightly overenthusiastic veneer insertion. */
6739 if (vpipe == VFP11_FMAC || vpipe == VFP11_DS)
6740 {
6741 state = use_vector ? 1 : 2;
6742 first_fmac = i;
6743 veneer_of_insn = insn;
6744 }
6745 break;
6746
6747 case 1:
6748 {
6749 int other_regs[3], other_numregs;
6750 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
6751 other_regs,
6752 &other_numregs);
6753 if (vpipe != VFP11_BAD
6754 && bfd_arm_vfp11_antidependency (writemask, regs,
6755 numregs))
6756 state = 3;
6757 else
6758 state = 2;
6759 }
6760 break;
6761
6762 case 2:
6763 {
6764 int other_regs[3], other_numregs;
6765 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
6766 other_regs,
6767 &other_numregs);
6768 if (vpipe != VFP11_BAD
6769 && bfd_arm_vfp11_antidependency (writemask, regs,
6770 numregs))
6771 state = 3;
6772 else
6773 {
6774 state = 0;
6775 next_i = first_fmac + 4;
6776 }
6777 }
6778 break;
6779
6780 case 3:
6781 abort (); /* Should be unreachable. */
6782 }
6783
6784 if (state == 3)
6785 {
6786 elf32_vfp11_erratum_list *newerr = (elf32_vfp11_erratum_list *)
6787 bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
6788
6789 elf32_arm_section_data (sec)->erratumcount += 1;
6790
6791 newerr->u.b.vfp_insn = veneer_of_insn;
6792
6793 switch (span_type)
6794 {
6795 case 'a':
6796 newerr->type = VFP11_ERRATUM_BRANCH_TO_ARM_VENEER;
6797 break;
6798
6799 default:
6800 abort ();
6801 }
6802
6803 record_vfp11_erratum_veneer (link_info, newerr, abfd, sec,
6804 first_fmac);
6805
6806 newerr->vma = -1;
6807
6808 newerr->next = sec_data->erratumlist;
6809 sec_data->erratumlist = newerr;
6810
6811 state = 0;
6812 }
6813
6814 i = next_i;
6815 }
6816 }
6817
6818 if (contents != NULL
6819 && elf_section_data (sec)->this_hdr.contents != contents)
6820 free (contents);
6821 contents = NULL;
6822 }
6823
6824 return TRUE;
6825
6826 error_return:
6827 if (contents != NULL
6828 && elf_section_data (sec)->this_hdr.contents != contents)
6829 free (contents);
6830
6831 return FALSE;
6832 }
6833
6834 /* Find virtual-memory addresses for VFP11 erratum veneers and their return
6835 locations after sections have been laid out, using specially-named symbols. */
6836
6837 void
6838 bfd_elf32_arm_vfp11_fix_veneer_locations (bfd *abfd,
6839 struct bfd_link_info *link_info)
6840 {
6841 asection *sec;
6842 struct elf32_arm_link_hash_table *globals;
6843 char *tmp_name;
6844
6845 if (link_info->relocatable)
6846 return;
6847
6848 /* Skip if this bfd does not correspond to an ELF image. */
6849 if (! is_arm_elf (abfd))
6850 return;
6851
6852 globals = elf32_arm_hash_table (link_info);
6853 if (globals == NULL)
6854 return;
6855
6856 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
6857 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
6858
6859 for (sec = abfd->sections; sec != NULL; sec = sec->next)
6860 {
6861 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
6862 elf32_vfp11_erratum_list *errnode = sec_data->erratumlist;
6863
6864 for (; errnode != NULL; errnode = errnode->next)
6865 {
6866 struct elf_link_hash_entry *myh;
6867 bfd_vma vma;
6868
6869 switch (errnode->type)
6870 {
6871 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
6872 case VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER:
6873 /* Find veneer symbol. */
6874 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
6875 errnode->u.b.veneer->u.v.id);
6876
6877 myh = elf_link_hash_lookup
6878 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
6879
6880 if (myh == NULL)
6881 (*_bfd_error_handler) (_("%B: unable to find VFP11 veneer "
6882 "`%s'"), abfd, tmp_name);
6883
6884 vma = myh->root.u.def.section->output_section->vma
6885 + myh->root.u.def.section->output_offset
6886 + myh->root.u.def.value;
6887
6888 errnode->u.b.veneer->vma = vma;
6889 break;
6890
6891 case VFP11_ERRATUM_ARM_VENEER:
6892 case VFP11_ERRATUM_THUMB_VENEER:
6893 /* Find return location. */
6894 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
6895 errnode->u.v.id);
6896
6897 myh = elf_link_hash_lookup
6898 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
6899
6900 if (myh == NULL)
6901 (*_bfd_error_handler) (_("%B: unable to find VFP11 veneer "
6902 "`%s'"), abfd, tmp_name);
6903
6904 vma = myh->root.u.def.section->output_section->vma
6905 + myh->root.u.def.section->output_offset
6906 + myh->root.u.def.value;
6907
6908 errnode->u.v.branch->vma = vma;
6909 break;
6910
6911 default:
6912 abort ();
6913 }
6914 }
6915 }
6916
6917 free (tmp_name);
6918 }
6919
6920
6921 /* Set target relocation values needed during linking. */
6922
6923 void
6924 bfd_elf32_arm_set_target_relocs (struct bfd *output_bfd,
6925 struct bfd_link_info *link_info,
6926 int target1_is_rel,
6927 char * target2_type,
6928 int fix_v4bx,
6929 int use_blx,
6930 bfd_arm_vfp11_fix vfp11_fix,
6931 int no_enum_warn, int no_wchar_warn,
6932 int pic_veneer, int fix_cortex_a8,
6933 int fix_arm1176)
6934 {
6935 struct elf32_arm_link_hash_table *globals;
6936
6937 globals = elf32_arm_hash_table (link_info);
6938 if (globals == NULL)
6939 return;
6940
6941 globals->target1_is_rel = target1_is_rel;
6942 if (strcmp (target2_type, "rel") == 0)
6943 globals->target2_reloc = R_ARM_REL32;
6944 else if (strcmp (target2_type, "abs") == 0)
6945 globals->target2_reloc = R_ARM_ABS32;
6946 else if (strcmp (target2_type, "got-rel") == 0)
6947 globals->target2_reloc = R_ARM_GOT_PREL;
6948 else
6949 {
6950 _bfd_error_handler (_("Invalid TARGET2 relocation type '%s'."),
6951 target2_type);
6952 }
6953 globals->fix_v4bx = fix_v4bx;
6954 globals->use_blx |= use_blx;
6955 globals->vfp11_fix = vfp11_fix;
6956 globals->pic_veneer = pic_veneer;
6957 globals->fix_cortex_a8 = fix_cortex_a8;
6958 globals->fix_arm1176 = fix_arm1176;
6959
6960 BFD_ASSERT (is_arm_elf (output_bfd));
6961 elf_arm_tdata (output_bfd)->no_enum_size_warning = no_enum_warn;
6962 elf_arm_tdata (output_bfd)->no_wchar_size_warning = no_wchar_warn;
6963 }
6964
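/* For orientation only: the parameters of bfd_elf32_arm_set_target_relocs
   above are normally filled in from ld's ARM-specific command-line
   options, approximately as follows (see ld/emultempl/armelf.em for the
   authoritative mapping):

       --target1-rel / --target1-abs         ->  target1_is_rel
       --target2=<rel|abs|got-rel>           ->  target2_type
       --fix-v4bx, --fix-v4bx-interworking   ->  fix_v4bx (1 or 2)
       --use-blx                             ->  use_blx
       --vfp11-denorm-fix=<...>              ->  vfp11_fix
       --no-enum-size-warning                ->  no_enum_warn
       --no-wchar-size-warning               ->  no_wchar_warn
       --pic-veneer                          ->  pic_veneer
       --fix-cortex-a8 / --no-fix-cortex-a8  ->  fix_cortex_a8
       --fix-arm1176 / --no-fix-arm1176      ->  fix_arm1176  */
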
6965 /* Replace the target offset of a Thumb bl or b.w instruction. */
6966
6967 static void
6968 insert_thumb_branch (bfd *abfd, long int offset, bfd_byte *insn)
6969 {
6970 bfd_vma upper;
6971 bfd_vma lower;
6972 int reloc_sign;
6973
6974 BFD_ASSERT ((offset & 1) == 0);
6975
6976 upper = bfd_get_16 (abfd, insn);
6977 lower = bfd_get_16 (abfd, insn + 2);
6978 reloc_sign = (offset < 0) ? 1 : 0;
6979 upper = (upper & ~(bfd_vma) 0x7ff)
6980 | ((offset >> 12) & 0x3ff)
6981 | (reloc_sign << 10);
6982 lower = (lower & ~(bfd_vma) 0x2fff)
6983 | (((!((offset >> 23) & 1)) ^ reloc_sign) << 13)
6984 | (((!((offset >> 22) & 1)) ^ reloc_sign) << 11)
6985 | ((offset >> 1) & 0x7ff);
6986 bfd_put_16 (abfd, upper, insn);
6987 bfd_put_16 (abfd, lower, insn + 2);
6988 }
6989
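/* Bit layout used by insert_thumb_branch () above, for reference: this is
   the Thumb-2 style BL/B.W immediate split, where

       upper halfword:  S (sign) in bit 10, imm10 = offset[21:12]
       lower halfword:  J1 in bit 13, J2 in bit 11, imm11 = offset[11:1]
       J1 = NOT (offset[23]) XOR S,  J2 = NOT (offset[22]) XOR S

   so for small positive offsets both J bits come out as 1, matching the
   older unscrambled Thumb BL encoding.  */
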
6990 /* Thumb code calling an ARM function. */
6991
6992 static int
6993 elf32_thumb_to_arm_stub (struct bfd_link_info * info,
6994 const char * name,
6995 bfd * input_bfd,
6996 bfd * output_bfd,
6997 asection * input_section,
6998 bfd_byte * hit_data,
6999 asection * sym_sec,
7000 bfd_vma offset,
7001 bfd_signed_vma addend,
7002 bfd_vma val,
7003 char **error_message)
7004 {
7005 asection * s = 0;
7006 bfd_vma my_offset;
7007 long int ret_offset;
7008 struct elf_link_hash_entry * myh;
7009 struct elf32_arm_link_hash_table * globals;
7010
7011 myh = find_thumb_glue (info, name, error_message);
7012 if (myh == NULL)
7013 return FALSE;
7014
7015 globals = elf32_arm_hash_table (info);
7016 BFD_ASSERT (globals != NULL);
7017 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
7018
7019 my_offset = myh->root.u.def.value;
7020
7021 s = bfd_get_section_by_name (globals->bfd_of_glue_owner,
7022 THUMB2ARM_GLUE_SECTION_NAME);
7023
7024 BFD_ASSERT (s != NULL);
7025 BFD_ASSERT (s->contents != NULL);
7026 BFD_ASSERT (s->output_section != NULL);
7027
7028 if ((my_offset & 0x01) == 0x01)
7029 {
7030 if (sym_sec != NULL
7031 && sym_sec->owner != NULL
7032 && !INTERWORK_FLAG (sym_sec->owner))
7033 {
7034 (*_bfd_error_handler)
7035 (_("%B(%s): warning: interworking not enabled.\n"
7036 " first occurrence: %B: Thumb call to ARM"),
7037 sym_sec->owner, input_bfd, name);
7038
7039 return FALSE;
7040 }
7041
7042 --my_offset;
7043 myh->root.u.def.value = my_offset;
7044
7045 put_thumb_insn (globals, output_bfd, (bfd_vma) t2a1_bx_pc_insn,
7046 s->contents + my_offset);
7047
7048 put_thumb_insn (globals, output_bfd, (bfd_vma) t2a2_noop_insn,
7049 s->contents + my_offset + 2);
7050
7051 ret_offset =
7052 /* Address of destination of the stub. */
7053 ((bfd_signed_vma) val)
7054 - ((bfd_signed_vma)
7055 /* Offset from the start of the current section
7056 to the start of the stubs. */
7057 (s->output_offset
7058 /* Offset of the start of this stub from the start of the stubs. */
7059 + my_offset
7060 /* Address of the start of the current section. */
7061 + s->output_section->vma)
7062 /* The branch instruction is 4 bytes into the stub. */
7063 + 4
7064 /* ARM branches work from the pc of the instruction + 8. */
7065 + 8);
7066
7067 put_arm_insn (globals, output_bfd,
7068 (bfd_vma) t2a3_b_insn | ((ret_offset >> 2) & 0x00FFFFFF),
7069 s->contents + my_offset + 4);
7070 }
7071
7072 BFD_ASSERT (my_offset <= globals->thumb_glue_size);
7073
7074 /* Now go back and fix up the original BL insn to point to here. */
7075 ret_offset =
7076 /* Address of where the stub is located. */
7077 (s->output_section->vma + s->output_offset + my_offset)
7078 /* Address of where the BL is located. */
7079 - (input_section->output_section->vma + input_section->output_offset
7080 + offset)
7081 /* Addend in the relocation. */
7082 - addend
7083 /* Biassing for PC-relative addressing. */
7084 - 8;
7085
7086 insert_thumb_branch (input_bfd, ret_offset, hit_data - input_section->vma);
7087
7088 return TRUE;
7089 }
7090
7091 /* Populate an Arm to Thumb stub. Returns the stub symbol. */
7092
7093 static struct elf_link_hash_entry *
7094 elf32_arm_create_thumb_stub (struct bfd_link_info * info,
7095 const char * name,
7096 bfd * input_bfd,
7097 bfd * output_bfd,
7098 asection * sym_sec,
7099 bfd_vma val,
7100 asection * s,
7101 char ** error_message)
7102 {
7103 bfd_vma my_offset;
7104 long int ret_offset;
7105 struct elf_link_hash_entry * myh;
7106 struct elf32_arm_link_hash_table * globals;
7107
7108 myh = find_arm_glue (info, name, error_message);
7109 if (myh == NULL)
7110 return NULL;
7111
7112 globals = elf32_arm_hash_table (info);
7113 BFD_ASSERT (globals != NULL);
7114 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
7115
7116 my_offset = myh->root.u.def.value;
7117
7118 if ((my_offset & 0x01) == 0x01)
7119 {
7120 if (sym_sec != NULL
7121 && sym_sec->owner != NULL
7122 && !INTERWORK_FLAG (sym_sec->owner))
7123 {
7124 (*_bfd_error_handler)
7125 (_("%B(%s): warning: interworking not enabled.\n"
7126 " first occurrence: %B: arm call to thumb"),
7127 sym_sec->owner, input_bfd, name);
7128 }
7129
7130 --my_offset;
7131 myh->root.u.def.value = my_offset;
7132
7133 if (info->shared || globals->root.is_relocatable_executable
7134 || globals->pic_veneer)
7135 {
7136 /* For relocatable objects we can't use absolute addresses,
7137 so construct the address from a relative offset. */
7138 /* TODO: If the offset is small it's probably worth
7139 constructing the address with adds. */
7140 put_arm_insn (globals, output_bfd, (bfd_vma) a2t1p_ldr_insn,
7141 s->contents + my_offset);
7142 put_arm_insn (globals, output_bfd, (bfd_vma) a2t2p_add_pc_insn,
7143 s->contents + my_offset + 4);
7144 put_arm_insn (globals, output_bfd, (bfd_vma) a2t3p_bx_r12_insn,
7145 s->contents + my_offset + 8);
7146 /* Adjust the offset by 4 for the position of the add,
7147 and 8 for the pipeline offset. */
7148 ret_offset = (val - (s->output_offset
7149 + s->output_section->vma
7150 + my_offset + 12))
7151 | 1;
7152 bfd_put_32 (output_bfd, ret_offset,
7153 s->contents + my_offset + 12);
7154 }
7155 else if (globals->use_blx)
7156 {
7157 put_arm_insn (globals, output_bfd, (bfd_vma) a2t1v5_ldr_insn,
7158 s->contents + my_offset);
7159
7160 /* It's a thumb address. Add the low order bit. */
7161 bfd_put_32 (output_bfd, val | a2t2v5_func_addr_insn,
7162 s->contents + my_offset + 4);
7163 }
7164 else
7165 {
7166 put_arm_insn (globals, output_bfd, (bfd_vma) a2t1_ldr_insn,
7167 s->contents + my_offset);
7168
7169 put_arm_insn (globals, output_bfd, (bfd_vma) a2t2_bx_r12_insn,
7170 s->contents + my_offset + 4);
7171
7172 /* It's a thumb address. Add the low order bit. */
7173 bfd_put_32 (output_bfd, val | a2t3_func_addr_insn,
7174 s->contents + my_offset + 8);
7175
7176 my_offset += 12;
7177 }
7178 }
7179
7180 BFD_ASSERT (my_offset <= globals->arm_glue_size);
7181
7182 return myh;
7183 }
7184
7185 /* Arm code calling a Thumb function. */
7186
7187 static int
7188 elf32_arm_to_thumb_stub (struct bfd_link_info * info,
7189 const char * name,
7190 bfd * input_bfd,
7191 bfd * output_bfd,
7192 asection * input_section,
7193 bfd_byte * hit_data,
7194 asection * sym_sec,
7195 bfd_vma offset,
7196 bfd_signed_vma addend,
7197 bfd_vma val,
7198 char **error_message)
7199 {
7200 unsigned long int tmp;
7201 bfd_vma my_offset;
7202 asection * s;
7203 long int ret_offset;
7204 struct elf_link_hash_entry * myh;
7205 struct elf32_arm_link_hash_table * globals;
7206
7207 globals = elf32_arm_hash_table (info);
7208 BFD_ASSERT (globals != NULL);
7209 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
7210
7211 s = bfd_get_section_by_name (globals->bfd_of_glue_owner,
7212 ARM2THUMB_GLUE_SECTION_NAME);
7213 BFD_ASSERT (s != NULL);
7214 BFD_ASSERT (s->contents != NULL);
7215 BFD_ASSERT (s->output_section != NULL);
7216
7217 myh = elf32_arm_create_thumb_stub (info, name, input_bfd, output_bfd,
7218 sym_sec, val, s, error_message);
7219 if (!myh)
7220 return FALSE;
7221
7222 my_offset = myh->root.u.def.value;
7223 tmp = bfd_get_32 (input_bfd, hit_data);
7224 tmp = tmp & 0xFF000000;
7225
7226 /* ARM PC-relative: the PC reads as the branch address + 8, so subtract 8. */
7227 ret_offset = (s->output_offset
7228 + my_offset
7229 + s->output_section->vma
7230 - (input_section->output_offset
7231 + input_section->output_section->vma
7232 + offset + addend)
7233 - 8);
7234
7235 tmp = tmp | ((ret_offset >> 2) & 0x00FFFFFF);
7236
7237 bfd_put_32 (output_bfd, (bfd_vma) tmp, hit_data - input_section->vma);
7238
7239 return TRUE;
7240 }
7241
7242 /* Populate Arm stub for an exported Thumb function. */
7243
7244 static bfd_boolean
7245 elf32_arm_to_thumb_export_stub (struct elf_link_hash_entry *h, void * inf)
7246 {
7247 struct bfd_link_info * info = (struct bfd_link_info *) inf;
7248 asection * s;
7249 struct elf_link_hash_entry * myh;
7250 struct elf32_arm_link_hash_entry *eh;
7251 struct elf32_arm_link_hash_table * globals;
7252 asection *sec;
7253 bfd_vma val;
7254 char *error_message;
7255
7256 eh = elf32_arm_hash_entry (h);
7257 /* Allocate stubs for exported Thumb functions on v4t. */
7258 if (eh->export_glue == NULL)
7259 return TRUE;
7260
7261 globals = elf32_arm_hash_table (info);
7262 BFD_ASSERT (globals != NULL);
7263 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
7264
7265 s = bfd_get_section_by_name (globals->bfd_of_glue_owner,
7266 ARM2THUMB_GLUE_SECTION_NAME);
7267 BFD_ASSERT (s != NULL);
7268 BFD_ASSERT (s->contents != NULL);
7269 BFD_ASSERT (s->output_section != NULL);
7270
7271 sec = eh->export_glue->root.u.def.section;
7272
7273 BFD_ASSERT (sec->output_section != NULL);
7274
7275 val = eh->export_glue->root.u.def.value + sec->output_offset
7276 + sec->output_section->vma;
7277
7278 myh = elf32_arm_create_thumb_stub (info, h->root.root.string,
7279 h->root.u.def.section->owner,
7280 globals->obfd, sec, val, s,
7281 &error_message);
7282 BFD_ASSERT (myh);
7283 return TRUE;
7284 }
7285
7286 /* Populate ARMv4 BX veneers. Returns the absolute address of the veneer. */
7287
7288 static bfd_vma
7289 elf32_arm_bx_glue (struct bfd_link_info * info, int reg)
7290 {
7291 bfd_byte *p;
7292 bfd_vma glue_addr;
7293 asection *s;
7294 struct elf32_arm_link_hash_table *globals;
7295
7296 globals = elf32_arm_hash_table (info);
7297 BFD_ASSERT (globals != NULL);
7298 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
7299
7300 s = bfd_get_section_by_name (globals->bfd_of_glue_owner,
7301 ARM_BX_GLUE_SECTION_NAME);
7302 BFD_ASSERT (s != NULL);
7303 BFD_ASSERT (s->contents != NULL);
7304 BFD_ASSERT (s->output_section != NULL);
7305
7306 BFD_ASSERT (globals->bx_glue_offset[reg] & 2);
7307
7308 glue_addr = globals->bx_glue_offset[reg] & ~(bfd_vma)3;
7309
7310 if ((globals->bx_glue_offset[reg] & 1) == 0)
7311 {
7312 p = s->contents + glue_addr;
7313 bfd_put_32 (globals->obfd, armbx1_tst_insn + (reg << 16), p);
7314 bfd_put_32 (globals->obfd, armbx2_moveq_insn + reg, p + 4);
7315 bfd_put_32 (globals->obfd, armbx3_bx_insn + reg, p + 8);
7316 globals->bx_glue_offset[reg] |= 1;
7317 }
7318
7319 return glue_addr + s->output_section->vma + s->output_offset;
7320 }
7321
7322 /* Generate Arm stubs for exported Thumb symbols. */
7323 static void
7324 elf32_arm_begin_write_processing (bfd *abfd ATTRIBUTE_UNUSED,
7325 struct bfd_link_info *link_info)
7326 {
7327 struct elf32_arm_link_hash_table * globals;
7328
7329 if (link_info == NULL)
7330 /* Ignore this if we are not called by the ELF backend linker. */
7331 return;
7332
7333 globals = elf32_arm_hash_table (link_info);
7334 if (globals == NULL)
7335 return;
7336
7337 /* If blx is available then exported Thumb symbols are OK and there is
7338 nothing to do. */
7339 if (globals->use_blx)
7340 return;
7341
7342 elf_link_hash_traverse (&globals->root, elf32_arm_to_thumb_export_stub,
7343 link_info);
7344 }
7345
7346 /* Reserve space for COUNT dynamic relocations in relocation section
7347 SRELOC. */
7348
7349 static void
7350 elf32_arm_allocate_dynrelocs (struct bfd_link_info *info, asection *sreloc,
7351 bfd_size_type count)
7352 {
7353 struct elf32_arm_link_hash_table *htab;
7354
7355 htab = elf32_arm_hash_table (info);
7356 BFD_ASSERT (htab->root.dynamic_sections_created);
7357 if (sreloc == NULL)
7358 abort ();
7359 sreloc->size += RELOC_SIZE (htab) * count;
7360 }
7361
7362 /* Reserve space for COUNT R_ARM_IRELATIVE relocations. If the link is
7363 dynamic, the relocations should go in SRELOC, otherwise they should
7364 go in the special .rel.iplt section. */
7365
7366 static void
7367 elf32_arm_allocate_irelocs (struct bfd_link_info *info, asection *sreloc,
7368 bfd_size_type count)
7369 {
7370 struct elf32_arm_link_hash_table *htab;
7371
7372 htab = elf32_arm_hash_table (info);
7373 if (!htab->root.dynamic_sections_created)
7374 htab->root.irelplt->size += RELOC_SIZE (htab) * count;
7375 else
7376 {
7377 BFD_ASSERT (sreloc != NULL);
7378 sreloc->size += RELOC_SIZE (htab) * count;
7379 }
7380 }
7381
7382 /* Add relocation REL to the end of relocation section SRELOC. */
7383
7384 static void
7385 elf32_arm_add_dynreloc (bfd *output_bfd, struct bfd_link_info *info,
7386 asection *sreloc, Elf_Internal_Rela *rel)
7387 {
7388 bfd_byte *loc;
7389 struct elf32_arm_link_hash_table *htab;
7390
7391 htab = elf32_arm_hash_table (info);
7392 if (!htab->root.dynamic_sections_created
7393 && ELF32_R_TYPE (rel->r_info) == R_ARM_IRELATIVE)
7394 sreloc = htab->root.irelplt;
7395 if (sreloc == NULL)
7396 abort ();
7397 loc = sreloc->contents;
7398 loc += sreloc->reloc_count++ * RELOC_SIZE (htab);
7399 if (sreloc->reloc_count * RELOC_SIZE (htab) > sreloc->size)
7400 abort ();
7401 SWAP_RELOC_OUT (htab) (output_bfd, rel, loc);
7402 }
7403
7404 /* Allocate room for a PLT entry described by ROOT_PLT and ARM_PLT.
7405 IS_IPLT_ENTRY says whether the entry belongs to .iplt rather than
7406 to .plt. */
7407
7408 static void
7409 elf32_arm_allocate_plt_entry (struct bfd_link_info *info,
7410 bfd_boolean is_iplt_entry,
7411 union gotplt_union *root_plt,
7412 struct arm_plt_info *arm_plt)
7413 {
7414 struct elf32_arm_link_hash_table *htab;
7415 asection *splt;
7416 asection *sgotplt;
7417
7418 htab = elf32_arm_hash_table (info);
7419
7420 if (is_iplt_entry)
7421 {
7422 splt = htab->root.iplt;
7423 sgotplt = htab->root.igotplt;
7424
7425 /* Allocate room for an R_ARM_IRELATIVE relocation in .rel.iplt. */
7426 elf32_arm_allocate_irelocs (info, htab->root.irelplt, 1);
7427 }
7428 else
7429 {
7430 splt = htab->root.splt;
7431 sgotplt = htab->root.sgotplt;
7432
7433 /* Allocate room for an R_ARM_JUMP_SLOT relocation in .rel.plt. */
7434 elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1);
7435
7436 /* If this is the first .plt entry, make room for the special
7437 first entry. */
7438 if (splt->size == 0)
7439 splt->size += htab->plt_header_size;
7440 }
7441
7442 /* Allocate the PLT entry itself, including any leading Thumb stub. */
7443 if (elf32_arm_plt_needs_thumb_stub_p (info, arm_plt))
7444 splt->size += PLT_THUMB_STUB_SIZE;
7445 root_plt->offset = splt->size;
7446 splt->size += htab->plt_entry_size;
7447
7448 if (!htab->symbian_p)
7449 {
7450 /* We also need to make an entry in the .got.plt section, which
7451 will be placed in the .got section by the linker script. */
7452 arm_plt->got_offset = sgotplt->size - 8 * htab->num_tls_desc;
7453 sgotplt->size += 4;
7454 }
7455 }
7456
7457 static bfd_vma
7458 arm_movw_immediate (bfd_vma value)
7459 {
7460 return (value & 0x00000fff) | ((value & 0x0000f000) << 4);
7461 }
7462
7463 static bfd_vma
7464 arm_movt_immediate (bfd_vma value)
7465 {
7466 return ((value & 0x0fff0000) >> 16) | ((value & 0xf0000000) >> 12);
7467 }
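/* Illustrative sketch (not part of the build): how a 32-bit value is
   scattered into the MOVW/MOVT immediate fields used by the NaCl PLT
   entries below.  The A32 encoding splits each 16-bit half into imm4
   (instruction bits 19:16) and imm12 (instruction bits 11:0):

       bfd_vma disp = 0x12345678;
       arm_movw_immediate (disp);    yields 0x00050678  (imm4=0x5, imm12=0x678)
       arm_movt_immediate (disp);    yields 0x00010234  (imm4=0x1, imm12=0x234)

   ORed into the MOVW/MOVT templates this loads 0x5678 and then 0x1234
   into the destination register, reconstructing the original value.  */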
7468
7469 /* Fill in a PLT entry and its associated GOT slot. If DYNINDX == -1,
7470 the entry lives in .iplt and resolves to (*SYM_VALUE)().
7471 Otherwise, DYNINDX is the index of the symbol in the dynamic
7472 symbol table and SYM_VALUE is undefined.
7473
7474 ROOT_PLT points to the offset of the PLT entry from the start of its
7475 section (.iplt or .plt). ARM_PLT points to the symbol's ARM-specific
7476 bookkeeping information. */
7477
7478 static void
7479 elf32_arm_populate_plt_entry (bfd *output_bfd, struct bfd_link_info *info,
7480 union gotplt_union *root_plt,
7481 struct arm_plt_info *arm_plt,
7482 int dynindx, bfd_vma sym_value)
7483 {
7484 struct elf32_arm_link_hash_table *htab;
7485 asection *sgot;
7486 asection *splt;
7487 asection *srel;
7488 bfd_byte *loc;
7489 bfd_vma plt_index;
7490 Elf_Internal_Rela rel;
7491 bfd_vma plt_header_size;
7492 bfd_vma got_header_size;
7493
7494 htab = elf32_arm_hash_table (info);
7495
7496 /* Pick the appropriate sections and sizes. */
7497 if (dynindx == -1)
7498 {
7499 splt = htab->root.iplt;
7500 sgot = htab->root.igotplt;
7501 srel = htab->root.irelplt;
7502
7503 /* There are no reserved entries in .igot.plt, and no special
7504 first entry in .iplt. */
7505 got_header_size = 0;
7506 plt_header_size = 0;
7507 }
7508 else
7509 {
7510 splt = htab->root.splt;
7511 sgot = htab->root.sgotplt;
7512 srel = htab->root.srelplt;
7513
7514 got_header_size = get_elf_backend_data (output_bfd)->got_header_size;
7515 plt_header_size = htab->plt_header_size;
7516 }
7517 BFD_ASSERT (splt != NULL && srel != NULL);
7518
7519 /* Fill in the entry in the procedure linkage table. */
7520 if (htab->symbian_p)
7521 {
7522 BFD_ASSERT (dynindx >= 0);
7523 put_arm_insn (htab, output_bfd,
7524 elf32_arm_symbian_plt_entry[0],
7525 splt->contents + root_plt->offset);
7526 bfd_put_32 (output_bfd,
7527 elf32_arm_symbian_plt_entry[1],
7528 splt->contents + root_plt->offset + 4);
7529
7530 /* Fill in the entry in the .rel.plt section. */
7531 rel.r_offset = (splt->output_section->vma
7532 + splt->output_offset
7533 + root_plt->offset + 4);
7534 rel.r_info = ELF32_R_INFO (dynindx, R_ARM_GLOB_DAT);
7535
7536 /* Get the index in the procedure linkage table which
7537 corresponds to this symbol. This is the index of this symbol
7538 in all the symbols for which we are making plt entries. The
7539 first entry in the procedure linkage table is reserved. */
7540 plt_index = ((root_plt->offset - plt_header_size)
7541 / htab->plt_entry_size);
7542 }
7543 else
7544 {
7545 bfd_vma got_offset, got_address, plt_address;
7546 bfd_vma got_displacement, initial_got_entry;
7547 bfd_byte * ptr;
7548
7549 BFD_ASSERT (sgot != NULL);
7550
7551 /* Get the offset into the .(i)got.plt table of the entry that
7552 corresponds to this function. */
7553 got_offset = (arm_plt->got_offset & -2);
7554
7555 /* Get the index in the procedure linkage table which
7556 corresponds to this symbol. This is the index of this symbol
7557 in all the symbols for which we are making plt entries.
7558 After the reserved .got.plt entries, all symbols appear in
7559 the same order as in .plt. */
7560 plt_index = (got_offset - got_header_size) / 4;
7561
7562 /* Calculate the address of the GOT entry. */
7563 got_address = (sgot->output_section->vma
7564 + sgot->output_offset
7565 + got_offset);
7566
7567 /* ...and the address of the PLT entry. */
7568 plt_address = (splt->output_section->vma
7569 + splt->output_offset
7570 + root_plt->offset);
7571
7572 ptr = splt->contents + root_plt->offset;
7573 if (htab->vxworks_p && info->shared)
7574 {
7575 unsigned int i;
7576 bfd_vma val;
7577
7578 for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
7579 {
7580 val = elf32_arm_vxworks_shared_plt_entry[i];
7581 if (i == 2)
7582 val |= got_address - sgot->output_section->vma;
7583 if (i == 5)
7584 val |= plt_index * RELOC_SIZE (htab);
7585 if (i == 2 || i == 5)
7586 bfd_put_32 (output_bfd, val, ptr);
7587 else
7588 put_arm_insn (htab, output_bfd, val, ptr);
7589 }
7590 }
7591 else if (htab->vxworks_p)
7592 {
7593 unsigned int i;
7594 bfd_vma val;
7595
7596 for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
7597 {
7598 val = elf32_arm_vxworks_exec_plt_entry[i];
7599 if (i == 2)
7600 val |= got_address;
7601 if (i == 4)
7602 val |= 0xffffff & -((root_plt->offset + i * 4 + 8) >> 2);
7603 if (i == 5)
7604 val |= plt_index * RELOC_SIZE (htab);
7605 if (i == 2 || i == 5)
7606 bfd_put_32 (output_bfd, val, ptr);
7607 else
7608 put_arm_insn (htab, output_bfd, val, ptr);
7609 }
7610
7611 loc = (htab->srelplt2->contents
7612 + (plt_index * 2 + 1) * RELOC_SIZE (htab));
7613
7614 /* Create the .rela.plt.unloaded R_ARM_ABS32 relocation
7615 referencing the GOT for this PLT entry. */
7616 rel.r_offset = plt_address + 8;
7617 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
7618 rel.r_addend = got_offset;
7619 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
7620 loc += RELOC_SIZE (htab);
7621
7622 /* Create the R_ARM_ABS32 relocation referencing the
7623 beginning of the PLT for this GOT entry. */
7624 rel.r_offset = got_address;
7625 rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
7626 rel.r_addend = 0;
7627 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
7628 }
7629 else if (htab->nacl_p)
7630 {
7631 /* Calculate the displacement between the PLT slot and the
7632 common tail that's part of the special initial PLT slot. */
7633 int32_t tail_displacement
7634 = ((splt->output_section->vma + splt->output_offset
7635 + ARM_NACL_PLT_TAIL_OFFSET)
7636 - (plt_address + htab->plt_entry_size + 4));
7637 BFD_ASSERT ((tail_displacement & 3) == 0);
7638 tail_displacement >>= 2;
7639
7640 BFD_ASSERT ((tail_displacement & 0xff000000) == 0
7641 || (-tail_displacement & 0xff000000) == 0);
7642
7643 /* Calculate the displacement between the PLT slot and the entry
7644 in the GOT. The offset accounts for the value produced by
7645 adding to pc in the penultimate instruction of the PLT stub. */
7646 got_displacement = (got_address
7647 - (plt_address + htab->plt_entry_size));
7648
7649 /* NaCl does not support interworking at all. */
7650 BFD_ASSERT (!elf32_arm_plt_needs_thumb_stub_p (info, arm_plt));
7651
7652 put_arm_insn (htab, output_bfd,
7653 elf32_arm_nacl_plt_entry[0]
7654 | arm_movw_immediate (got_displacement),
7655 ptr + 0);
7656 put_arm_insn (htab, output_bfd,
7657 elf32_arm_nacl_plt_entry[1]
7658 | arm_movt_immediate (got_displacement),
7659 ptr + 4);
7660 put_arm_insn (htab, output_bfd,
7661 elf32_arm_nacl_plt_entry[2],
7662 ptr + 8);
7663 put_arm_insn (htab, output_bfd,
7664 elf32_arm_nacl_plt_entry[3]
7665 | (tail_displacement & 0x00ffffff),
7666 ptr + 12);
7667 }
7668 else
7669 {
7670 /* Calculate the displacement between the PLT slot and the
7671 entry in the GOT. The eight-byte offset accounts for the
7672 value produced by adding to pc in the first instruction
7673 of the PLT stub. */
7674 got_displacement = got_address - (plt_address + 8);
7675
7676 BFD_ASSERT ((got_displacement & 0xf0000000) == 0);
7677
7678 if (elf32_arm_plt_needs_thumb_stub_p (info, arm_plt))
7679 {
7680 put_thumb_insn (htab, output_bfd,
7681 elf32_arm_plt_thumb_stub[0], ptr - 4);
7682 put_thumb_insn (htab, output_bfd,
7683 elf32_arm_plt_thumb_stub[1], ptr - 2);
7684 }
7685
7686 put_arm_insn (htab, output_bfd,
7687 elf32_arm_plt_entry[0]
7688 | ((got_displacement & 0x0ff00000) >> 20),
7689 ptr + 0);
7690 put_arm_insn (htab, output_bfd,
7691 elf32_arm_plt_entry[1]
7692 | ((got_displacement & 0x000ff000) >> 12),
7693 ptr + 4);
7694 put_arm_insn (htab, output_bfd,
7695 elf32_arm_plt_entry[2]
7696 | (got_displacement & 0x00000fff),
7697 ptr + 8);
7698 #ifdef FOUR_WORD_PLT
7699 bfd_put_32 (output_bfd, elf32_arm_plt_entry[3], ptr + 12);
7700 #endif
7701 }
7702
7703 /* Fill in the entry in the .rel(a).(i)plt section. */
7704 rel.r_offset = got_address;
7705 rel.r_addend = 0;
7706 if (dynindx == -1)
7707 {
7708 /* .igot.plt entries use IRELATIVE relocations against SYM_VALUE.
7709 The dynamic linker or static executable then calls SYM_VALUE
7710 to determine the correct run-time value of the .igot.plt entry. */
7711 rel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
7712 initial_got_entry = sym_value;
7713 }
7714 else
7715 {
7716 rel.r_info = ELF32_R_INFO (dynindx, R_ARM_JUMP_SLOT);
7717 initial_got_entry = (splt->output_section->vma
7718 + splt->output_offset);
7719 }
7720
7721 /* Fill in the entry in the global offset table. */
7722 bfd_put_32 (output_bfd, initial_got_entry,
7723 sgot->contents + got_offset);
7724 }
7725
7726 loc = srel->contents + plt_index * RELOC_SIZE (htab);
7727 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
7728 }
7729
7730 /* Some relocations map to different relocations depending on the
7731 target. Return the real relocation. */
7732
7733 static int
7734 arm_real_reloc_type (struct elf32_arm_link_hash_table * globals,
7735 int r_type)
7736 {
7737 switch (r_type)
7738 {
7739 case R_ARM_TARGET1:
7740 if (globals->target1_is_rel)
7741 return R_ARM_REL32;
7742 else
7743 return R_ARM_ABS32;
7744
7745 case R_ARM_TARGET2:
7746 return globals->target2_reloc;
7747
7748 default:
7749 return r_type;
7750 }
7751 }
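/* For example (a sketch only; the actual mapping is controlled by linker
   options such as --target1-rel/--target1-abs and --target2=):

       arm_real_reloc_type (globals, R_ARM_TARGET1)
	 -> R_ARM_REL32   if target1_is_rel is set
	 -> R_ARM_ABS32   otherwise

       arm_real_reloc_type (globals, R_ARM_TARGET2)
	 -> globals->target2_reloc, e.g. R_ARM_GOT_PREL on GNU/Linux EABI
	    or R_ARM_ABS32 when linking with --target2=abs.  */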
7752
7753 /* Return the base VMA address which should be subtracted from real addresses
7754 when resolving @dtpoff relocation.
7755 This is PT_TLS segment p_vaddr. */
7756
7757 static bfd_vma
7758 dtpoff_base (struct bfd_link_info *info)
7759 {
7760 /* If tls_sec is NULL, we should have signalled an error already. */
7761 if (elf_hash_table (info)->tls_sec == NULL)
7762 return 0;
7763 return elf_hash_table (info)->tls_sec->vma;
7764 }
7765
7766 /* Return the relocation value for @tpoff relocation
7767 if STT_TLS virtual address is ADDRESS. */
7768
7769 static bfd_vma
7770 tpoff (struct bfd_link_info *info, bfd_vma address)
7771 {
7772 struct elf_link_hash_table *htab = elf_hash_table (info);
7773 bfd_vma base;
7774
7775 /* If tls_sec is NULL, we should have signalled an error already. */
7776 if (htab->tls_sec == NULL)
7777 return 0;
7778 base = align_power ((bfd_vma) TCB_SIZE, htab->tls_sec->alignment_power);
7779 return address - htab->tls_sec->vma + base;
7780 }
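/* Worked example (illustrative only, assuming TCB_SIZE is 8 as defined
   earlier in this file): with a PT_TLS segment at vma 0x11000 aligned
   to 8 bytes (alignment_power == 3) and a TLS symbol at 0x11010,

       @dtpoff = 0x11010 - dtpoff_base (info)            = 0x10
       @tpoff  = 0x11010 - 0x11000 + align_power (8, 3)  = 0x18

   i.e. @tpoff values are biased by the (aligned) thread control block
   that sits in front of the first TLS block.  */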
7781
7782 /* Perform an R_ARM_ABS12 relocation on the field pointed to by DATA.
7783 VALUE is the relocation value. */
7784
7785 static bfd_reloc_status_type
7786 elf32_arm_abs12_reloc (bfd *abfd, void *data, bfd_vma value)
7787 {
7788 if (value > 0xfff)
7789 return bfd_reloc_overflow;
7790
7791 value |= bfd_get_32 (abfd, data) & 0xfffff000;
7792 bfd_put_32 (abfd, value, data);
7793 return bfd_reloc_ok;
7794 }
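/* Worked example (illustrative only): for a load/store with a 12-bit
   offset field, relocating with VALUE == 0x123 merges the offset into
   the low bits while preserving the rest of the instruction, e.g. if
   the word at DATA holds 0xe5901000 (ldr r1, [r0]) it becomes
   0xe5901123 (ldr r1, [r0, #0x123]).  A VALUE of 0x1001 (> 0xfff)
   returns bfd_reloc_overflow instead.  */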
7795
7796 /* Handle TLS relaxations. Relaxing is possible for symbols that use
7797 R_ARM_GOTDESC, R_ARM_{,THM_}TLS_CALL or
7798 R_ARM_{,THM_}TLS_DESCSEQ relocations, during a static link.
7799
7800 Return bfd_reloc_ok if we're done, bfd_reloc_continue if the caller
7801 is to then call final_link_relocate. Return other values in the
7802 case of error.
7803
7804 FIXME: When --emit-relocs is in effect, we'll emit relocs describing
7805 the pre-relaxed code. It would be nice if the relocs were updated
7806 to match the optimization. */
7807
7808 static bfd_reloc_status_type
7809 elf32_arm_tls_relax (struct elf32_arm_link_hash_table *globals,
7810 bfd *input_bfd, asection *input_sec, bfd_byte *contents,
7811 Elf_Internal_Rela *rel, unsigned long is_local)
7812 {
7813 unsigned long insn;
7814
7815 switch (ELF32_R_TYPE (rel->r_info))
7816 {
7817 default:
7818 return bfd_reloc_notsupported;
7819
7820 case R_ARM_TLS_GOTDESC:
7821 if (is_local)
7822 insn = 0;
7823 else
7824 {
7825 insn = bfd_get_32 (input_bfd, contents + rel->r_offset);
7826 if (insn & 1)
7827 insn -= 5; /* THUMB */
7828 else
7829 insn -= 8; /* ARM */
7830 }
7831 bfd_put_32 (input_bfd, insn, contents + rel->r_offset);
7832 return bfd_reloc_continue;
7833
7834 case R_ARM_THM_TLS_DESCSEQ:
7835 /* Thumb insn. */
7836 insn = bfd_get_16 (input_bfd, contents + rel->r_offset);
7837 if ((insn & 0xff78) == 0x4478) /* add rx, pc */
7838 {
7839 if (is_local)
7840 /* nop */
7841 bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
7842 }
7843 else if ((insn & 0xffc0) == 0x6840) /* ldr rx,[ry,#4] */
7844 {
7845 if (is_local)
7846 /* nop */
7847 bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
7848 else
7849 /* ldr rx,[ry] */
7850 bfd_put_16 (input_bfd, insn & 0xf83f, contents + rel->r_offset);
7851 }
7852 else if ((insn & 0xff87) == 0x4780) /* blx rx */
7853 {
7854 if (is_local)
7855 /* nop */
7856 bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
7857 else
7858 /* mov r0, rx */
7859 bfd_put_16 (input_bfd, 0x4600 | (insn & 0x78),
7860 contents + rel->r_offset);
7861 }
7862 else
7863 {
7864 if ((insn & 0xf000) == 0xf000 || (insn & 0xf800) == 0xe800)
7865 /* It's a 32 bit instruction, fetch the rest of it for
7866 error generation. */
7867 insn = (insn << 16)
7868 | bfd_get_16 (input_bfd, contents + rel->r_offset + 2);
7869 (*_bfd_error_handler)
7870 (_("%B(%A+0x%lx):unexpected Thumb instruction '0x%x' in TLS trampoline"),
7871 input_bfd, input_sec, (unsigned long)rel->r_offset, insn);
7872 return bfd_reloc_notsupported;
7873 }
7874 break;
7875
7876 case R_ARM_TLS_DESCSEQ:
7877 /* arm insn. */
7878 insn = bfd_get_32 (input_bfd, contents + rel->r_offset);
7879 if ((insn & 0xffff0ff0) == 0xe08f0000) /* add rx,pc,ry */
7880 {
7881 if (is_local)
7882 /* mov rx, ry */
7883 bfd_put_32 (input_bfd, 0xe1a00000 | (insn & 0xffff),
7884 contents + rel->r_offset);
7885 }
7886 else if ((insn & 0xfff00fff) == 0xe5900004) /* ldr rx,[ry,#4]*/
7887 {
7888 if (is_local)
7889 /* nop */
7890 bfd_put_32 (input_bfd, 0xe1a00000, contents + rel->r_offset);
7891 else
7892 /* ldr rx,[ry] */
7893 bfd_put_32 (input_bfd, insn & 0xfffff000,
7894 contents + rel->r_offset);
7895 }
7896 else if ((insn & 0xfffffff0) == 0xe12fff30) /* blx rx */
7897 {
7898 if (is_local)
7899 /* nop */
7900 bfd_put_32 (input_bfd, 0xe1a00000, contents + rel->r_offset);
7901 else
7902 /* mov r0, rx */
7903 bfd_put_32 (input_bfd, 0xe1a00000 | (insn & 0xf),
7904 contents + rel->r_offset);
7905 }
7906 else
7907 {
7908 (*_bfd_error_handler)
7909 (_("%B(%A+0x%lx):unexpected ARM instruction '0x%x' in TLS trampoline"),
7910 input_bfd, input_sec, (unsigned long)rel->r_offset, insn);
7911 return bfd_reloc_notsupported;
7912 }
7913 break;
7914
7915 case R_ARM_TLS_CALL:
7916 /* GD->IE relaxation, turn the instruction into 'nop' or
7917 'ldr r0, [pc,r0]' */
7918 insn = is_local ? 0xe1a00000 : 0xe79f0000;
7919 bfd_put_32 (input_bfd, insn, contents + rel->r_offset);
7920 break;
7921
7922 case R_ARM_THM_TLS_CALL:
7923 /* GD->IE relaxation */
7924 if (!is_local)
7925 /* add r0,pc; ldr r0, [r0] */
7926 insn = 0x44786800;
7927 else if (arch_has_thumb2_nop (globals))
7928 /* nop.w */
7929 insn = 0xf3af8000;
7930 else
7931 /* nop; nop */
7932 insn = 0xbf00bf00;
7933
7934 bfd_put_16 (input_bfd, insn >> 16, contents + rel->r_offset);
7935 bfd_put_16 (input_bfd, insn & 0xffff, contents + rel->r_offset + 2);
7936 break;
7937 }
7938 return bfd_reloc_ok;
7939 }
7940
7941 /* For a given value of n, calculate the value of G_n as required to
7942 deal with group relocations. We return it in the form of an
7943 encoded constant-and-rotation, together with the final residual. If n is
7944 specified as less than zero, then final_residual is filled with the
7945 input value and no further action is performed. */
7946
7947 static bfd_vma
7948 calculate_group_reloc_mask (bfd_vma value, int n, bfd_vma *final_residual)
7949 {
7950 int current_n;
7951 bfd_vma g_n;
7952 bfd_vma encoded_g_n = 0;
7953 bfd_vma residual = value; /* Also known as Y_n. */
7954
7955 for (current_n = 0; current_n <= n; current_n++)
7956 {
7957 int shift;
7958
7959 /* Calculate which part of the value to mask. */
7960 if (residual == 0)
7961 shift = 0;
7962 else
7963 {
7964 int msb;
7965
7966 /* Determine the most significant bit in the residual and
7967 align the resulting value to a 2-bit boundary. */
7968 for (msb = 30; msb >= 0; msb -= 2)
7969 if (residual & (3 << msb))
7970 break;
7971
7972 /* The desired shift is now (msb - 6), or zero, whichever
7973 is the greater. */
7974 shift = msb - 6;
7975 if (shift < 0)
7976 shift = 0;
7977 }
7978
7979 /* Calculate g_n in 32-bit as well as encoded constant+rotation form. */
7980 g_n = residual & (0xff << shift);
7981 encoded_g_n = (g_n >> shift)
7982 | ((g_n <= 0xff ? 0 : (32 - shift) / 2) << 8);
7983
7984 /* Calculate the residual for the next time around. */
7985 residual &= ~g_n;
7986 }
7987
7988 *final_residual = residual;
7989
7990 return encoded_g_n;
7991 }
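/* Worked example (illustrative only) for VALUE == 0x12345:

       n == 0:  G_0 = 0x12000, encoded as 0xb48 (0x48 rotated right by 22),
		residual 0x345
       n == 1:  G_1 = 0x00344, encoded as 0xfd1 (0xd1 rotated right by 30),
		residual 0x001

   A group relocation such as R_ARM_ALU_PC_G1 would insert the encoded
   G_1 constant; the residual tells the caller what is left over for
   later members of the group.  */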
7992
7993 /* Given an ARM instruction, determine whether it is an ADD or a SUB.
7994 Returns 1 if it is an ADD, -1 if it is a SUB, and 0 otherwise. */
7995
7996 static int
7997 identify_add_or_sub (bfd_vma insn)
7998 {
7999 int opcode = insn & 0x1e00000;
8000
8001 if (opcode == 1 << 23) /* ADD */
8002 return 1;
8003
8004 if (opcode == 1 << 22) /* SUB */
8005 return -1;
8006
8007 return 0;
8008 }
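/* For example (illustrative only):

       identify_add_or_sub (0xe28f0004)    add r0, pc, #4   ->  1
       identify_add_or_sub (0xe24f0004)    sub r0, pc, #4   -> -1
       identify_add_or_sub (0xe1a00000)    mov r0, r0       ->  0

   Only bits 21-24 (the data-processing opcode field) are examined.  */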
8009
8010 /* Perform a relocation as part of a final link. */
8011
8012 static bfd_reloc_status_type
8013 elf32_arm_final_link_relocate (reloc_howto_type * howto,
8014 bfd * input_bfd,
8015 bfd * output_bfd,
8016 asection * input_section,
8017 bfd_byte * contents,
8018 Elf_Internal_Rela * rel,
8019 bfd_vma value,
8020 struct bfd_link_info * info,
8021 asection * sym_sec,
8022 const char * sym_name,
8023 unsigned char st_type,
8024 enum arm_st_branch_type branch_type,
8025 struct elf_link_hash_entry * h,
8026 bfd_boolean * unresolved_reloc_p,
8027 char ** error_message)
8028 {
8029 unsigned long r_type = howto->type;
8030 unsigned long r_symndx;
8031 bfd_byte * hit_data = contents + rel->r_offset;
8032 bfd_vma * local_got_offsets;
8033 bfd_vma * local_tlsdesc_gotents;
8034 asection * sgot;
8035 asection * splt;
8036 asection * sreloc = NULL;
8037 asection * srelgot;
8038 bfd_vma addend;
8039 bfd_signed_vma signed_addend;
8040 unsigned char dynreloc_st_type;
8041 bfd_vma dynreloc_value;
8042 struct elf32_arm_link_hash_table * globals;
8043 struct elf32_arm_link_hash_entry *eh;
8044 union gotplt_union *root_plt;
8045 struct arm_plt_info *arm_plt;
8046 bfd_vma plt_offset;
8047 bfd_vma gotplt_offset;
8048 bfd_boolean has_iplt_entry;
8049
8050 globals = elf32_arm_hash_table (info);
8051 if (globals == NULL)
8052 return bfd_reloc_notsupported;
8053
8054 BFD_ASSERT (is_arm_elf (input_bfd));
8055
8056 /* Some relocation types map to different relocations depending on the
8057 target. We pick the right one here. */
8058 r_type = arm_real_reloc_type (globals, r_type);
8059
8060 /* It is possible to have linker relaxations on some TLS access
8061 models. Update our information here. */
8062 r_type = elf32_arm_tls_transition (info, r_type, h);
8063
8064 if (r_type != howto->type)
8065 howto = elf32_arm_howto_from_type (r_type);
8066
8067 /* If the start address has been set, then set the EF_ARM_HASENTRY
8068 flag. Setting this more than once is redundant, but the cost is
8069 not too high, and it keeps the code simple.
8070
8071 The test is done here, rather than somewhere else, because the
8072 start address is only set just before the final link commences.
8073
8074 Note - if the user deliberately sets a start address of 0, the
8075 flag will not be set. */
8076 if (bfd_get_start_address (output_bfd) != 0)
8077 elf_elfheader (output_bfd)->e_flags |= EF_ARM_HASENTRY;
8078
8079 eh = (struct elf32_arm_link_hash_entry *) h;
8080 sgot = globals->root.sgot;
8081 local_got_offsets = elf_local_got_offsets (input_bfd);
8082 local_tlsdesc_gotents = elf32_arm_local_tlsdesc_gotent (input_bfd);
8083
8084 if (globals->root.dynamic_sections_created)
8085 srelgot = globals->root.srelgot;
8086 else
8087 srelgot = NULL;
8088
8089 r_symndx = ELF32_R_SYM (rel->r_info);
8090
8091 if (globals->use_rel)
8092 {
8093 addend = bfd_get_32 (input_bfd, hit_data) & howto->src_mask;
8094
8095 if (addend & ((howto->src_mask + 1) >> 1))
8096 {
8097 signed_addend = -1;
8098 signed_addend &= ~ howto->src_mask;
8099 signed_addend |= addend;
8100 }
8101 else
8102 signed_addend = addend;
8103 }
8104 else
8105 addend = signed_addend = rel->r_addend;
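
  /* Worked example of the REL addend reconstruction above (illustrative
     only): for R_ARM_PC24, howto->src_mask is 0x00ffffff, so a stored
     field of 0x00fffffe has its sign bit (bit 23) set and is widened to
     signed_addend == -2, whereas 0x00000010 stays at +16.  */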
8106
8107 /* Record the symbol information that should be used in dynamic
8108 relocations. */
8109 dynreloc_st_type = st_type;
8110 dynreloc_value = value;
8111 if (branch_type == ST_BRANCH_TO_THUMB)
8112 dynreloc_value |= 1;
8113
8114 /* Find out whether the symbol has a PLT. Set ST_VALUE, BRANCH_TYPE and
8115 VALUE appropriately for relocations that we resolve at link time. */
8116 has_iplt_entry = FALSE;
8117 if (elf32_arm_get_plt_info (input_bfd, eh, r_symndx, &root_plt, &arm_plt)
8118 && root_plt->offset != (bfd_vma) -1)
8119 {
8120 plt_offset = root_plt->offset;
8121 gotplt_offset = arm_plt->got_offset;
8122
8123 if (h == NULL || eh->is_iplt)
8124 {
8125 has_iplt_entry = TRUE;
8126 splt = globals->root.iplt;
8127
8128 /* Populate .iplt entries here, because not all of them will
8129 be seen by finish_dynamic_symbol. The lower bit is set if
8130 we have already populated the entry. */
8131 if (plt_offset & 1)
8132 plt_offset--;
8133 else
8134 {
8135 elf32_arm_populate_plt_entry (output_bfd, info, root_plt, arm_plt,
8136 -1, dynreloc_value);
8137 root_plt->offset |= 1;
8138 }
8139
8140 /* Static relocations always resolve to the .iplt entry. */
8141 st_type = STT_FUNC;
8142 value = (splt->output_section->vma
8143 + splt->output_offset
8144 + plt_offset);
8145 branch_type = ST_BRANCH_TO_ARM;
8146
8147 /* If there are non-call relocations that resolve to the .iplt
8148 entry, then all dynamic ones must too. */
8149 if (arm_plt->noncall_refcount != 0)
8150 {
8151 dynreloc_st_type = st_type;
8152 dynreloc_value = value;
8153 }
8154 }
8155 else
8156 /* We populate the .plt entry in finish_dynamic_symbol. */
8157 splt = globals->root.splt;
8158 }
8159 else
8160 {
8161 splt = NULL;
8162 plt_offset = (bfd_vma) -1;
8163 gotplt_offset = (bfd_vma) -1;
8164 }
8165
8166 switch (r_type)
8167 {
8168 case R_ARM_NONE:
8169 /* We don't need to find a value for this symbol. It's just a
8170 marker. */
8171 *unresolved_reloc_p = FALSE;
8172 return bfd_reloc_ok;
8173
8174 case R_ARM_ABS12:
8175 if (!globals->vxworks_p)
8176 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
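      /* Fall through - on VxWorks targets R_ARM_ABS12 is handled by the
	 generic code below, which can also emit a dynamic relocation
	 for it.  */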
8177
8178 case R_ARM_PC24:
8179 case R_ARM_ABS32:
8180 case R_ARM_ABS32_NOI:
8181 case R_ARM_REL32:
8182 case R_ARM_REL32_NOI:
8183 case R_ARM_CALL:
8184 case R_ARM_JUMP24:
8185 case R_ARM_XPC25:
8186 case R_ARM_PREL31:
8187 case R_ARM_PLT32:
8188 /* Handle relocations which should use the PLT entry. ABS32/REL32
8189 will use the symbol's value, which may point to a PLT entry, but we
8190 don't need to handle that here. If we created a PLT entry, all
8191 branches in this object should go to it, except if the PLT is too
8192 far away, in which case a long branch stub should be inserted. */
8193 if ((r_type != R_ARM_ABS32 && r_type != R_ARM_REL32
8194 && r_type != R_ARM_ABS32_NOI && r_type != R_ARM_REL32_NOI
8195 && r_type != R_ARM_CALL
8196 && r_type != R_ARM_JUMP24
8197 && r_type != R_ARM_PLT32)
8198 && plt_offset != (bfd_vma) -1)
8199 {
8200 /* If we've created a .plt section, and assigned a PLT entry
8201 to this function, it must either be a STT_GNU_IFUNC reference
8202 or not be known to bind locally. In other cases, we should
8203 have cleared the PLT entry by now. */
8204 BFD_ASSERT (has_iplt_entry || !SYMBOL_CALLS_LOCAL (info, h));
8205
8206 value = (splt->output_section->vma
8207 + splt->output_offset
8208 + plt_offset);
8209 *unresolved_reloc_p = FALSE;
8210 return _bfd_final_link_relocate (howto, input_bfd, input_section,
8211 contents, rel->r_offset, value,
8212 rel->r_addend);
8213 }
8214
8215 /* When generating a shared object or relocatable executable, these
8216 relocations are copied into the output file to be resolved at
8217 run time. */
8218 if ((info->shared || globals->root.is_relocatable_executable)
8219 && (input_section->flags & SEC_ALLOC)
8220 && !(globals->vxworks_p
8221 && strcmp (input_section->output_section->name,
8222 ".tls_vars") == 0)
8223 && ((r_type != R_ARM_REL32 && r_type != R_ARM_REL32_NOI)
8224 || !SYMBOL_CALLS_LOCAL (info, h))
8225 && (!strstr (input_section->name, STUB_SUFFIX))
8226 && (h == NULL
8227 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
8228 || h->root.type != bfd_link_hash_undefweak)
8229 && r_type != R_ARM_PC24
8230 && r_type != R_ARM_CALL
8231 && r_type != R_ARM_JUMP24
8232 && r_type != R_ARM_PREL31
8233 && r_type != R_ARM_PLT32)
8234 {
8235 Elf_Internal_Rela outrel;
8236 bfd_boolean skip, relocate;
8237
8238 *unresolved_reloc_p = FALSE;
8239
8240 if (sreloc == NULL && globals->root.dynamic_sections_created)
8241 {
8242 sreloc = _bfd_elf_get_dynamic_reloc_section (input_bfd, input_section,
8243 ! globals->use_rel);
8244
8245 if (sreloc == NULL)
8246 return bfd_reloc_notsupported;
8247 }
8248
8249 skip = FALSE;
8250 relocate = FALSE;
8251
8252 outrel.r_addend = addend;
8253 outrel.r_offset =
8254 _bfd_elf_section_offset (output_bfd, info, input_section,
8255 rel->r_offset);
8256 if (outrel.r_offset == (bfd_vma) -1)
8257 skip = TRUE;
8258 else if (outrel.r_offset == (bfd_vma) -2)
8259 skip = TRUE, relocate = TRUE;
8260 outrel.r_offset += (input_section->output_section->vma
8261 + input_section->output_offset);
8262
8263 if (skip)
8264 memset (&outrel, 0, sizeof outrel);
8265 else if (h != NULL
8266 && h->dynindx != -1
8267 && (!info->shared
8268 || !info->symbolic
8269 || !h->def_regular))
8270 outrel.r_info = ELF32_R_INFO (h->dynindx, r_type);
8271 else
8272 {
8273 int symbol;
8274
8275 /* This symbol is local, or marked to become local. */
8276 BFD_ASSERT (r_type == R_ARM_ABS32 || r_type == R_ARM_ABS32_NOI);
8277 if (globals->symbian_p)
8278 {
8279 asection *osec;
8280
8281 /* On Symbian OS, the data segment and text segment
8282 can be relocated independently. Therefore, we
8283 must indicate the segment to which this
8284 relocation is relative. The BPABI allows us to
8285 use any symbol in the right segment; we just use
8286 the section symbol as it is convenient. (We
8287 cannot use the symbol given by "h" directly as it
8288 will not appear in the dynamic symbol table.)
8289
8290 Note that the dynamic linker ignores the section
8291 symbol value, so we don't subtract osec->vma
8292 from the emitted reloc addend. */
8293 if (sym_sec)
8294 osec = sym_sec->output_section;
8295 else
8296 osec = input_section->output_section;
8297 symbol = elf_section_data (osec)->dynindx;
8298 if (symbol == 0)
8299 {
8300 struct elf_link_hash_table *htab = elf_hash_table (info);
8301
8302 if ((osec->flags & SEC_READONLY) == 0
8303 && htab->data_index_section != NULL)
8304 osec = htab->data_index_section;
8305 else
8306 osec = htab->text_index_section;
8307 symbol = elf_section_data (osec)->dynindx;
8308 }
8309 BFD_ASSERT (symbol != 0);
8310 }
8311 else
8312 /* On SVR4-ish systems, the dynamic loader cannot
8313 relocate the text and data segments independently,
8314 so the symbol does not matter. */
8315 symbol = 0;
8316 if (dynreloc_st_type == STT_GNU_IFUNC)
8317 /* We have an STT_GNU_IFUNC symbol that doesn't resolve
8318 to the .iplt entry. Instead, every non-call reference
8319 must use an R_ARM_IRELATIVE relocation to obtain the
8320 correct run-time address. */
8321 outrel.r_info = ELF32_R_INFO (symbol, R_ARM_IRELATIVE);
8322 else
8323 outrel.r_info = ELF32_R_INFO (symbol, R_ARM_RELATIVE);
8324 if (globals->use_rel)
8325 relocate = TRUE;
8326 else
8327 outrel.r_addend += dynreloc_value;
8328 }
8329
8330 elf32_arm_add_dynreloc (output_bfd, info, sreloc, &outrel);
8331
8332 /* If this reloc is against an external symbol, we do not want to
8333 fiddle with the addend. Otherwise, we need to include the symbol
8334 value so that it becomes an addend for the dynamic reloc. */
8335 if (! relocate)
8336 return bfd_reloc_ok;
8337
8338 return _bfd_final_link_relocate (howto, input_bfd, input_section,
8339 contents, rel->r_offset,
8340 dynreloc_value, (bfd_vma) 0);
8341 }
8342 else switch (r_type)
8343 {
8344 case R_ARM_ABS12:
8345 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
8346
8347 case R_ARM_XPC25: /* Arm BLX instruction. */
8348 case R_ARM_CALL:
8349 case R_ARM_JUMP24:
8350 case R_ARM_PC24: /* Arm B/BL instruction. */
8351 case R_ARM_PLT32:
8352 {
8353 struct elf32_arm_stub_hash_entry *stub_entry = NULL;
8354
8355 if (r_type == R_ARM_XPC25)
8356 {
8357 /* Check for Arm calling Arm function. */
8358 /* FIXME: Should we translate the instruction into a BL
8359 instruction instead ? */
8360 if (branch_type != ST_BRANCH_TO_THUMB)
8361 (*_bfd_error_handler)
8362 (_("\%B: Warning: Arm BLX instruction targets Arm function '%s'."),
8363 input_bfd,
8364 h ? h->root.root.string : "(local)");
8365 }
8366 else if (r_type == R_ARM_PC24)
8367 {
8368 /* Check for Arm calling Thumb function. */
8369 if (branch_type == ST_BRANCH_TO_THUMB)
8370 {
8371 if (elf32_arm_to_thumb_stub (info, sym_name, input_bfd,
8372 output_bfd, input_section,
8373 hit_data, sym_sec, rel->r_offset,
8374 signed_addend, value,
8375 error_message))
8376 return bfd_reloc_ok;
8377 else
8378 return bfd_reloc_dangerous;
8379 }
8380 }
8381
8382 /* Check if a stub has to be inserted because the
8383 destination is too far or we are changing mode. */
8384 if ( r_type == R_ARM_CALL
8385 || r_type == R_ARM_JUMP24
8386 || r_type == R_ARM_PLT32)
8387 {
8388 enum elf32_arm_stub_type stub_type = arm_stub_none;
8389 struct elf32_arm_link_hash_entry *hash;
8390
8391 hash = (struct elf32_arm_link_hash_entry *) h;
8392 stub_type = arm_type_of_stub (info, input_section, rel,
8393 st_type, &branch_type,
8394 hash, value, sym_sec,
8395 input_bfd, sym_name);
8396
8397 if (stub_type != arm_stub_none)
8398 {
8399 /* The target is out of reach, so redirect the
8400 branch to the local stub for this function. */
8401 stub_entry = elf32_arm_get_stub_entry (input_section,
8402 sym_sec, h,
8403 rel, globals,
8404 stub_type);
8405 {
8406 if (stub_entry != NULL)
8407 value = (stub_entry->stub_offset
8408 + stub_entry->stub_sec->output_offset
8409 + stub_entry->stub_sec->output_section->vma);
8410
8411 if (plt_offset != (bfd_vma) -1)
8412 *unresolved_reloc_p = FALSE;
8413 }
8414 }
8415 else
8416 {
8417 /* If the call goes through a PLT entry, make sure to
8418 check distance to the right destination address. */
8419 if (plt_offset != (bfd_vma) -1)
8420 {
8421 value = (splt->output_section->vma
8422 + splt->output_offset
8423 + plt_offset);
8424 *unresolved_reloc_p = FALSE;
8425 /* The PLT entry is in ARM mode, regardless of the
8426 target function. */
8427 branch_type = ST_BRANCH_TO_ARM;
8428 }
8429 }
8430 }
8431
8432 /* The ARM ELF ABI says that this reloc is computed as: S - P + A
8433 where:
8434 S is the address of the symbol in the relocation.
8435 P is address of the instruction being relocated.
8436 A is the addend (extracted from the instruction) in bytes.
8437
8438 S is held in 'value'.
8439 P is the base address of the section containing the
8440 instruction plus the offset of the reloc into that
8441 section, ie:
8442 (input_section->output_section->vma +
8443 input_section->output_offset +
8444 rel->r_offset).
8445 A is the addend, converted into bytes, ie:
8446 (signed_addend * 4)
8447
8448 Note: None of these operations have knowledge of the pipeline
8449 size of the processor, thus it is up to the assembler to
8450 encode this information into the addend. */
8451 value -= (input_section->output_section->vma
8452 + input_section->output_offset);
8453 value -= rel->r_offset;
8454 if (globals->use_rel)
8455 value += (signed_addend << howto->size);
8456 else
8457 /* RELA addends do not have to be adjusted by howto->size. */
8458 value += signed_addend;
8459
8460 signed_addend = value;
8461 signed_addend >>= howto->rightshift;
8462
8463 /* A branch to an undefined weak symbol is turned into a jump to
8464 the next instruction unless a PLT entry will be created.
8465 Do the same for local undefined symbols (but not for STN_UNDEF).
8466 The jump to the next instruction is optimized as a NOP depending
8467 on the architecture. */
8468 if (h ? (h->root.type == bfd_link_hash_undefweak
8469 && plt_offset == (bfd_vma) -1)
8470 : r_symndx != STN_UNDEF && bfd_is_und_section (sym_sec))
8471 {
8472 value = (bfd_get_32 (input_bfd, hit_data) & 0xf0000000);
8473
8474 if (arch_has_arm_nop (globals))
8475 value |= 0x0320f000;
8476 else
8477 value |= 0x01a00000; /* Using pre-UAL nop: mov r0, r0. */
8478 }
8479 else
8480 {
8481 /* Perform a signed range check. */
8482 if ( signed_addend > ((bfd_signed_vma) (howto->dst_mask >> 1))
8483 || signed_addend < - ((bfd_signed_vma) ((howto->dst_mask + 1) >> 1)))
8484 return bfd_reloc_overflow;
8485
8486 addend = (value & 2);
8487
8488 value = (signed_addend & howto->dst_mask)
8489 | (bfd_get_32 (input_bfd, hit_data) & (~ howto->dst_mask));
8490
8491 if (r_type == R_ARM_CALL)
8492 {
8493 /* Set the H bit in the BLX instruction. */
8494 if (branch_type == ST_BRANCH_TO_THUMB)
8495 {
8496 if (addend)
8497 value |= (1 << 24);
8498 else
8499 value &= ~(bfd_vma)(1 << 24);
8500 }
8501
8502 /* Select the correct instruction (BL or BLX). */
8503 /* Only if we are not handling a BL to a stub. In this
8504 case, mode switching is performed by the stub. */
8505 if (branch_type == ST_BRANCH_TO_THUMB && !stub_entry)
8506 value |= (1 << 28);
8507 else if (stub_entry || branch_type != ST_BRANCH_UNKNOWN)
8508 {
8509 value &= ~(bfd_vma)(1 << 28);
8510 value |= (1 << 24);
8511 }
8512 }
8513 }
8514 }
8515 break;
8516
8517 case R_ARM_ABS32:
8518 value += addend;
8519 if (branch_type == ST_BRANCH_TO_THUMB)
8520 value |= 1;
8521 break;
8522
8523 case R_ARM_ABS32_NOI:
8524 value += addend;
8525 break;
8526
8527 case R_ARM_REL32:
8528 value += addend;
8529 if (branch_type == ST_BRANCH_TO_THUMB)
8530 value |= 1;
8531 value -= (input_section->output_section->vma
8532 + input_section->output_offset + rel->r_offset);
8533 break;
8534
8535 case R_ARM_REL32_NOI:
8536 value += addend;
8537 value -= (input_section->output_section->vma
8538 + input_section->output_offset + rel->r_offset);
8539 break;
8540
8541 case R_ARM_PREL31:
8542 value -= (input_section->output_section->vma
8543 + input_section->output_offset + rel->r_offset);
8544 value += signed_addend;
8545 if (! h || h->root.type != bfd_link_hash_undefweak)
8546 {
8547 /* Check for overflow. */
8548 if ((value ^ (value >> 1)) & (1 << 30))
8549 return bfd_reloc_overflow;
8550 }
8551 value &= 0x7fffffff;
8552 value |= (bfd_get_32 (input_bfd, hit_data) & 0x80000000);
8553 if (branch_type == ST_BRANCH_TO_THUMB)
8554 value |= 1;
8555 break;
8556 }
8557
8558 bfd_put_32 (input_bfd, value, hit_data);
8559 return bfd_reloc_ok;
8560
8561 case R_ARM_ABS8:
8562 value += addend;
8563
8564 /* There is no way to tell whether the user intended to use a signed or
8565 unsigned addend. When checking for overflow we accept either,
8566 as specified by the AAELF. */
8567 if ((long) value > 0xff || (long) value < -0x80)
8568 return bfd_reloc_overflow;
8569
8570 bfd_put_8 (input_bfd, value, hit_data);
8571 return bfd_reloc_ok;
8572
8573 case R_ARM_ABS16:
8574 value += addend;
8575
8576 /* See comment for R_ARM_ABS8. */
8577 if ((long) value > 0xffff || (long) value < -0x8000)
8578 return bfd_reloc_overflow;
8579
8580 bfd_put_16 (input_bfd, value, hit_data);
8581 return bfd_reloc_ok;
8582
8583 case R_ARM_THM_ABS5:
8584 /* Support ldr and str instructions for the thumb. */
8585 if (globals->use_rel)
8586 {
8587 /* Need to refetch addend. */
8588 addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
8589 /* ??? Need to determine shift amount from operand size. */
8590 addend >>= howto->rightshift;
8591 }
8592 value += addend;
8593
8594 /* ??? Isn't value unsigned? */
8595 if ((long) value > 0x1f || (long) value < -0x10)
8596 return bfd_reloc_overflow;
8597
8598 /* ??? Value needs to be properly shifted into place first. */
8599 value |= bfd_get_16 (input_bfd, hit_data) & 0xf83f;
8600 bfd_put_16 (input_bfd, value, hit_data);
8601 return bfd_reloc_ok;
8602
8603 case R_ARM_THM_ALU_PREL_11_0:
8604 /* Corresponds to: addw reg, pc, #offset (and similarly for subw). */
8605 {
8606 bfd_vma insn;
8607 bfd_signed_vma relocation;
8608
8609 insn = (bfd_get_16 (input_bfd, hit_data) << 16)
8610 | bfd_get_16 (input_bfd, hit_data + 2);
8611
8612 if (globals->use_rel)
8613 {
8614 signed_addend = (insn & 0xff) | ((insn & 0x7000) >> 4)
8615 | ((insn & (1 << 26)) >> 15);
8616 if (insn & 0xf00000)
8617 signed_addend = -signed_addend;
8618 }
8619
8620 relocation = value + signed_addend;
8621 relocation -= (input_section->output_section->vma
8622 + input_section->output_offset
8623 + rel->r_offset);
8624
8625 value = abs (relocation);
8626
8627 if (value >= 0x1000)
8628 return bfd_reloc_overflow;
8629
8630 insn = (insn & 0xfb0f8f00) | (value & 0xff)
8631 | ((value & 0x700) << 4)
8632 | ((value & 0x800) << 15);
8633 if (relocation < 0)
8634 insn |= 0xa00000;
8635
8636 bfd_put_16 (input_bfd, insn >> 16, hit_data);
8637 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
8638
8639 return bfd_reloc_ok;
8640 }
8641
8642 case R_ARM_THM_PC8:
8643 /* PR 10073: This reloc is not generated by the GNU toolchain,
8644 but it is supported for compatibility with third party libraries
8645 generated by other compilers, specifically the ARM/IAR. */
8646 {
8647 bfd_vma insn;
8648 bfd_signed_vma relocation;
8649
8650 insn = bfd_get_16 (input_bfd, hit_data);
8651
8652 if (globals->use_rel)
8653 addend = (insn & 0x00ff) << 2;
8654
8655 relocation = value + addend;
8656 relocation -= (input_section->output_section->vma
8657 + input_section->output_offset
8658 + rel->r_offset);
8659
8660 value = abs (relocation);
8661
8662 /* We do not check for overflow of this reloc. Although strictly
8663 speaking this is incorrect, it appears to be necessary in order
8664 to work with IAR generated relocs. Since GCC and GAS do not
8665 generate R_ARM_THM_PC8 relocs, the lack of a check should not be
8666 a problem for them. */
8667 value &= 0x3fc;
8668
8669 insn = (insn & 0xff00) | (value >> 2);
8670
8671 bfd_put_16 (input_bfd, insn, hit_data);
8672
8673 return bfd_reloc_ok;
8674 }
8675
8676 case R_ARM_THM_PC12:
8677 /* Corresponds to: ldr.w reg, [pc, #offset]. */
8678 {
8679 bfd_vma insn;
8680 bfd_signed_vma relocation;
8681
8682 insn = (bfd_get_16 (input_bfd, hit_data) << 16)
8683 | bfd_get_16 (input_bfd, hit_data + 2);
8684
8685 if (globals->use_rel)
8686 {
8687 signed_addend = insn & 0xfff;
8688 if (!(insn & (1 << 23)))
8689 signed_addend = -signed_addend;
8690 }
8691
8692 relocation = value + signed_addend;
8693 relocation -= (input_section->output_section->vma
8694 + input_section->output_offset
8695 + rel->r_offset);
8696
8697 value = abs (relocation);
8698
8699 if (value >= 0x1000)
8700 return bfd_reloc_overflow;
8701
8702 insn = (insn & 0xff7ff000) | value;
8703 if (relocation >= 0)
8704 insn |= (1 << 23);
8705
8706 bfd_put_16 (input_bfd, insn >> 16, hit_data);
8707 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
8708
8709 return bfd_reloc_ok;
8710 }
8711
8712 case R_ARM_THM_XPC22:
8713 case R_ARM_THM_CALL:
8714 case R_ARM_THM_JUMP24:
8715 /* Thumb BL (branch long instruction). */
8716 {
8717 bfd_vma relocation;
8718 bfd_vma reloc_sign;
8719 bfd_boolean overflow = FALSE;
8720 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
8721 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
8722 bfd_signed_vma reloc_signed_max;
8723 bfd_signed_vma reloc_signed_min;
8724 bfd_vma check;
8725 bfd_signed_vma signed_check;
8726 int bitsize;
8727 const int thumb2 = using_thumb2 (globals);
8728
8729 /* A branch to an undefined weak symbol is turned into a jump to
8730 the next instruction unless a PLT entry will be created.
8731 The jump to the next instruction is optimized as a NOP.W for
8732 Thumb-2 enabled architectures. */
8733 if (h && h->root.type == bfd_link_hash_undefweak
8734 && plt_offset == (bfd_vma) -1)
8735 {
8736 if (arch_has_thumb2_nop (globals))
8737 {
8738 bfd_put_16 (input_bfd, 0xf3af, hit_data);
8739 bfd_put_16 (input_bfd, 0x8000, hit_data + 2);
8740 }
8741 else
8742 {
8743 bfd_put_16 (input_bfd, 0xe000, hit_data);
8744 bfd_put_16 (input_bfd, 0xbf00, hit_data + 2);
8745 }
8746 return bfd_reloc_ok;
8747 }
8748
8749 /* Fetch the addend. We use the Thumb-2 encoding (backwards compatible
8750 with Thumb-1) involving the J1 and J2 bits. */
8751 if (globals->use_rel)
8752 {
8753 bfd_vma s = (upper_insn & (1 << 10)) >> 10;
8754 bfd_vma upper = upper_insn & 0x3ff;
8755 bfd_vma lower = lower_insn & 0x7ff;
8756 bfd_vma j1 = (lower_insn & (1 << 13)) >> 13;
8757 bfd_vma j2 = (lower_insn & (1 << 11)) >> 11;
8758 bfd_vma i1 = j1 ^ s ? 0 : 1;
8759 bfd_vma i2 = j2 ^ s ? 0 : 1;
8760
8761 addend = (i1 << 23) | (i2 << 22) | (upper << 12) | (lower << 1);
8762 /* Sign extend. */
8763 addend = (addend | ((s ? 0 : 1) << 24)) - (1 << 24);
8764
8765 signed_addend = addend;
8766 }
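
	/* Worked example of the decode above (illustrative only): a
	   typical placeholder encoding of 0xf7ff 0xfffe for an
	   unresolved Thumb BL gives S=1, imm10=0x3ff, J1=J2=1 (hence
	   I1=I2=1) and imm11=0x7fe, which reassembles and sign-extends
	   to an addend of -4, i.e. the Thumb pipeline bias.  */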
8767
8768 if (r_type == R_ARM_THM_XPC22)
8769 {
8770 /* Check for Thumb to Thumb call. */
8771 /* FIXME: Should we translate the instruction into a BL
8772 instruction instead ? */
8773 if (branch_type == ST_BRANCH_TO_THUMB)
8774 (*_bfd_error_handler)
8775 (_("%B: Warning: Thumb BLX instruction targets thumb function '%s'."),
8776 input_bfd,
8777 h ? h->root.root.string : "(local)");
8778 }
8779 else
8780 {
8781 /* If it is not a call to Thumb, assume call to Arm.
8782 If it is a call relative to a section name, then it is not a
8783 function call at all, but rather a long jump. Calls through
8784 the PLT do not require stubs. */
8785 if (branch_type == ST_BRANCH_TO_ARM && plt_offset == (bfd_vma) -1)
8786 {
8787 if (globals->use_blx && r_type == R_ARM_THM_CALL)
8788 {
8789 /* Convert BL to BLX. */
8790 lower_insn = (lower_insn & ~0x1000) | 0x0800;
8791 }
8792 else if (( r_type != R_ARM_THM_CALL)
8793 && (r_type != R_ARM_THM_JUMP24))
8794 {
8795 if (elf32_thumb_to_arm_stub
8796 (info, sym_name, input_bfd, output_bfd, input_section,
8797 hit_data, sym_sec, rel->r_offset, signed_addend, value,
8798 error_message))
8799 return bfd_reloc_ok;
8800 else
8801 return bfd_reloc_dangerous;
8802 }
8803 }
8804 else if (branch_type == ST_BRANCH_TO_THUMB
8805 && globals->use_blx
8806 && r_type == R_ARM_THM_CALL)
8807 {
8808 /* Make sure this is a BL. */
8809 lower_insn |= 0x1800;
8810 }
8811 }
8812
8813 enum elf32_arm_stub_type stub_type = arm_stub_none;
8814 if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24)
8815 {
8816 /* Check if a stub has to be inserted because the destination
8817 is too far. */
8818 struct elf32_arm_stub_hash_entry *stub_entry;
8819 struct elf32_arm_link_hash_entry *hash;
8820
8821 hash = (struct elf32_arm_link_hash_entry *) h;
8822
8823 stub_type = arm_type_of_stub (info, input_section, rel,
8824 st_type, &branch_type,
8825 hash, value, sym_sec,
8826 input_bfd, sym_name);
8827
8828 if (stub_type != arm_stub_none)
8829 {
8830 /* The target is out of reach or we are changing modes, so
8831 redirect the branch to the local stub for this
8832 function. */
8833 stub_entry = elf32_arm_get_stub_entry (input_section,
8834 sym_sec, h,
8835 rel, globals,
8836 stub_type);
8837 if (stub_entry != NULL)
8838 {
8839 value = (stub_entry->stub_offset
8840 + stub_entry->stub_sec->output_offset
8841 + stub_entry->stub_sec->output_section->vma);
8842
8843 if (plt_offset != (bfd_vma) -1)
8844 *unresolved_reloc_p = FALSE;
8845 }
8846
8847 /* If this call becomes a call to Arm, force BLX. */
8848 if (globals->use_blx && (r_type == R_ARM_THM_CALL))
8849 {
8850 if ((stub_entry
8851 && !arm_stub_is_thumb (stub_entry->stub_type))
8852 || branch_type != ST_BRANCH_TO_THUMB)
8853 lower_insn = (lower_insn & ~0x1000) | 0x0800;
8854 }
8855 }
8856 }
8857
8858 /* Handle calls via the PLT. */
8859 if (stub_type == arm_stub_none && plt_offset != (bfd_vma) -1)
8860 {
8861 value = (splt->output_section->vma
8862 + splt->output_offset
8863 + plt_offset);
8864
8865 if (globals->use_blx && r_type == R_ARM_THM_CALL)
8866 {
8867 /* If the Thumb BLX instruction is available, convert
8868 the BL to a BLX instruction to call the ARM-mode
8869 PLT entry. */
8870 lower_insn = (lower_insn & ~0x1000) | 0x0800;
8871 branch_type = ST_BRANCH_TO_ARM;
8872 }
8873 else
8874 {
8875 /* Target the Thumb stub before the ARM PLT entry. */
8876 value -= PLT_THUMB_STUB_SIZE;
8877 branch_type = ST_BRANCH_TO_THUMB;
8878 }
8879 *unresolved_reloc_p = FALSE;
8880 }
8881
8882 relocation = value + signed_addend;
8883
8884 relocation -= (input_section->output_section->vma
8885 + input_section->output_offset
8886 + rel->r_offset);
8887
8888 check = relocation >> howto->rightshift;
8889
8890 /* If this is a signed value, the rightshift just dropped
8891 leading 1 bits (assuming twos complement). */
8892 if ((bfd_signed_vma) relocation >= 0)
8893 signed_check = check;
8894 else
8895 signed_check = check | ~((bfd_vma) -1 >> howto->rightshift);
8896
8897 /* Calculate the permissible maximum and minimum values for
8898 this relocation according to whether we're relocating for
8899 Thumb-2 or not. */
8900 bitsize = howto->bitsize;
8901 if (!thumb2)
8902 bitsize -= 2;
8903 reloc_signed_max = (1 << (bitsize - 1)) - 1;
8904 reloc_signed_min = ~reloc_signed_max;
8905
8906 /* Assumes two's complement. */
8907 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
8908 overflow = TRUE;
8909
8910 if ((lower_insn & 0x5000) == 0x4000)
8911 /* For a BLX instruction, make sure that the relocation is rounded up
8912 to a word boundary. This follows the semantics of the instruction
8913 which specifies that bit 1 of the target address will come from bit
8914 1 of the base address. */
8915 relocation = (relocation + 2) & ~ 3;
8916
8917 /* Put RELOCATION back into the insn. Assumes two's complement.
8918 We use the Thumb-2 encoding, which is safe even if dealing with
8919 a Thumb-1 instruction by virtue of our overflow check above. */
8920 reloc_sign = (signed_check < 0) ? 1 : 0;
8921 upper_insn = (upper_insn & ~(bfd_vma) 0x7ff)
8922 | ((relocation >> 12) & 0x3ff)
8923 | (reloc_sign << 10);
8924 lower_insn = (lower_insn & ~(bfd_vma) 0x2fff)
8925 | (((!((relocation >> 23) & 1)) ^ reloc_sign) << 13)
8926 | (((!((relocation >> 22) & 1)) ^ reloc_sign) << 11)
8927 | ((relocation >> 1) & 0x7ff);
8928
8929 /* Put the relocated value back in the object file: */
8930 bfd_put_16 (input_bfd, upper_insn, hit_data);
8931 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
8932
8933 return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
8934 }
8935 break;
8936
8937 case R_ARM_THM_JUMP19:
8938 /* Thumb32 conditional branch instruction. */
8939 {
8940 bfd_vma relocation;
8941 bfd_boolean overflow = FALSE;
8942 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
8943 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
8944 bfd_signed_vma reloc_signed_max = 0xffffe;
8945 bfd_signed_vma reloc_signed_min = -0x100000;
8946 bfd_signed_vma signed_check;
8947
8948 /* Need to refetch the addend, reconstruct the top three bits,
8949 and squish the two 11 bit pieces together. */
8950 if (globals->use_rel)
8951 {
8952 bfd_vma S = (upper_insn & 0x0400) >> 10;
8953 bfd_vma upper = (upper_insn & 0x003f);
8954 bfd_vma J1 = (lower_insn & 0x2000) >> 13;
8955 bfd_vma J2 = (lower_insn & 0x0800) >> 11;
8956 bfd_vma lower = (lower_insn & 0x07ff);
8957
8958 upper |= J1 << 6;
8959 upper |= J2 << 7;
8960 upper |= (!S) << 8;
8961 upper -= 0x0100; /* Sign extend. */
8962
8963 addend = (upper << 12) | (lower << 1);
8964 signed_addend = addend;
8965 }
8966
8967 /* Handle calls via the PLT. */
8968 if (plt_offset != (bfd_vma) -1)
8969 {
8970 value = (splt->output_section->vma
8971 + splt->output_offset
8972 + plt_offset);
8973 /* Target the Thumb stub before the ARM PLT entry. */
8974 value -= PLT_THUMB_STUB_SIZE;
8975 *unresolved_reloc_p = FALSE;
8976 }
8977
8978 /* ??? Should handle interworking? GCC might someday try to
8979 use this for tail calls. */
8980
8981 relocation = value + signed_addend;
8982 relocation -= (input_section->output_section->vma
8983 + input_section->output_offset
8984 + rel->r_offset);
8985 signed_check = (bfd_signed_vma) relocation;
8986
8987 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
8988 overflow = TRUE;
8989
8990 /* Put RELOCATION back into the insn. */
8991 {
8992 bfd_vma S = (relocation & 0x00100000) >> 20;
8993 bfd_vma J2 = (relocation & 0x00080000) >> 19;
8994 bfd_vma J1 = (relocation & 0x00040000) >> 18;
8995 bfd_vma hi = (relocation & 0x0003f000) >> 12;
8996 bfd_vma lo = (relocation & 0x00000ffe) >> 1;
8997
8998 upper_insn = (upper_insn & 0xfbc0) | (S << 10) | hi;
8999 lower_insn = (lower_insn & 0xd000) | (J1 << 13) | (J2 << 11) | lo;
9000 }
9001
9002 /* Put the relocated value back in the object file: */
9003 bfd_put_16 (input_bfd, upper_insn, hit_data);
9004 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
9005
9006 return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
9007 }
9008
9009 case R_ARM_THM_JUMP11:
9010 case R_ARM_THM_JUMP8:
9011 case R_ARM_THM_JUMP6:
9012 /* Thumb B (branch) instruction. */
9013 {
9014 bfd_signed_vma relocation;
9015 bfd_signed_vma reloc_signed_max = (1 << (howto->bitsize - 1)) - 1;
9016 bfd_signed_vma reloc_signed_min = ~ reloc_signed_max;
9017 bfd_signed_vma signed_check;
9018
9019 /* CZB cannot jump backward. */
9020 if (r_type == R_ARM_THM_JUMP6)
9021 reloc_signed_min = 0;
9022
9023 if (globals->use_rel)
9024 {
9025 /* Need to refetch addend. */
9026 addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
9027 if (addend & ((howto->src_mask + 1) >> 1))
9028 {
9029 signed_addend = -1;
9030 signed_addend &= ~ howto->src_mask;
9031 signed_addend |= addend;
9032 }
9033 else
9034 signed_addend = addend;
9035 /* The value in the insn has been right shifted. We need to
9036 undo this, so that we can perform the address calculation
9037 in terms of bytes. */
9038 signed_addend <<= howto->rightshift;
9039 }
9040 relocation = value + signed_addend;
9041
9042 relocation -= (input_section->output_section->vma
9043 + input_section->output_offset
9044 + rel->r_offset);
9045
9046 relocation >>= howto->rightshift;
9047 signed_check = relocation;
9048
9049 if (r_type == R_ARM_THM_JUMP6)
9050 relocation = ((relocation & 0x0020) << 4) | ((relocation & 0x001f) << 3);
9051 else
9052 relocation &= howto->dst_mask;
9053 relocation |= (bfd_get_16 (input_bfd, hit_data) & (~ howto->dst_mask));
9054
9055 bfd_put_16 (input_bfd, relocation, hit_data);
9056
9057 /* Assumes two's complement. */
9058 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
9059 return bfd_reloc_overflow;
9060
9061 return bfd_reloc_ok;
9062 }
9063
9064 case R_ARM_ALU_PCREL7_0:
9065 case R_ARM_ALU_PCREL15_8:
9066 case R_ARM_ALU_PCREL23_15:
9067 {
9068 bfd_vma insn;
9069 bfd_vma relocation;
9070
9071 insn = bfd_get_32 (input_bfd, hit_data);
9072 if (globals->use_rel)
9073 {
9074 /* Extract the addend. */
9075 addend = (insn & 0xff) << ((insn & 0xf00) >> 7);
9076 signed_addend = addend;
9077 }
9078 relocation = value + signed_addend;
9079
9080 relocation -= (input_section->output_section->vma
9081 + input_section->output_offset
9082 + rel->r_offset);
9083 insn = (insn & ~0xfff)
9084 | ((howto->bitpos << 7) & 0xf00)
9085 | ((relocation >> howto->bitpos) & 0xff);
9086 bfd_put_32 (input_bfd, insn, hit_data);
9087 }
9088 return bfd_reloc_ok;
9089
9090 case R_ARM_GNU_VTINHERIT:
9091 case R_ARM_GNU_VTENTRY:
9092 return bfd_reloc_ok;
9093
9094 case R_ARM_GOTOFF32:
9095 /* Relocation is relative to the start of the
9096 global offset table. */
9097
9098 BFD_ASSERT (sgot != NULL);
9099 if (sgot == NULL)
9100 return bfd_reloc_notsupported;
9101
9102 /* If we are addressing a Thumb function, we need to adjust the
9103 address by one, so that attempts to call the function pointer will
9104 correctly interpret it as Thumb code. */
9105 if (branch_type == ST_BRANCH_TO_THUMB)
9106 value += 1;
9107
9108 /* Note that sgot->output_offset is not involved in this
9109 calculation. We always want the start of .got. If we
9110 define _GLOBAL_OFFSET_TABLE in a different way, as is
9111 permitted by the ABI, we might have to change this
9112 calculation. */
9113 value -= sgot->output_section->vma;
9114 return _bfd_final_link_relocate (howto, input_bfd, input_section,
9115 contents, rel->r_offset, value,
9116 rel->r_addend);
9117
9118 case R_ARM_GOTPC:
9119 /* Use global offset table as symbol value. */
9120 BFD_ASSERT (sgot != NULL);
9121
9122 if (sgot == NULL)
9123 return bfd_reloc_notsupported;
9124
9125 *unresolved_reloc_p = FALSE;
9126 value = sgot->output_section->vma;
9127 return _bfd_final_link_relocate (howto, input_bfd, input_section,
9128 contents, rel->r_offset, value,
9129 rel->r_addend);
9130
9131 case R_ARM_GOT32:
9132 case R_ARM_GOT_PREL:
9133 /* Relocation is to the entry for this symbol in the
9134 global offset table. */
9135 if (sgot == NULL)
9136 return bfd_reloc_notsupported;
9137
9138 if (dynreloc_st_type == STT_GNU_IFUNC
9139 && plt_offset != (bfd_vma) -1
9140 && (h == NULL || SYMBOL_REFERENCES_LOCAL (info, h)))
9141 {
9142 /* We have a relocation against a locally-binding STT_GNU_IFUNC
9143 symbol, and the relocation resolves directly to the runtime
9144 target rather than to the .iplt entry. This means that any
9145 .got entry would be the same value as the .igot.plt entry,
9146 so there's no point creating both. */
9147 sgot = globals->root.igotplt;
9148 value = sgot->output_offset + gotplt_offset;
9149 }
9150 else if (h != NULL)
9151 {
9152 bfd_vma off;
9153
9154 off = h->got.offset;
9155 BFD_ASSERT (off != (bfd_vma) -1);
9156 if ((off & 1) != 0)
9157 {
9158 /* We have already processed one GOT relocation against
9159 this symbol. */
9160 off &= ~1;
9161 if (globals->root.dynamic_sections_created
9162 && !SYMBOL_REFERENCES_LOCAL (info, h))
9163 *unresolved_reloc_p = FALSE;
9164 }
9165 else
9166 {
9167 Elf_Internal_Rela outrel;
9168
9169 if (!SYMBOL_REFERENCES_LOCAL (info, h))
9170 {
9171 /* If the symbol doesn't resolve locally in a static
9172 object, we have an undefined reference. If the
9173 symbol doesn't resolve locally in a dynamic object,
9174 it should be resolved by the dynamic linker. */
9175 if (globals->root.dynamic_sections_created)
9176 {
9177 outrel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_GLOB_DAT);
9178 *unresolved_reloc_p = FALSE;
9179 }
9180 else
9181 outrel.r_info = 0;
9182 outrel.r_addend = 0;
9183 }
9184 else
9185 {
9186 if (dynreloc_st_type == STT_GNU_IFUNC)
9187 outrel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
9188 else if (info->shared)
9189 outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
9190 else
9191 outrel.r_info = 0;
9192 outrel.r_addend = dynreloc_value;
9193 }
9194
9195 /* The GOT entry is initialized to zero by default.
9196 See if we should install a different value. */
9197 if (outrel.r_addend != 0
9198 && (outrel.r_info == 0 || globals->use_rel))
9199 {
9200 bfd_put_32 (output_bfd, outrel.r_addend,
9201 sgot->contents + off);
9202 outrel.r_addend = 0;
9203 }
9204
9205 if (outrel.r_info != 0)
9206 {
9207 outrel.r_offset = (sgot->output_section->vma
9208 + sgot->output_offset
9209 + off);
9210 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
9211 }
9212 h->got.offset |= 1;
9213 }
9214 value = sgot->output_offset + off;
9215 }
9216 else
9217 {
9218 bfd_vma off;
9219
9220 BFD_ASSERT (local_got_offsets != NULL &&
9221 local_got_offsets[r_symndx] != (bfd_vma) -1);
9222
9223 off = local_got_offsets[r_symndx];
9224
9225 /* The offset must always be a multiple of 4. We use the
9226 least significant bit to record whether we have already
9227 generated the necessary reloc. */
9228 if ((off & 1) != 0)
9229 off &= ~1;
9230 else
9231 {
9232 if (globals->use_rel)
9233 bfd_put_32 (output_bfd, dynreloc_value, sgot->contents + off);
9234
9235 if (info->shared || dynreloc_st_type == STT_GNU_IFUNC)
9236 {
9237 Elf_Internal_Rela outrel;
9238
9239 outrel.r_addend = addend + dynreloc_value;
9240 outrel.r_offset = (sgot->output_section->vma
9241 + sgot->output_offset
9242 + off);
9243 if (dynreloc_st_type == STT_GNU_IFUNC)
9244 outrel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
9245 else
9246 outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
9247 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
9248 }
9249
9250 local_got_offsets[r_symndx] |= 1;
9251 }
9252
9253 value = sgot->output_offset + off;
9254 }
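/* For R_ARM_GOT_PREL we want the absolute address of the GOT entry (the
   pc-relative HOWTO then subtracts the place); for R_ARM_GOT32 the value
   is left relative to the start of the GOT's output section.  */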
9255 if (r_type != R_ARM_GOT32)
9256 value += sgot->output_section->vma;
9257
9258 return _bfd_final_link_relocate (howto, input_bfd, input_section,
9259 contents, rel->r_offset, value,
9260 rel->r_addend);
9261
9262 case R_ARM_TLS_LDO32:
9263 value = value - dtpoff_base (info);
9264
9265 return _bfd_final_link_relocate (howto, input_bfd, input_section,
9266 contents, rel->r_offset, value,
9267 rel->r_addend);
9268
9269 case R_ARM_TLS_LDM32:
9270 {
9271 bfd_vma off;
9272
9273 if (sgot == NULL)
9274 abort ();
9275
9276 off = globals->tls_ldm_got.offset;
9277
9278 if ((off & 1) != 0)
9279 off &= ~1;
9280 else
9281 {
9282 /* If we don't know the module number, create a relocation
9283 for it. */
9284 if (info->shared)
9285 {
9286 Elf_Internal_Rela outrel;
9287
9288 if (srelgot == NULL)
9289 abort ();
9290
9291 outrel.r_addend = 0;
9292 outrel.r_offset = (sgot->output_section->vma
9293 + sgot->output_offset + off);
9294 outrel.r_info = ELF32_R_INFO (0, R_ARM_TLS_DTPMOD32);
9295
9296 if (globals->use_rel)
9297 bfd_put_32 (output_bfd, outrel.r_addend,
9298 sgot->contents + off);
9299
9300 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
9301 }
9302 else
9303 bfd_put_32 (output_bfd, 1, sgot->contents + off);
9304
9305 globals->tls_ldm_got.offset |= 1;
9306 }
9307
9308 value = sgot->output_section->vma + sgot->output_offset + off
9309 - (input_section->output_section->vma + input_section->output_offset + rel->r_offset);
9310
9311 return _bfd_final_link_relocate (howto, input_bfd, input_section,
9312 contents, rel->r_offset, value,
9313 rel->r_addend);
9314 }
9315
9316 case R_ARM_TLS_CALL:
9317 case R_ARM_THM_TLS_CALL:
9318 case R_ARM_TLS_GD32:
9319 case R_ARM_TLS_IE32:
9320 case R_ARM_TLS_GOTDESC:
9321 case R_ARM_TLS_DESCSEQ:
9322 case R_ARM_THM_TLS_DESCSEQ:
9323 {
9324 bfd_vma off, offplt;
9325 int indx = 0;
9326 char tls_type;
9327
9328 BFD_ASSERT (sgot != NULL);
9329
9330 if (h != NULL)
9331 {
9332 bfd_boolean dyn;
9333 dyn = globals->root.dynamic_sections_created;
9334 if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
9335 && (!info->shared
9336 || !SYMBOL_REFERENCES_LOCAL (info, h)))
9337 {
9338 *unresolved_reloc_p = FALSE;
9339 indx = h->dynindx;
9340 }
9341 off = h->got.offset;
9342 offplt = elf32_arm_hash_entry (h)->tlsdesc_got;
9343 tls_type = ((struct elf32_arm_link_hash_entry *) h)->tls_type;
9344 }
9345 else
9346 {
9347 BFD_ASSERT (local_got_offsets != NULL);
9348 off = local_got_offsets[r_symndx];
9349 offplt = local_tlsdesc_gotents[r_symndx];
9350 tls_type = elf32_arm_local_got_tls_type (input_bfd)[r_symndx];
9351 }
9352
9353 /* Linker relaxation happens from one of the
9354 R_ARM_{GOTDESC,CALL,DESCSEQ} relocations to IE or LE. */
9355 if (ELF32_R_TYPE(rel->r_info) != r_type)
9356 tls_type = GOT_TLS_IE;
9357
9358 BFD_ASSERT (tls_type != GOT_UNKNOWN);
9359
9360 if ((off & 1) != 0)
9361 off &= ~1;
9362 else
9363 {
9364 bfd_boolean need_relocs = FALSE;
9365 Elf_Internal_Rela outrel;
9366 int cur_off = off;
9367
9368 /* The GOT entries have not been initialized yet. Do it
9369 now, and emit any relocations. If both an IE GOT and a
9370 GD GOT are necessary, we emit the GD first. */
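/* Layout note: a GD entry occupies two GOT words (the module ID followed
   by the DTP-relative offset), an IE entry occupies a single word (the
   TP-relative offset), and a GDESC entry lives in .got.plt instead.  */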
9371
9372 if ((info->shared || indx != 0)
9373 && (h == NULL
9374 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
9375 || h->root.type != bfd_link_hash_undefweak))
9376 {
9377 need_relocs = TRUE;
9378 BFD_ASSERT (srelgot != NULL);
9379 }
9380
9381 if (tls_type & GOT_TLS_GDESC)
9382 {
9383 bfd_byte *loc;
9384
9385 /* We should have relaxed, unless this is an undefined
9386 weak symbol. */
9387 BFD_ASSERT ((h && (h->root.type == bfd_link_hash_undefweak))
9388 || info->shared);
9389 BFD_ASSERT (globals->sgotplt_jump_table_size + offplt + 8
9390 <= globals->root.sgotplt->size);
9391
9392 outrel.r_addend = 0;
9393 outrel.r_offset = (globals->root.sgotplt->output_section->vma
9394 + globals->root.sgotplt->output_offset
9395 + offplt
9396 + globals->sgotplt_jump_table_size);
9397
9398 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_DESC);
9399 sreloc = globals->root.srelplt;
9400 loc = sreloc->contents;
9401 loc += globals->next_tls_desc_index++ * RELOC_SIZE (globals);
9402 BFD_ASSERT (loc + RELOC_SIZE (globals)
9403 <= sreloc->contents + sreloc->size);
9404
9405 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
9406
9407 /* For globals, the first word in the relocation gets
9408 the relocation index and the top bit set, or zero,
9409 if we're binding now. For locals, it gets the
9410 symbol's offset in the tls section. */
9411 bfd_put_32 (output_bfd,
9412 !h ? value - elf_hash_table (info)->tls_sec->vma
9413 : info->flags & DF_BIND_NOW ? 0
9414 : 0x80000000 | ELF32_R_SYM (outrel.r_info),
9415 globals->root.sgotplt->contents + offplt
9416 + globals->sgotplt_jump_table_size);
9417
9418 /* Second word in the relocation is always zero. */
9419 bfd_put_32 (output_bfd, 0,
9420 globals->root.sgotplt->contents + offplt
9421 + globals->sgotplt_jump_table_size + 4);
9422 }
9423 if (tls_type & GOT_TLS_GD)
9424 {
9425 if (need_relocs)
9426 {
9427 outrel.r_addend = 0;
9428 outrel.r_offset = (sgot->output_section->vma
9429 + sgot->output_offset
9430 + cur_off);
9431 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_DTPMOD32);
9432
9433 if (globals->use_rel)
9434 bfd_put_32 (output_bfd, outrel.r_addend,
9435 sgot->contents + cur_off);
9436
9437 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
9438
9439 if (indx == 0)
9440 bfd_put_32 (output_bfd, value - dtpoff_base (info),
9441 sgot->contents + cur_off + 4);
9442 else
9443 {
9444 outrel.r_addend = 0;
9445 outrel.r_info = ELF32_R_INFO (indx,
9446 R_ARM_TLS_DTPOFF32);
9447 outrel.r_offset += 4;
9448
9449 if (globals->use_rel)
9450 bfd_put_32 (output_bfd, outrel.r_addend,
9451 sgot->contents + cur_off + 4);
9452
9453 elf32_arm_add_dynreloc (output_bfd, info,
9454 srelgot, &outrel);
9455 }
9456 }
9457 else
9458 {
9459 /* If we are not emitting relocations for a
9460 general dynamic reference, then we must be in a
9461 static link or an executable link with the
9462 symbol binding locally. Mark it as belonging
9463 to module 1, the executable. */
9464 bfd_put_32 (output_bfd, 1,
9465 sgot->contents + cur_off);
9466 bfd_put_32 (output_bfd, value - dtpoff_base (info),
9467 sgot->contents + cur_off + 4);
9468 }
9469
9470 cur_off += 8;
9471 }
9472
9473 if (tls_type & GOT_TLS_IE)
9474 {
9475 if (need_relocs)
9476 {
9477 if (indx == 0)
9478 outrel.r_addend = value - dtpoff_base (info);
9479 else
9480 outrel.r_addend = 0;
9481 outrel.r_offset = (sgot->output_section->vma
9482 + sgot->output_offset
9483 + cur_off);
9484 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_TPOFF32);
9485
9486 if (globals->use_rel)
9487 bfd_put_32 (output_bfd, outrel.r_addend,
9488 sgot->contents + cur_off);
9489
9490 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
9491 }
9492 else
9493 bfd_put_32 (output_bfd, tpoff (info, value),
9494 sgot->contents + cur_off);
9495 cur_off += 4;
9496 }
9497
9498 if (h != NULL)
9499 h->got.offset |= 1;
9500 else
9501 local_got_offsets[r_symndx] |= 1;
9502 }
9503
9504 if ((tls_type & GOT_TLS_GD) && r_type != R_ARM_TLS_GD32)
9505 off += 8;
9506 else if (tls_type & GOT_TLS_GDESC)
9507 off = offplt;
9508
9509 if (ELF32_R_TYPE(rel->r_info) == R_ARM_TLS_CALL
9510 || ELF32_R_TYPE(rel->r_info) == R_ARM_THM_TLS_CALL)
9511 {
9512 bfd_signed_vma offset;
9513 /* TLS stubs are arm mode. The original symbol is a
9514 data object, so branch_type is bogus. */
9515 branch_type = ST_BRANCH_TO_ARM;
9516 enum elf32_arm_stub_type stub_type
9517 = arm_type_of_stub (info, input_section, rel,
9518 st_type, &branch_type,
9519 (struct elf32_arm_link_hash_entry *)h,
9520 globals->tls_trampoline, globals->root.splt,
9521 input_bfd, sym_name);
9522
9523 if (stub_type != arm_stub_none)
9524 {
9525 struct elf32_arm_stub_hash_entry *stub_entry
9526 = elf32_arm_get_stub_entry
9527 (input_section, globals->root.splt, 0, rel,
9528 globals, stub_type);
9529 offset = (stub_entry->stub_offset
9530 + stub_entry->stub_sec->output_offset
9531 + stub_entry->stub_sec->output_section->vma);
9532 }
9533 else
9534 offset = (globals->root.splt->output_section->vma
9535 + globals->root.splt->output_offset
9536 + globals->tls_trampoline);
9537
9538 if (ELF32_R_TYPE(rel->r_info) == R_ARM_TLS_CALL)
9539 {
9540 unsigned long inst;
9541
9542 offset -= (input_section->output_section->vma
9543 + input_section->output_offset
9544 + rel->r_offset + 8);
9545
9546 inst = offset >> 2;
9547 inst &= 0x00ffffff;
9548 value = inst | (globals->use_blx ? 0xfa000000 : 0xeb000000);
9549 }
9550 else
9551 {
9552 /* Thumb blx encodes the offset in a complicated
9553 fashion. */
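/* The Thumb-2 BL/BLX immediate is split across the two halfwords:
   S and imm10 sit in the first, J1, J2 and imm11 in the second,
   with I1 = NOT(J1 XOR S) and I2 = NOT(J2 XOR S) supplying the
   top bits of the offset.  */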
9554 unsigned upper_insn, lower_insn;
9555 unsigned neg;
9556
9557 offset -= (input_section->output_section->vma
9558 + input_section->output_offset
9559 + rel->r_offset + 4);
9560
9561 if (stub_type != arm_stub_none
9562 && arm_stub_is_thumb (stub_type))
9563 {
9564 lower_insn = 0xd000;
9565 }
9566 else
9567 {
9568 lower_insn = 0xc000;
9569 /* Round up the offset to a word boundary.  */
9570 offset = (offset + 2) & ~2;
9571 }
9572
9573 neg = offset < 0;
9574 upper_insn = (0xf000
9575 | ((offset >> 12) & 0x3ff)
9576 | (neg << 10));
9577 lower_insn |= (((!((offset >> 23) & 1)) ^ neg) << 13)
9578 | (((!((offset >> 22) & 1)) ^ neg) << 11)
9579 | ((offset >> 1) & 0x7ff);
9580 bfd_put_16 (input_bfd, upper_insn, hit_data);
9581 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
9582 return bfd_reloc_ok;
9583 }
9584 }
9585 /* These relocations need special care: besides the fact that
9586 they point somewhere in .gotplt, the addend must be
9587 adjusted according to the type of instruction being
9588 relocated.  */
9589 else if ((r_type == R_ARM_TLS_GOTDESC) && (tls_type & GOT_TLS_GDESC))
9590 {
9591 unsigned long data, insn;
9592 unsigned thumb;
9593
9594 data = bfd_get_32 (input_bfd, hit_data);
9595 thumb = data & 1;
9596 data &= ~1u;
9597
9598 if (thumb)
9599 {
9600 insn = bfd_get_16 (input_bfd, contents + rel->r_offset - data);
9601 if ((insn & 0xf000) == 0xf000 || (insn & 0xf800) == 0xe800)
9602 insn = (insn << 16)
9603 | bfd_get_16 (input_bfd,
9604 contents + rel->r_offset - data + 2);
9605 if ((insn & 0xf800c000) == 0xf000c000)
9606 /* bl/blx */
9607 value = -6;
9608 else if ((insn & 0xffffff00) == 0x4400)
9609 /* add */
9610 value = -5;
9611 else
9612 {
9613 (*_bfd_error_handler)
9614 (_("%B(%A+0x%lx):unexpected Thumb instruction '0x%x' referenced by TLS_GOTDESC"),
9615 input_bfd, input_section,
9616 (unsigned long)rel->r_offset, insn);
9617 return bfd_reloc_notsupported;
9618 }
9619 }
9620 else
9621 {
9622 insn = bfd_get_32 (input_bfd, contents + rel->r_offset - data);
9623
9624 switch (insn >> 24)
9625 {
9626 case 0xeb: /* bl */
9627 case 0xfa: /* blx */
9628 value = -4;
9629 break;
9630
9631 case 0xe0: /* add */
9632 value = -8;
9633 break;
9634
9635 default:
9636 (*_bfd_error_handler)
9637 (_("%B(%A+0x%lx):unexpected ARM instruction '0x%x' referenced by TLS_GOTDESC"),
9638 input_bfd, input_section,
9639 (unsigned long)rel->r_offset, insn);
9640 return bfd_reloc_notsupported;
9641 }
9642 }
9643
9644 value += ((globals->root.sgotplt->output_section->vma
9645 + globals->root.sgotplt->output_offset + off)
9646 - (input_section->output_section->vma
9647 + input_section->output_offset
9648 + rel->r_offset)
9649 + globals->sgotplt_jump_table_size);
9650 }
9651 else
9652 value = ((globals->root.sgot->output_section->vma
9653 + globals->root.sgot->output_offset + off)
9654 - (input_section->output_section->vma
9655 + input_section->output_offset + rel->r_offset));
9656
9657 return _bfd_final_link_relocate (howto, input_bfd, input_section,
9658 contents, rel->r_offset, value,
9659 rel->r_addend);
9660 }
9661
9662 case R_ARM_TLS_LE32:
9663 if (info->shared && !info->pie)
9664 {
9665 (*_bfd_error_handler)
9666 (_("%B(%A+0x%lx): R_ARM_TLS_LE32 relocation not permitted in shared object"),
9667 input_bfd, input_section,
9668 (long) rel->r_offset, howto->name);
9669 return bfd_reloc_notsupported;
9670 }
9671 else
9672 value = tpoff (info, value);
9673
9674 return _bfd_final_link_relocate (howto, input_bfd, input_section,
9675 contents, rel->r_offset, value,
9676 rel->r_addend);
9677
9678 case R_ARM_V4BX:
9679 if (globals->fix_v4bx)
9680 {
9681 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
9682
9683 /* Ensure that we have a BX instruction. */
9684 BFD_ASSERT ((insn & 0x0ffffff0) == 0x012fff10);
9685
9686 if (globals->fix_v4bx == 2 && (insn & 0xf) != 0xf)
9687 {
9688 /* Branch to veneer. */
9689 bfd_vma glue_addr;
9690 glue_addr = elf32_arm_bx_glue (info, insn & 0xf);
9691 glue_addr -= input_section->output_section->vma
9692 + input_section->output_offset
9693 + rel->r_offset + 8;
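/* Rewrite the BX as a B<cond> to the veneer, preserving the original
   condition code in the top four bits.  */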
9694 insn = (insn & 0xf0000000) | 0x0a000000
9695 | ((glue_addr >> 2) & 0x00ffffff);
9696 }
9697 else
9698 {
9699 /* Preserve Rm (lowest four bits) and the condition code
9700 (highest four bits). Other bits encode MOV PC,Rm. */
9701 insn = (insn & 0xf000000f) | 0x01a0f000;
9702 }
9703
9704 bfd_put_32 (input_bfd, insn, hit_data);
9705 }
9706 return bfd_reloc_ok;
9707
9708 case R_ARM_MOVW_ABS_NC:
9709 case R_ARM_MOVT_ABS:
9710 case R_ARM_MOVW_PREL_NC:
9711 case R_ARM_MOVT_PREL:
9712 /* Until we properly support segment-base-relative addressing then
9713 we assume the segment base to be zero, as for the group relocations.
9714 Thus R_ARM_MOVW_BREL_NC has the same semantics as R_ARM_MOVW_ABS_NC
9715 and R_ARM_MOVT_BREL has the same semantics as R_ARM_MOVT_ABS. */
9716 case R_ARM_MOVW_BREL_NC:
9717 case R_ARM_MOVW_BREL:
9718 case R_ARM_MOVT_BREL:
9719 {
9720 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
9721
9722 if (globals->use_rel)
9723 {
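/* Reassemble the 16-bit immediate from the instruction's imm4:imm12
   fields, then sign-extend it.  */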
9724 addend = ((insn >> 4) & 0xf000) | (insn & 0xfff);
9725 signed_addend = (addend ^ 0x8000) - 0x8000;
9726 }
9727
9728 value += signed_addend;
9729
9730 if (r_type == R_ARM_MOVW_PREL_NC || r_type == R_ARM_MOVT_PREL)
9731 value -= (input_section->output_section->vma
9732 + input_section->output_offset + rel->r_offset);
9733
9734 if (r_type == R_ARM_MOVW_BREL && value >= 0x10000)
9735 return bfd_reloc_overflow;
9736
9737 if (branch_type == ST_BRANCH_TO_THUMB)
9738 value |= 1;
9739
9740 if (r_type == R_ARM_MOVT_ABS || r_type == R_ARM_MOVT_PREL
9741 || r_type == R_ARM_MOVT_BREL)
9742 value >>= 16;
9743
9744 insn &= 0xfff0f000;
9745 insn |= value & 0xfff;
9746 insn |= (value & 0xf000) << 4;
9747 bfd_put_32 (input_bfd, insn, hit_data);
9748 }
9749 return bfd_reloc_ok;
9750
9751 case R_ARM_THM_MOVW_ABS_NC:
9752 case R_ARM_THM_MOVT_ABS:
9753 case R_ARM_THM_MOVW_PREL_NC:
9754 case R_ARM_THM_MOVT_PREL:
9755 /* Until we properly support segment-base-relative addressing then
9756 we assume the segment base to be zero, as for the above relocations.
9757 Thus R_ARM_THM_MOVW_BREL_NC has the same semantics as
9758 R_ARM_THM_MOVW_ABS_NC and R_ARM_THM_MOVT_BREL has the same semantics
9759 as R_ARM_THM_MOVT_ABS. */
9760 case R_ARM_THM_MOVW_BREL_NC:
9761 case R_ARM_THM_MOVW_BREL:
9762 case R_ARM_THM_MOVT_BREL:
9763 {
9764 bfd_vma insn;
9765
9766 insn = bfd_get_16 (input_bfd, hit_data) << 16;
9767 insn |= bfd_get_16 (input_bfd, hit_data + 2);
9768
9769 if (globals->use_rel)
9770 {
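/* Reassemble the 16-bit immediate from the Thumb encoding's
   imm4:i:imm3:imm8 fields, then sign-extend it.  */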
9771 addend = ((insn >> 4) & 0xf000)
9772 | ((insn >> 15) & 0x0800)
9773 | ((insn >> 4) & 0x0700)
9774 | (insn & 0x00ff);
9775 signed_addend = (addend ^ 0x8000) - 0x8000;
9776 }
9777
9778 value += signed_addend;
9779
9780 if (r_type == R_ARM_THM_MOVW_PREL_NC || r_type == R_ARM_THM_MOVT_PREL)
9781 value -= (input_section->output_section->vma
9782 + input_section->output_offset + rel->r_offset);
9783
9784 if (r_type == R_ARM_THM_MOVW_BREL && value >= 0x10000)
9785 return bfd_reloc_overflow;
9786
9787 if (branch_type == ST_BRANCH_TO_THUMB)
9788 value |= 1;
9789
9790 if (r_type == R_ARM_THM_MOVT_ABS || r_type == R_ARM_THM_MOVT_PREL
9791 || r_type == R_ARM_THM_MOVT_BREL)
9792 value >>= 16;
9793
9794 insn &= 0xfbf08f00;
9795 insn |= (value & 0xf000) << 4;
9796 insn |= (value & 0x0800) << 15;
9797 insn |= (value & 0x0700) << 4;
9798 insn |= (value & 0x00ff);
9799
9800 bfd_put_16 (input_bfd, insn >> 16, hit_data);
9801 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
9802 }
9803 return bfd_reloc_ok;
9804
9805 case R_ARM_ALU_PC_G0_NC:
9806 case R_ARM_ALU_PC_G1_NC:
9807 case R_ARM_ALU_PC_G0:
9808 case R_ARM_ALU_PC_G1:
9809 case R_ARM_ALU_PC_G2:
9810 case R_ARM_ALU_SB_G0_NC:
9811 case R_ARM_ALU_SB_G1_NC:
9812 case R_ARM_ALU_SB_G0:
9813 case R_ARM_ALU_SB_G1:
9814 case R_ARM_ALU_SB_G2:
9815 {
9816 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
9817 bfd_vma pc = input_section->output_section->vma
9818 + input_section->output_offset + rel->r_offset;
9819 /* sb should be the origin of the *segment* containing the symbol.
9820 It is not clear how to obtain this OS-dependent value, so we
9821 make an arbitrary choice of zero. */
9822 bfd_vma sb = 0;
9823 bfd_vma residual;
9824 bfd_vma g_n;
9825 bfd_signed_vma signed_value;
9826 int group = 0;
9827
9828 /* Determine which group of bits to select. */
9829 switch (r_type)
9830 {
9831 case R_ARM_ALU_PC_G0_NC:
9832 case R_ARM_ALU_PC_G0:
9833 case R_ARM_ALU_SB_G0_NC:
9834 case R_ARM_ALU_SB_G0:
9835 group = 0;
9836 break;
9837
9838 case R_ARM_ALU_PC_G1_NC:
9839 case R_ARM_ALU_PC_G1:
9840 case R_ARM_ALU_SB_G1_NC:
9841 case R_ARM_ALU_SB_G1:
9842 group = 1;
9843 break;
9844
9845 case R_ARM_ALU_PC_G2:
9846 case R_ARM_ALU_SB_G2:
9847 group = 2;
9848 break;
9849
9850 default:
9851 abort ();
9852 }
9853
9854 /* If REL, extract the addend from the insn. If RELA, it will
9855 have already been fetched for us. */
9856 if (globals->use_rel)
9857 {
9858 int negative;
9859 bfd_vma constant = insn & 0xff;
9860 bfd_vma rotation = (insn & 0xf00) >> 8;
9861
9862 if (rotation == 0)
9863 signed_addend = constant;
9864 else
9865 {
9866 /* Compensate for the fact that in the instruction, the
9867 rotation is stored in multiples of 2 bits. */
9868 rotation *= 2;
9869
9870 /* Rotate "constant" right by "rotation" bits. */
9871 signed_addend = (constant >> rotation) |
9872 (constant << (8 * sizeof (bfd_vma) - rotation));
9873 }
9874
9875 /* Determine if the instruction is an ADD or a SUB.
9876 (For REL, this determines the sign of the addend.) */
9877 negative = identify_add_or_sub (insn);
9878 if (negative == 0)
9879 {
9880 (*_bfd_error_handler)
9881 (_("%B(%A+0x%lx): Only ADD or SUB instructions are allowed for ALU group relocations"),
9882 input_bfd, input_section,
9883 (long) rel->r_offset, howto->name);
9884 return bfd_reloc_overflow;
9885 }
9886
9887 signed_addend *= negative;
9888 }
9889
9890 /* Compute the value (X) to go in the place. */
9891 if (r_type == R_ARM_ALU_PC_G0_NC
9892 || r_type == R_ARM_ALU_PC_G1_NC
9893 || r_type == R_ARM_ALU_PC_G0
9894 || r_type == R_ARM_ALU_PC_G1
9895 || r_type == R_ARM_ALU_PC_G2)
9896 /* PC relative. */
9897 signed_value = value - pc + signed_addend;
9898 else
9899 /* Section base relative. */
9900 signed_value = value - sb + signed_addend;
9901
9902 /* If the target symbol is a Thumb function, then set the
9903 Thumb bit in the address. */
9904 if (branch_type == ST_BRANCH_TO_THUMB)
9905 signed_value |= 1;
9906
9907 /* Calculate the value of the relevant G_n, in encoded
9908 constant-with-rotation format. */
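/* A group relocation splits the offset into chunks, each encodable as an
   ARM ALU immediate (an 8-bit value rotated right by an even amount);
   G_n is the n'th such chunk and RESIDUAL is what remains once chunks
   0..n have been removed.  */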
9909 g_n = calculate_group_reloc_mask (abs (signed_value), group,
9910 &residual);
9911
9912 /* Check for overflow if required. */
9913 if ((r_type == R_ARM_ALU_PC_G0
9914 || r_type == R_ARM_ALU_PC_G1
9915 || r_type == R_ARM_ALU_PC_G2
9916 || r_type == R_ARM_ALU_SB_G0
9917 || r_type == R_ARM_ALU_SB_G1
9918 || r_type == R_ARM_ALU_SB_G2) && residual != 0)
9919 {
9920 (*_bfd_error_handler)
9921 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
9922 input_bfd, input_section,
9923 (long) rel->r_offset, abs (signed_value), howto->name);
9924 return bfd_reloc_overflow;
9925 }
9926
9927 /* Mask out the value and the ADD/SUB part of the opcode; take care
9928 not to destroy the S bit. */
9929 insn &= 0xff1ff000;
9930
9931 /* Set the opcode according to whether the value to go in the
9932 place is negative. */
9933 if (signed_value < 0)
9934 insn |= 1 << 22;
9935 else
9936 insn |= 1 << 23;
9937
9938 /* Encode the offset. */
9939 insn |= g_n;
9940
9941 bfd_put_32 (input_bfd, insn, hit_data);
9942 }
9943 return bfd_reloc_ok;
9944
9945 case R_ARM_LDR_PC_G0:
9946 case R_ARM_LDR_PC_G1:
9947 case R_ARM_LDR_PC_G2:
9948 case R_ARM_LDR_SB_G0:
9949 case R_ARM_LDR_SB_G1:
9950 case R_ARM_LDR_SB_G2:
9951 {
9952 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
9953 bfd_vma pc = input_section->output_section->vma
9954 + input_section->output_offset + rel->r_offset;
9955 bfd_vma sb = 0; /* See note above. */
9956 bfd_vma residual;
9957 bfd_signed_vma signed_value;
9958 int group = 0;
9959
9960 /* Determine which groups of bits to calculate. */
9961 switch (r_type)
9962 {
9963 case R_ARM_LDR_PC_G0:
9964 case R_ARM_LDR_SB_G0:
9965 group = 0;
9966 break;
9967
9968 case R_ARM_LDR_PC_G1:
9969 case R_ARM_LDR_SB_G1:
9970 group = 1;
9971 break;
9972
9973 case R_ARM_LDR_PC_G2:
9974 case R_ARM_LDR_SB_G2:
9975 group = 2;
9976 break;
9977
9978 default:
9979 abort ();
9980 }
9981
9982 /* If REL, extract the addend from the insn. If RELA, it will
9983 have already been fetched for us. */
9984 if (globals->use_rel)
9985 {
9986 int negative = (insn & (1 << 23)) ? 1 : -1;
9987 signed_addend = negative * (insn & 0xfff);
9988 }
9989
9990 /* Compute the value (X) to go in the place. */
9991 if (r_type == R_ARM_LDR_PC_G0
9992 || r_type == R_ARM_LDR_PC_G1
9993 || r_type == R_ARM_LDR_PC_G2)
9994 /* PC relative. */
9995 signed_value = value - pc + signed_addend;
9996 else
9997 /* Section base relative. */
9998 signed_value = value - sb + signed_addend;
9999
10000 /* Calculate the value of the relevant G_{n-1} to obtain
10001 the residual at that stage. */
10002 calculate_group_reloc_mask (abs (signed_value), group - 1, &residual);
10003
10004 /* Check for overflow. */
10005 if (residual >= 0x1000)
10006 {
10007 (*_bfd_error_handler)
10008 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
10009 input_bfd, input_section,
10010 (long) rel->r_offset, abs (signed_value), howto->name);
10011 return bfd_reloc_overflow;
10012 }
10013
10014 /* Mask out the value and U bit. */
10015 insn &= 0xff7ff000;
10016
10017 /* Set the U bit if the value to go in the place is non-negative. */
10018 if (signed_value >= 0)
10019 insn |= 1 << 23;
10020
10021 /* Encode the offset. */
10022 insn |= residual;
10023
10024 bfd_put_32 (input_bfd, insn, hit_data);
10025 }
10026 return bfd_reloc_ok;
10027
10028 case R_ARM_LDRS_PC_G0:
10029 case R_ARM_LDRS_PC_G1:
10030 case R_ARM_LDRS_PC_G2:
10031 case R_ARM_LDRS_SB_G0:
10032 case R_ARM_LDRS_SB_G1:
10033 case R_ARM_LDRS_SB_G2:
10034 {
10035 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
10036 bfd_vma pc = input_section->output_section->vma
10037 + input_section->output_offset + rel->r_offset;
10038 bfd_vma sb = 0; /* See note above. */
10039 bfd_vma residual;
10040 bfd_signed_vma signed_value;
10041 int group = 0;
10042
10043 /* Determine which groups of bits to calculate. */
10044 switch (r_type)
10045 {
10046 case R_ARM_LDRS_PC_G0:
10047 case R_ARM_LDRS_SB_G0:
10048 group = 0;
10049 break;
10050
10051 case R_ARM_LDRS_PC_G1:
10052 case R_ARM_LDRS_SB_G1:
10053 group = 1;
10054 break;
10055
10056 case R_ARM_LDRS_PC_G2:
10057 case R_ARM_LDRS_SB_G2:
10058 group = 2;
10059 break;
10060
10061 default:
10062 abort ();
10063 }
10064
10065 /* If REL, extract the addend from the insn. If RELA, it will
10066 have already been fetched for us. */
10067 if (globals->use_rel)
10068 {
10069 int negative = (insn & (1 << 23)) ? 1 : -1;
10070 signed_addend = negative * (((insn & 0xf00) >> 4) + (insn & 0xf));
10071 }
10072
10073 /* Compute the value (X) to go in the place. */
10074 if (r_type == R_ARM_LDRS_PC_G0
10075 || r_type == R_ARM_LDRS_PC_G1
10076 || r_type == R_ARM_LDRS_PC_G2)
10077 /* PC relative. */
10078 signed_value = value - pc + signed_addend;
10079 else
10080 /* Section base relative. */
10081 signed_value = value - sb + signed_addend;
10082
10083 /* Calculate the value of the relevant G_{n-1} to obtain
10084 the residual at that stage. */
10085 calculate_group_reloc_mask (abs (signed_value), group - 1, &residual);
10086
10087 /* Check for overflow. */
10088 if (residual >= 0x100)
10089 {
10090 (*_bfd_error_handler)
10091 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
10092 input_bfd, input_section,
10093 (long) rel->r_offset, abs (signed_value), howto->name);
10094 return bfd_reloc_overflow;
10095 }
10096
10097 /* Mask out the value and U bit. */
10098 insn &= 0xff7ff0f0;
10099
10100 /* Set the U bit if the value to go in the place is non-negative. */
10101 if (signed_value >= 0)
10102 insn |= 1 << 23;
10103
10104 /* Encode the offset. */
10105 insn |= ((residual & 0xf0) << 4) | (residual & 0xf);
10106
10107 bfd_put_32 (input_bfd, insn, hit_data);
10108 }
10109 return bfd_reloc_ok;
10110
10111 case R_ARM_LDC_PC_G0:
10112 case R_ARM_LDC_PC_G1:
10113 case R_ARM_LDC_PC_G2:
10114 case R_ARM_LDC_SB_G0:
10115 case R_ARM_LDC_SB_G1:
10116 case R_ARM_LDC_SB_G2:
10117 {
10118 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
10119 bfd_vma pc = input_section->output_section->vma
10120 + input_section->output_offset + rel->r_offset;
10121 bfd_vma sb = 0; /* See note above. */
10122 bfd_vma residual;
10123 bfd_signed_vma signed_value;
10124 int group = 0;
10125
10126 /* Determine which groups of bits to calculate. */
10127 switch (r_type)
10128 {
10129 case R_ARM_LDC_PC_G0:
10130 case R_ARM_LDC_SB_G0:
10131 group = 0;
10132 break;
10133
10134 case R_ARM_LDC_PC_G1:
10135 case R_ARM_LDC_SB_G1:
10136 group = 1;
10137 break;
10138
10139 case R_ARM_LDC_PC_G2:
10140 case R_ARM_LDC_SB_G2:
10141 group = 2;
10142 break;
10143
10144 default:
10145 abort ();
10146 }
10147
10148 /* If REL, extract the addend from the insn. If RELA, it will
10149 have already been fetched for us. */
10150 if (globals->use_rel)
10151 {
10152 int negative = (insn & (1 << 23)) ? 1 : -1;
10153 signed_addend = negative * ((insn & 0xff) << 2);
10154 }
10155
10156 /* Compute the value (X) to go in the place. */
10157 if (r_type == R_ARM_LDC_PC_G0
10158 || r_type == R_ARM_LDC_PC_G1
10159 || r_type == R_ARM_LDC_PC_G2)
10160 /* PC relative. */
10161 signed_value = value - pc + signed_addend;
10162 else
10163 /* Section base relative. */
10164 signed_value = value - sb + signed_addend;
10165
10166 /* Calculate the value of the relevant G_{n-1} to obtain
10167 the residual at that stage. */
10168 calculate_group_reloc_mask (abs (signed_value), group - 1, &residual);
10169
10170 /* Check for overflow. (The absolute value to go in the place must be
10171 divisible by four and, after having been divided by four, must
10172 fit in eight bits.) */
10173 if ((residual & 0x3) != 0 || residual >= 0x400)
10174 {
10175 (*_bfd_error_handler)
10176 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
10177 input_bfd, input_section,
10178 (long) rel->r_offset, abs (signed_value), howto->name);
10179 return bfd_reloc_overflow;
10180 }
10181
10182 /* Mask out the value and U bit. */
10183 insn &= 0xff7fff00;
10184
10185 /* Set the U bit if the value to go in the place is non-negative. */
10186 if (signed_value >= 0)
10187 insn |= 1 << 23;
10188
10189 /* Encode the offset. */
10190 insn |= residual >> 2;
10191
10192 bfd_put_32 (input_bfd, insn, hit_data);
10193 }
10194 return bfd_reloc_ok;
10195
10196 default:
10197 return bfd_reloc_notsupported;
10198 }
10199 }
10200
10201 /* Add INCREMENT to the reloc (of type HOWTO) at ADDRESS. */
10202 static void
10203 arm_add_to_rel (bfd * abfd,
10204 bfd_byte * address,
10205 reloc_howto_type * howto,
10206 bfd_signed_vma increment)
10207 {
10208 bfd_signed_vma addend;
10209
10210 if (howto->type == R_ARM_THM_CALL
10211 || howto->type == R_ARM_THM_JUMP24)
10212 {
10213 int upper_insn, lower_insn;
10214 int upper, lower;
10215
10216 upper_insn = bfd_get_16 (abfd, address);
10217 lower_insn = bfd_get_16 (abfd, address + 2);
10218 upper = upper_insn & 0x7ff;
10219 lower = lower_insn & 0x7ff;
10220
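/* Reconstruct the branch offset in bytes, add the increment, then
   convert back to halfwords and split the result into the two 11-bit
   instruction fields.  */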
10221 addend = (upper << 12) | (lower << 1);
10222 addend += increment;
10223 addend >>= 1;
10224
10225 upper_insn = (upper_insn & 0xf800) | ((addend >> 11) & 0x7ff);
10226 lower_insn = (lower_insn & 0xf800) | (addend & 0x7ff);
10227
10228 bfd_put_16 (abfd, (bfd_vma) upper_insn, address);
10229 bfd_put_16 (abfd, (bfd_vma) lower_insn, address + 2);
10230 }
10231 else
10232 {
10233 bfd_vma contents;
10234
10235 contents = bfd_get_32 (abfd, address);
10236
10237 /* Get the (signed) value from the instruction. */
10238 addend = contents & howto->src_mask;
10239 if (addend & ((howto->src_mask + 1) >> 1))
10240 {
10241 bfd_signed_vma mask;
10242
10243 mask = -1;
10244 mask &= ~ howto->src_mask;
10245 addend |= mask;
10246 }
10247
10248 /* Add in the increment (which is a byte value).  */
10249 switch (howto->type)
10250 {
10251 default:
10252 addend += increment;
10253 break;
10254
10255 case R_ARM_PC24:
10256 case R_ARM_PLT32:
10257 case R_ARM_CALL:
10258 case R_ARM_JUMP24:
10259 addend <<= howto->size;
10260 addend += increment;
10261
10262 /* Should we check for overflow here ? */
10263
10264 /* Drop any undesired bits. */
10265 addend >>= howto->rightshift;
10266 break;
10267 }
10268
10269 contents = (contents & ~ howto->dst_mask) | (addend & howto->dst_mask);
10270
10271 bfd_put_32 (abfd, contents, address);
10272 }
10273 }
10274
10275 #define IS_ARM_TLS_RELOC(R_TYPE) \
10276 ((R_TYPE) == R_ARM_TLS_GD32 \
10277 || (R_TYPE) == R_ARM_TLS_LDO32 \
10278 || (R_TYPE) == R_ARM_TLS_LDM32 \
10279 || (R_TYPE) == R_ARM_TLS_DTPOFF32 \
10280 || (R_TYPE) == R_ARM_TLS_DTPMOD32 \
10281 || (R_TYPE) == R_ARM_TLS_TPOFF32 \
10282 || (R_TYPE) == R_ARM_TLS_LE32 \
10283 || (R_TYPE) == R_ARM_TLS_IE32 \
10284 || IS_ARM_TLS_GNU_RELOC (R_TYPE))
10285
10286 /* Specific set of relocations for the gnu tls dialect. */
10287 #define IS_ARM_TLS_GNU_RELOC(R_TYPE) \
10288 ((R_TYPE) == R_ARM_TLS_GOTDESC \
10289 || (R_TYPE) == R_ARM_TLS_CALL \
10290 || (R_TYPE) == R_ARM_THM_TLS_CALL \
10291 || (R_TYPE) == R_ARM_TLS_DESCSEQ \
10292 || (R_TYPE) == R_ARM_THM_TLS_DESCSEQ)
10293
10294 /* Relocate an ARM ELF section. */
10295
10296 static bfd_boolean
10297 elf32_arm_relocate_section (bfd * output_bfd,
10298 struct bfd_link_info * info,
10299 bfd * input_bfd,
10300 asection * input_section,
10301 bfd_byte * contents,
10302 Elf_Internal_Rela * relocs,
10303 Elf_Internal_Sym * local_syms,
10304 asection ** local_sections)
10305 {
10306 Elf_Internal_Shdr *symtab_hdr;
10307 struct elf_link_hash_entry **sym_hashes;
10308 Elf_Internal_Rela *rel;
10309 Elf_Internal_Rela *relend;
10310 const char *name;
10311 struct elf32_arm_link_hash_table * globals;
10312
10313 globals = elf32_arm_hash_table (info);
10314 if (globals == NULL)
10315 return FALSE;
10316
10317 symtab_hdr = & elf_symtab_hdr (input_bfd);
10318 sym_hashes = elf_sym_hashes (input_bfd);
10319
10320 rel = relocs;
10321 relend = relocs + input_section->reloc_count;
10322 for (; rel < relend; rel++)
10323 {
10324 int r_type;
10325 reloc_howto_type * howto;
10326 unsigned long r_symndx;
10327 Elf_Internal_Sym * sym;
10328 asection * sec;
10329 struct elf_link_hash_entry * h;
10330 bfd_vma relocation;
10331 bfd_reloc_status_type r;
10332 arelent bfd_reloc;
10333 char sym_type;
10334 bfd_boolean unresolved_reloc = FALSE;
10335 char *error_message = NULL;
10336
10337 r_symndx = ELF32_R_SYM (rel->r_info);
10338 r_type = ELF32_R_TYPE (rel->r_info);
10339 r_type = arm_real_reloc_type (globals, r_type);
10340
10341 if ( r_type == R_ARM_GNU_VTENTRY
10342 || r_type == R_ARM_GNU_VTINHERIT)
10343 continue;
10344
10345 bfd_reloc.howto = elf32_arm_howto_from_type (r_type);
10346 howto = bfd_reloc.howto;
10347
10348 h = NULL;
10349 sym = NULL;
10350 sec = NULL;
10351
10352 if (r_symndx < symtab_hdr->sh_info)
10353 {
10354 sym = local_syms + r_symndx;
10355 sym_type = ELF32_ST_TYPE (sym->st_info);
10356 sec = local_sections[r_symndx];
10357
10358 /* An object file might have a reference to a local
10359 undefined symbol. This is a daft object file, but we
10360 should at least do something about it. V4BX & NONE
10361 relocations do not use the symbol and are explicitly
10362 allowed to use the undefined symbol, so allow those.
10363 Likewise for relocations against STN_UNDEF. */
10364 if (r_type != R_ARM_V4BX
10365 && r_type != R_ARM_NONE
10366 && r_symndx != STN_UNDEF
10367 && bfd_is_und_section (sec)
10368 && ELF_ST_BIND (sym->st_info) != STB_WEAK)
10369 {
10370 if (!info->callbacks->undefined_symbol
10371 (info, bfd_elf_string_from_elf_section
10372 (input_bfd, symtab_hdr->sh_link, sym->st_name),
10373 input_bfd, input_section,
10374 rel->r_offset, TRUE))
10375 return FALSE;
10376 }
10377
10378 if (globals->use_rel)
10379 {
10380 relocation = (sec->output_section->vma
10381 + sec->output_offset
10382 + sym->st_value);
10383 if (!info->relocatable
10384 && (sec->flags & SEC_MERGE)
10385 && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
10386 {
10387 asection *msec;
10388 bfd_vma addend, value;
10389
10390 switch (r_type)
10391 {
10392 case R_ARM_MOVW_ABS_NC:
10393 case R_ARM_MOVT_ABS:
10394 value = bfd_get_32 (input_bfd, contents + rel->r_offset);
10395 addend = ((value & 0xf0000) >> 4) | (value & 0xfff);
10396 addend = (addend ^ 0x8000) - 0x8000;
10397 break;
10398
10399 case R_ARM_THM_MOVW_ABS_NC:
10400 case R_ARM_THM_MOVT_ABS:
10401 value = bfd_get_16 (input_bfd, contents + rel->r_offset)
10402 << 16;
10403 value |= bfd_get_16 (input_bfd,
10404 contents + rel->r_offset + 2);
10405 addend = ((value & 0xf7000) >> 4) | (value & 0xff)
10406 | ((value & 0x04000000) >> 15);
10407 addend = (addend ^ 0x8000) - 0x8000;
10408 break;
10409
10410 default:
10411 if (howto->rightshift
10412 || (howto->src_mask & (howto->src_mask + 1)))
10413 {
10414 (*_bfd_error_handler)
10415 (_("%B(%A+0x%lx): %s relocation against SEC_MERGE section"),
10416 input_bfd, input_section,
10417 (long) rel->r_offset, howto->name);
10418 return FALSE;
10419 }
10420
10421 value = bfd_get_32 (input_bfd, contents + rel->r_offset);
10422
10423 /* Get the (signed) value from the instruction. */
10424 addend = value & howto->src_mask;
10425 if (addend & ((howto->src_mask + 1) >> 1))
10426 {
10427 bfd_signed_vma mask;
10428
10429 mask = -1;
10430 mask &= ~ howto->src_mask;
10431 addend |= mask;
10432 }
10433 break;
10434 }
10435
10436 msec = sec;
10437 addend =
10438 _bfd_elf_rel_local_sym (output_bfd, sym, &msec, addend)
10439 - relocation;
10440 addend += msec->output_section->vma + msec->output_offset;
10441
10442 /* Cases here must match those in the preceding
10443 switch statement. */
10444 switch (r_type)
10445 {
10446 case R_ARM_MOVW_ABS_NC:
10447 case R_ARM_MOVT_ABS:
10448 value = (value & 0xfff0f000) | ((addend & 0xf000) << 4)
10449 | (addend & 0xfff);
10450 bfd_put_32 (input_bfd, value, contents + rel->r_offset);
10451 break;
10452
10453 case R_ARM_THM_MOVW_ABS_NC:
10454 case R_ARM_THM_MOVT_ABS:
10455 value = (value & 0xfbf08f00) | ((addend & 0xf700) << 4)
10456 | (addend & 0xff) | ((addend & 0x0800) << 15);
10457 bfd_put_16 (input_bfd, value >> 16,
10458 contents + rel->r_offset);
10459 bfd_put_16 (input_bfd, value,
10460 contents + rel->r_offset + 2);
10461 break;
10462
10463 default:
10464 value = (value & ~ howto->dst_mask)
10465 | (addend & howto->dst_mask);
10466 bfd_put_32 (input_bfd, value, contents + rel->r_offset);
10467 break;
10468 }
10469 }
10470 }
10471 else
10472 relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
10473 }
10474 else
10475 {
10476 bfd_boolean warned;
10477
10478 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
10479 r_symndx, symtab_hdr, sym_hashes,
10480 h, sec, relocation,
10481 unresolved_reloc, warned);
10482
10483 sym_type = h->type;
10484 }
10485
10486 if (sec != NULL && discarded_section (sec))
10487 RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
10488 rel, 1, relend, howto, 0, contents);
10489
10490 if (info->relocatable)
10491 {
10492 /* This is a relocatable link. We don't have to change
10493 anything, unless the reloc is against a section symbol,
10494 in which case we have to adjust according to where the
10495 section symbol winds up in the output section. */
10496 if (sym != NULL && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
10497 {
10498 if (globals->use_rel)
10499 arm_add_to_rel (input_bfd, contents + rel->r_offset,
10500 howto, (bfd_signed_vma) sec->output_offset);
10501 else
10502 rel->r_addend += sec->output_offset;
10503 }
10504 continue;
10505 }
10506
10507 if (h != NULL)
10508 name = h->root.root.string;
10509 else
10510 {
10511 name = (bfd_elf_string_from_elf_section
10512 (input_bfd, symtab_hdr->sh_link, sym->st_name));
10513 if (name == NULL || *name == '\0')
10514 name = bfd_section_name (input_bfd, sec);
10515 }
10516
10517 if (r_symndx != STN_UNDEF
10518 && r_type != R_ARM_NONE
10519 && (h == NULL
10520 || h->root.type == bfd_link_hash_defined
10521 || h->root.type == bfd_link_hash_defweak)
10522 && IS_ARM_TLS_RELOC (r_type) != (sym_type == STT_TLS))
10523 {
10524 (*_bfd_error_handler)
10525 ((sym_type == STT_TLS
10526 ? _("%B(%A+0x%lx): %s used with TLS symbol %s")
10527 : _("%B(%A+0x%lx): %s used with non-TLS symbol %s")),
10528 input_bfd,
10529 input_section,
10530 (long) rel->r_offset,
10531 howto->name,
10532 name);
10533 }
10534
10535 /* We call elf32_arm_final_link_relocate unless we're completely
10536 done, i.e., the relaxation produced the final output we want,
10537 and we won't let anybody mess with it. Also, we have to do
10538 addend adjustments in case of an R_ARM_TLS_GOTDESC relocation,
10539 both in the relaxed and non-relaxed cases.  */
10540 if ((elf32_arm_tls_transition (info, r_type, h) != (unsigned)r_type)
10541 || (IS_ARM_TLS_GNU_RELOC (r_type)
10542 && !((h ? elf32_arm_hash_entry (h)->tls_type :
10543 elf32_arm_local_got_tls_type (input_bfd)[r_symndx])
10544 & GOT_TLS_GDESC)))
10545 {
10546 r = elf32_arm_tls_relax (globals, input_bfd, input_section,
10547 contents, rel, h == NULL);
10548 /* This may have been marked unresolved because it came from
10549 a shared library. But we've just dealt with that. */
10550 unresolved_reloc = 0;
10551 }
10552 else
10553 r = bfd_reloc_continue;
10554
10555 if (r == bfd_reloc_continue)
10556 r = elf32_arm_final_link_relocate (howto, input_bfd, output_bfd,
10557 input_section, contents, rel,
10558 relocation, info, sec, name, sym_type,
10559 (h ? h->target_internal
10560 : ARM_SYM_BRANCH_TYPE (sym)), h,
10561 &unresolved_reloc, &error_message);
10562
10563 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
10564 because such sections are not SEC_ALLOC and thus ld.so will
10565 not process them. */
10566 if (unresolved_reloc
10567 && !((input_section->flags & SEC_DEBUGGING) != 0
10568 && h->def_dynamic)
10569 && _bfd_elf_section_offset (output_bfd, info, input_section,
10570 rel->r_offset) != (bfd_vma) -1)
10571 {
10572 (*_bfd_error_handler)
10573 (_("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"),
10574 input_bfd,
10575 input_section,
10576 (long) rel->r_offset,
10577 howto->name,
10578 h->root.root.string);
10579 return FALSE;
10580 }
10581
10582 if (r != bfd_reloc_ok)
10583 {
10584 switch (r)
10585 {
10586 case bfd_reloc_overflow:
10587 /* If the overflowing reloc was to an undefined symbol,
10588 we have already printed one error message and there
10589 is no point complaining again. */
10590 if ((! h ||
10591 h->root.type != bfd_link_hash_undefined)
10592 && (!((*info->callbacks->reloc_overflow)
10593 (info, (h ? &h->root : NULL), name, howto->name,
10594 (bfd_vma) 0, input_bfd, input_section,
10595 rel->r_offset))))
10596 return FALSE;
10597 break;
10598
10599 case bfd_reloc_undefined:
10600 if (!((*info->callbacks->undefined_symbol)
10601 (info, name, input_bfd, input_section,
10602 rel->r_offset, TRUE)))
10603 return FALSE;
10604 break;
10605
10606 case bfd_reloc_outofrange:
10607 error_message = _("out of range");
10608 goto common_error;
10609
10610 case bfd_reloc_notsupported:
10611 error_message = _("unsupported relocation");
10612 goto common_error;
10613
10614 case bfd_reloc_dangerous:
10615 /* error_message should already be set. */
10616 goto common_error;
10617
10618 default:
10619 error_message = _("unknown error");
10620 /* Fall through. */
10621
10622 common_error:
10623 BFD_ASSERT (error_message != NULL);
10624 if (!((*info->callbacks->reloc_dangerous)
10625 (info, error_message, input_bfd, input_section,
10626 rel->r_offset)))
10627 return FALSE;
10628 break;
10629 }
10630 }
10631 }
10632
10633 return TRUE;
10634 }
10635
10636 /* Add a new unwind edit to the list described by HEAD, TAIL. If TINDEX is zero,
10637 adds the edit to the start of the list. (The list must be built in order of
10638 ascending TINDEX: the function's callers are primarily responsible for
10639 maintaining that condition). */
10640
10641 static void
10642 add_unwind_table_edit (arm_unwind_table_edit **head,
10643 arm_unwind_table_edit **tail,
10644 arm_unwind_edit_type type,
10645 asection *linked_section,
10646 unsigned int tindex)
10647 {
10648 arm_unwind_table_edit *new_edit = (arm_unwind_table_edit *)
10649 xmalloc (sizeof (arm_unwind_table_edit));
10650
10651 new_edit->type = type;
10652 new_edit->linked_section = linked_section;
10653 new_edit->index = tindex;
10654
10655 if (tindex > 0)
10656 {
10657 new_edit->next = NULL;
10658
10659 if (*tail)
10660 (*tail)->next = new_edit;
10661
10662 (*tail) = new_edit;
10663
10664 if (!*head)
10665 (*head) = new_edit;
10666 }
10667 else
10668 {
10669 new_edit->next = *head;
10670
10671 if (!*tail)
10672 *tail = new_edit;
10673
10674 *head = new_edit;
10675 }
10676 }
10677
10678 static _arm_elf_section_data *get_arm_elf_section_data (asection *);
10679
10680 /* Increase the size of EXIDX_SEC by ADJUST bytes. ADJUST may be negative. */
10681 static void
10682 adjust_exidx_size(asection *exidx_sec, int adjust)
10683 {
10684 asection *out_sec;
10685
10686 if (!exidx_sec->rawsize)
10687 exidx_sec->rawsize = exidx_sec->size;
10688
10689 bfd_set_section_size (exidx_sec->owner, exidx_sec, exidx_sec->size + adjust);
10690 out_sec = exidx_sec->output_section;
10691 /* Adjust size of output section. */
10692 bfd_set_section_size (out_sec->owner, out_sec, out_sec->size + adjust);
10693 }
10694
10695 /* Insert an EXIDX_CANTUNWIND marker at the end of a section. */
10696 static void
10697 insert_cantunwind_after(asection *text_sec, asection *exidx_sec)
10698 {
10699 struct _arm_elf_section_data *exidx_arm_data;
10700
10701 exidx_arm_data = get_arm_elf_section_data (exidx_sec);
10702 add_unwind_table_edit (
10703 &exidx_arm_data->u.exidx.unwind_edit_list,
10704 &exidx_arm_data->u.exidx.unwind_edit_tail,
10705 INSERT_EXIDX_CANTUNWIND_AT_END, text_sec, UINT_MAX);
10706
10707 adjust_exidx_size(exidx_sec, 8);
10708 }
10709
10710 /* Scan .ARM.exidx tables, and create a list describing edits which should be
10711 made to those tables, such that:
10712
10713 1. Regions without unwind data are marked with EXIDX_CANTUNWIND entries.
10714 2. Duplicate entries are merged together (EXIDX_CANTUNWIND, or unwind
10715 codes which have been inlined into the index).
10716
10717 If MERGE_EXIDX_ENTRIES is false, duplicate entries are not merged.
10718
10719 The edits are applied when the tables are written
10720 (in elf32_arm_write_section). */
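/* Each .ARM.exidx table entry is a pair of words: a prel31 offset to the
   function it covers, followed by either EXIDX_CANTUNWIND (1), an inline
   unwind description (bit 31 set), or a prel31 pointer to an entry in the
   corresponding .ARM.extab section.  */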
10721
10722 bfd_boolean
10723 elf32_arm_fix_exidx_coverage (asection **text_section_order,
10724 unsigned int num_text_sections,
10725 struct bfd_link_info *info,
10726 bfd_boolean merge_exidx_entries)
10727 {
10728 bfd *inp;
10729 unsigned int last_second_word = 0, i;
10730 asection *last_exidx_sec = NULL;
10731 asection *last_text_sec = NULL;
10732 int last_unwind_type = -1;
10733
10734 /* Walk over all EXIDX sections, and create backlinks from the corresponding
10735 text sections. */
10736 for (inp = info->input_bfds; inp != NULL; inp = inp->link_next)
10737 {
10738 asection *sec;
10739
10740 for (sec = inp->sections; sec != NULL; sec = sec->next)
10741 {
10742 struct bfd_elf_section_data *elf_sec = elf_section_data (sec);
10743 Elf_Internal_Shdr *hdr = &elf_sec->this_hdr;
10744
10745 if (!hdr || hdr->sh_type != SHT_ARM_EXIDX)
10746 continue;
10747
10748 if (elf_sec->linked_to)
10749 {
10750 Elf_Internal_Shdr *linked_hdr
10751 = &elf_section_data (elf_sec->linked_to)->this_hdr;
10752 struct _arm_elf_section_data *linked_sec_arm_data
10753 = get_arm_elf_section_data (linked_hdr->bfd_section);
10754
10755 if (linked_sec_arm_data == NULL)
10756 continue;
10757
10758 /* Link this .ARM.exidx section back from the text section it
10759 describes. */
10760 linked_sec_arm_data->u.text.arm_exidx_sec = sec;
10761 }
10762 }
10763 }
10764
10765 /* Walk all text sections in order of increasing VMA. Eliminate duplicate
10766 index table entries (EXIDX_CANTUNWIND and inlined unwind opcodes),
10767 and add EXIDX_CANTUNWIND entries for sections with no unwind table data. */
10768
10769 for (i = 0; i < num_text_sections; i++)
10770 {
10771 asection *sec = text_section_order[i];
10772 asection *exidx_sec;
10773 struct _arm_elf_section_data *arm_data = get_arm_elf_section_data (sec);
10774 struct _arm_elf_section_data *exidx_arm_data;
10775 bfd_byte *contents = NULL;
10776 int deleted_exidx_bytes = 0;
10777 bfd_vma j;
10778 arm_unwind_table_edit *unwind_edit_head = NULL;
10779 arm_unwind_table_edit *unwind_edit_tail = NULL;
10780 Elf_Internal_Shdr *hdr;
10781 bfd *ibfd;
10782
10783 if (arm_data == NULL)
10784 continue;
10785
10786 exidx_sec = arm_data->u.text.arm_exidx_sec;
10787 if (exidx_sec == NULL)
10788 {
10789 /* Section has no unwind data. */
10790 if (last_unwind_type == 0 || !last_exidx_sec)
10791 continue;
10792
10793 /* Ignore zero sized sections. */
10794 if (sec->size == 0)
10795 continue;
10796
10797 insert_cantunwind_after(last_text_sec, last_exidx_sec);
10798 last_unwind_type = 0;
10799 continue;
10800 }
10801
10802 /* Skip /DISCARD/ sections. */
10803 if (bfd_is_abs_section (exidx_sec->output_section))
10804 continue;
10805
10806 hdr = &elf_section_data (exidx_sec)->this_hdr;
10807 if (hdr->sh_type != SHT_ARM_EXIDX)
10808 continue;
10809
10810 exidx_arm_data = get_arm_elf_section_data (exidx_sec);
10811 if (exidx_arm_data == NULL)
10812 continue;
10813
10814 ibfd = exidx_sec->owner;
10815
10816 if (hdr->contents != NULL)
10817 contents = hdr->contents;
10818 else if (! bfd_malloc_and_get_section (ibfd, exidx_sec, &contents))
10819 /* An error? */
10820 continue;
10821
10822 for (j = 0; j < hdr->sh_size; j += 8)
10823 {
10824 unsigned int second_word = bfd_get_32 (ibfd, contents + j + 4);
10825 int unwind_type;
10826 int elide = 0;
10827
10828 /* An EXIDX_CANTUNWIND entry. */
10829 if (second_word == 1)
10830 {
10831 if (last_unwind_type == 0)
10832 elide = 1;
10833 unwind_type = 0;
10834 }
10835 /* Inlined unwinding data. Merge if equal to previous. */
10836 else if ((second_word & 0x80000000) != 0)
10837 {
10838 if (merge_exidx_entries
10839 && last_second_word == second_word && last_unwind_type == 1)
10840 elide = 1;
10841 unwind_type = 1;
10842 last_second_word = second_word;
10843 }
10844 /* Normal table entry. In theory we could merge these too,
10845 but duplicate entries are likely to be much less common. */
10846 else
10847 unwind_type = 2;
10848
10849 if (elide)
10850 {
10851 add_unwind_table_edit (&unwind_edit_head, &unwind_edit_tail,
10852 DELETE_EXIDX_ENTRY, NULL, j / 8);
10853
10854 deleted_exidx_bytes += 8;
10855 }
10856
10857 last_unwind_type = unwind_type;
10858 }
10859
10860 /* Free contents if we allocated it ourselves. */
10861 if (contents != hdr->contents)
10862 free (contents);
10863
10864 /* Record edits to be applied later (in elf32_arm_write_section). */
10865 exidx_arm_data->u.exidx.unwind_edit_list = unwind_edit_head;
10866 exidx_arm_data->u.exidx.unwind_edit_tail = unwind_edit_tail;
10867
10868 if (deleted_exidx_bytes > 0)
10869 adjust_exidx_size(exidx_sec, -deleted_exidx_bytes);
10870
10871 last_exidx_sec = exidx_sec;
10872 last_text_sec = sec;
10873 }
10874
10875 /* Add terminating CANTUNWIND entry. */
10876 if (last_exidx_sec && last_unwind_type != 0)
10877 insert_cantunwind_after(last_text_sec, last_exidx_sec);
10878
10879 return TRUE;
10880 }
10881
10882 static bfd_boolean
10883 elf32_arm_output_glue_section (struct bfd_link_info *info, bfd *obfd,
10884 bfd *ibfd, const char *name)
10885 {
10886 asection *sec, *osec;
10887
10888 sec = bfd_get_section_by_name (ibfd, name);
10889 if (sec == NULL || (sec->flags & SEC_EXCLUDE) != 0)
10890 return TRUE;
10891
10892 osec = sec->output_section;
10893 if (elf32_arm_write_section (obfd, info, sec, sec->contents))
10894 return TRUE;
10895
10896 if (! bfd_set_section_contents (obfd, osec, sec->contents,
10897 sec->output_offset, sec->size))
10898 return FALSE;
10899
10900 return TRUE;
10901 }
10902
10903 static bfd_boolean
10904 elf32_arm_final_link (bfd *abfd, struct bfd_link_info *info)
10905 {
10906 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
10907 asection *sec, *osec;
10908
10909 if (globals == NULL)
10910 return FALSE;
10911
10912 /* Invoke the regular ELF backend linker to do all the work. */
10913 if (!bfd_elf_final_link (abfd, info))
10914 return FALSE;
10915
10916 /* Process stub sections (e.g. BE8 encoding, ...). */
10917 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
10918 int i;
10919 for (i = 0; i < htab->top_id; i++)
10920 {
10921 sec = htab->stub_group[i].stub_sec;
10922 /* Only process it once, in its link_sec slot. */
10923 if (sec && i == htab->stub_group[i].link_sec->id)
10924 {
10925 osec = sec->output_section;
10926 elf32_arm_write_section (abfd, info, sec, sec->contents);
10927 if (! bfd_set_section_contents (abfd, osec, sec->contents,
10928 sec->output_offset, sec->size))
10929 return FALSE;
10930 }
10931 }
10932
10933 /* Write out any glue sections now that we have created all the
10934 stubs. */
10935 if (globals->bfd_of_glue_owner != NULL)
10936 {
10937 if (! elf32_arm_output_glue_section (info, abfd,
10938 globals->bfd_of_glue_owner,
10939 ARM2THUMB_GLUE_SECTION_NAME))
10940 return FALSE;
10941
10942 if (! elf32_arm_output_glue_section (info, abfd,
10943 globals->bfd_of_glue_owner,
10944 THUMB2ARM_GLUE_SECTION_NAME))
10945 return FALSE;
10946
10947 if (! elf32_arm_output_glue_section (info, abfd,
10948 globals->bfd_of_glue_owner,
10949 VFP11_ERRATUM_VENEER_SECTION_NAME))
10950 return FALSE;
10951
10952 if (! elf32_arm_output_glue_section (info, abfd,
10953 globals->bfd_of_glue_owner,
10954 ARM_BX_GLUE_SECTION_NAME))
10955 return FALSE;
10956 }
10957
10958 return TRUE;
10959 }
10960
10961 /* Return a best guess for the machine number based on the attributes. */
10962
10963 static unsigned int
10964 bfd_arm_get_mach_from_attributes (bfd * abfd)
10965 {
10966 int arch = bfd_elf_get_obj_attr_int (abfd, OBJ_ATTR_PROC, Tag_CPU_arch);
10967
10968 switch (arch)
10969 {
10970 case TAG_CPU_ARCH_V4: return bfd_mach_arm_4;
10971 case TAG_CPU_ARCH_V4T: return bfd_mach_arm_4T;
10972 case TAG_CPU_ARCH_V5T: return bfd_mach_arm_5T;
10973
10974 case TAG_CPU_ARCH_V5TE:
10975 {
10976 char * name;
10977
10978 BFD_ASSERT (Tag_CPU_name < NUM_KNOWN_OBJ_ATTRIBUTES);
10979 name = elf_known_obj_attributes (abfd) [OBJ_ATTR_PROC][Tag_CPU_name].s;
10980
10981 if (name)
10982 {
10983 if (strcmp (name, "IWMMXT2") == 0)
10984 return bfd_mach_arm_iWMMXt2;
10985
10986 if (strcmp (name, "IWMMXT") == 0)
10987 return bfd_mach_arm_iWMMXt;
10988 }
10989
10990 return bfd_mach_arm_5TE;
10991 }
10992
10993 default:
10994 return bfd_mach_arm_unknown;
10995 }
10996 }
10997
10998 /* Set the right machine number. */
10999
11000 static bfd_boolean
11001 elf32_arm_object_p (bfd *abfd)
11002 {
11003 unsigned int mach;
11004
11005 mach = bfd_arm_get_mach_from_notes (abfd, ARM_NOTE_SECTION);
11006
11007 if (mach == bfd_mach_arm_unknown)
11008 {
11009 if (elf_elfheader (abfd)->e_flags & EF_ARM_MAVERICK_FLOAT)
11010 mach = bfd_mach_arm_ep9312;
11011 else
11012 mach = bfd_arm_get_mach_from_attributes (abfd);
11013 }
11014
11015 bfd_default_set_arch_mach (abfd, bfd_arch_arm, mach);
11016 return TRUE;
11017 }
11018
11019 /* Function to keep ARM specific flags in the ELF header. */
11020
11021 static bfd_boolean
11022 elf32_arm_set_private_flags (bfd *abfd, flagword flags)
11023 {
11024 if (elf_flags_init (abfd)
11025 && elf_elfheader (abfd)->e_flags != flags)
11026 {
11027 if (EF_ARM_EABI_VERSION (flags) == EF_ARM_EABI_UNKNOWN)
11028 {
11029 if (flags & EF_ARM_INTERWORK)
11030 (*_bfd_error_handler)
11031 (_("Warning: Not setting interworking flag of %B since it has already been specified as non-interworking"),
11032 abfd);
11033 else
11034 _bfd_error_handler
11035 (_("Warning: Clearing the interworking flag of %B due to outside request"),
11036 abfd);
11037 }
11038 }
11039 else
11040 {
11041 elf_elfheader (abfd)->e_flags = flags;
11042 elf_flags_init (abfd) = TRUE;
11043 }
11044
11045 return TRUE;
11046 }
11047
11048 /* Copy backend specific data from one object module to another. */
11049
11050 static bfd_boolean
11051 elf32_arm_copy_private_bfd_data (bfd *ibfd, bfd *obfd)
11052 {
11053 flagword in_flags;
11054 flagword out_flags;
11055
11056 if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
11057 return TRUE;
11058
11059 in_flags = elf_elfheader (ibfd)->e_flags;
11060 out_flags = elf_elfheader (obfd)->e_flags;
11061
11062 if (elf_flags_init (obfd)
11063 && EF_ARM_EABI_VERSION (out_flags) == EF_ARM_EABI_UNKNOWN
11064 && in_flags != out_flags)
11065 {
11066 /* Cannot mix APCS26 and APCS32 code. */
11067 if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
11068 return FALSE;
11069
11070 /* Cannot mix float APCS and non-float APCS code. */
11071 if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
11072 return FALSE;
11073
11074 /* If the src and dest have different interworking flags
11075 then turn off the interworking bit. */
11076 if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
11077 {
11078 if (out_flags & EF_ARM_INTERWORK)
11079 _bfd_error_handler
11080 (_("Warning: Clearing the interworking flag of %B because non-interworking code in %B has been linked with it"),
11081 obfd, ibfd);
11082
11083 in_flags &= ~EF_ARM_INTERWORK;
11084 }
11085
11086 /* Likewise for PIC, though don't warn for this case. */
11087 if ((in_flags & EF_ARM_PIC) != (out_flags & EF_ARM_PIC))
11088 in_flags &= ~EF_ARM_PIC;
11089 }
11090
11091 elf_elfheader (obfd)->e_flags = in_flags;
11092 elf_flags_init (obfd) = TRUE;
11093
11094 /* Also copy the EI_OSABI field. */
11095 elf_elfheader (obfd)->e_ident[EI_OSABI] =
11096 elf_elfheader (ibfd)->e_ident[EI_OSABI];
11097
11098 /* Copy object attributes. */
11099 _bfd_elf_copy_obj_attributes (ibfd, obfd);
11100
11101 return TRUE;
11102 }
11103
11104 /* Values for Tag_ABI_PCS_R9_use. */
11105 enum
11106 {
11107 AEABI_R9_V6,
11108 AEABI_R9_SB,
11109 AEABI_R9_TLS,
11110 AEABI_R9_unused
11111 };
11112
11113 /* Values for Tag_ABI_PCS_RW_data. */
11114 enum
11115 {
11116 AEABI_PCS_RW_data_absolute,
11117 AEABI_PCS_RW_data_PCrel,
11118 AEABI_PCS_RW_data_SBrel,
11119 AEABI_PCS_RW_data_unused
11120 };
11121
11122 /* Values for Tag_ABI_enum_size. */
11123 enum
11124 {
11125 AEABI_enum_unused,
11126 AEABI_enum_short,
11127 AEABI_enum_wide,
11128 AEABI_enum_forced_wide
11129 };
11130
11131 /* Determine whether an object attribute tag takes an integer, a
11132 string or both. */
11133
11134 static int
11135 elf32_arm_obj_attrs_arg_type (int tag)
11136 {
11137 if (tag == Tag_compatibility)
11138 return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_STR_VAL;
11139 else if (tag == Tag_nodefaults)
11140 return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_NO_DEFAULT;
11141 else if (tag == Tag_CPU_raw_name || tag == Tag_CPU_name)
11142 return ATTR_TYPE_FLAG_STR_VAL;
11143 else if (tag < 32)
11144 return ATTR_TYPE_FLAG_INT_VAL;
11145 else
11146 return (tag & 1) != 0 ? ATTR_TYPE_FLAG_STR_VAL : ATTR_TYPE_FLAG_INT_VAL;
11147 }
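/* For example (using the AEABI attribute numbering, quoted here only for
   illustration): Tag_also_compatible_with (65) is odd and therefore takes a
   string, Tag_MPextension_use (42) is even and takes an integer, while
   Tag_compatibility, Tag_nodefaults and the CPU name tags are special-cased
   above.  */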
11148
11149 /* The ABI defines that Tag_conformance should be emitted first, and that
11150 Tag_nodefaults should be second (if either is defined). This sets those
11151 two positions, and bumps up the position of all the remaining tags to
11152 compensate. */
11153 static int
11154 elf32_arm_obj_attrs_order (int num)
11155 {
11156 if (num == LEAST_KNOWN_OBJ_ATTRIBUTE)
11157 return Tag_conformance;
11158 if (num == LEAST_KNOWN_OBJ_ATTRIBUTE + 1)
11159 return Tag_nodefaults;
11160 if ((num - 2) < Tag_nodefaults)
11161 return num - 2;
11162 if ((num - 1) < Tag_conformance)
11163 return num - 1;
11164 return num;
11165 }
11166
11167 /* Attribute numbers >=64 (mod 128) can be safely ignored. */
11168 static bfd_boolean
11169 elf32_arm_obj_attrs_handle_unknown (bfd *abfd, int tag)
11170 {
11171 if ((tag & 127) < 64)
11172 {
11173 _bfd_error_handler
11174 (_("%B: Unknown mandatory EABI object attribute %d"),
11175 abfd, tag);
11176 bfd_set_error (bfd_error_bad_value);
11177 return FALSE;
11178 }
11179 else
11180 {
11181 _bfd_error_handler
11182 (_("Warning: %B: Unknown EABI object attribute %d"),
11183 abfd, tag);
11184 return TRUE;
11185 }
11186 }
11187
11188 /* Read the architecture from the Tag_also_compatible_with attribute, if any.
11189 Returns -1 if no architecture could be read. */
11190
11191 static int
11192 get_secondary_compatible_arch (bfd *abfd)
11193 {
11194 obj_attribute *attr =
11195 &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
11196
11197 /* Note: the tag and its argument below are uleb128 values, though
11198 currently-defined values fit in one byte for each. */
11199 if (attr->s
11200 && attr->s[0] == Tag_CPU_arch
11201 && (attr->s[1] & 128) != 128
11202 && attr->s[2] == 0)
11203 return attr->s[1];
11204
11205 /* This tag is "safely ignorable", so don't complain if it looks funny. */
11206 return -1;
11207 }
11208
11209 /* Set, or unset, the architecture of the Tag_also_compatible_with attribute.
11210 The tag is removed if ARCH is -1. */
11211
11212 static void
11213 set_secondary_compatible_arch (bfd *abfd, int arch)
11214 {
11215 obj_attribute *attr =
11216 &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
11217
11218 if (arch == -1)
11219 {
11220 attr->s = NULL;
11221 return;
11222 }
11223
11224 /* Note: the tag and its argument below are uleb128 values, though
11225 currently-defined values fit in one byte for each. */
11226 if (!attr->s)
11227 attr->s = (char *) bfd_alloc (abfd, 3);
11228 attr->s[0] = Tag_CPU_arch;
11229 attr->s[1] = arch;
11230 attr->s[2] = '\0';
11231 }
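/* For example, set_secondary_compatible_arch (obfd, TAG_CPU_ARCH_V6_M)
   leaves the attribute holding the three bytes
   { Tag_CPU_arch, TAG_CPU_ARCH_V6_M, 0 }, which is exactly the form that
   get_secondary_compatible_arch above accepts.  */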
11232
11233 /* Combine two values for Tag_CPU_arch, taking secondary compatibility tags
11234 into account. */
11235
11236 static int
11237 tag_cpu_arch_combine (bfd *ibfd, int oldtag, int *secondary_compat_out,
11238 int newtag, int secondary_compat)
11239 {
11240 #define T(X) TAG_CPU_ARCH_##X
11241 int tagl, tagh, result;
11242 const int v6t2[] =
11243 {
11244 T(V6T2), /* PRE_V4. */
11245 T(V6T2), /* V4. */
11246 T(V6T2), /* V4T. */
11247 T(V6T2), /* V5T. */
11248 T(V6T2), /* V5TE. */
11249 T(V6T2), /* V5TEJ. */
11250 T(V6T2), /* V6. */
11251 T(V7), /* V6KZ. */
11252 T(V6T2) /* V6T2. */
11253 };
11254 const int v6k[] =
11255 {
11256 T(V6K), /* PRE_V4. */
11257 T(V6K), /* V4. */
11258 T(V6K), /* V4T. */
11259 T(V6K), /* V5T. */
11260 T(V6K), /* V5TE. */
11261 T(V6K), /* V5TEJ. */
11262 T(V6K), /* V6. */
11263 T(V6KZ), /* V6KZ. */
11264 T(V7), /* V6T2. */
11265 T(V6K) /* V6K. */
11266 };
11267 const int v7[] =
11268 {
11269 T(V7), /* PRE_V4. */
11270 T(V7), /* V4. */
11271 T(V7), /* V4T. */
11272 T(V7), /* V5T. */
11273 T(V7), /* V5TE. */
11274 T(V7), /* V5TEJ. */
11275 T(V7), /* V6. */
11276 T(V7), /* V6KZ. */
11277 T(V7), /* V6T2. */
11278 T(V7), /* V6K. */
11279 T(V7) /* V7. */
11280 };
11281 const int v6_m[] =
11282 {
11283 -1, /* PRE_V4. */
11284 -1, /* V4. */
11285 T(V6K), /* V4T. */
11286 T(V6K), /* V5T. */
11287 T(V6K), /* V5TE. */
11288 T(V6K), /* V5TEJ. */
11289 T(V6K), /* V6. */
11290 T(V6KZ), /* V6KZ. */
11291 T(V7), /* V6T2. */
11292 T(V6K), /* V6K. */
11293 T(V7), /* V7. */
11294 T(V6_M) /* V6_M. */
11295 };
11296 const int v6s_m[] =
11297 {
11298 -1, /* PRE_V4. */
11299 -1, /* V4. */
11300 T(V6K), /* V4T. */
11301 T(V6K), /* V5T. */
11302 T(V6K), /* V5TE. */
11303 T(V6K), /* V5TEJ. */
11304 T(V6K), /* V6. */
11305 T(V6KZ), /* V6KZ. */
11306 T(V7), /* V6T2. */
11307 T(V6K), /* V6K. */
11308 T(V7), /* V7. */
11309 T(V6S_M), /* V6_M. */
11310 T(V6S_M) /* V6S_M. */
11311 };
11312 const int v7e_m[] =
11313 {
11314 -1, /* PRE_V4. */
11315 -1, /* V4. */
11316 T(V7E_M), /* V4T. */
11317 T(V7E_M), /* V5T. */
11318 T(V7E_M), /* V5TE. */
11319 T(V7E_M), /* V5TEJ. */
11320 T(V7E_M), /* V6. */
11321 T(V7E_M), /* V6KZ. */
11322 T(V7E_M), /* V6T2. */
11323 T(V7E_M), /* V6K. */
11324 T(V7E_M), /* V7. */
11325 T(V7E_M), /* V6_M. */
11326 T(V7E_M), /* V6S_M. */
11327 T(V7E_M) /* V7E_M. */
11328 };
11329 const int v4t_plus_v6_m[] =
11330 {
11331 -1, /* PRE_V4. */
11332 -1, /* V4. */
11333 T(V4T), /* V4T. */
11334 T(V5T), /* V5T. */
11335 T(V5TE), /* V5TE. */
11336 T(V5TEJ), /* V5TEJ. */
11337 T(V6), /* V6. */
11338 T(V6KZ), /* V6KZ. */
11339 T(V6T2), /* V6T2. */
11340 T(V6K), /* V6K. */
11341 T(V7), /* V7. */
11342 T(V6_M), /* V6_M. */
11343 T(V6S_M), /* V6S_M. */
11344 T(V7E_M), /* V7E_M. */
11345 T(V4T_PLUS_V6_M) /* V4T plus V6_M. */
11346 };
11347 const int *comb[] =
11348 {
11349 v6t2,
11350 v6k,
11351 v7,
11352 v6_m,
11353 v6s_m,
11354 v7e_m,
11355 /* Pseudo-architecture. */
11356 v4t_plus_v6_m
11357 };
11358
11359 /* Check we've not got a higher architecture than we know about. */
11360
11361 if (oldtag > MAX_TAG_CPU_ARCH || newtag > MAX_TAG_CPU_ARCH)
11362 {
11363 _bfd_error_handler (_("error: %B: Unknown CPU architecture"), ibfd);
11364 return -1;
11365 }
11366
11367 /* Override old tag if we have a Tag_also_compatible_with on the output. */
11368
11369 if ((oldtag == T(V6_M) && *secondary_compat_out == T(V4T))
11370 || (oldtag == T(V4T) && *secondary_compat_out == T(V6_M)))
11371 oldtag = T(V4T_PLUS_V6_M);
11372
11373 /* And override the new tag if we have a Tag_also_compatible_with on the
11374 input. */
11375
11376 if ((newtag == T(V6_M) && secondary_compat == T(V4T))
11377 || (newtag == T(V4T) && secondary_compat == T(V6_M)))
11378 newtag = T(V4T_PLUS_V6_M);
11379
11380 tagl = (oldtag < newtag) ? oldtag : newtag;
11381 result = tagh = (oldtag > newtag) ? oldtag : newtag;
11382
11383 /* Architectures before V6KZ add features monotonically. */
11384 if (tagh <= TAG_CPU_ARCH_V6KZ)
11385 return result;
11386
11387 result = comb[tagh - T(V6T2)][tagl];
11388
11389 /* Use Tag_CPU_arch == V4T and Tag_also_compatible_with (Tag_CPU_arch V6_M)
11390 as the canonical version. */
11391 if (result == T(V4T_PLUS_V6_M))
11392 {
11393 result = T(V4T);
11394 *secondary_compat_out = T(V6_M);
11395 }
11396 else
11397 *secondary_compat_out = -1;
11398
11399 if (result == -1)
11400 {
11401 _bfd_error_handler (_("error: %B: Conflicting CPU architectures %d/%d"),
11402 ibfd, oldtag, newtag);
11403 return -1;
11404 }
11405
11406 return result;
11407 #undef T
11408 }
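/* For illustration (derivable from the tables above): combining V5T with V6
   takes the early "monotonic" return and yields V6, while combining V6K with
   V6T2 goes through the comb[] lookup and yields V7.  For instance:

     int secondary = -1;
     int merged = tag_cpu_arch_combine (ibfd, TAG_CPU_ARCH_V6K, &secondary,
                                        TAG_CPU_ARCH_V6T2, -1);

   leaves merged == TAG_CPU_ARCH_V7 and secondary == -1.  */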
11409
11410 /* Query the object's attributes to see whether integer divide instructions
11411 may be present in it. */
11412 static bfd_boolean
11413 elf32_arm_attributes_accept_div (const obj_attribute *attr)
11414 {
11415 int arch = attr[Tag_CPU_arch].i;
11416 int profile = attr[Tag_CPU_arch_profile].i;
11417
11418 switch (attr[Tag_DIV_use].i)
11419 {
11420 case 0:
11421 /* Integer divide allowed if the instruction is included in the architecture. */
11422 if (arch == TAG_CPU_ARCH_V7 && (profile == 'R' || profile == 'M'))
11423 return TRUE;
11424 else if (arch >= TAG_CPU_ARCH_V7E_M)
11425 return TRUE;
11426 else
11427 return FALSE;
11428
11429 case 1:
11430 /* Integer divide explicitly prohibited. */
11431 return FALSE;
11432
11433 default:
11434 /* Unrecognised case - treat as allowing divide everywhere. */
11435 case 2:
11436 /* Integer divide allowed in ARM state. */
11437 return TRUE;
11438 }
11439 }
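/* A sketch of the rule implemented above (not additional behaviour):

     attr[Tag_DIV_use].i == 0, v7 with profile 'R' or 'M'   -> TRUE
     attr[Tag_DIV_use].i == 0, arch >= v7E-M                -> TRUE
     attr[Tag_DIV_use].i == 0, v7 with profile 'A'          -> FALSE
     attr[Tag_DIV_use].i == 1                               -> FALSE
     attr[Tag_DIV_use].i == 2 (or unrecognised)             -> TRUE  */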
11440
11441 /* Query the object's attributes to see whether integer divide instructions
11442 are forbidden in it. This is not the inverse of
11443 elf32_arm_attributes_accept_div. */
11444 static bfd_boolean
11445 elf32_arm_attributes_forbid_div (const obj_attribute *attr)
11446 {
11447 return attr[Tag_DIV_use].i == 1;
11448 }
11449
11450 /* Merge EABI object attributes from IBFD into OBFD. Raise an error if there
11451 are conflicting attributes. */
11452
11453 static bfd_boolean
11454 elf32_arm_merge_eabi_attributes (bfd *ibfd, bfd *obfd)
11455 {
11456 obj_attribute *in_attr;
11457 obj_attribute *out_attr;
11458 /* Some tags have 0 = don't care, 1 = strong requirement,
11459 2 = weak requirement. */
11460 static const int order_021[3] = {0, 2, 1};
11461 int i;
11462 bfd_boolean result = TRUE;
11463
11464 /* Skip the linker stubs file. This preserves previous behavior
11465 of accepting unknown attributes in the first input file - but
11466 is that a bug? */
11467 if (ibfd->flags & BFD_LINKER_CREATED)
11468 return TRUE;
11469
11470 if (!elf_known_obj_attributes_proc (obfd)[0].i)
11471 {
11472 /* This is the first object. Copy the attributes. */
11473 _bfd_elf_copy_obj_attributes (ibfd, obfd);
11474
11475 out_attr = elf_known_obj_attributes_proc (obfd);
11476
11477 /* Use the Tag_null value to indicate the attributes have been
11478 initialized. */
11479 out_attr[0].i = 1;
11480
11481 /* We do not output objects with Tag_MPextension_use_legacy - we move
11482 the attribute's value to Tag_MPextension_use. */
11483 if (out_attr[Tag_MPextension_use_legacy].i != 0)
11484 {
11485 if (out_attr[Tag_MPextension_use].i != 0
11486 && out_attr[Tag_MPextension_use_legacy].i
11487 != out_attr[Tag_MPextension_use].i)
11488 {
11489 _bfd_error_handler
11490 (_("Error: %B has both the current and legacy "
11491 "Tag_MPextension_use attributes"), ibfd);
11492 result = FALSE;
11493 }
11494
11495 out_attr[Tag_MPextension_use] =
11496 out_attr[Tag_MPextension_use_legacy];
11497 out_attr[Tag_MPextension_use_legacy].type = 0;
11498 out_attr[Tag_MPextension_use_legacy].i = 0;
11499 }
11500
11501 return result;
11502 }
11503
11504 in_attr = elf_known_obj_attributes_proc (ibfd);
11505 out_attr = elf_known_obj_attributes_proc (obfd);
11506 /* This needs to happen before Tag_ABI_FP_number_model is merged. */
11507 if (in_attr[Tag_ABI_VFP_args].i != out_attr[Tag_ABI_VFP_args].i)
11508 {
11509 /* Ignore mismatches if the object doesn't use floating point. */
11510 if (out_attr[Tag_ABI_FP_number_model].i == 0)
11511 out_attr[Tag_ABI_VFP_args].i = in_attr[Tag_ABI_VFP_args].i;
11512 else if (in_attr[Tag_ABI_FP_number_model].i != 0)
11513 {
11514 _bfd_error_handler
11515 (_("error: %B uses VFP register arguments, %B does not"),
11516 in_attr[Tag_ABI_VFP_args].i ? ibfd : obfd,
11517 in_attr[Tag_ABI_VFP_args].i ? obfd : ibfd);
11518 result = FALSE;
11519 }
11520 }
11521
11522 for (i = LEAST_KNOWN_OBJ_ATTRIBUTE; i < NUM_KNOWN_OBJ_ATTRIBUTES; i++)
11523 {
11524 /* Merge this attribute with existing attributes. */
11525 switch (i)
11526 {
11527 case Tag_CPU_raw_name:
11528 case Tag_CPU_name:
11529 /* These are merged after Tag_CPU_arch. */
11530 break;
11531
11532 case Tag_ABI_optimization_goals:
11533 case Tag_ABI_FP_optimization_goals:
11534 /* Use the first value seen. */
11535 break;
11536
11537 case Tag_CPU_arch:
11538 {
11539 int secondary_compat = -1, secondary_compat_out = -1;
11540 unsigned int saved_out_attr = out_attr[i].i;
11541 static const char *name_table[] = {
11542 /* These aren't real CPU names, but we can't guess
11543 that from the architecture version alone. */
11544 "Pre v4",
11545 "ARM v4",
11546 "ARM v4T",
11547 "ARM v5T",
11548 "ARM v5TE",
11549 "ARM v5TEJ",
11550 "ARM v6",
11551 "ARM v6KZ",
11552 "ARM v6T2",
11553 "ARM v6K",
11554 "ARM v7",
11555 "ARM v6-M",
11556 "ARM v6S-M"
11557 };
11558
11559 /* Merge Tag_CPU_arch and Tag_also_compatible_with. */
11560 secondary_compat = get_secondary_compatible_arch (ibfd);
11561 secondary_compat_out = get_secondary_compatible_arch (obfd);
11562 out_attr[i].i = tag_cpu_arch_combine (ibfd, out_attr[i].i,
11563 &secondary_compat_out,
11564 in_attr[i].i,
11565 secondary_compat);
11566 set_secondary_compatible_arch (obfd, secondary_compat_out);
11567
11568 /* Merge Tag_CPU_name and Tag_CPU_raw_name. */
11569 if (out_attr[i].i == saved_out_attr)
11570 ; /* Leave the names alone. */
11571 else if (out_attr[i].i == in_attr[i].i)
11572 {
11573 /* The output architecture has been changed to match the
11574 input architecture. Use the input names. */
11575 out_attr[Tag_CPU_name].s = in_attr[Tag_CPU_name].s
11576 ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_name].s)
11577 : NULL;
11578 out_attr[Tag_CPU_raw_name].s = in_attr[Tag_CPU_raw_name].s
11579 ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_raw_name].s)
11580 : NULL;
11581 }
11582 else
11583 {
11584 out_attr[Tag_CPU_name].s = NULL;
11585 out_attr[Tag_CPU_raw_name].s = NULL;
11586 }
11587
11588 /* If we still don't have a value for Tag_CPU_name,
11589 make one up now. Tag_CPU_raw_name remains blank. */
11590 if (out_attr[Tag_CPU_name].s == NULL
11591 && out_attr[i].i < ARRAY_SIZE (name_table))
11592 out_attr[Tag_CPU_name].s =
11593 _bfd_elf_attr_strdup (obfd, name_table[out_attr[i].i]);
11594 }
11595 break;
11596
11597 case Tag_ARM_ISA_use:
11598 case Tag_THUMB_ISA_use:
11599 case Tag_WMMX_arch:
11600 case Tag_Advanced_SIMD_arch:
11601 /* ??? Do Advanced_SIMD (NEON) and WMMX conflict? */
11602 case Tag_ABI_FP_rounding:
11603 case Tag_ABI_FP_exceptions:
11604 case Tag_ABI_FP_user_exceptions:
11605 case Tag_ABI_FP_number_model:
11606 case Tag_FP_HP_extension:
11607 case Tag_CPU_unaligned_access:
11608 case Tag_T2EE_use:
11609 case Tag_MPextension_use:
11610 /* Use the largest value specified. */
11611 if (in_attr[i].i > out_attr[i].i)
11612 out_attr[i].i = in_attr[i].i;
11613 break;
11614
11615 case Tag_ABI_align_preserved:
11616 case Tag_ABI_PCS_RO_data:
11617 /* Use the smallest value specified. */
11618 if (in_attr[i].i < out_attr[i].i)
11619 out_attr[i].i = in_attr[i].i;
11620 break;
11621
11622 case Tag_ABI_align_needed:
11623 if ((in_attr[i].i > 0 || out_attr[i].i > 0)
11624 && (in_attr[Tag_ABI_align_preserved].i == 0
11625 || out_attr[Tag_ABI_align_preserved].i == 0))
11626 {
11627 /* This error message should be enabled once all non-conformant
11628 binaries in the toolchain have had the attributes set
11629 properly.
11630 _bfd_error_handler
11631 (_("error: %B: 8-byte data alignment conflicts with %B"),
11632 obfd, ibfd);
11633 result = FALSE; */
11634 }
11635 /* Fall through. */
11636 case Tag_ABI_FP_denormal:
11637 case Tag_ABI_PCS_GOT_use:
11638 /* Use the "greatest" from the sequence 0, 2, 1, or the largest
11639 value if greater than 2 (for future-proofing). */
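              /* For example (using the order_021[] table above): merging 1
                 (strong requirement) with 2 (weak requirement) keeps 1, and
                 merging 0 (don't care) with any value keeps the other value.  */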
11640 if ((in_attr[i].i > 2 && in_attr[i].i > out_attr[i].i)
11641 || (in_attr[i].i <= 2 && out_attr[i].i <= 2
11642 && order_021[in_attr[i].i] > order_021[out_attr[i].i]))
11643 out_attr[i].i = in_attr[i].i;
11644 break;
11645
11646 case Tag_Virtualization_use:
11647 /* The virtualization tag effectively stores two bits of
11648 information: the intended use of TrustZone (in bit 0), and the
11649 intended use of Virtualization (in bit 1). */
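          /* For example, merging a value of 1 (TrustZone used) with a value
             of 2 (Virtualization Extensions used) yields 3, meaning both.  */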
11650 if (out_attr[i].i == 0)
11651 out_attr[i].i = in_attr[i].i;
11652 else if (in_attr[i].i != 0
11653 && in_attr[i].i != out_attr[i].i)
11654 {
11655 if (in_attr[i].i <= 3 && out_attr[i].i <= 3)
11656 out_attr[i].i = 3;
11657 else
11658 {
11659 _bfd_error_handler
11660 (_("error: %B: unable to merge virtualization attributes "
11661 "with %B"),
11662 obfd, ibfd);
11663 result = FALSE;
11664 }
11665 }
11666 break;
11667
11668 case Tag_CPU_arch_profile:
11669 if (out_attr[i].i != in_attr[i].i)
11670 {
11671 /* 0 will merge with anything.
11672 'A' and 'S' merge to 'A'.
11673 'R' and 'S' merge to 'R'.
11674 'M' and 'A|R|S' is an error. */
11675 if (out_attr[i].i == 0
11676 || (out_attr[i].i == 'S'
11677 && (in_attr[i].i == 'A' || in_attr[i].i == 'R')))
11678 out_attr[i].i = in_attr[i].i;
11679 else if (in_attr[i].i == 0
11680 || (in_attr[i].i == 'S'
11681 && (out_attr[i].i == 'A' || out_attr[i].i == 'R')))
11682 ; /* Do nothing. */
11683 else
11684 {
11685 _bfd_error_handler
11686 (_("error: %B: Conflicting architecture profiles %c/%c"),
11687 ibfd,
11688 in_attr[i].i ? in_attr[i].i : '0',
11689 out_attr[i].i ? out_attr[i].i : '0');
11690 result = FALSE;
11691 }
11692 }
11693 break;
11694 case Tag_FP_arch:
11695 {
11696 /* Tag_ABI_HardFP_use is handled along with Tag_FP_arch since
11697 the meaning of Tag_ABI_HardFP_use depends on Tag_FP_arch
11698 when it's 0. It might mean absence of FP hardware if
11699 Tag_FP_arch is zero, otherwise it is effectively SP + DP. */
11700
11701 static const struct
11702 {
11703 int ver;
11704 int regs;
11705 } vfp_versions[7] =
11706 {
11707 {0, 0},
11708 {1, 16},
11709 {2, 16},
11710 {3, 32},
11711 {3, 16},
11712 {4, 32},
11713 {4, 16}
11714 };
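            /* For example (per the table above): merging Tag_FP_arch 4
               (ver 3, 16 regs) with 5 (ver 4, 32 regs) takes the superset,
               ver 4 with 32 regs, which maps back to the value 5.  */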
11715 int ver;
11716 int regs;
11717 int newval;
11718
11719 /* If the output has no requirement about FP hardware,
11720 follow the requirement of the input. */
11721 if (out_attr[i].i == 0)
11722 {
11723 BFD_ASSERT (out_attr[Tag_ABI_HardFP_use].i == 0);
11724 out_attr[i].i = in_attr[i].i;
11725 out_attr[Tag_ABI_HardFP_use].i
11726 = in_attr[Tag_ABI_HardFP_use].i;
11727 break;
11728 }
11729 /* If the input has no requirement about FP hardware, do
11730 nothing. */
11731 else if (in_attr[i].i == 0)
11732 {
11733 BFD_ASSERT (in_attr[Tag_ABI_HardFP_use].i == 0);
11734 break;
11735 }
11736
11737 /* Both the input and the output have nonzero Tag_FP_arch.
11738 So Tag_ABI_HardFP_use is (SP & DP) when it's zero. */
11739
11740 /* If both the input and the output have zero Tag_ABI_HardFP_use,
11741 do nothing. */
11742 if (in_attr[Tag_ABI_HardFP_use].i == 0
11743 && out_attr[Tag_ABI_HardFP_use].i == 0)
11744 ;
11745 /* If the input and the output have different Tag_ABI_HardFP_use,
11746 the combination of them is 3 (SP & DP). */
11747 else if (in_attr[Tag_ABI_HardFP_use].i
11748 != out_attr[Tag_ABI_HardFP_use].i)
11749 out_attr[Tag_ABI_HardFP_use].i = 3;
11750
11751 /* Now we can handle Tag_FP_arch. */
11752
11753 /* Values greater than 6 aren't defined, so just pick the
11754 biggest. */
11755 if (in_attr[i].i > 6 && in_attr[i].i > out_attr[i].i)
11756 {
11757 out_attr[i] = in_attr[i];
11758 break;
11759 }
11760 /* The output uses the superset of input features
11761 (ISA version) and registers. */
11762 ver = vfp_versions[in_attr[i].i].ver;
11763 if (ver < vfp_versions[out_attr[i].i].ver)
11764 ver = vfp_versions[out_attr[i].i].ver;
11765 regs = vfp_versions[in_attr[i].i].regs;
11766 if (regs < vfp_versions[out_attr[i].i].regs)
11767 regs = vfp_versions[out_attr[i].i].regs;
11768 /* This assumes all possible supersets are also valid
11769 options. */
11770 for (newval = 6; newval > 0; newval--)
11771 {
11772 if (regs == vfp_versions[newval].regs
11773 && ver == vfp_versions[newval].ver)
11774 break;
11775 }
11776 out_attr[i].i = newval;
11777 }
11778 break;
11779 case Tag_PCS_config:
11780 if (out_attr[i].i == 0)
11781 out_attr[i].i = in_attr[i].i;
11782 else if (in_attr[i].i != 0 && out_attr[i].i != in_attr[i].i)
11783 {
11784 /* It's sometimes ok to mix different configs, so this is only
11785 a warning. */
11786 _bfd_error_handler
11787 (_("Warning: %B: Conflicting platform configuration"), ibfd);
11788 }
11789 break;
11790 case Tag_ABI_PCS_R9_use:
11791 if (in_attr[i].i != out_attr[i].i
11792 && out_attr[i].i != AEABI_R9_unused
11793 && in_attr[i].i != AEABI_R9_unused)
11794 {
11795 _bfd_error_handler
11796 (_("error: %B: Conflicting use of R9"), ibfd);
11797 result = FALSE;
11798 }
11799 if (out_attr[i].i == AEABI_R9_unused)
11800 out_attr[i].i = in_attr[i].i;
11801 break;
11802 case Tag_ABI_PCS_RW_data:
11803 if (in_attr[i].i == AEABI_PCS_RW_data_SBrel
11804 && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_SB
11805 && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_unused)
11806 {
11807 _bfd_error_handler
11808 (_("error: %B: SB relative addressing conflicts with use of R9"),
11809 ibfd);
11810 result = FALSE;
11811 }
11812 /* Use the smallest value specified. */
11813 if (in_attr[i].i < out_attr[i].i)
11814 out_attr[i].i = in_attr[i].i;
11815 break;
11816 case Tag_ABI_PCS_wchar_t:
11817 if (out_attr[i].i && in_attr[i].i && out_attr[i].i != in_attr[i].i
11818 && !elf_arm_tdata (obfd)->no_wchar_size_warning)
11819 {
11820 _bfd_error_handler
11821 (_("warning: %B uses %u-byte wchar_t yet the output is to use %u-byte wchar_t; use of wchar_t values across objects may fail"),
11822 ibfd, in_attr[i].i, out_attr[i].i);
11823 }
11824 else if (in_attr[i].i && !out_attr[i].i)
11825 out_attr[i].i = in_attr[i].i;
11826 break;
11827 case Tag_ABI_enum_size:
11828 if (in_attr[i].i != AEABI_enum_unused)
11829 {
11830 if (out_attr[i].i == AEABI_enum_unused
11831 || out_attr[i].i == AEABI_enum_forced_wide)
11832 {
11833 /* The existing object is compatible with anything.
11834 Use whatever requirements the new object has. */
11835 out_attr[i].i = in_attr[i].i;
11836 }
11837 else if (in_attr[i].i != AEABI_enum_forced_wide
11838 && out_attr[i].i != in_attr[i].i
11839 && !elf_arm_tdata (obfd)->no_enum_size_warning)
11840 {
11841 static const char *aeabi_enum_names[] =
11842 { "", "variable-size", "32-bit", "" };
11843 const char *in_name =
11844 in_attr[i].i < ARRAY_SIZE(aeabi_enum_names)
11845 ? aeabi_enum_names[in_attr[i].i]
11846 : "<unknown>";
11847 const char *out_name =
11848 out_attr[i].i < ARRAY_SIZE(aeabi_enum_names)
11849 ? aeabi_enum_names[out_attr[i].i]
11850 : "<unknown>";
11851 _bfd_error_handler
11852 (_("warning: %B uses %s enums yet the output is to use %s enums; use of enum values across objects may fail"),
11853 ibfd, in_name, out_name);
11854 }
11855 }
11856 break;
11857 case Tag_ABI_VFP_args:
11858 /* Already done. */
11859 break;
11860 case Tag_ABI_WMMX_args:
11861 if (in_attr[i].i != out_attr[i].i)
11862 {
11863 _bfd_error_handler
11864 (_("error: %B uses iWMMXt register arguments, %B does not"),
11865 ibfd, obfd);
11866 result = FALSE;
11867 }
11868 break;
11869 case Tag_compatibility:
11870 /* Merged in target-independent code. */
11871 break;
11872 case Tag_ABI_HardFP_use:
11873 /* This is handled along with Tag_FP_arch. */
11874 break;
11875 case Tag_ABI_FP_16bit_format:
11876 if (in_attr[i].i != 0 && out_attr[i].i != 0)
11877 {
11878 if (in_attr[i].i != out_attr[i].i)
11879 {
11880 _bfd_error_handler
11881 (_("error: fp16 format mismatch between %B and %B"),
11882 ibfd, obfd);
11883 result = FALSE;
11884 }
11885 }
11886 if (in_attr[i].i != 0)
11887 out_attr[i].i = in_attr[i].i;
11888 break;
11889
11890 case Tag_DIV_use:
11891 /* A value of zero on input means that the divide instruction may
11892 be used if available in the base architecture as specified via
11893 Tag_CPU_arch and Tag_CPU_arch_profile. A value of 1 means that
11894 the user did not want divide instructions. A value of 2
11895 explicitly means that divide instructions were allowed in ARM
11896 and Thumb state. */
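          /* For example: an input that forbids divide (1) merged into an
             output whose attributes do not already accept divide forces the
             output to 1, while an input of 2 always propagates 2.  */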
11897 if (in_attr[i].i == out_attr[i].i)
11898 /* Do nothing. */ ;
11899 else if (elf32_arm_attributes_forbid_div (in_attr)
11900 && !elf32_arm_attributes_accept_div (out_attr))
11901 out_attr[i].i = 1;
11902 else if (elf32_arm_attributes_forbid_div (out_attr)
11903 && elf32_arm_attributes_accept_div (in_attr))
11904 out_attr[i].i = in_attr[i].i;
11905 else if (in_attr[i].i == 2)
11906 out_attr[i].i = in_attr[i].i;
11907 break;
11908
11909 case Tag_MPextension_use_legacy:
11910 /* We don't output objects with Tag_MPextension_use_legacy - we
11911 move the value to Tag_MPextension_use. */
11912 if (in_attr[i].i != 0 && in_attr[Tag_MPextension_use].i != 0)
11913 {
11914 if (in_attr[Tag_MPextension_use].i != in_attr[i].i)
11915 {
11916 _bfd_error_handler
11917 (_("%B has has both the current and legacy "
11918 "Tag_MPextension_use attributes"),
11919 ibfd);
11920 result = FALSE;
11921 }
11922 }
11923
11924 if (in_attr[i].i > out_attr[Tag_MPextension_use].i)
11925 out_attr[Tag_MPextension_use] = in_attr[i];
11926
11927 break;
11928
11929 case Tag_nodefaults:
11930 /* This tag is set if it exists, but the value is unused (and is
11931 typically zero). We don't actually need to do anything here -
11932 the merge happens automatically when the type flags are merged
11933 below. */
11934 break;
11935 case Tag_also_compatible_with:
11936 /* Already done in Tag_CPU_arch. */
11937 break;
11938 case Tag_conformance:
11939 /* Keep the attribute if it matches. Throw it away otherwise.
11940 No attribute means no claim to conform. */
11941 if (!in_attr[i].s || !out_attr[i].s
11942 || strcmp (in_attr[i].s, out_attr[i].s) != 0)
11943 out_attr[i].s = NULL;
11944 break;
11945
11946 default:
11947 result
11948 = result && _bfd_elf_merge_unknown_attribute_low (ibfd, obfd, i);
11949 }
11950
11951 /* If out_attr was copied from in_attr then it won't have a type yet. */
11952 if (in_attr[i].type && !out_attr[i].type)
11953 out_attr[i].type = in_attr[i].type;
11954 }
11955
11956 /* Merge Tag_compatibility attributes and any common GNU ones. */
11957 if (!_bfd_elf_merge_object_attributes (ibfd, obfd))
11958 return FALSE;
11959
11960 /* Check for any attributes not known on ARM. */
11961 result &= _bfd_elf_merge_unknown_attribute_list (ibfd, obfd);
11962
11963 return result;
11964 }
11965
11966
11967 /* Return TRUE if the two EABI versions are compatible. */
11968
11969 static bfd_boolean
11970 elf32_arm_versions_compatible (unsigned iver, unsigned over)
11971 {
11972 /* v4 and v5 are the same spec before and after it was released,
11973 so allow mixing them. */
11974 if ((iver == EF_ARM_EABI_VER4 && over == EF_ARM_EABI_VER5)
11975 || (iver == EF_ARM_EABI_VER5 && over == EF_ARM_EABI_VER4))
11976 return TRUE;
11977
11978 return (iver == over);
11979 }
11980
11981 /* Merge backend specific data from an object file to the output
11982 object file when linking. */
11983
11984 static bfd_boolean
11985 elf32_arm_merge_private_bfd_data (bfd * ibfd, bfd * obfd);
11986
11987 /* Display the flags field. */
11988
11989 static bfd_boolean
11990 elf32_arm_print_private_bfd_data (bfd *abfd, void * ptr)
11991 {
11992 FILE * file = (FILE *) ptr;
11993 unsigned long flags;
11994
11995 BFD_ASSERT (abfd != NULL && ptr != NULL);
11996
11997 /* Print normal ELF private data. */
11998 _bfd_elf_print_private_bfd_data (abfd, ptr);
11999
12000 flags = elf_elfheader (abfd)->e_flags;
12001 /* Ignore init flag - it may not be set, despite the flags field
12002 containing valid data. */
12003
12004 /* xgettext:c-format */
12005 fprintf (file, _("private flags = %lx:"), elf_elfheader (abfd)->e_flags);
12006
12007 switch (EF_ARM_EABI_VERSION (flags))
12008 {
12009 case EF_ARM_EABI_UNKNOWN:
12010 /* The following flag bits are GNU extensions and not part of the
12011 official ARM ELF extended ABI. Hence they are only decoded if
12012 the EABI version is not set. */
12013 if (flags & EF_ARM_INTERWORK)
12014 fprintf (file, _(" [interworking enabled]"));
12015
12016 if (flags & EF_ARM_APCS_26)
12017 fprintf (file, " [APCS-26]");
12018 else
12019 fprintf (file, " [APCS-32]");
12020
12021 if (flags & EF_ARM_VFP_FLOAT)
12022 fprintf (file, _(" [VFP float format]"));
12023 else if (flags & EF_ARM_MAVERICK_FLOAT)
12024 fprintf (file, _(" [Maverick float format]"));
12025 else
12026 fprintf (file, _(" [FPA float format]"));
12027
12028 if (flags & EF_ARM_APCS_FLOAT)
12029 fprintf (file, _(" [floats passed in float registers]"));
12030
12031 if (flags & EF_ARM_PIC)
12032 fprintf (file, _(" [position independent]"));
12033
12034 if (flags & EF_ARM_NEW_ABI)
12035 fprintf (file, _(" [new ABI]"));
12036
12037 if (flags & EF_ARM_OLD_ABI)
12038 fprintf (file, _(" [old ABI]"));
12039
12040 if (flags & EF_ARM_SOFT_FLOAT)
12041 fprintf (file, _(" [software FP]"));
12042
12043 flags &= ~(EF_ARM_INTERWORK | EF_ARM_APCS_26 | EF_ARM_APCS_FLOAT
12044 | EF_ARM_PIC | EF_ARM_NEW_ABI | EF_ARM_OLD_ABI
12045 | EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT
12046 | EF_ARM_MAVERICK_FLOAT);
12047 break;
12048
12049 case EF_ARM_EABI_VER1:
12050 fprintf (file, _(" [Version1 EABI]"));
12051
12052 if (flags & EF_ARM_SYMSARESORTED)
12053 fprintf (file, _(" [sorted symbol table]"));
12054 else
12055 fprintf (file, _(" [unsorted symbol table]"));
12056
12057 flags &= ~ EF_ARM_SYMSARESORTED;
12058 break;
12059
12060 case EF_ARM_EABI_VER2:
12061 fprintf (file, _(" [Version2 EABI]"));
12062
12063 if (flags & EF_ARM_SYMSARESORTED)
12064 fprintf (file, _(" [sorted symbol table]"));
12065 else
12066 fprintf (file, _(" [unsorted symbol table]"));
12067
12068 if (flags & EF_ARM_DYNSYMSUSESEGIDX)
12069 fprintf (file, _(" [dynamic symbols use segment index]"));
12070
12071 if (flags & EF_ARM_MAPSYMSFIRST)
12072 fprintf (file, _(" [mapping symbols precede others]"));
12073
12074 flags &= ~(EF_ARM_SYMSARESORTED | EF_ARM_DYNSYMSUSESEGIDX
12075 | EF_ARM_MAPSYMSFIRST);
12076 break;
12077
12078 case EF_ARM_EABI_VER3:
12079 fprintf (file, _(" [Version3 EABI]"));
12080 break;
12081
12082 case EF_ARM_EABI_VER4:
12083 fprintf (file, _(" [Version4 EABI]"));
12084 goto eabi;
12085
12086 case EF_ARM_EABI_VER5:
12087 fprintf (file, _(" [Version5 EABI]"));
12088 eabi:
12089 if (flags & EF_ARM_BE8)
12090 fprintf (file, _(" [BE8]"));
12091
12092 if (flags & EF_ARM_LE8)
12093 fprintf (file, _(" [LE8]"));
12094
12095 flags &= ~(EF_ARM_LE8 | EF_ARM_BE8);
12096 break;
12097
12098 default:
12099 fprintf (file, _(" <EABI version unrecognised>"));
12100 break;
12101 }
12102
12103 flags &= ~ EF_ARM_EABIMASK;
12104
12105 if (flags & EF_ARM_RELEXEC)
12106 fprintf (file, _(" [relocatable executable]"));
12107
12108 if (flags & EF_ARM_HASENTRY)
12109 fprintf (file, _(" [has entry point]"));
12110
12111 flags &= ~ (EF_ARM_RELEXEC | EF_ARM_HASENTRY);
12112
12113 if (flags)
12114 fprintf (file, _("<Unrecognised flag bits set>"));
12115
12116 fputc ('\n', file);
12117
12118 return TRUE;
12119 }
12120
12121 static int
12122 elf32_arm_get_symbol_type (Elf_Internal_Sym * elf_sym, int type)
12123 {
12124 switch (ELF_ST_TYPE (elf_sym->st_info))
12125 {
12126 case STT_ARM_TFUNC:
12127 return ELF_ST_TYPE (elf_sym->st_info);
12128
12129 case STT_ARM_16BIT:
12130 /* If the symbol is not an object, return the STT_ARM_16BIT flag.
12131 This allows us to distinguish between data used by Thumb instructions
12132 and non-data (which is probably code) inside Thumb regions of an
12133 executable. */
12134 if (type != STT_OBJECT && type != STT_TLS)
12135 return ELF_ST_TYPE (elf_sym->st_info);
12136 break;
12137
12138 default:
12139 break;
12140 }
12141
12142 return type;
12143 }
12144
12145 static asection *
12146 elf32_arm_gc_mark_hook (asection *sec,
12147 struct bfd_link_info *info,
12148 Elf_Internal_Rela *rel,
12149 struct elf_link_hash_entry *h,
12150 Elf_Internal_Sym *sym)
12151 {
12152 if (h != NULL)
12153 switch (ELF32_R_TYPE (rel->r_info))
12154 {
12155 case R_ARM_GNU_VTINHERIT:
12156 case R_ARM_GNU_VTENTRY:
12157 return NULL;
12158 }
12159
12160 return _bfd_elf_gc_mark_hook (sec, info, rel, h, sym);
12161 }
12162
12163 /* Update the got entry reference counts for the section being removed. */
12164
12165 static bfd_boolean
12166 elf32_arm_gc_sweep_hook (bfd * abfd,
12167 struct bfd_link_info * info,
12168 asection * sec,
12169 const Elf_Internal_Rela * relocs)
12170 {
12171 Elf_Internal_Shdr *symtab_hdr;
12172 struct elf_link_hash_entry **sym_hashes;
12173 bfd_signed_vma *local_got_refcounts;
12174 const Elf_Internal_Rela *rel, *relend;
12175 struct elf32_arm_link_hash_table * globals;
12176
12177 if (info->relocatable)
12178 return TRUE;
12179
12180 globals = elf32_arm_hash_table (info);
12181 if (globals == NULL)
12182 return FALSE;
12183
12184 elf_section_data (sec)->local_dynrel = NULL;
12185
12186 symtab_hdr = & elf_symtab_hdr (abfd);
12187 sym_hashes = elf_sym_hashes (abfd);
12188 local_got_refcounts = elf_local_got_refcounts (abfd);
12189
12190 check_use_blx (globals);
12191
12192 relend = relocs + sec->reloc_count;
12193 for (rel = relocs; rel < relend; rel++)
12194 {
12195 unsigned long r_symndx;
12196 struct elf_link_hash_entry *h = NULL;
12197 struct elf32_arm_link_hash_entry *eh;
12198 int r_type;
12199 bfd_boolean call_reloc_p;
12200 bfd_boolean may_become_dynamic_p;
12201 bfd_boolean may_need_local_target_p;
12202 union gotplt_union *root_plt;
12203 struct arm_plt_info *arm_plt;
12204
12205 r_symndx = ELF32_R_SYM (rel->r_info);
12206 if (r_symndx >= symtab_hdr->sh_info)
12207 {
12208 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
12209 while (h->root.type == bfd_link_hash_indirect
12210 || h->root.type == bfd_link_hash_warning)
12211 h = (struct elf_link_hash_entry *) h->root.u.i.link;
12212 }
12213 eh = (struct elf32_arm_link_hash_entry *) h;
12214
12215 call_reloc_p = FALSE;
12216 may_become_dynamic_p = FALSE;
12217 may_need_local_target_p = FALSE;
12218
12219 r_type = ELF32_R_TYPE (rel->r_info);
12220 r_type = arm_real_reloc_type (globals, r_type);
12221 switch (r_type)
12222 {
12223 case R_ARM_GOT32:
12224 case R_ARM_GOT_PREL:
12225 case R_ARM_TLS_GD32:
12226 case R_ARM_TLS_IE32:
12227 if (h != NULL)
12228 {
12229 if (h->got.refcount > 0)
12230 h->got.refcount -= 1;
12231 }
12232 else if (local_got_refcounts != NULL)
12233 {
12234 if (local_got_refcounts[r_symndx] > 0)
12235 local_got_refcounts[r_symndx] -= 1;
12236 }
12237 break;
12238
12239 case R_ARM_TLS_LDM32:
12240 globals->tls_ldm_got.refcount -= 1;
12241 break;
12242
12243 case R_ARM_PC24:
12244 case R_ARM_PLT32:
12245 case R_ARM_CALL:
12246 case R_ARM_JUMP24:
12247 case R_ARM_PREL31:
12248 case R_ARM_THM_CALL:
12249 case R_ARM_THM_JUMP24:
12250 case R_ARM_THM_JUMP19:
12251 call_reloc_p = TRUE;
12252 may_need_local_target_p = TRUE;
12253 break;
12254
12255 case R_ARM_ABS12:
12256 if (!globals->vxworks_p)
12257 {
12258 may_need_local_target_p = TRUE;
12259 break;
12260 }
12261 /* Fall through. */
12262 case R_ARM_ABS32:
12263 case R_ARM_ABS32_NOI:
12264 case R_ARM_REL32:
12265 case R_ARM_REL32_NOI:
12266 case R_ARM_MOVW_ABS_NC:
12267 case R_ARM_MOVT_ABS:
12268 case R_ARM_MOVW_PREL_NC:
12269 case R_ARM_MOVT_PREL:
12270 case R_ARM_THM_MOVW_ABS_NC:
12271 case R_ARM_THM_MOVT_ABS:
12272 case R_ARM_THM_MOVW_PREL_NC:
12273 case R_ARM_THM_MOVT_PREL:
12274 /* Should the interworking branches be here also? */
12275 if ((info->shared || globals->root.is_relocatable_executable)
12276 && (sec->flags & SEC_ALLOC) != 0)
12277 {
12278 if (h == NULL
12279 && (r_type == R_ARM_REL32 || r_type == R_ARM_REL32_NOI))
12280 {
12281 call_reloc_p = TRUE;
12282 may_need_local_target_p = TRUE;
12283 }
12284 else
12285 may_become_dynamic_p = TRUE;
12286 }
12287 else
12288 may_need_local_target_p = TRUE;
12289 break;
12290
12291 default:
12292 break;
12293 }
12294
12295 if (may_need_local_target_p
12296 && elf32_arm_get_plt_info (abfd, eh, r_symndx, &root_plt, &arm_plt))
12297 {
12298 /* If PLT refcount book-keeping is wrong and too low, we'll
12299 see a zero value (going to -1) for the root PLT reference
12300 count. */
12301 if (root_plt->refcount >= 0)
12302 {
12303 BFD_ASSERT (root_plt->refcount != 0);
12304 root_plt->refcount -= 1;
12305 }
12306 else
12307 /* A value of -1 means the symbol has become local, either because
12308 it was forced local or because a hidden definition was seen.
12309 Any other negative value is an error. */
12310 BFD_ASSERT (root_plt->refcount == -1);
12311
12312 if (!call_reloc_p)
12313 arm_plt->noncall_refcount--;
12314
12315 if (r_type == R_ARM_THM_CALL)
12316 arm_plt->maybe_thumb_refcount--;
12317
12318 if (r_type == R_ARM_THM_JUMP24
12319 || r_type == R_ARM_THM_JUMP19)
12320 arm_plt->thumb_refcount--;
12321 }
12322
12323 if (may_become_dynamic_p)
12324 {
12325 struct elf_dyn_relocs **pp;
12326 struct elf_dyn_relocs *p;
12327
12328 if (h != NULL)
12329 pp = &(eh->dyn_relocs);
12330 else
12331 {
12332 Elf_Internal_Sym *isym;
12333
12334 isym = bfd_sym_from_r_symndx (&globals->sym_cache,
12335 abfd, r_symndx);
12336 if (isym == NULL)
12337 return FALSE;
12338 pp = elf32_arm_get_local_dynreloc_list (abfd, r_symndx, isym);
12339 if (pp == NULL)
12340 return FALSE;
12341 }
12342 for (; (p = *pp) != NULL; pp = &p->next)
12343 if (p->sec == sec)
12344 {
12345 /* Everything must go for SEC. */
12346 *pp = p->next;
12347 break;
12348 }
12349 }
12350 }
12351
12352 return TRUE;
12353 }
12354
12355 /* Look through the relocs for a section during the first phase. */
12356
12357 static bfd_boolean
12358 elf32_arm_check_relocs (bfd *abfd, struct bfd_link_info *info,
12359 asection *sec, const Elf_Internal_Rela *relocs)
12360 {
12361 Elf_Internal_Shdr *symtab_hdr;
12362 struct elf_link_hash_entry **sym_hashes;
12363 const Elf_Internal_Rela *rel;
12364 const Elf_Internal_Rela *rel_end;
12365 bfd *dynobj;
12366 asection *sreloc;
12367 struct elf32_arm_link_hash_table *htab;
12368 bfd_boolean call_reloc_p;
12369 bfd_boolean may_become_dynamic_p;
12370 bfd_boolean may_need_local_target_p;
12371 unsigned long nsyms;
12372
12373 if (info->relocatable)
12374 return TRUE;
12375
12376 BFD_ASSERT (is_arm_elf (abfd));
12377
12378 htab = elf32_arm_hash_table (info);
12379 if (htab == NULL)
12380 return FALSE;
12381
12382 sreloc = NULL;
12383
12384 /* Create dynamic sections for relocatable executables so that we can
12385 copy relocations. */
12386 if (htab->root.is_relocatable_executable
12387 && ! htab->root.dynamic_sections_created)
12388 {
12389 if (! _bfd_elf_link_create_dynamic_sections (abfd, info))
12390 return FALSE;
12391 }
12392
12393 if (htab->root.dynobj == NULL)
12394 htab->root.dynobj = abfd;
12395 if (!create_ifunc_sections (info))
12396 return FALSE;
12397
12398 dynobj = htab->root.dynobj;
12399
12400 symtab_hdr = & elf_symtab_hdr (abfd);
12401 sym_hashes = elf_sym_hashes (abfd);
12402 nsyms = NUM_SHDR_ENTRIES (symtab_hdr);
12403
12404 rel_end = relocs + sec->reloc_count;
12405 for (rel = relocs; rel < rel_end; rel++)
12406 {
12407 Elf_Internal_Sym *isym;
12408 struct elf_link_hash_entry *h;
12409 struct elf32_arm_link_hash_entry *eh;
12410 unsigned long r_symndx;
12411 int r_type;
12412
12413 r_symndx = ELF32_R_SYM (rel->r_info);
12414 r_type = ELF32_R_TYPE (rel->r_info);
12415 r_type = arm_real_reloc_type (htab, r_type);
12416
12417 if (r_symndx >= nsyms
12418 /* PR 9934: It is possible to have relocations that do not
12419 refer to symbols, thus it is also possible to have an
12420 object file containing relocations but no symbol table. */
12421 && (r_symndx > STN_UNDEF || nsyms > 0))
12422 {
12423 (*_bfd_error_handler) (_("%B: bad symbol index: %d"), abfd,
12424 r_symndx);
12425 return FALSE;
12426 }
12427
12428 h = NULL;
12429 isym = NULL;
12430 if (nsyms > 0)
12431 {
12432 if (r_symndx < symtab_hdr->sh_info)
12433 {
12434 /* A local symbol. */
12435 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
12436 abfd, r_symndx);
12437 if (isym == NULL)
12438 return FALSE;
12439 }
12440 else
12441 {
12442 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
12443 while (h->root.type == bfd_link_hash_indirect
12444 || h->root.type == bfd_link_hash_warning)
12445 h = (struct elf_link_hash_entry *) h->root.u.i.link;
12446 }
12447 }
12448
12449 eh = (struct elf32_arm_link_hash_entry *) h;
12450
12451 call_reloc_p = FALSE;
12452 may_become_dynamic_p = FALSE;
12453 may_need_local_target_p = FALSE;
12454
12455 /* Could be done earlier, if h were already available. */
12456 r_type = elf32_arm_tls_transition (info, r_type, h);
12457 switch (r_type)
12458 {
12459 case R_ARM_GOT32:
12460 case R_ARM_GOT_PREL:
12461 case R_ARM_TLS_GD32:
12462 case R_ARM_TLS_IE32:
12463 case R_ARM_TLS_GOTDESC:
12464 case R_ARM_TLS_DESCSEQ:
12465 case R_ARM_THM_TLS_DESCSEQ:
12466 case R_ARM_TLS_CALL:
12467 case R_ARM_THM_TLS_CALL:
12468 /* This symbol requires a global offset table entry. */
12469 {
12470 int tls_type, old_tls_type;
12471
12472 switch (r_type)
12473 {
12474 case R_ARM_TLS_GD32: tls_type = GOT_TLS_GD; break;
12475
12476 case R_ARM_TLS_IE32: tls_type = GOT_TLS_IE; break;
12477
12478 case R_ARM_TLS_GOTDESC:
12479 case R_ARM_TLS_CALL: case R_ARM_THM_TLS_CALL:
12480 case R_ARM_TLS_DESCSEQ: case R_ARM_THM_TLS_DESCSEQ:
12481 tls_type = GOT_TLS_GDESC; break;
12482
12483 default: tls_type = GOT_NORMAL; break;
12484 }
12485
12486 if (h != NULL)
12487 {
12488 h->got.refcount++;
12489 old_tls_type = elf32_arm_hash_entry (h)->tls_type;
12490 }
12491 else
12492 {
12493 /* This is a global offset table entry for a local symbol. */
12494 if (!elf32_arm_allocate_local_sym_info (abfd))
12495 return FALSE;
12496 elf_local_got_refcounts (abfd)[r_symndx] += 1;
12497 old_tls_type = elf32_arm_local_got_tls_type (abfd) [r_symndx];
12498 }
12499
12500 /* If a variable is accessed with both tls methods, two
12501 slots may be created. */
12502 if (GOT_TLS_GD_ANY_P (old_tls_type)
12503 && GOT_TLS_GD_ANY_P (tls_type))
12504 tls_type |= old_tls_type;
12505
12506 /* We will already have issued an error message if there
12507 is a TLS/non-TLS mismatch, based on the symbol
12508 type. So just combine any TLS types needed. */
12509 if (old_tls_type != GOT_UNKNOWN && old_tls_type != GOT_NORMAL
12510 && tls_type != GOT_NORMAL)
12511 tls_type |= old_tls_type;
12512
12513 /* If the symbol is accessed with both the IE and GDESC
12514 methods, we are able to relax. Turn off the GDESC flag,
12515 without disturbing any other kind of TLS type that
12516 may be involved. */
12517 if ((tls_type & GOT_TLS_IE) && (tls_type & GOT_TLS_GDESC))
12518 tls_type &= ~GOT_TLS_GDESC;
12519
12520 if (old_tls_type != tls_type)
12521 {
12522 if (h != NULL)
12523 elf32_arm_hash_entry (h)->tls_type = tls_type;
12524 else
12525 elf32_arm_local_got_tls_type (abfd) [r_symndx] = tls_type;
12526 }
12527 }
12528 /* Fall through. */
12529
12530 case R_ARM_TLS_LDM32:
12531 if (r_type == R_ARM_TLS_LDM32)
12532 htab->tls_ldm_got.refcount++;
12533 /* Fall through. */
12534
12535 case R_ARM_GOTOFF32:
12536 case R_ARM_GOTPC:
12537 if (htab->root.sgot == NULL
12538 && !create_got_section (htab->root.dynobj, info))
12539 return FALSE;
12540 break;
12541
12542 case R_ARM_PC24:
12543 case R_ARM_PLT32:
12544 case R_ARM_CALL:
12545 case R_ARM_JUMP24:
12546 case R_ARM_PREL31:
12547 case R_ARM_THM_CALL:
12548 case R_ARM_THM_JUMP24:
12549 case R_ARM_THM_JUMP19:
12550 call_reloc_p = TRUE;
12551 may_need_local_target_p = TRUE;
12552 break;
12553
12554 case R_ARM_ABS12:
12555 /* VxWorks uses dynamic R_ARM_ABS12 relocations for
12556 ldr __GOTT_INDEX__ offsets. */
12557 if (!htab->vxworks_p)
12558 {
12559 may_need_local_target_p = TRUE;
12560 break;
12561 }
12562 /* Fall through. */
12563
12564 case R_ARM_MOVW_ABS_NC:
12565 case R_ARM_MOVT_ABS:
12566 case R_ARM_THM_MOVW_ABS_NC:
12567 case R_ARM_THM_MOVT_ABS:
12568 if (info->shared)
12569 {
12570 (*_bfd_error_handler)
12571 (_("%B: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"),
12572 abfd, elf32_arm_howto_table_1[r_type].name,
12573 (h) ? h->root.root.string : "a local symbol");
12574 bfd_set_error (bfd_error_bad_value);
12575 return FALSE;
12576 }
12577
12578 /* Fall through. */
12579 case R_ARM_ABS32:
12580 case R_ARM_ABS32_NOI:
12581 case R_ARM_REL32:
12582 case R_ARM_REL32_NOI:
12583 case R_ARM_MOVW_PREL_NC:
12584 case R_ARM_MOVT_PREL:
12585 case R_ARM_THM_MOVW_PREL_NC:
12586 case R_ARM_THM_MOVT_PREL:
12587
12588 /* Should the interworking branches be listed here? */
12589 if ((info->shared || htab->root.is_relocatable_executable)
12590 && (sec->flags & SEC_ALLOC) != 0)
12591 {
12592 if (h == NULL
12593 && (r_type == R_ARM_REL32 || r_type == R_ARM_REL32_NOI))
12594 {
12595 /* In shared libraries and relocatable executables,
12596 we treat local relative references as calls;
12597 see the related SYMBOL_CALLS_LOCAL code in
12598 allocate_dynrelocs. */
12599 call_reloc_p = TRUE;
12600 may_need_local_target_p = TRUE;
12601 }
12602 else
12603 /* We are creating a shared library or relocatable
12604 executable, and this is a reloc against a global symbol,
12605 or a non-PC-relative reloc against a local symbol.
12606 We may need to copy the reloc into the output. */
12607 may_become_dynamic_p = TRUE;
12608 }
12609 else
12610 may_need_local_target_p = TRUE;
12611 break;
12612
12613 /* This relocation describes the C++ object vtable hierarchy.
12614 Reconstruct it for later use during GC. */
12615 case R_ARM_GNU_VTINHERIT:
12616 if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
12617 return FALSE;
12618 break;
12619
12620 /* This relocation describes which C++ vtable entries are actually
12621 used. Record for later use during GC. */
12622 case R_ARM_GNU_VTENTRY:
12623 BFD_ASSERT (h != NULL);
12624 if (h != NULL
12625 && !bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_offset))
12626 return FALSE;
12627 break;
12628 }
12629
12630 if (h != NULL)
12631 {
12632 if (call_reloc_p)
12633 /* We may need a .plt entry if the function this reloc
12634 refers to is in a different object, regardless of the
12635 symbol's type. We can't tell for sure yet, because
12636 something later might force the symbol local. */
12637 h->needs_plt = 1;
12638 else if (may_need_local_target_p)
12639 /* If this reloc is in a read-only section, we might
12640 need a copy reloc. We can't check reliably at this
12641 stage whether the section is read-only, as input
12642 sections have not yet been mapped to output sections.
12643 Tentatively set the flag for now, and correct in
12644 adjust_dynamic_symbol. */
12645 h->non_got_ref = 1;
12646 }
12647
12648 if (may_need_local_target_p
12649 && (h != NULL || ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC))
12650 {
12651 union gotplt_union *root_plt;
12652 struct arm_plt_info *arm_plt;
12653 struct arm_local_iplt_info *local_iplt;
12654
12655 if (h != NULL)
12656 {
12657 root_plt = &h->plt;
12658 arm_plt = &eh->plt;
12659 }
12660 else
12661 {
12662 local_iplt = elf32_arm_create_local_iplt (abfd, r_symndx);
12663 if (local_iplt == NULL)
12664 return FALSE;
12665 root_plt = &local_iplt->root;
12666 arm_plt = &local_iplt->arm;
12667 }
12668
12669 /* If the symbol is a function that doesn't bind locally,
12670 this relocation will need a PLT entry. */
12671 root_plt->refcount += 1;
12672
12673 if (!call_reloc_p)
12674 arm_plt->noncall_refcount++;
12675
12676 /* It's too early to use htab->use_blx here, so we have to
12677 record possible blx references separately from
12678 relocs that definitely need a thumb stub. */
12679
12680 if (r_type == R_ARM_THM_CALL)
12681 arm_plt->maybe_thumb_refcount += 1;
12682
12683 if (r_type == R_ARM_THM_JUMP24
12684 || r_type == R_ARM_THM_JUMP19)
12685 arm_plt->thumb_refcount += 1;
12686 }
12687
12688 if (may_become_dynamic_p)
12689 {
12690 struct elf_dyn_relocs *p, **head;
12691
12692 /* Create a reloc section in dynobj. */
12693 if (sreloc == NULL)
12694 {
12695 sreloc = _bfd_elf_make_dynamic_reloc_section
12696 (sec, dynobj, 2, abfd, ! htab->use_rel);
12697
12698 if (sreloc == NULL)
12699 return FALSE;
12700
12701 /* BPABI objects never have dynamic relocations mapped. */
12702 if (htab->symbian_p)
12703 {
12704 flagword flags;
12705
12706 flags = bfd_get_section_flags (dynobj, sreloc);
12707 flags &= ~(SEC_LOAD | SEC_ALLOC);
12708 bfd_set_section_flags (dynobj, sreloc, flags);
12709 }
12710 }
12711
12712 /* If this is a global symbol, count the number of
12713 relocations we need for this symbol. */
12714 if (h != NULL)
12715 head = &((struct elf32_arm_link_hash_entry *) h)->dyn_relocs;
12716 else
12717 {
12718 head = elf32_arm_get_local_dynreloc_list (abfd, r_symndx, isym);
12719 if (head == NULL)
12720 return FALSE;
12721 }
12722
12723 p = *head;
12724 if (p == NULL || p->sec != sec)
12725 {
12726 bfd_size_type amt = sizeof *p;
12727
12728 p = (struct elf_dyn_relocs *) bfd_alloc (htab->root.dynobj, amt);
12729 if (p == NULL)
12730 return FALSE;
12731 p->next = *head;
12732 *head = p;
12733 p->sec = sec;
12734 p->count = 0;
12735 p->pc_count = 0;
12736 }
12737
12738 if (r_type == R_ARM_REL32 || r_type == R_ARM_REL32_NOI)
12739 p->pc_count += 1;
12740 p->count += 1;
12741 }
12742 }
12743
12744 return TRUE;
12745 }
12746
12747 /* Unwinding tables are not referenced directly. This pass marks them as
12748 required if the corresponding code section is marked. */
12749
12750 static bfd_boolean
12751 elf32_arm_gc_mark_extra_sections (struct bfd_link_info *info,
12752 elf_gc_mark_hook_fn gc_mark_hook)
12753 {
12754 bfd *sub;
12755 Elf_Internal_Shdr **elf_shdrp;
12756 bfd_boolean again;
12757
12758 _bfd_elf_gc_mark_extra_sections (info, gc_mark_hook);
12759
12760 /* Marking EH data may cause additional code sections to be marked,
12761 requiring multiple passes. */
12762 again = TRUE;
12763 while (again)
12764 {
12765 again = FALSE;
12766 for (sub = info->input_bfds; sub != NULL; sub = sub->link_next)
12767 {
12768 asection *o;
12769
12770 if (! is_arm_elf (sub))
12771 continue;
12772
12773 elf_shdrp = elf_elfsections (sub);
12774 for (o = sub->sections; o != NULL; o = o->next)
12775 {
12776 Elf_Internal_Shdr *hdr;
12777
12778 hdr = &elf_section_data (o)->this_hdr;
12779 if (hdr->sh_type == SHT_ARM_EXIDX
12780 && hdr->sh_link
12781 && hdr->sh_link < elf_numsections (sub)
12782 && !o->gc_mark
12783 && elf_shdrp[hdr->sh_link]->bfd_section->gc_mark)
12784 {
12785 again = TRUE;
12786 if (!_bfd_elf_gc_mark (info, o, gc_mark_hook))
12787 return FALSE;
12788 }
12789 }
12790 }
12791 }
12792
12793 return TRUE;
12794 }
12795
12796 /* Treat mapping symbols as special target symbols. */
12797
12798 static bfd_boolean
12799 elf32_arm_is_target_special_symbol (bfd * abfd ATTRIBUTE_UNUSED, asymbol * sym)
12800 {
12801 return bfd_is_arm_special_symbol_name (sym->name,
12802 BFD_ARM_SPECIAL_SYM_TYPE_ANY);
12803 }
12804
12805 /* This is a copy of elf_find_function() from elf.c except that
12806 ARM mapping symbols are ignored when looking for function names
12807 and STT_ARM_TFUNC is considered to be a function type. */
12808
12809 static bfd_boolean
12810 arm_elf_find_function (bfd * abfd ATTRIBUTE_UNUSED,
12811 asection * section,
12812 asymbol ** symbols,
12813 bfd_vma offset,
12814 const char ** filename_ptr,
12815 const char ** functionname_ptr)
12816 {
12817 const char * filename = NULL;
12818 asymbol * func = NULL;
12819 bfd_vma low_func = 0;
12820 asymbol ** p;
12821
12822 for (p = symbols; *p != NULL; p++)
12823 {
12824 elf_symbol_type *q;
12825
12826 q = (elf_symbol_type *) *p;
12827
12828 switch (ELF_ST_TYPE (q->internal_elf_sym.st_info))
12829 {
12830 default:
12831 break;
12832 case STT_FILE:
12833 filename = bfd_asymbol_name (&q->symbol);
12834 break;
12835 case STT_FUNC:
12836 case STT_ARM_TFUNC:
12837 case STT_NOTYPE:
12838 /* Skip mapping symbols. */
12839 if ((q->symbol.flags & BSF_LOCAL)
12840 && bfd_is_arm_special_symbol_name (q->symbol.name,
12841 BFD_ARM_SPECIAL_SYM_TYPE_ANY))
12842 continue;
12843 /* Fall through. */
12844 if (bfd_get_section (&q->symbol) == section
12845 && q->symbol.value >= low_func
12846 && q->symbol.value <= offset)
12847 {
12848 func = (asymbol *) q;
12849 low_func = q->symbol.value;
12850 }
12851 break;
12852 }
12853 }
12854
12855 if (func == NULL)
12856 return FALSE;
12857
12858 if (filename_ptr)
12859 *filename_ptr = filename;
12860 if (functionname_ptr)
12861 *functionname_ptr = bfd_asymbol_name (func);
12862
12863 return TRUE;
12864 }
12865
12866
12867 /* Find the nearest line to a particular section and offset, for error
12868 reporting. This code is a duplicate of the code in elf.c, except
12869 that it uses arm_elf_find_function. */
12870
12871 static bfd_boolean
12872 elf32_arm_find_nearest_line (bfd * abfd,
12873 asection * section,
12874 asymbol ** symbols,
12875 bfd_vma offset,
12876 const char ** filename_ptr,
12877 const char ** functionname_ptr,
12878 unsigned int * line_ptr)
12879 {
12880 bfd_boolean found = FALSE;
12881
12882 /* We skip _bfd_dwarf1_find_nearest_line since no known ARM toolchain uses it. */
12883
12884 if (_bfd_dwarf2_find_nearest_line (abfd, dwarf_debug_sections,
12885 section, symbols, offset,
12886 filename_ptr, functionname_ptr,
12887 line_ptr, 0,
12888 & elf_tdata (abfd)->dwarf2_find_line_info))
12889 {
12890 if (!*functionname_ptr)
12891 arm_elf_find_function (abfd, section, symbols, offset,
12892 *filename_ptr ? NULL : filename_ptr,
12893 functionname_ptr);
12894
12895 return TRUE;
12896 }
12897
12898 if (! _bfd_stab_section_find_nearest_line (abfd, symbols, section, offset,
12899 & found, filename_ptr,
12900 functionname_ptr, line_ptr,
12901 & elf_tdata (abfd)->line_info))
12902 return FALSE;
12903
12904 if (found && (*functionname_ptr || *line_ptr))
12905 return TRUE;
12906
12907 if (symbols == NULL)
12908 return FALSE;
12909
12910 if (! arm_elf_find_function (abfd, section, symbols, offset,
12911 filename_ptr, functionname_ptr))
12912 return FALSE;
12913
12914 *line_ptr = 0;
12915 return TRUE;
12916 }
12917
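/* Report the source location of the caller of an inlined function,
   using the DWARF 2 information recorded by a previous call to
   elf32_arm_find_nearest_line.  */
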
12918 static bfd_boolean
12919 elf32_arm_find_inliner_info (bfd * abfd,
12920 const char ** filename_ptr,
12921 const char ** functionname_ptr,
12922 unsigned int * line_ptr)
12923 {
12924 bfd_boolean found;
12925 found = _bfd_dwarf2_find_inliner_info (abfd, filename_ptr,
12926 functionname_ptr, line_ptr,
12927 & elf_tdata (abfd)->dwarf2_find_line_info);
12928 return found;
12929 }
12930
12931 /* Adjust a symbol defined by a dynamic object and referenced by a
12932 regular object. The current definition is in some section of the
12933 dynamic object, but we're not including those sections. We have to
12934 change the definition to something the rest of the link can
12935 understand. */
12936
12937 static bfd_boolean
12938 elf32_arm_adjust_dynamic_symbol (struct bfd_link_info * info,
12939 struct elf_link_hash_entry * h)
12940 {
12941 bfd * dynobj;
12942 asection * s;
12943 struct elf32_arm_link_hash_entry * eh;
12944 struct elf32_arm_link_hash_table *globals;
12945
12946 globals = elf32_arm_hash_table (info);
12947 if (globals == NULL)
12948 return FALSE;
12949
12950 dynobj = elf_hash_table (info)->dynobj;
12951
12952 /* Make sure we know what is going on here. */
12953 BFD_ASSERT (dynobj != NULL
12954 && (h->needs_plt
12955 || h->type == STT_GNU_IFUNC
12956 || h->u.weakdef != NULL
12957 || (h->def_dynamic
12958 && h->ref_regular
12959 && !h->def_regular)));
12960
12961 eh = (struct elf32_arm_link_hash_entry *) h;
12962
12963 /* If this is a function, put it in the procedure linkage table. We
12964 will fill in the contents of the procedure linkage table later,
12965 when we know the address of the .got section. */
12966 if (h->type == STT_FUNC || h->type == STT_GNU_IFUNC || h->needs_plt)
12967 {
12968 /* Calls to STT_GNU_IFUNC symbols always use a PLT, even if the
12969 symbol binds locally. */
12970 if (h->plt.refcount <= 0
12971 || (h->type != STT_GNU_IFUNC
12972 && (SYMBOL_CALLS_LOCAL (info, h)
12973 || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
12974 && h->root.type == bfd_link_hash_undefweak))))
12975 {
12976 /* This case can occur if we saw a PLT32 reloc in an input
12977 file, but the symbol was never referred to by a dynamic
12978 object, or if all references were garbage collected. In
12979 such a case, we don't actually need to build a procedure
12980 linkage table, and we can just do a PC24 reloc instead. */
12981 h->plt.offset = (bfd_vma) -1;
12982 eh->plt.thumb_refcount = 0;
12983 eh->plt.maybe_thumb_refcount = 0;
12984 eh->plt.noncall_refcount = 0;
12985 h->needs_plt = 0;
12986 }
12987
12988 return TRUE;
12989 }
12990 else
12991 {
12992 /* It's possible that we incorrectly decided a .plt reloc was
12993 needed for an R_ARM_PC24 or similar reloc to a non-function sym
12994 in check_relocs. We can't decide accurately between function
12995 and non-function syms in check_relocs; objects loaded later in
12996 the link may change h->type. So fix it now. */
12997 h->plt.offset = (bfd_vma) -1;
12998 eh->plt.thumb_refcount = 0;
12999 eh->plt.maybe_thumb_refcount = 0;
13000 eh->plt.noncall_refcount = 0;
13001 }
13002
13003 /* If this is a weak symbol, and there is a real definition, the
13004 processor independent code will have arranged for us to see the
13005 real definition first, and we can just use the same value. */
13006 if (h->u.weakdef != NULL)
13007 {
13008 BFD_ASSERT (h->u.weakdef->root.type == bfd_link_hash_defined
13009 || h->u.weakdef->root.type == bfd_link_hash_defweak);
13010 h->root.u.def.section = h->u.weakdef->root.u.def.section;
13011 h->root.u.def.value = h->u.weakdef->root.u.def.value;
13012 return TRUE;
13013 }
13014
13015 /* If there are no non-GOT references, we do not need a copy
13016 relocation. */
13017 if (!h->non_got_ref)
13018 return TRUE;
13019
13020 /* This is a reference to a symbol defined by a dynamic object which
13021 is not a function. */
13022
13023 /* If we are creating a shared library, we must presume that the
13024 only references to the symbol are via the global offset table.
13025 For such cases we need not do anything here; the relocations will
13026 be handled correctly by relocate_section. Relocatable executables
13027 can reference data in shared objects directly, so we don't need to
13028 do anything here. */
13029 if (info->shared || globals->root.is_relocatable_executable)
13030 return TRUE;
13031
13032 /* We must allocate the symbol in our .dynbss section, which will
13033 become part of the .bss section of the executable. There will be
13034 an entry for this symbol in the .dynsym section. The dynamic
13035 object will contain position independent code, so all references
13036 from the dynamic object to this symbol will go through the global
13037 offset table. The dynamic linker will use the .dynsym entry to
13038 determine the address it must put in the global offset table, so
13039 both the dynamic object and the regular object will refer to the
13040 same memory location for the variable. */
13041 s = bfd_get_section_by_name (dynobj, ".dynbss");
13042 BFD_ASSERT (s != NULL);
13043
13044 /* We must generate a R_ARM_COPY reloc to tell the dynamic linker to
13045 copy the initial value out of the dynamic object and into the
13046 runtime process image. We need to remember the offset into the
13047 .rel(a).bss section we are going to use. */
13048 if ((h->root.u.def.section->flags & SEC_ALLOC) != 0 && h->size != 0)
13049 {
13050 asection *srel;
13051
13052 srel = bfd_get_section_by_name (dynobj, RELOC_SECTION (globals, ".bss"));
13053 elf32_arm_allocate_dynrelocs (info, srel, 1);
13054 h->needs_copy = 1;
13055 }
13056
13057 return _bfd_elf_adjust_dynamic_copy (h, s);
13058 }
13059
13060 /* Allocate space in .plt, .got and associated reloc sections for
13061 dynamic relocs. */
13062
13063 static bfd_boolean
13064 allocate_dynrelocs_for_symbol (struct elf_link_hash_entry *h, void * inf)
13065 {
13066 struct bfd_link_info *info;
13067 struct elf32_arm_link_hash_table *htab;
13068 struct elf32_arm_link_hash_entry *eh;
13069 struct elf_dyn_relocs *p;
13070
13071 if (h->root.type == bfd_link_hash_indirect)
13072 return TRUE;
13073
13074 eh = (struct elf32_arm_link_hash_entry *) h;
13075
13076 info = (struct bfd_link_info *) inf;
13077 htab = elf32_arm_hash_table (info);
13078 if (htab == NULL)
13079 return FALSE;
13080
13081 if ((htab->root.dynamic_sections_created || h->type == STT_GNU_IFUNC)
13082 && h->plt.refcount > 0)
13083 {
13084 /* Make sure this symbol is output as a dynamic symbol.
13085 Undefined weak syms won't yet be marked as dynamic. */
13086 if (h->dynindx == -1
13087 && !h->forced_local)
13088 {
13089 if (! bfd_elf_link_record_dynamic_symbol (info, h))
13090 return FALSE;
13091 }
13092
13093 /* If the call in the PLT entry binds locally, the associated
13094 GOT entry should use an R_ARM_IRELATIVE relocation instead of
13095 the usual R_ARM_JUMP_SLOT. Put it in the .iplt section rather
13096 than the .plt section. */
13097 if (h->type == STT_GNU_IFUNC && SYMBOL_CALLS_LOCAL (info, h))
13098 {
13099 eh->is_iplt = 1;
13100 if (eh->plt.noncall_refcount == 0
13101 && SYMBOL_REFERENCES_LOCAL (info, h))
13102 /* All non-call references can be resolved directly.
13103 This means that they can (and in some cases, must)
13104 resolve directly to the run-time target, rather than
13105 to the PLT. That in turn means that any .got entry
13106 would be equal to the .igot.plt entry, so there's
13107 no point having both. */
13108 h->got.refcount = 0;
13109 }
13110
13111 if (info->shared
13112 || eh->is_iplt
13113 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
13114 {
13115 elf32_arm_allocate_plt_entry (info, eh->is_iplt, &h->plt, &eh->plt);
13116
13117 /* If this symbol is not defined in a regular file, and we are
13118 not generating a shared library, then set the symbol to this
13119 location in the .plt. This is required to make function
13120 pointers compare as equal between the normal executable and
13121 the shared library. */
13122 if (! info->shared
13123 && !h->def_regular)
13124 {
13125 h->root.u.def.section = htab->root.splt;
13126 h->root.u.def.value = h->plt.offset;
13127
13128 /* Make sure the function is not marked as Thumb, in case
13129 it is the target of an ABS32 relocation, which will
13130 point to the PLT entry. */
13131 h->target_internal = ST_BRANCH_TO_ARM;
13132 }
13133
13134 htab->next_tls_desc_index++;
13135
13136 /* VxWorks executables have a second set of relocations for
13137 each PLT entry. They go in a separate relocation section,
13138 which is processed by the kernel loader. */
13139 if (htab->vxworks_p && !info->shared)
13140 {
13141 /* There is a relocation for the initial PLT entry:
13142 an R_ARM_32 relocation for _GLOBAL_OFFSET_TABLE_. */
13143 if (h->plt.offset == htab->plt_header_size)
13144 elf32_arm_allocate_dynrelocs (info, htab->srelplt2, 1);
13145
13146 /* There are two extra relocations for each subsequent
13147 PLT entry: an R_ARM_32 relocation for the GOT entry,
13148 and an R_ARM_32 relocation for the PLT entry. */
13149 elf32_arm_allocate_dynrelocs (info, htab->srelplt2, 2);
13150 }
13151 }
13152 else
13153 {
13154 h->plt.offset = (bfd_vma) -1;
13155 h->needs_plt = 0;
13156 }
13157 }
13158 else
13159 {
13160 h->plt.offset = (bfd_vma) -1;
13161 h->needs_plt = 0;
13162 }
13163
13164 eh = (struct elf32_arm_link_hash_entry *) h;
13165 eh->tlsdesc_got = (bfd_vma) -1;
13166
13167 if (h->got.refcount > 0)
13168 {
13169 asection *s;
13170 bfd_boolean dyn;
13171 int tls_type = elf32_arm_hash_entry (h)->tls_type;
13172 int indx;
13173
13174 /* Make sure this symbol is output as a dynamic symbol.
13175 Undefined weak syms won't yet be marked as dynamic. */
13176 if (h->dynindx == -1
13177 && !h->forced_local)
13178 {
13179 if (! bfd_elf_link_record_dynamic_symbol (info, h))
13180 return FALSE;
13181 }
13182
13183 if (!htab->symbian_p)
13184 {
13185 s = htab->root.sgot;
13186 h->got.offset = s->size;
13187
13188 if (tls_type == GOT_UNKNOWN)
13189 abort ();
13190
13191 if (tls_type == GOT_NORMAL)
13192 /* Non-TLS symbols need one GOT slot. */
13193 s->size += 4;
13194 else
13195 {
13196 if (tls_type & GOT_TLS_GDESC)
13197 {
13198 /* R_ARM_TLS_DESC needs 2 GOT slots. */
13199 eh->tlsdesc_got
13200 = (htab->root.sgotplt->size
13201 - elf32_arm_compute_jump_table_size (htab));
13202 htab->root.sgotplt->size += 8;
13203 h->got.offset = (bfd_vma) -2;
13204 /* plt.got_offset needs to know there's a TLS_DESC
13205 reloc in the middle of .got.plt. */
13206 htab->num_tls_desc++;
13207 }
13208
13209 if (tls_type & GOT_TLS_GD)
13210 {
13211 /* R_ARM_TLS_GD32 needs 2 consecutive GOT slots. If
13212 the symbol is both GD and GDESC, got.offset may
13213 have been overwritten. */
13214 h->got.offset = s->size;
13215 s->size += 8;
13216 }
13217
13218 if (tls_type & GOT_TLS_IE)
13219 /* R_ARM_TLS_IE32 needs one GOT slot. */
13220 s->size += 4;
13221 }
13222
13223 dyn = htab->root.dynamic_sections_created;
13224
13225 indx = 0;
13226 if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
13227 && (!info->shared
13228 || !SYMBOL_REFERENCES_LOCAL (info, h)))
13229 indx = h->dynindx;
13230
13231 if (tls_type != GOT_NORMAL
13232 && (info->shared || indx != 0)
13233 && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
13234 || h->root.type != bfd_link_hash_undefweak))
13235 {
13236 if (tls_type & GOT_TLS_IE)
13237 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
13238
13239 if (tls_type & GOT_TLS_GD)
13240 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
13241
13242 if (tls_type & GOT_TLS_GDESC)
13243 {
13244 elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1);
13245 /* GDESC needs a trampoline to jump to. */
13246 htab->tls_trampoline = -1;
13247 }
13248
13249 /* Only GD needs it. GDESC just emits one relocation per
13250 2 entries. */
13251 if ((tls_type & GOT_TLS_GD) && indx != 0)
13252 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
13253 }
13254 else if (!SYMBOL_REFERENCES_LOCAL (info, h))
13255 {
13256 if (htab->root.dynamic_sections_created)
13257 /* Reserve room for the GOT entry's R_ARM_GLOB_DAT relocation. */
13258 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
13259 }
13260 else if (h->type == STT_GNU_IFUNC
13261 && eh->plt.noncall_refcount == 0)
13262 /* No non-call references resolve to the STT_GNU_IFUNC's PLT entry;
13263 they all resolve dynamically instead. Reserve room for the
13264 GOT entry's R_ARM_IRELATIVE relocation. */
13265 elf32_arm_allocate_irelocs (info, htab->root.srelgot, 1);
13266 else if (info->shared)
13267 /* Reserve room for the GOT entry's R_ARM_RELATIVE relocation. */
13268 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
13269 }
13270 }
13271 else
13272 h->got.offset = (bfd_vma) -1;
13273
13274 /* Allocate stubs for exported Thumb functions on v4t. */
13275 if (!htab->use_blx && h->dynindx != -1
13276 && h->def_regular
13277 && h->target_internal == ST_BRANCH_TO_THUMB
13278 && ELF_ST_VISIBILITY (h->other) == STV_DEFAULT)
13279 {
13280 struct elf_link_hash_entry * th;
13281 struct bfd_link_hash_entry * bh;
13282 struct elf_link_hash_entry * myh;
13283 char name[1024];
13284 asection *s;
13285 bh = NULL;
13286 /* Create a new symbol to register the real location of the function. */
13287 s = h->root.u.def.section;
13288 sprintf (name, "__real_%s", h->root.root.string);
13289 _bfd_generic_link_add_one_symbol (info, s->owner,
13290 name, BSF_GLOBAL, s,
13291 h->root.u.def.value,
13292 NULL, TRUE, FALSE, &bh);
13293
13294 myh = (struct elf_link_hash_entry *) bh;
13295 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
13296 myh->forced_local = 1;
13297 myh->target_internal = ST_BRANCH_TO_THUMB;
13298 eh->export_glue = myh;
13299 th = record_arm_to_thumb_glue (info, h);
13300 /* Point the symbol at the stub. */
13301 h->type = ELF_ST_INFO (ELF_ST_BIND (h->type), STT_FUNC);
13302 h->target_internal = ST_BRANCH_TO_ARM;
13303 h->root.u.def.section = th->root.u.def.section;
13304 h->root.u.def.value = th->root.u.def.value & ~1;
13305 }
13306
13307 if (eh->dyn_relocs == NULL)
13308 return TRUE;
13309
13310 /* In the shared -Bsymbolic case, discard space allocated for
13311 dynamic pc-relative relocs against symbols which turn out to be
13312 defined in regular objects. For the normal shared case, discard
13313 space for pc-relative relocs that have become local due to symbol
13314 visibility changes. */
13315
13316 if (info->shared || htab->root.is_relocatable_executable)
13317 {
13318 /* The only relocs that use pc_count are R_ARM_REL32 and
13319 R_ARM_REL32_NOI, which will appear on something like
13320 ".long foo - .". We want calls to protected symbols to resolve
13321 directly to the function rather than going via the plt. If people
13322 want function pointer comparisons to work as expected then they
13323 should avoid writing assembly like ".long foo - .". */
13324 if (SYMBOL_CALLS_LOCAL (info, h))
13325 {
13326 struct elf_dyn_relocs **pp;
13327
13328 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
13329 {
13330 p->count -= p->pc_count;
13331 p->pc_count = 0;
13332 if (p->count == 0)
13333 *pp = p->next;
13334 else
13335 pp = &p->next;
13336 }
13337 }
13338
13339 if (htab->vxworks_p)
13340 {
13341 struct elf_dyn_relocs **pp;
13342
13343 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
13344 {
13345 if (strcmp (p->sec->output_section->name, ".tls_vars") == 0)
13346 *pp = p->next;
13347 else
13348 pp = &p->next;
13349 }
13350 }
13351
13352 /* Also discard relocs on undefined weak syms with non-default
13353 visibility. */
13354 if (eh->dyn_relocs != NULL
13355 && h->root.type == bfd_link_hash_undefweak)
13356 {
13357 if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
13358 eh->dyn_relocs = NULL;
13359
13360 /* Make sure undefined weak symbols are output as a dynamic
13361 symbol in PIEs. */
13362 else if (h->dynindx == -1
13363 && !h->forced_local)
13364 {
13365 if (! bfd_elf_link_record_dynamic_symbol (info, h))
13366 return FALSE;
13367 }
13368 }
13369
13370 else if (htab->root.is_relocatable_executable && h->dynindx == -1
13371 && h->root.type == bfd_link_hash_new)
13372 {
13373 /* Output absolute symbols so that we can create relocations
13374 against them. For normal symbols we output a relocation
13375 against the section that contains them. */
13376 if (! bfd_elf_link_record_dynamic_symbol (info, h))
13377 return FALSE;
13378 }
13379
13380 }
13381 else
13382 {
13383 /* For the non-shared case, discard space for relocs against
13384 symbols which turn out to need copy relocs or are not
13385 dynamic. */
13386
13387 if (!h->non_got_ref
13388 && ((h->def_dynamic
13389 && !h->def_regular)
13390 || (htab->root.dynamic_sections_created
13391 && (h->root.type == bfd_link_hash_undefweak
13392 || h->root.type == bfd_link_hash_undefined))))
13393 {
13394 /* Make sure this symbol is output as a dynamic symbol.
13395 Undefined weak syms won't yet be marked as dynamic. */
13396 if (h->dynindx == -1
13397 && !h->forced_local)
13398 {
13399 if (! bfd_elf_link_record_dynamic_symbol (info, h))
13400 return FALSE;
13401 }
13402
13403 /* If that succeeded, we know we'll be keeping all the
13404 relocs. */
13405 if (h->dynindx != -1)
13406 goto keep;
13407 }
13408
13409 eh->dyn_relocs = NULL;
13410
13411 keep: ;
13412 }
13413
13414 /* Finally, allocate space. */
13415 for (p = eh->dyn_relocs; p != NULL; p = p->next)
13416 {
13417 asection *sreloc = elf_section_data (p->sec)->sreloc;
13418 if (h->type == STT_GNU_IFUNC
13419 && eh->plt.noncall_refcount == 0
13420 && SYMBOL_REFERENCES_LOCAL (info, h))
13421 elf32_arm_allocate_irelocs (info, sreloc, p->count);
13422 else
13423 elf32_arm_allocate_dynrelocs (info, sreloc, p->count);
13424 }
13425
13426 return TRUE;
13427 }
13428
13429 /* Find any dynamic relocs that apply to read-only sections. */
13430
13431 static bfd_boolean
13432 elf32_arm_readonly_dynrelocs (struct elf_link_hash_entry * h, void * inf)
13433 {
13434 struct elf32_arm_link_hash_entry * eh;
13435 struct elf_dyn_relocs * p;
13436
13437 eh = (struct elf32_arm_link_hash_entry *) h;
13438 for (p = eh->dyn_relocs; p != NULL; p = p->next)
13439 {
13440 asection *s = p->sec;
13441
13442 if (s != NULL && (s->flags & SEC_READONLY) != 0)
13443 {
13444 struct bfd_link_info *info = (struct bfd_link_info *) inf;
13445
13446 info->flags |= DF_TEXTREL;
13447
13448 /* Not an error, just cut short the traversal. */
13449 return FALSE;
13450 }
13451 }
13452 return TRUE;
13453 }
13454
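/* Record whether code sections should be byte-swapped on output (for
   BE8 images); the flag is later reflected in the EF_ARM_BE8 header
   flag.  */
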
13455 void
13456 bfd_elf32_arm_set_byteswap_code (struct bfd_link_info *info,
13457 int byteswap_code)
13458 {
13459 struct elf32_arm_link_hash_table *globals;
13460
13461 globals = elf32_arm_hash_table (info);
13462 if (globals == NULL)
13463 return;
13464
13465 globals->byteswap_code = byteswap_code;
13466 }
13467
13468 /* Set the sizes of the dynamic sections. */
13469
13470 static bfd_boolean
13471 elf32_arm_size_dynamic_sections (bfd * output_bfd ATTRIBUTE_UNUSED,
13472 struct bfd_link_info * info)
13473 {
13474 bfd * dynobj;
13475 asection * s;
13476 bfd_boolean plt;
13477 bfd_boolean relocs;
13478 bfd *ibfd;
13479 struct elf32_arm_link_hash_table *htab;
13480
13481 htab = elf32_arm_hash_table (info);
13482 if (htab == NULL)
13483 return FALSE;
13484
13485 dynobj = elf_hash_table (info)->dynobj;
13486 BFD_ASSERT (dynobj != NULL);
13487 check_use_blx (htab);
13488
13489 if (elf_hash_table (info)->dynamic_sections_created)
13490 {
13491 /* Set the contents of the .interp section to the interpreter. */
13492 if (info->executable)
13493 {
13494 s = bfd_get_section_by_name (dynobj, ".interp");
13495 BFD_ASSERT (s != NULL);
13496 s->size = sizeof ELF_DYNAMIC_INTERPRETER;
13497 s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER;
13498 }
13499 }
13500
13501 /* Set up .got offsets for local syms, and space for local dynamic
13502 relocs. */
13503 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
13504 {
13505 bfd_signed_vma *local_got;
13506 bfd_signed_vma *end_local_got;
13507 struct arm_local_iplt_info **local_iplt_ptr, *local_iplt;
13508 char *local_tls_type;
13509 bfd_vma *local_tlsdesc_gotent;
13510 bfd_size_type locsymcount;
13511 Elf_Internal_Shdr *symtab_hdr;
13512 asection *srel;
13513 bfd_boolean is_vxworks = htab->vxworks_p;
13514 unsigned int symndx;
13515
13516 if (! is_arm_elf (ibfd))
13517 continue;
13518
13519 for (s = ibfd->sections; s != NULL; s = s->next)
13520 {
13521 struct elf_dyn_relocs *p;
13522
13523 for (p = (struct elf_dyn_relocs *)
13524 elf_section_data (s)->local_dynrel; p != NULL; p = p->next)
13525 {
13526 if (!bfd_is_abs_section (p->sec)
13527 && bfd_is_abs_section (p->sec->output_section))
13528 {
13529 /* Input section has been discarded, either because
13530 it is a copy of a linkonce section or due to
13531 linker script /DISCARD/, so we'll be discarding
13532 the relocs too. */
13533 }
13534 else if (is_vxworks
13535 && strcmp (p->sec->output_section->name,
13536 ".tls_vars") == 0)
13537 {
13538 /* Relocations in vxworks .tls_vars sections are
13539 handled specially by the loader. */
13540 }
13541 else if (p->count != 0)
13542 {
13543 srel = elf_section_data (p->sec)->sreloc;
13544 elf32_arm_allocate_dynrelocs (info, srel, p->count);
13545 if ((p->sec->output_section->flags & SEC_READONLY) != 0)
13546 info->flags |= DF_TEXTREL;
13547 }
13548 }
13549 }
13550
13551 local_got = elf_local_got_refcounts (ibfd);
13552 if (!local_got)
13553 continue;
13554
13555 symtab_hdr = & elf_symtab_hdr (ibfd);
13556 locsymcount = symtab_hdr->sh_info;
13557 end_local_got = local_got + locsymcount;
13558 local_iplt_ptr = elf32_arm_local_iplt (ibfd);
13559 local_tls_type = elf32_arm_local_got_tls_type (ibfd);
13560 local_tlsdesc_gotent = elf32_arm_local_tlsdesc_gotent (ibfd);
13561 symndx = 0;
13562 s = htab->root.sgot;
13563 srel = htab->root.srelgot;
13564 for (; local_got < end_local_got;
13565 ++local_got, ++local_iplt_ptr, ++local_tls_type,
13566 ++local_tlsdesc_gotent, ++symndx)
13567 {
13568 *local_tlsdesc_gotent = (bfd_vma) -1;
13569 local_iplt = *local_iplt_ptr;
13570 if (local_iplt != NULL)
13571 {
13572 struct elf_dyn_relocs *p;
13573
13574 if (local_iplt->root.refcount > 0)
13575 {
13576 elf32_arm_allocate_plt_entry (info, TRUE,
13577 &local_iplt->root,
13578 &local_iplt->arm);
13579 if (local_iplt->arm.noncall_refcount == 0)
13580 /* All references to the PLT are calls, so all
13581 non-call references can resolve directly to the
13582 run-time target. This means that the .got entry
13583 would be the same as the .igot.plt entry, so there's
13584 no point creating both. */
13585 *local_got = 0;
13586 }
13587 else
13588 {
13589 BFD_ASSERT (local_iplt->arm.noncall_refcount == 0);
13590 local_iplt->root.offset = (bfd_vma) -1;
13591 }
13592
13593 for (p = local_iplt->dyn_relocs; p != NULL; p = p->next)
13594 {
13595 asection *psrel;
13596
13597 psrel = elf_section_data (p->sec)->sreloc;
13598 if (local_iplt->arm.noncall_refcount == 0)
13599 elf32_arm_allocate_irelocs (info, psrel, p->count);
13600 else
13601 elf32_arm_allocate_dynrelocs (info, psrel, p->count);
13602 }
13603 }
13604 if (*local_got > 0)
13605 {
13606 Elf_Internal_Sym *isym;
13607
13608 *local_got = s->size;
13609 if (*local_tls_type & GOT_TLS_GD)
13610 /* TLS_GD relocs need an 8-byte structure in the GOT. */
13611 s->size += 8;
13612 if (*local_tls_type & GOT_TLS_GDESC)
13613 {
13614 *local_tlsdesc_gotent = htab->root.sgotplt->size
13615 - elf32_arm_compute_jump_table_size (htab);
13616 htab->root.sgotplt->size += 8;
13617 *local_got = (bfd_vma) -2;
13618 /* plt.got_offset needs to know there's a TLS_DESC
13619 reloc in the middle of .got.plt. */
13620 htab->num_tls_desc++;
13621 }
13622 if (*local_tls_type & GOT_TLS_IE)
13623 s->size += 4;
13624
13625 if (*local_tls_type & GOT_NORMAL)
13626 {
13627 /* If the symbol is both GD and GDESC, *local_got
13628 may have been overwritten. */
13629 *local_got = s->size;
13630 s->size += 4;
13631 }
13632
13633 isym = bfd_sym_from_r_symndx (&htab->sym_cache, ibfd, symndx);
13634 if (isym == NULL)
13635 return FALSE;
13636
13637 /* If all references to an STT_GNU_IFUNC PLT are calls,
13638 then all non-call references, including this GOT entry,
13639 resolve directly to the run-time target. */
13640 if (ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC
13641 && (local_iplt == NULL
13642 || local_iplt->arm.noncall_refcount == 0))
13643 elf32_arm_allocate_irelocs (info, srel, 1);
13644 else if ((info->shared && !(*local_tls_type & GOT_TLS_GDESC))
13645 || *local_tls_type & GOT_TLS_GD)
13646 elf32_arm_allocate_dynrelocs (info, srel, 1);
13647
13648 if (info->shared && *local_tls_type & GOT_TLS_GDESC)
13649 {
13650 elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1);
13651 htab->tls_trampoline = -1;
13652 }
13653 }
13654 else
13655 *local_got = (bfd_vma) -1;
13656 }
13657 }
13658
13659 if (htab->tls_ldm_got.refcount > 0)
13660 {
13661 /* Allocate two GOT entries and one dynamic relocation (if necessary)
13662 for R_ARM_TLS_LDM32 relocations. */
13663 htab->tls_ldm_got.offset = htab->root.sgot->size;
13664 htab->root.sgot->size += 8;
13665 if (info->shared)
13666 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
13667 }
13668 else
13669 htab->tls_ldm_got.offset = -1;
13670
13671 /* Allocate global sym .plt and .got entries, and space for global
13672 sym dynamic relocs. */
13673 elf_link_hash_traverse (& htab->root, allocate_dynrelocs_for_symbol, info);
13674
13675 /* Here we rummage through the found bfds to collect glue information. */
13676 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
13677 {
13678 if (! is_arm_elf (ibfd))
13679 continue;
13680
13681 /* Initialise mapping tables for code/data. */
13682 bfd_elf32_arm_init_maps (ibfd);
13683
13684 if (!bfd_elf32_arm_process_before_allocation (ibfd, info)
13685 || !bfd_elf32_arm_vfp11_erratum_scan (ibfd, info))
13686 /* xgettext:c-format */
13687 _bfd_error_handler (_("Errors encountered processing file %s"),
13688 ibfd->filename);
13689 }
13690
13691 /* Allocate space for the glue sections now that we've sized them. */
13692 bfd_elf32_arm_allocate_interworking_sections (info);
13693
13694 /* For every jump slot reserved in the sgotplt, reloc_count is
13695 incremented. However, when we reserve space for TLS descriptors,
13696 it's not incremented, so in order to compute the space reserved
13697 for them, it suffices to multiply the reloc count by the jump
13698 slot size. */
13699 if (htab->root.srelplt)
13700 htab->sgotplt_jump_table_size = elf32_arm_compute_jump_table_size (htab);
13701
13702 if (htab->tls_trampoline)
13703 {
13704 if (htab->root.splt->size == 0)
13705 htab->root.splt->size += htab->plt_header_size;
13706
13707 htab->tls_trampoline = htab->root.splt->size;
13708 htab->root.splt->size += htab->plt_entry_size;
13709
13710 /* If we're not using lazy TLS relocations, don't generate the
13711 PLT and GOT entries they require. */
13712 if (!(info->flags & DF_BIND_NOW))
13713 {
13714 htab->dt_tlsdesc_got = htab->root.sgot->size;
13715 htab->root.sgot->size += 4;
13716
13717 htab->dt_tlsdesc_plt = htab->root.splt->size;
13718 htab->root.splt->size += 4 * ARRAY_SIZE (dl_tlsdesc_lazy_trampoline);
13719 }
13720 }
13721
13722 /* The check_relocs and adjust_dynamic_symbol entry points have
13723 determined the sizes of the various dynamic sections. Allocate
13724 memory for them. */
13725 plt = FALSE;
13726 relocs = FALSE;
13727 for (s = dynobj->sections; s != NULL; s = s->next)
13728 {
13729 const char * name;
13730
13731 if ((s->flags & SEC_LINKER_CREATED) == 0)
13732 continue;
13733
13734 /* It's OK to base decisions on the section name, because none
13735 of the dynobj section names depend upon the input files. */
13736 name = bfd_get_section_name (dynobj, s);
13737
13738 if (s == htab->root.splt)
13739 {
13740 /* Remember whether there is a PLT. */
13741 plt = s->size != 0;
13742 }
13743 else if (CONST_STRNEQ (name, ".rel"))
13744 {
13745 if (s->size != 0)
13746 {
13747 /* Remember whether there are any reloc sections other
13748 than .rel(a).plt and .rela.plt.unloaded. */
13749 if (s != htab->root.srelplt && s != htab->srelplt2)
13750 relocs = TRUE;
13751
13752 /* We use the reloc_count field as a counter if we need
13753 to copy relocs into the output file. */
13754 s->reloc_count = 0;
13755 }
13756 }
13757 else if (s != htab->root.sgot
13758 && s != htab->root.sgotplt
13759 && s != htab->root.iplt
13760 && s != htab->root.igotplt
13761 && s != htab->sdynbss)
13762 {
13763 /* It's not one of our sections, so don't allocate space. */
13764 continue;
13765 }
13766
13767 if (s->size == 0)
13768 {
13769 /* If we don't need this section, strip it from the
13770 output file. This is mostly to handle .rel(a).bss and
13771 .rel(a).plt. We must create both sections in
13772 create_dynamic_sections, because they must be created
13773 before the linker maps input sections to output
13774 sections. The linker does that before
13775 adjust_dynamic_symbol is called, and it is that
13776 function which decides whether anything needs to go
13777 into these sections. */
13778 s->flags |= SEC_EXCLUDE;
13779 continue;
13780 }
13781
13782 if ((s->flags & SEC_HAS_CONTENTS) == 0)
13783 continue;
13784
13785 /* Allocate memory for the section contents. */
13786 s->contents = (unsigned char *) bfd_zalloc (dynobj, s->size);
13787 if (s->contents == NULL)
13788 return FALSE;
13789 }
13790
13791 if (elf_hash_table (info)->dynamic_sections_created)
13792 {
13793 /* Add some entries to the .dynamic section. We fill in the
13794 values later, in elf32_arm_finish_dynamic_sections, but we
13795 must add the entries now so that we get the correct size for
13796 the .dynamic section. The DT_DEBUG entry is filled in by the
13797 dynamic linker and used by the debugger. */
13798 #define add_dynamic_entry(TAG, VAL) \
13799 _bfd_elf_add_dynamic_entry (info, TAG, VAL)
13800
13801 if (info->executable)
13802 {
13803 if (!add_dynamic_entry (DT_DEBUG, 0))
13804 return FALSE;
13805 }
13806
13807 if (plt)
13808 {
13809 if ( !add_dynamic_entry (DT_PLTGOT, 0)
13810 || !add_dynamic_entry (DT_PLTRELSZ, 0)
13811 || !add_dynamic_entry (DT_PLTREL,
13812 htab->use_rel ? DT_REL : DT_RELA)
13813 || !add_dynamic_entry (DT_JMPREL, 0))
13814 return FALSE;
13815
13816 if (htab->dt_tlsdesc_plt
13817 && (!add_dynamic_entry (DT_TLSDESC_PLT, 0)
13818 || !add_dynamic_entry (DT_TLSDESC_GOT, 0)))
13819 return FALSE;
13820 }
13821
13822 if (relocs)
13823 {
13824 if (htab->use_rel)
13825 {
13826 if (!add_dynamic_entry (DT_REL, 0)
13827 || !add_dynamic_entry (DT_RELSZ, 0)
13828 || !add_dynamic_entry (DT_RELENT, RELOC_SIZE (htab)))
13829 return FALSE;
13830 }
13831 else
13832 {
13833 if (!add_dynamic_entry (DT_RELA, 0)
13834 || !add_dynamic_entry (DT_RELASZ, 0)
13835 || !add_dynamic_entry (DT_RELAENT, RELOC_SIZE (htab)))
13836 return FALSE;
13837 }
13838 }
13839
13840 /* If any dynamic relocs apply to a read-only section,
13841 then we need a DT_TEXTREL entry. */
13842 if ((info->flags & DF_TEXTREL) == 0)
13843 elf_link_hash_traverse (& htab->root, elf32_arm_readonly_dynrelocs,
13844 info);
13845
13846 if ((info->flags & DF_TEXTREL) != 0)
13847 {
13848 if (!add_dynamic_entry (DT_TEXTREL, 0))
13849 return FALSE;
13850 }
13851 if (htab->vxworks_p
13852 && !elf_vxworks_add_dynamic_entries (output_bfd, info))
13853 return FALSE;
13854 }
13855 #undef add_dynamic_entry
13856
13857 return TRUE;
13858 }
13859
13860 /* Size sections even though they're not dynamic. We use it to set up
13861 _TLS_MODULE_BASE_, if needed. */
13862
13863 static bfd_boolean
13864 elf32_arm_always_size_sections (bfd *output_bfd,
13865 struct bfd_link_info *info)
13866 {
13867 asection *tls_sec;
13868
13869 if (info->relocatable)
13870 return TRUE;
13871
13872 tls_sec = elf_hash_table (info)->tls_sec;
13873
13874 if (tls_sec)
13875 {
13876 struct elf_link_hash_entry *tlsbase;
13877
13878 tlsbase = elf_link_hash_lookup
13879 (elf_hash_table (info), "_TLS_MODULE_BASE_", TRUE, TRUE, FALSE);
13880
13881 if (tlsbase)
13882 {
13883 struct bfd_link_hash_entry *bh = NULL;
13884 const struct elf_backend_data *bed
13885 = get_elf_backend_data (output_bfd);
13886
13887 if (!(_bfd_generic_link_add_one_symbol
13888 (info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL,
13889 tls_sec, 0, NULL, FALSE,
13890 bed->collect, &bh)))
13891 return FALSE;
13892
13893 tlsbase->type = STT_TLS;
13894 tlsbase = (struct elf_link_hash_entry *)bh;
13895 tlsbase->def_regular = 1;
13896 tlsbase->other = STV_HIDDEN;
13897 (*bed->elf_backend_hide_symbol) (info, tlsbase, TRUE);
13898 }
13899 }
13900 return TRUE;
13901 }
13902
13903 /* Finish up dynamic symbol handling. We set the contents of various
13904 dynamic sections here. */
13905
13906 static bfd_boolean
13907 elf32_arm_finish_dynamic_symbol (bfd * output_bfd,
13908 struct bfd_link_info * info,
13909 struct elf_link_hash_entry * h,
13910 Elf_Internal_Sym * sym)
13911 {
13912 struct elf32_arm_link_hash_table *htab;
13913 struct elf32_arm_link_hash_entry *eh;
13914
13915 htab = elf32_arm_hash_table (info);
13916 if (htab == NULL)
13917 return FALSE;
13918
13919 eh = (struct elf32_arm_link_hash_entry *) h;
13920
13921 if (h->plt.offset != (bfd_vma) -1)
13922 {
13923 if (!eh->is_iplt)
13924 {
13925 BFD_ASSERT (h->dynindx != -1);
13926 elf32_arm_populate_plt_entry (output_bfd, info, &h->plt, &eh->plt,
13927 h->dynindx, 0);
13928 }
13929
13930 if (!h->def_regular)
13931 {
13932 /* Mark the symbol as undefined, rather than as defined in
13933 the .plt section. Leave the value alone. */
13934 sym->st_shndx = SHN_UNDEF;
13935 /* If the symbol is weak, we do need to clear the value.
13936 Otherwise, the PLT entry would provide a definition for
13937 the symbol even if the symbol wasn't defined anywhere,
13938 and so the symbol would never be NULL. */
13939 if (!h->ref_regular_nonweak)
13940 sym->st_value = 0;
13941 }
13942 else if (eh->is_iplt && eh->plt.noncall_refcount != 0)
13943 {
13944 /* At least one non-call relocation references this .iplt entry,
13945 so the .iplt entry is the function's canonical address. */
13946 sym->st_info = ELF_ST_INFO (ELF_ST_BIND (sym->st_info), STT_FUNC);
13947 sym->st_target_internal = ST_BRANCH_TO_ARM;
13948 sym->st_shndx = (_bfd_elf_section_from_bfd_section
13949 (output_bfd, htab->root.iplt->output_section));
13950 sym->st_value = (h->plt.offset
13951 + htab->root.iplt->output_section->vma
13952 + htab->root.iplt->output_offset);
13953 }
13954 }
13955
13956 if (h->needs_copy)
13957 {
13958 asection * s;
13959 Elf_Internal_Rela rel;
13960
13961 /* This symbol needs a copy reloc. Set it up. */
13962 BFD_ASSERT (h->dynindx != -1
13963 && (h->root.type == bfd_link_hash_defined
13964 || h->root.type == bfd_link_hash_defweak));
13965
13966 s = htab->srelbss;
13967 BFD_ASSERT (s != NULL);
13968
13969 rel.r_addend = 0;
13970 rel.r_offset = (h->root.u.def.value
13971 + h->root.u.def.section->output_section->vma
13972 + h->root.u.def.section->output_offset);
13973 rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_COPY);
13974 elf32_arm_add_dynreloc (output_bfd, info, s, &rel);
13975 }
13976
13977 /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute. On VxWorks,
13978 the _GLOBAL_OFFSET_TABLE_ symbol is not absolute: it is relative
13979 to the ".got" section. */
13980 if (strcmp (h->root.root.string, "_DYNAMIC") == 0
13981 || (!htab->vxworks_p && h == htab->root.hgot))
13982 sym->st_shndx = SHN_ABS;
13983
13984 return TRUE;
13985 }
13986
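/* Copy the COUNT instructions in TEMPLATE into CONTENTS, rewriting any
   'BX reg' instruction as 'MOV PC, reg' when htab->fix_v4bx says BX is
   not available.  */
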
13987 static void
13988 arm_put_trampoline (struct elf32_arm_link_hash_table *htab, bfd *output_bfd,
13989 void *contents,
13990 const unsigned long *template, unsigned count)
13991 {
13992 unsigned ix;
13993
13994 for (ix = 0; ix != count; ix++)
13995 {
13996 unsigned long insn = template[ix];
13997
13998 /* Emit mov pc,rx if bx is not permitted. */
13999 if (htab->fix_v4bx == 1 && (insn & 0x0ffffff0) == 0x012fff10)
14000 insn = (insn & 0xf000000f) | 0x01a0f000;
14001 put_arm_insn (htab, output_bfd, insn, (char *)contents + ix*4);
14002 }
14003 }
14004
14005 /* Finish up the dynamic sections. */
14006
14007 static bfd_boolean
14008 elf32_arm_finish_dynamic_sections (bfd * output_bfd, struct bfd_link_info * info)
14009 {
14010 bfd * dynobj;
14011 asection * sgot;
14012 asection * sdyn;
14013 struct elf32_arm_link_hash_table *htab;
14014
14015 htab = elf32_arm_hash_table (info);
14016 if (htab == NULL)
14017 return FALSE;
14018
14019 dynobj = elf_hash_table (info)->dynobj;
14020
14021 sgot = htab->root.sgotplt;
14022 /* A broken linker script might have discarded the dynamic sections.
14023 Catch this here so that we do not seg-fault later on. */
14024 if (sgot != NULL && bfd_is_abs_section (sgot->output_section))
14025 return FALSE;
14026 sdyn = bfd_get_section_by_name (dynobj, ".dynamic");
14027
14028 if (elf_hash_table (info)->dynamic_sections_created)
14029 {
14030 asection *splt;
14031 Elf32_External_Dyn *dyncon, *dynconend;
14032
14033 splt = htab->root.splt;
14034 BFD_ASSERT (splt != NULL && sdyn != NULL);
14035 BFD_ASSERT (htab->symbian_p || sgot != NULL);
14036
14037 dyncon = (Elf32_External_Dyn *) sdyn->contents;
14038 dynconend = (Elf32_External_Dyn *) (sdyn->contents + sdyn->size);
14039
14040 for (; dyncon < dynconend; dyncon++)
14041 {
14042 Elf_Internal_Dyn dyn;
14043 const char * name;
14044 asection * s;
14045
14046 bfd_elf32_swap_dyn_in (dynobj, dyncon, &dyn);
14047
14048 switch (dyn.d_tag)
14049 {
14050 unsigned int type;
14051
14052 default:
14053 if (htab->vxworks_p
14054 && elf_vxworks_finish_dynamic_entry (output_bfd, &dyn))
14055 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
14056 break;
14057
14058 case DT_HASH:
14059 name = ".hash";
14060 goto get_vma_if_bpabi;
14061 case DT_STRTAB:
14062 name = ".dynstr";
14063 goto get_vma_if_bpabi;
14064 case DT_SYMTAB:
14065 name = ".dynsym";
14066 goto get_vma_if_bpabi;
14067 case DT_VERSYM:
14068 name = ".gnu.version";
14069 goto get_vma_if_bpabi;
14070 case DT_VERDEF:
14071 name = ".gnu.version_d";
14072 goto get_vma_if_bpabi;
14073 case DT_VERNEED:
14074 name = ".gnu.version_r";
14075 goto get_vma_if_bpabi;
14076
14077 case DT_PLTGOT:
14078 name = ".got";
14079 goto get_vma;
14080 case DT_JMPREL:
14081 name = RELOC_SECTION (htab, ".plt");
14082 get_vma:
14083 s = bfd_get_section_by_name (output_bfd, name);
14084 BFD_ASSERT (s != NULL);
14085 if (!htab->symbian_p)
14086 dyn.d_un.d_ptr = s->vma;
14087 else
14088 /* In the BPABI, tags in the PT_DYNAMIC section point
14089 at the file offset, not the memory address, for the
14090 convenience of the post linker. */
14091 dyn.d_un.d_ptr = s->filepos;
14092 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
14093 break;
14094
14095 get_vma_if_bpabi:
14096 if (htab->symbian_p)
14097 goto get_vma;
14098 break;
14099
14100 case DT_PLTRELSZ:
14101 s = htab->root.srelplt;
14102 BFD_ASSERT (s != NULL);
14103 dyn.d_un.d_val = s->size;
14104 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
14105 break;
14106
14107 case DT_RELSZ:
14108 case DT_RELASZ:
14109 if (!htab->symbian_p)
14110 {
14111 /* My reading of the SVR4 ABI indicates that the
14112 procedure linkage table relocs (DT_JMPREL) should be
14113 included in the overall relocs (DT_REL). This is
14114 what Solaris does. However, UnixWare cannot handle
14115 that case. Therefore, we override the DT_RELSZ entry
14116 here to make it not include the JMPREL relocs. Since
14117 the linker script arranges for .rel(a).plt to follow all
14118 other relocation sections, we don't have to worry
14119 about changing the DT_REL entry. */
14120 s = htab->root.srelplt;
14121 if (s != NULL)
14122 dyn.d_un.d_val -= s->size;
14123 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
14124 break;
14125 }
14126 /* Fall through. */
14127
14128 case DT_REL:
14129 case DT_RELA:
14130 /* In the BPABI, the DT_REL tag must point at the file
14131 offset, not the VMA, of the first relocation
14132 section. So, we use code similar to that in
14133 elflink.c, but do not check for SHF_ALLOC on the
14134 relocation section, since relocation sections are
14135 never allocated under the BPABI. The comments above
14136 about UnixWare notwithstanding, we include all of the
14137 relocations here. */
14138 if (htab->symbian_p)
14139 {
14140 unsigned int i;
14141 type = ((dyn.d_tag == DT_REL || dyn.d_tag == DT_RELSZ)
14142 ? SHT_REL : SHT_RELA);
14143 dyn.d_un.d_val = 0;
14144 for (i = 1; i < elf_numsections (output_bfd); i++)
14145 {
14146 Elf_Internal_Shdr *hdr
14147 = elf_elfsections (output_bfd)[i];
14148 if (hdr->sh_type == type)
14149 {
14150 if (dyn.d_tag == DT_RELSZ
14151 || dyn.d_tag == DT_RELASZ)
14152 dyn.d_un.d_val += hdr->sh_size;
14153 else if ((ufile_ptr) hdr->sh_offset
14154 <= dyn.d_un.d_val - 1)
14155 dyn.d_un.d_val = hdr->sh_offset;
14156 }
14157 }
14158 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
14159 }
14160 break;
14161
14162 case DT_TLSDESC_PLT:
14163 s = htab->root.splt;
14164 dyn.d_un.d_ptr = (s->output_section->vma + s->output_offset
14165 + htab->dt_tlsdesc_plt);
14166 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
14167 break;
14168
14169 case DT_TLSDESC_GOT:
14170 s = htab->root.sgot;
14171 dyn.d_un.d_ptr = (s->output_section->vma + s->output_offset
14172 + htab->dt_tlsdesc_got);
14173 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
14174 break;
14175
14176 /* Set the bottom bit of DT_INIT/FINI if the
14177 corresponding function is Thumb. */
14178 case DT_INIT:
14179 name = info->init_function;
14180 goto get_sym;
14181 case DT_FINI:
14182 name = info->fini_function;
14183 get_sym:
14184 /* If it wasn't set by elf_bfd_final_link
14185 then there is nothing to adjust. */
14186 if (dyn.d_un.d_val != 0)
14187 {
14188 struct elf_link_hash_entry * eh;
14189
14190 eh = elf_link_hash_lookup (elf_hash_table (info), name,
14191 FALSE, FALSE, TRUE);
14192 if (eh != NULL && eh->target_internal == ST_BRANCH_TO_THUMB)
14193 {
14194 dyn.d_un.d_val |= 1;
14195 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
14196 }
14197 }
14198 break;
14199 }
14200 }
14201
14202 /* Fill in the first entry in the procedure linkage table. */
14203 if (splt->size > 0 && htab->plt_header_size)
14204 {
14205 const bfd_vma *plt0_entry;
14206 bfd_vma got_address, plt_address, got_displacement;
14207
14208 /* Calculate the addresses of the GOT and PLT. */
14209 got_address = sgot->output_section->vma + sgot->output_offset;
14210 plt_address = splt->output_section->vma + splt->output_offset;
14211
14212 if (htab->vxworks_p)
14213 {
14214 /* The VxWorks GOT is relocated by the dynamic linker.
14215 Therefore, we must emit relocations rather than simply
14216 computing the values now. */
14217 Elf_Internal_Rela rel;
14218
14219 plt0_entry = elf32_arm_vxworks_exec_plt0_entry;
14220 put_arm_insn (htab, output_bfd, plt0_entry[0],
14221 splt->contents + 0);
14222 put_arm_insn (htab, output_bfd, plt0_entry[1],
14223 splt->contents + 4);
14224 put_arm_insn (htab, output_bfd, plt0_entry[2],
14225 splt->contents + 8);
14226 bfd_put_32 (output_bfd, got_address, splt->contents + 12);
14227
14228 /* Generate a relocation for _GLOBAL_OFFSET_TABLE_. */
14229 rel.r_offset = plt_address + 12;
14230 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
14231 rel.r_addend = 0;
14232 SWAP_RELOC_OUT (htab) (output_bfd, &rel,
14233 htab->srelplt2->contents);
14234 }
14235 else if (htab->nacl_p)
14236 {
14237 unsigned int i;
14238
14239 got_displacement = got_address + 8 - (plt_address + 16);
14240
14241 put_arm_insn (htab, output_bfd,
14242 elf32_arm_nacl_plt0_entry[0]
14243 | arm_movw_immediate (got_displacement),
14244 splt->contents + 0);
14245 put_arm_insn (htab, output_bfd,
14246 elf32_arm_nacl_plt0_entry[1]
14247 | arm_movt_immediate (got_displacement),
14248 splt->contents + 4);
14249 for (i = 2; i < ARRAY_SIZE (elf32_arm_nacl_plt0_entry); ++i)
14250 put_arm_insn (htab, output_bfd,
14251 elf32_arm_nacl_plt0_entry[i],
14252 splt->contents + (i * 4));
14253 }
14254 else
14255 {
14256 got_displacement = got_address - (plt_address + 16);
14257
14258 plt0_entry = elf32_arm_plt0_entry;
14259 put_arm_insn (htab, output_bfd, plt0_entry[0],
14260 splt->contents + 0);
14261 put_arm_insn (htab, output_bfd, plt0_entry[1],
14262 splt->contents + 4);
14263 put_arm_insn (htab, output_bfd, plt0_entry[2],
14264 splt->contents + 8);
14265 put_arm_insn (htab, output_bfd, plt0_entry[3],
14266 splt->contents + 12);
14267
14268 #ifdef FOUR_WORD_PLT
14269 /* The displacement value goes in the otherwise-unused
14270 last word of the second entry. */
14271 bfd_put_32 (output_bfd, got_displacement, splt->contents + 28);
14272 #else
14273 bfd_put_32 (output_bfd, got_displacement, splt->contents + 16);
14274 #endif
14275 }
14276 }
14277
14278 /* UnixWare sets the entsize of .plt to 4, although that doesn't
14279 really seem like the right value. */
14280 if (splt->output_section->owner == output_bfd)
14281 elf_section_data (splt->output_section)->this_hdr.sh_entsize = 4;
14282
14283 if (htab->dt_tlsdesc_plt)
14284 {
14285 bfd_vma got_address
14286 = sgot->output_section->vma + sgot->output_offset;
14287 bfd_vma gotplt_address = (htab->root.sgot->output_section->vma
14288 + htab->root.sgot->output_offset);
14289 bfd_vma plt_address
14290 = splt->output_section->vma + splt->output_offset;
14291
14292 arm_put_trampoline (htab, output_bfd,
14293 splt->contents + htab->dt_tlsdesc_plt,
14294 dl_tlsdesc_lazy_trampoline, 6);
14295
14296 bfd_put_32 (output_bfd,
14297 gotplt_address + htab->dt_tlsdesc_got
14298 - (plt_address + htab->dt_tlsdesc_plt)
14299 - dl_tlsdesc_lazy_trampoline[6],
14300 splt->contents + htab->dt_tlsdesc_plt + 24);
14301 bfd_put_32 (output_bfd,
14302 got_address - (plt_address + htab->dt_tlsdesc_plt)
14303 - dl_tlsdesc_lazy_trampoline[7],
14304 splt->contents + htab->dt_tlsdesc_plt + 24 + 4);
14305 }
14306
14307 if (htab->tls_trampoline)
14308 {
14309 arm_put_trampoline (htab, output_bfd,
14310 splt->contents + htab->tls_trampoline,
14311 tls_trampoline, 3);
14312 #ifdef FOUR_WORD_PLT
14313 bfd_put_32 (output_bfd, 0x00000000,
14314 splt->contents + htab->tls_trampoline + 12);
14315 #endif
14316 }
14317
14318 if (htab->vxworks_p && !info->shared && htab->root.splt->size > 0)
14319 {
14320 /* Correct the .rel(a).plt.unloaded relocations. They will have
14321 incorrect symbol indexes. */
14322 int num_plts;
14323 unsigned char *p;
14324
14325 num_plts = ((htab->root.splt->size - htab->plt_header_size)
14326 / htab->plt_entry_size);
14327 p = htab->srelplt2->contents + RELOC_SIZE (htab);
14328
14329 for (; num_plts; num_plts--)
14330 {
14331 Elf_Internal_Rela rel;
14332
14333 SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
14334 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
14335 SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
14336 p += RELOC_SIZE (htab);
14337
14338 SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
14339 rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
14340 SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
14341 p += RELOC_SIZE (htab);
14342 }
14343 }
14344 }
14345
14346 /* Fill in the first three entries in the global offset table. */
14347 if (sgot)
14348 {
14349 if (sgot->size > 0)
14350 {
14351 if (sdyn == NULL)
14352 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents);
14353 else
14354 bfd_put_32 (output_bfd,
14355 sdyn->output_section->vma + sdyn->output_offset,
14356 sgot->contents);
14357 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 4);
14358 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 8);
14359 }
14360
14361 elf_section_data (sgot->output_section)->this_hdr.sh_entsize = 4;
14362 }
14363
14364 return TRUE;
14365 }
14366
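/* Fill in ARM-specific fields of the ELF file header: the OS/ABI and
   ABI version bytes, plus the BE8 flag when code byte-swapping has been
   requested.  */
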
14367 static void
14368 elf32_arm_post_process_headers (bfd * abfd, struct bfd_link_info * link_info)
14369 {
14370 Elf_Internal_Ehdr * i_ehdrp; /* ELF file header, internal form. */
14371 struct elf32_arm_link_hash_table *globals;
14372
14373 i_ehdrp = elf_elfheader (abfd);
14374
14375 if (EF_ARM_EABI_VERSION (i_ehdrp->e_flags) == EF_ARM_EABI_UNKNOWN)
14376 i_ehdrp->e_ident[EI_OSABI] = ELFOSABI_ARM;
14377 else
14378 i_ehdrp->e_ident[EI_OSABI] = 0;
14379 i_ehdrp->e_ident[EI_ABIVERSION] = ARM_ELF_ABI_VERSION;
14380
14381 if (link_info)
14382 {
14383 globals = elf32_arm_hash_table (link_info);
14384 if (globals != NULL && globals->byteswap_code)
14385 i_ehdrp->e_flags |= EF_ARM_BE8;
14386 }
14387 }
14388
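/* Classify a dynamic relocation for the generic ELF linker code, which
   uses the classification when sorting the dynamic reloc sections.  */
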
14389 static enum elf_reloc_type_class
14390 elf32_arm_reloc_type_class (const Elf_Internal_Rela *rela)
14391 {
14392 switch ((int) ELF32_R_TYPE (rela->r_info))
14393 {
14394 case R_ARM_RELATIVE:
14395 return reloc_class_relative;
14396 case R_ARM_JUMP_SLOT:
14397 return reloc_class_plt;
14398 case R_ARM_COPY:
14399 return reloc_class_copy;
14400 default:
14401 return reloc_class_normal;
14402 }
14403 }
14404
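/* Perform any last-minute processing before the file is written out:
   update the ARM note section, if present.  */
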
14405 static void
14406 elf32_arm_final_write_processing (bfd *abfd, bfd_boolean linker ATTRIBUTE_UNUSED)
14407 {
14408 bfd_arm_update_notes (abfd, ARM_NOTE_SECTION);
14409 }
14410
14411 /* Return TRUE if this is an unwinding table entry. */
14412
14413 static bfd_boolean
14414 is_arm_elf_unwind_section_name (bfd * abfd ATTRIBUTE_UNUSED, const char * name)
14415 {
14416 return (CONST_STRNEQ (name, ELF_STRING_ARM_unwind)
14417 || CONST_STRNEQ (name, ELF_STRING_ARM_unwind_once));
14418 }
14419
14420
14421 /* Set the type and flags for an ARM section. We do this by
14422 the section name, which is a hack, but ought to work. */
14423
14424 static bfd_boolean
14425 elf32_arm_fake_sections (bfd * abfd, Elf_Internal_Shdr * hdr, asection * sec)
14426 {
14427 const char * name;
14428
14429 name = bfd_get_section_name (abfd, sec);
14430
14431 if (is_arm_elf_unwind_section_name (abfd, name))
14432 {
14433 hdr->sh_type = SHT_ARM_EXIDX;
14434 hdr->sh_flags |= SHF_LINK_ORDER;
14435 }
14436 return TRUE;
14437 }
14438
14439 /* Handle an ARM specific section when reading an object file. This is
14440 called when bfd_section_from_shdr finds a section with an unknown
14441 type. */
14442
14443 static bfd_boolean
14444 elf32_arm_section_from_shdr (bfd *abfd,
14445 Elf_Internal_Shdr * hdr,
14446 const char *name,
14447 int shindex)
14448 {
14449 /* There ought to be a place to keep ELF backend specific flags, but
14450 at the moment there isn't one. We just keep track of the
14451 sections by their name, instead. Fortunately, the ABI gives
14452 names for all the ARM specific sections, so we will probably get
14453 away with this. */
14454 switch (hdr->sh_type)
14455 {
14456 case SHT_ARM_EXIDX:
14457 case SHT_ARM_PREEMPTMAP:
14458 case SHT_ARM_ATTRIBUTES:
14459 break;
14460
14461 default:
14462 return FALSE;
14463 }
14464
14465 if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
14466 return FALSE;
14467
14468 return TRUE;
14469 }
14470
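/* Return the ARM-specific data for section SEC, or NULL if SEC does not
   belong to an ARM ELF bfd.  */
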
14471 static _arm_elf_section_data *
14472 get_arm_elf_section_data (asection * sec)
14473 {
14474 if (sec && sec->owner && is_arm_elf (sec->owner))
14475 return elf32_arm_section_data (sec);
14476 else
14477 return NULL;
14478 }
14479
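/* Context passed around when emitting extra local symbols (mapping
   symbols and stub symbols) into the output file.  */
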
14480 typedef struct
14481 {
14482 void *flaginfo;
14483 struct bfd_link_info *info;
14484 asection *sec;
14485 int sec_shndx;
14486 int (*func) (void *, const char *, Elf_Internal_Sym *,
14487 asection *, struct elf_link_hash_entry *);
14488 } output_arch_syminfo;
14489
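/* The kinds of mapping symbol that can be emitted: $a, $t and $d.  */
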
14490 enum map_symbol_type
14491 {
14492 ARM_MAP_ARM,
14493 ARM_MAP_THUMB,
14494 ARM_MAP_DATA
14495 };
14496
14497
14498 /* Output a single mapping symbol. */
14499
14500 static bfd_boolean
14501 elf32_arm_output_map_sym (output_arch_syminfo *osi,
14502 enum map_symbol_type type,
14503 bfd_vma offset)
14504 {
14505 static const char *names[3] = {"$a", "$t", "$d"};
14506 Elf_Internal_Sym sym;
14507
14508 sym.st_value = osi->sec->output_section->vma
14509 + osi->sec->output_offset
14510 + offset;
14511 sym.st_size = 0;
14512 sym.st_other = 0;
14513 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
14514 sym.st_shndx = osi->sec_shndx;
14515 sym.st_target_internal = 0;
14516 elf32_arm_section_map_add (osi->sec, names[type][1], offset);
14517 return osi->func (osi->flaginfo, names[type], &sym, osi->sec, NULL) == 1;
14518 }
14519
14520 /* Output mapping symbols for the PLT entry described by ROOT_PLT and ARM_PLT.
14521 IS_IPLT_ENTRY_P says whether the PLT is in .iplt rather than .plt. */
14522
14523 static bfd_boolean
14524 elf32_arm_output_plt_map_1 (output_arch_syminfo *osi,
14525 bfd_boolean is_iplt_entry_p,
14526 union gotplt_union *root_plt,
14527 struct arm_plt_info *arm_plt)
14528 {
14529 struct elf32_arm_link_hash_table *htab;
14530 bfd_vma addr, plt_header_size;
14531
14532 if (root_plt->offset == (bfd_vma) -1)
14533 return TRUE;
14534
14535 htab = elf32_arm_hash_table (osi->info);
14536 if (htab == NULL)
14537 return FALSE;
14538
14539 if (is_iplt_entry_p)
14540 {
14541 osi->sec = htab->root.iplt;
14542 plt_header_size = 0;
14543 }
14544 else
14545 {
14546 osi->sec = htab->root.splt;
14547 plt_header_size = htab->plt_header_size;
14548 }
14549 osi->sec_shndx = (_bfd_elf_section_from_bfd_section
14550 (osi->info->output_bfd, osi->sec->output_section));
14551
14552 addr = root_plt->offset & -2;
14553 if (htab->symbian_p)
14554 {
14555 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
14556 return FALSE;
14557 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 4))
14558 return FALSE;
14559 }
14560 else if (htab->vxworks_p)
14561 {
14562 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
14563 return FALSE;
14564 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 8))
14565 return FALSE;
14566 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr + 12))
14567 return FALSE;
14568 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 20))
14569 return FALSE;
14570 }
14571 else if (htab->nacl_p)
14572 {
14573 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
14574 return FALSE;
14575 }
14576 else
14577 {
14578 bfd_boolean thumb_stub_p;
14579
14580 thumb_stub_p = elf32_arm_plt_needs_thumb_stub_p (osi->info, arm_plt);
14581 if (thumb_stub_p)
14582 {
14583 if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr - 4))
14584 return FALSE;
14585 }
14586 #ifdef FOUR_WORD_PLT
14587 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
14588 return FALSE;
14589 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 12))
14590 return FALSE;
14591 #else
14592 /* A three-word PLT with no Thumb thunk contains only ARM code,
14593 so we only need to output a mapping symbol for the first PLT entry
14594 and for entries with Thumb thunks. */
14595 if (thumb_stub_p || addr == plt_header_size)
14596 {
14597 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
14598 return FALSE;
14599 }
14600 #endif
14601 }
14602
14603 return TRUE;
14604 }
14605
14606 /* Output mapping symbols for PLT entries associated with H. */
14607
14608 static bfd_boolean
14609 elf32_arm_output_plt_map (struct elf_link_hash_entry *h, void *inf)
14610 {
14611 output_arch_syminfo *osi = (output_arch_syminfo *) inf;
14612 struct elf32_arm_link_hash_entry *eh;
14613
14614 if (h->root.type == bfd_link_hash_indirect)
14615 return TRUE;
14616
14617 if (h->root.type == bfd_link_hash_warning)
14618 /* When warning symbols are created, they **replace** the "real"
14619 entry in the hash table, thus we never get to see the real
14620 symbol in a hash traversal. So look at it now. */
14621 h = (struct elf_link_hash_entry *) h->root.u.i.link;
14622
14623 eh = (struct elf32_arm_link_hash_entry *) h;
14624 return elf32_arm_output_plt_map_1 (osi, SYMBOL_CALLS_LOCAL (osi->info, h),
14625 &h->plt, &eh->plt);
14626 }
14627
14628 /* Output a single local symbol for a generated stub. */
14629
14630 static bfd_boolean
14631 elf32_arm_output_stub_sym (output_arch_syminfo *osi, const char *name,
14632 bfd_vma offset, bfd_vma size)
14633 {
14634 Elf_Internal_Sym sym;
14635
14636 sym.st_value = osi->sec->output_section->vma
14637 + osi->sec->output_offset
14638 + offset;
14639 sym.st_size = size;
14640 sym.st_other = 0;
14641 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
14642 sym.st_shndx = osi->sec_shndx;
14643 sym.st_target_internal = 0;
14644 return osi->func (osi->flaginfo, name, &sym, osi->sec, NULL) == 1;
14645 }
14646
14647 static bfd_boolean
14648 arm_map_one_stub (struct bfd_hash_entry * gen_entry,
14649 void * in_arg)
14650 {
14651 struct elf32_arm_stub_hash_entry *stub_entry;
14652 asection *stub_sec;
14653 bfd_vma addr;
14654 char *stub_name;
14655 output_arch_syminfo *osi;
14656 const insn_sequence *template_sequence;
14657 enum stub_insn_type prev_type;
14658 int size;
14659 int i;
14660 enum map_symbol_type sym_type;
14661
14662 /* Massage our args to the form they really have. */
14663 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
14664 osi = (output_arch_syminfo *) in_arg;
14665
14666 stub_sec = stub_entry->stub_sec;
14667
14668 /* Ensure this stub is attached to the current section being
14669 processed. */
14670 if (stub_sec != osi->sec)
14671 return TRUE;
14672
14673 addr = (bfd_vma) stub_entry->stub_offset;
14674 stub_name = stub_entry->output_name;
14675
14676 template_sequence = stub_entry->stub_template;
14677 switch (template_sequence[0].type)
14678 {
14679 case ARM_TYPE:
14680 if (!elf32_arm_output_stub_sym (osi, stub_name, addr, stub_entry->stub_size))
14681 return FALSE;
14682 break;
14683 case THUMB16_TYPE:
14684 case THUMB32_TYPE:
14685 if (!elf32_arm_output_stub_sym (osi, stub_name, addr | 1,
14686 stub_entry->stub_size))
14687 return FALSE;
14688 break;
14689 default:
14690 BFD_FAIL ();
14691 return FALSE;
14692 }
14693
14694 prev_type = DATA_TYPE;
14695 size = 0;
14696 for (i = 0; i < stub_entry->stub_template_size; i++)
14697 {
14698 switch (template_sequence[i].type)
14699 {
14700 case ARM_TYPE:
14701 sym_type = ARM_MAP_ARM;
14702 break;
14703
14704 case THUMB16_TYPE:
14705 case THUMB32_TYPE:
14706 sym_type = ARM_MAP_THUMB;
14707 break;
14708
14709 case DATA_TYPE:
14710 sym_type = ARM_MAP_DATA;
14711 break;
14712
14713 default:
14714 BFD_FAIL ();
14715 return FALSE;
14716 }
14717
14718 if (template_sequence[i].type != prev_type)
14719 {
14720 prev_type = template_sequence[i].type;
14721 if (!elf32_arm_output_map_sym (osi, sym_type, addr + size))
14722 return FALSE;
14723 }
14724
14725 switch (template_sequence[i].type)
14726 {
14727 case ARM_TYPE:
14728 case THUMB32_TYPE:
14729 size += 4;
14730 break;
14731
14732 case THUMB16_TYPE:
14733 size += 2;
14734 break;
14735
14736 case DATA_TYPE:
14737 size += 4;
14738 break;
14739
14740 default:
14741 BFD_FAIL ();
14742 return FALSE;
14743 }
14744 }
14745
14746 return TRUE;
14747 }
14748
14749 /* Output mapping symbols for linker-generated sections,
14750 and for those data-only sections that do not have a
14751 $d mapping symbol. */
14752
14753 static bfd_boolean
14754 elf32_arm_output_arch_local_syms (bfd *output_bfd,
14755 struct bfd_link_info *info,
14756 void *flaginfo,
14757 int (*func) (void *, const char *,
14758 Elf_Internal_Sym *,
14759 asection *,
14760 struct elf_link_hash_entry *))
14761 {
14762 output_arch_syminfo osi;
14763 struct elf32_arm_link_hash_table *htab;
14764 bfd_vma offset;
14765 bfd_size_type size;
14766 bfd *input_bfd;
14767
14768 htab = elf32_arm_hash_table (info);
14769 if (htab == NULL)
14770 return FALSE;
14771
14772 check_use_blx (htab);
14773
14774 osi.flaginfo = flaginfo;
14775 osi.info = info;
14776 osi.func = func;
14777
14778 /* Add a $d mapping symbol to data-only sections that
14779 don't have any mapping symbol. This may result in (harmless) redundant
14780 mapping symbols. */
14781 for (input_bfd = info->input_bfds;
14782 input_bfd != NULL;
14783 input_bfd = input_bfd->link_next)
14784 {
14785 if ((input_bfd->flags & (BFD_LINKER_CREATED | HAS_SYMS)) == HAS_SYMS)
14786 for (osi.sec = input_bfd->sections;
14787 osi.sec != NULL;
14788 osi.sec = osi.sec->next)
14789 {
14790 if (osi.sec->output_section != NULL
14791 && ((osi.sec->output_section->flags & (SEC_ALLOC | SEC_CODE))
14792 != 0)
14793 && (osi.sec->flags & (SEC_HAS_CONTENTS | SEC_LINKER_CREATED))
14794 == SEC_HAS_CONTENTS
14795 && get_arm_elf_section_data (osi.sec) != NULL
14796 && get_arm_elf_section_data (osi.sec)->mapcount == 0
14797 && osi.sec->size > 0
14798 && (osi.sec->flags & SEC_EXCLUDE) == 0)
14799 {
14800 osi.sec_shndx = _bfd_elf_section_from_bfd_section
14801 (output_bfd, osi.sec->output_section);
14802 if (osi.sec_shndx != (int)SHN_BAD)
14803 elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 0);
14804 }
14805 }
14806 }
14807
14808 /* ARM->Thumb glue. */
14809 if (htab->arm_glue_size > 0)
14810 {
14811 osi.sec = bfd_get_section_by_name (htab->bfd_of_glue_owner,
14812 ARM2THUMB_GLUE_SECTION_NAME);
14813
14814 osi.sec_shndx = _bfd_elf_section_from_bfd_section
14815 (output_bfd, osi.sec->output_section);
14816 if (info->shared || htab->root.is_relocatable_executable
14817 || htab->pic_veneer)
14818 size = ARM2THUMB_PIC_GLUE_SIZE;
14819 else if (htab->use_blx)
14820 size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
14821 else
14822 size = ARM2THUMB_STATIC_GLUE_SIZE;
14823
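/* Each ARM->Thumb veneer is ARM code followed by a trailing literal word
(the branch target, or an offset to it for the PIC variant), so mark the
start of each veneer with $a and its final word with $d. */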
14824 for (offset = 0; offset < htab->arm_glue_size; offset += size)
14825 {
14826 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset);
14827 elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, offset + size - 4);
14828 }
14829 }
14830
14831 /* Thumb->ARM glue. */
14832 if (htab->thumb_glue_size > 0)
14833 {
14834 osi.sec = bfd_get_section_by_name (htab->bfd_of_glue_owner,
14835 THUMB2ARM_GLUE_SECTION_NAME);
14836
14837 osi.sec_shndx = _bfd_elf_section_from_bfd_section
14838 (output_bfd, osi.sec->output_section);
14839 size = THUMB2ARM_GLUE_SIZE;
14840
14841 for (offset = 0; offset < htab->thumb_glue_size; offset += size)
14842 {
14843 elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, offset);
14844 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset + 4);
14845 }
14846 }
14847
14848 /* ARMv4 BX veneers. */
14849 if (htab->bx_glue_size > 0)
14850 {
14851 osi.sec = bfd_get_section_by_name (htab->bfd_of_glue_owner,
14852 ARM_BX_GLUE_SECTION_NAME);
14853
14854 osi.sec_shndx = _bfd_elf_section_from_bfd_section
14855 (output_bfd, osi.sec->output_section);
14856
14857 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0);
14858 }
14859
14860 /* Long-call stubs. */
14861 if (htab->stub_bfd && htab->stub_bfd->sections)
14862 {
14863 asection* stub_sec;
14864
14865 for (stub_sec = htab->stub_bfd->sections;
14866 stub_sec != NULL;
14867 stub_sec = stub_sec->next)
14868 {
14869 /* Ignore non-stub sections. */
14870 if (!strstr (stub_sec->name, STUB_SUFFIX))
14871 continue;
14872
14873 osi.sec = stub_sec;
14874
14875 osi.sec_shndx = _bfd_elf_section_from_bfd_section
14876 (output_bfd, osi.sec->output_section);
14877
14878 bfd_hash_traverse (&htab->stub_hash_table, arm_map_one_stub, &osi);
14879 }
14880 }
14881
14882 /* Finally, output mapping symbols for the PLT. */
14883 if (htab->root.splt && htab->root.splt->size > 0)
14884 {
14885 osi.sec = htab->root.splt;
14886 osi.sec_shndx = (_bfd_elf_section_from_bfd_section
14887 (output_bfd, osi.sec->output_section));
14888
14889 /* Output mapping symbols for the PLT header. SymbianOS does not have a
14890 PLT header. */
14891 if (htab->vxworks_p)
14892 {
14893 /* VxWorks shared libraries have no PLT header. */
14894 if (!info->shared)
14895 {
14896 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
14897 return FALSE;
14898 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 12))
14899 return FALSE;
14900 }
14901 }
14902 else if (htab->nacl_p)
14903 {
14904 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
14905 return FALSE;
14906 }
14907 else if (!htab->symbian_p)
14908 {
14909 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
14910 return FALSE;
14911 #ifndef FOUR_WORD_PLT
14912 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 16))
14913 return FALSE;
14914 #endif
14915 }
14916 }
14917 if ((htab->root.splt && htab->root.splt->size > 0)
14918 || (htab->root.iplt && htab->root.iplt->size > 0))
14919 {
14920 elf_link_hash_traverse (&htab->root, elf32_arm_output_plt_map, &osi);
14921 for (input_bfd = info->input_bfds;
14922 input_bfd != NULL;
14923 input_bfd = input_bfd->link_next)
14924 {
14925 struct arm_local_iplt_info **local_iplt;
14926 unsigned int i, num_syms;
14927
14928 local_iplt = elf32_arm_local_iplt (input_bfd);
14929 if (local_iplt != NULL)
14930 {
14931 num_syms = elf_symtab_hdr (input_bfd).sh_info;
14932 for (i = 0; i < num_syms; i++)
14933 if (local_iplt[i] != NULL
14934 && !elf32_arm_output_plt_map_1 (&osi, TRUE,
14935 &local_iplt[i]->root,
14936 &local_iplt[i]->arm))
14937 return FALSE;
14938 }
14939 }
14940 }
14941 if (htab->dt_tlsdesc_plt != 0)
14942 {
14943 /* Mapping symbols for the lazy TLS trampoline. */
14944 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, htab->dt_tlsdesc_plt))
14945 return FALSE;
14946
14947 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA,
14948 htab->dt_tlsdesc_plt + 24))
14949 return FALSE;
14950 }
14951 if (htab->tls_trampoline != 0)
14952 {
14953 /* Mapping symbols for the TLS trampoline. */
14954 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, htab->tls_trampoline))
14955 return FALSE;
14956 #ifdef FOUR_WORD_PLT
14957 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA,
14958 htab->tls_trampoline + 12))
14959 return FALSE;
14960 #endif
14961 }
14962
14963 return TRUE;
14964 }
14965
14966 /* Allocate target specific section data. */
14967
14968 static bfd_boolean
14969 elf32_arm_new_section_hook (bfd *abfd, asection *sec)
14970 {
14971 if (!sec->used_by_bfd)
14972 {
14973 _arm_elf_section_data *sdata;
14974 bfd_size_type amt = sizeof (*sdata);
14975
14976 sdata = (_arm_elf_section_data *) bfd_zalloc (abfd, amt);
14977 if (sdata == NULL)
14978 return FALSE;
14979 sec->used_by_bfd = sdata;
14980 }
14981
14982 return _bfd_elf_new_section_hook (abfd, sec);
14983 }
14984
14985
14986 /* Used to order a list of mapping symbols by address. */
14987
14988 static int
14989 elf32_arm_compare_mapping (const void * a, const void * b)
14990 {
14991 const elf32_arm_section_map *amap = (const elf32_arm_section_map *) a;
14992 const elf32_arm_section_map *bmap = (const elf32_arm_section_map *) b;
14993
14994 if (amap->vma > bmap->vma)
14995 return 1;
14996 else if (amap->vma < bmap->vma)
14997 return -1;
14998 else if (amap->type > bmap->type)
14999 /* Ensure results do not depend on the host qsort for objects with
15000 multiple mapping symbols at the same address by sorting on type
15001 after vma. */
15002 return 1;
15003 else if (amap->type < bmap->type)
15004 return -1;
15005 else
15006 return 0;
15007 }
15008
15009 /* Add OFFSET to lower 31 bits of ADDR, leaving other bits unmodified. */
15010
15011 static unsigned long
15012 offset_prel31 (unsigned long addr, bfd_vma offset)
15013 {
15014 return (addr & ~0x7ffffffful) | ((addr + offset) & 0x7ffffffful);
15015 }
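
/* For example, offset_prel31 (0x80000004, 0x10) yields 0x80000014: bit 31
is taken unchanged from ADDR while the addition is performed modulo 2^31,
so any carry out of bit 30 is discarded. This matches the prel31
(31-bit place-relative) fields used in .ARM.exidx entries. */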
15016
15017 /* Copy an .ARM.exidx table entry, adding OFFSET to (applied) PREL31
15018 relocations. */
15019
15020 static void
15021 copy_exidx_entry (bfd *output_bfd, bfd_byte *to, bfd_byte *from, bfd_vma offset)
15022 {
15023 unsigned long first_word = bfd_get_32 (output_bfd, from);
15024 unsigned long second_word = bfd_get_32 (output_bfd, from + 4);
15025
15026 /* High bit of first word is supposed to be zero. */
15027 if ((first_word & 0x80000000ul) == 0)
15028 first_word = offset_prel31 (first_word, offset);
15029
15030 /* If the high bit of the second word is clear, and the word is not 0x1
15031 (EXIDX_CANTUNWIND), this is an offset to an .ARM.extab entry. */
15032 if ((second_word != 0x1) && ((second_word & 0x80000000ul) == 0))
15033 second_word = offset_prel31 (second_word, offset);
15034
15035 bfd_put_32 (output_bfd, first_word, to);
15036 bfd_put_32 (output_bfd, second_word, to + 4);
15037 }
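
/* Per the ARM EHABI, each .ARM.exidx entry is a pair of words: a prel31
offset to the function it describes, and either EXIDX_CANTUNWIND (0x1),
an inline unwind description (high bit set), or a prel31 offset to an
.ARM.extab entry (high bit clear). Only the prel31 fields are
place-relative, which is why they are the only values adjusted above. */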
15038
15039 /* Data for make_branch_to_a8_stub(). */
15040
15041 struct a8_branch_to_stub_data
15042 {
15043 asection *writing_section;
15044 bfd_byte *contents;
15045 };
15046
15047
15048 /* Helper to insert branches to Cortex-A8 erratum stubs in the right
15049 places for a particular section. */
15050
15051 static bfd_boolean
15052 make_branch_to_a8_stub (struct bfd_hash_entry *gen_entry,
15053 void *in_arg)
15054 {
15055 struct elf32_arm_stub_hash_entry *stub_entry;
15056 struct a8_branch_to_stub_data *data;
15057 bfd_byte *contents;
15058 unsigned long branch_insn;
15059 bfd_vma veneered_insn_loc, veneer_entry_loc;
15060 bfd_signed_vma branch_offset;
15061 bfd *abfd;
15062 unsigned int target;
15063
15064 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
15065 data = (struct a8_branch_to_stub_data *) in_arg;
15066
15067 if (stub_entry->target_section != data->writing_section
15068 || stub_entry->stub_type < arm_stub_a8_veneer_lwm)
15069 return TRUE;
15070
15071 contents = data->contents;
15072
15073 veneered_insn_loc = stub_entry->target_section->output_section->vma
15074 + stub_entry->target_section->output_offset
15075 + stub_entry->target_value;
15076
15077 veneer_entry_loc = stub_entry->stub_sec->output_section->vma
15078 + stub_entry->stub_sec->output_offset
15079 + stub_entry->stub_offset;
15080
15081 if (stub_entry->stub_type == arm_stub_a8_veneer_blx)
15082 veneered_insn_loc &= ~3u;
15083
15084 branch_offset = veneer_entry_loc - veneered_insn_loc - 4;
15085
15086 abfd = stub_entry->target_section->owner;
15087 target = stub_entry->target_value;
15088
15089 /* We attempt to avoid this condition by setting stubs_always_after_branch
15090 in elf32_arm_size_stubs if we've enabled the Cortex-A8 erratum workaround.
15091 This check is just to be on the safe side... */
15092 if ((veneered_insn_loc & ~0xfff) == (veneer_entry_loc & ~0xfff))
15093 {
15094 (*_bfd_error_handler) (_("%B: error: Cortex-A8 erratum stub is "
15095 "allocated in unsafe location"), abfd);
15096 return FALSE;
15097 }
15098
15099 switch (stub_entry->stub_type)
15100 {
15101 case arm_stub_a8_veneer_b:
15102 case arm_stub_a8_veneer_b_cond:
15103 branch_insn = 0xf0009000;
15104 goto jump24;
15105
15106 case arm_stub_a8_veneer_blx:
15107 branch_insn = 0xf000e800;
15108 goto jump24;
15109
15110 case arm_stub_a8_veneer_bl:
15111 {
15112 unsigned int i1, j1, i2, j2, s;
15113
15114 branch_insn = 0xf000d000;
15115
15116 jump24:
15117 if (branch_offset < -16777216 || branch_offset > 16777214)
15118 {
15119 /* There's not much we can do apart from complain if this
15120 happens. */
15121 (*_bfd_error_handler) (_("%B: error: Cortex-A8 erratum stub out "
15122 "of range (input file too large)"), abfd);
15123 return FALSE;
15124 }
15125
15126 /* i1 = not(j1 eor s), so:
15127 not i1 = j1 eor s
15128 j1 = (not i1) eor s. */
15129
15130 branch_insn |= (branch_offset >> 1) & 0x7ff;
15131 branch_insn |= ((branch_offset >> 12) & 0x3ff) << 16;
15132 i2 = (branch_offset >> 22) & 1;
15133 i1 = (branch_offset >> 23) & 1;
15134 s = (branch_offset >> 24) & 1;
15135 j1 = (!i1) ^ s;
15136 j2 = (!i2) ^ s;
15137 branch_insn |= j2 << 11;
15138 branch_insn |= j1 << 13;
15139 branch_insn |= s << 26;
15140 }
15141 break;
15142
15143 default:
15144 BFD_FAIL ();
15145 return FALSE;
15146 }
15147
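/* For reference, the Thumb-2 B/BL/BLX immediate built above encodes the
25-bit branch offset as S:I1:I2:imm10:imm11:'0', where I1 = NOT (J1 XOR S)
and I2 = NOT (J2 XOR S); J1 and J2 are therefore recovered from I1, I2 and
S and placed in bits 13 and 11 of the second halfword, with S in bit 10 of
the first halfword. The two halfwords are stored below, first halfword
first. */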
15148 bfd_put_16 (abfd, (branch_insn >> 16) & 0xffff, &contents[target]);
15149 bfd_put_16 (abfd, branch_insn & 0xffff, &contents[target + 2]);
15150
15151 return TRUE;
15152 }
15153
15154 /* Do code byteswapping. Return FALSE afterwards so that the section is
15155 written out as normal. */
15156
15157 static bfd_boolean
15158 elf32_arm_write_section (bfd *output_bfd,
15159 struct bfd_link_info *link_info,
15160 asection *sec,
15161 bfd_byte *contents)
15162 {
15163 unsigned int mapcount, errcount;
15164 _arm_elf_section_data *arm_data;
15165 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
15166 elf32_arm_section_map *map;
15167 elf32_vfp11_erratum_list *errnode;
15168 bfd_vma ptr;
15169 bfd_vma end;
15170 bfd_vma offset = sec->output_section->vma + sec->output_offset;
15171 bfd_byte tmp;
15172 unsigned int i;
15173
15174 if (globals == NULL)
15175 return FALSE;
15176
15177 /* If this section has not been allocated an _arm_elf_section_data
15178 structure then we cannot record anything. */
15179 arm_data = get_arm_elf_section_data (sec);
15180 if (arm_data == NULL)
15181 return FALSE;
15182
15183 mapcount = arm_data->mapcount;
15184 map = arm_data->map;
15185 errcount = arm_data->erratumcount;
15186
15187 if (errcount != 0)
15188 {
15189 unsigned int endianflip = bfd_big_endian (output_bfd) ? 3 : 0;
15190
15191 for (errnode = arm_data->erratumlist; errnode != 0;
15192 errnode = errnode->next)
15193 {
15194 bfd_vma target = errnode->vma - offset;
15195
15196 switch (errnode->type)
15197 {
15198 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
15199 {
15200 bfd_vma branch_to_veneer;
15201 /* Original condition code of instruction, plus bit mask for
15202 ARM B instruction. */
15203 unsigned int insn = (errnode->u.b.vfp_insn & 0xf0000000)
15204 | 0x0a000000;
15205
15206 /* The instruction is before the label. */
15207 target -= 4;
15208
15209 /* Above offset included in -4 below. */
15210 branch_to_veneer = errnode->u.b.veneer->vma
15211 - errnode->vma - 4;
15212
15213 if ((signed) branch_to_veneer < -(1 << 25)
15214 || (signed) branch_to_veneer >= (1 << 25))
15215 (*_bfd_error_handler) (_("%B: error: VFP11 veneer out of "
15216 "range"), output_bfd);
15217
15218 insn |= (branch_to_veneer >> 2) & 0xffffff;
15219 contents[endianflip ^ target] = insn & 0xff;
15220 contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
15221 contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
15222 contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;
15223 }
15224 break;
15225
15226 case VFP11_ERRATUM_ARM_VENEER:
15227 {
15228 bfd_vma branch_from_veneer;
15229 unsigned int insn;
15230
15231 /* Take size of veneer into account. */
15232 branch_from_veneer = errnode->u.v.branch->vma
15233 - errnode->vma - 12;
15234
15235 if ((signed) branch_from_veneer < -(1 << 25)
15236 || (signed) branch_from_veneer >= (1 << 25))
15237 (*_bfd_error_handler) (_("%B: error: VFP11 veneer out of "
15238 "range"), output_bfd);
15239
15240 /* Original instruction. */
15241 insn = errnode->u.v.branch->u.b.vfp_insn;
15242 contents[endianflip ^ target] = insn & 0xff;
15243 contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
15244 contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
15245 contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;
15246
15247 /* Branch back to insn after original insn. */
15248 insn = 0xea000000 | ((branch_from_veneer >> 2) & 0xffffff);
15249 contents[endianflip ^ (target + 4)] = insn & 0xff;
15250 contents[endianflip ^ (target + 5)] = (insn >> 8) & 0xff;
15251 contents[endianflip ^ (target + 6)] = (insn >> 16) & 0xff;
15252 contents[endianflip ^ (target + 7)] = (insn >> 24) & 0xff;
15253 }
15254 break;
15255
15256 default:
15257 abort ();
15258 }
15259 }
15260 }
15261
15262 if (arm_data->elf.this_hdr.sh_type == SHT_ARM_EXIDX)
15263 {
15264 arm_unwind_table_edit *edit_node
15265 = arm_data->u.exidx.unwind_edit_list;
15266 /* Now, sec->size is the size of the section we will write. The original
15267 size (before we merged duplicate entries and inserted EXIDX_CANTUNWIND
15268 markers) was sec->rawsize. (If we performed no edits, however,
15269 rawsize will be zero and we should use size instead.) */
15270 bfd_byte *edited_contents = (bfd_byte *) bfd_malloc (sec->size);
15271 unsigned int input_size = sec->rawsize ? sec->rawsize : sec->size;
15272 unsigned int in_index, out_index;
15273 bfd_vma add_to_offsets = 0;
15274
15275 for (in_index = 0, out_index = 0; in_index * 8 < input_size || edit_node;)
15276 {
15277 if (edit_node)
15278 {
15279 unsigned int edit_index = edit_node->index;
15280
15281 if (in_index < edit_index && in_index * 8 < input_size)
15282 {
15283 copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
15284 contents + in_index * 8, add_to_offsets);
15285 out_index++;
15286 in_index++;
15287 }
15288 else if (in_index == edit_index
15289 || (in_index * 8 >= input_size
15290 && edit_index == UINT_MAX))
15291 {
15292 switch (edit_node->type)
15293 {
15294 case DELETE_EXIDX_ENTRY:
15295 in_index++;
15296 add_to_offsets += 8;
15297 break;
15298
15299 case INSERT_EXIDX_CANTUNWIND_AT_END:
15300 {
15301 asection *text_sec = edit_node->linked_section;
15302 bfd_vma text_offset = text_sec->output_section->vma
15303 + text_sec->output_offset
15304 + text_sec->size;
15305 bfd_vma exidx_offset = offset + out_index * 8;
15306 unsigned long prel31_offset;
15307
15308 /* Note: this is meant to be equivalent to an
15309 R_ARM_PREL31 relocation. These synthetic
15310 EXIDX_CANTUNWIND markers are not relocated by the
15311 usual BFD method. */
15312 prel31_offset = (text_offset - exidx_offset)
15313 & 0x7ffffffful;
15314
15315 /* First address we can't unwind. */
15316 bfd_put_32 (output_bfd, prel31_offset,
15317 &edited_contents[out_index * 8]);
15318
15319 /* Code for EXIDX_CANTUNWIND. */
15320 bfd_put_32 (output_bfd, 0x1,
15321 &edited_contents[out_index * 8 + 4]);
15322
15323 out_index++;
15324 add_to_offsets -= 8;
15325 }
15326 break;
15327 }
15328
15329 edit_node = edit_node->next;
15330 }
15331 }
15332 else
15333 {
15334 /* No more edits, copy remaining entries verbatim. */
15335 copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
15336 contents + in_index * 8, add_to_offsets);
15337 out_index++;
15338 in_index++;
15339 }
15340 }
15341
15342 if (!(sec->flags & SEC_EXCLUDE) && !(sec->flags & SEC_NEVER_LOAD))
15343 bfd_set_section_contents (output_bfd, sec->output_section,
15344 edited_contents,
15345 (file_ptr) sec->output_offset, sec->size);
15346
15347 return TRUE;
15348 }
15349
15350 /* Fix code to point to Cortex-A8 erratum stubs. */
15351 if (globals->fix_cortex_a8)
15352 {
15353 struct a8_branch_to_stub_data data;
15354
15355 data.writing_section = sec;
15356 data.contents = contents;
15357
15358 bfd_hash_traverse (&globals->stub_hash_table, make_branch_to_a8_stub,
15359 &data);
15360 }
15361
15362 if (mapcount == 0)
15363 return FALSE;
15364
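/* If code byteswapping was requested (typically to produce BE8-format
images, where instructions are stored little-endian while data keeps the
big-endian byte order), walk the mapping symbols in address order and swap
each run according to its type: ARM code as 32-bit words, Thumb code as
16-bit halfwords, and data not at all. */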
15365 if (globals->byteswap_code)
15366 {
15367 qsort (map, mapcount, sizeof (* map), elf32_arm_compare_mapping);
15368
15369 ptr = map[0].vma;
15370 for (i = 0; i < mapcount; i++)
15371 {
15372 if (i == mapcount - 1)
15373 end = sec->size;
15374 else
15375 end = map[i + 1].vma;
15376
15377 switch (map[i].type)
15378 {
15379 case 'a':
15380 /* Byte swap code words. */
15381 while (ptr + 3 < end)
15382 {
15383 tmp = contents[ptr];
15384 contents[ptr] = contents[ptr + 3];
15385 contents[ptr + 3] = tmp;
15386 tmp = contents[ptr + 1];
15387 contents[ptr + 1] = contents[ptr + 2];
15388 contents[ptr + 2] = tmp;
15389 ptr += 4;
15390 }
15391 break;
15392
15393 case 't':
15394 /* Byte swap code halfwords. */
15395 while (ptr + 1 < end)
15396 {
15397 tmp = contents[ptr];
15398 contents[ptr] = contents[ptr + 1];
15399 contents[ptr + 1] = tmp;
15400 ptr += 2;
15401 }
15402 break;
15403
15404 case 'd':
15405 /* Leave data alone. */
15406 break;
15407 }
15408 ptr = end;
15409 }
15410 }
15411
15412 free (map);
15413 arm_data->mapcount = -1;
15414 arm_data->mapsize = 0;
15415 arm_data->map = NULL;
15416
15417 return FALSE;
15418 }
15419
15420 /* Mangle thumb function symbols as we read them in. */
15421
15422 static bfd_boolean
15423 elf32_arm_swap_symbol_in (bfd * abfd,
15424 const void *psrc,
15425 const void *pshn,
15426 Elf_Internal_Sym *dst)
15427 {
15428 if (!bfd_elf32_swap_symbol_in (abfd, psrc, pshn, dst))
15429 return FALSE;
15430
15431 /* New EABI objects mark thumb function symbols by setting the low bit of
15432 the address. */
15433 if (ELF_ST_TYPE (dst->st_info) == STT_FUNC
15434 || ELF_ST_TYPE (dst->st_info) == STT_GNU_IFUNC)
15435 {
15436 if (dst->st_value & 1)
15437 {
15438 dst->st_value &= ~(bfd_vma) 1;
15439 dst->st_target_internal = ST_BRANCH_TO_THUMB;
15440 }
15441 else
15442 dst->st_target_internal = ST_BRANCH_TO_ARM;
15443 }
15444 else if (ELF_ST_TYPE (dst->st_info) == STT_ARM_TFUNC)
15445 {
15446 dst->st_info = ELF_ST_INFO (ELF_ST_BIND (dst->st_info), STT_FUNC);
15447 dst->st_target_internal = ST_BRANCH_TO_THUMB;
15448 }
15449 else if (ELF_ST_TYPE (dst->st_info) == STT_SECTION)
15450 dst->st_target_internal = ST_BRANCH_LONG;
15451 else
15452 dst->st_target_internal = ST_BRANCH_UNKNOWN;
15453
15454 return TRUE;
15455 }
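
/* So, for example, a Thumb function defined at address 0x8001 in an EABI
object is seen by the rest of the linker with st_value 0x8000 and
st_target_internal set to ST_BRANCH_TO_THUMB; the low bit is restored when
the symbol table is written back out (see elf32_arm_swap_symbol_out
below). */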
15456
15457
15458 /* Mangle thumb function symbols as we write them out. */
15459
15460 static void
15461 elf32_arm_swap_symbol_out (bfd *abfd,
15462 const Elf_Internal_Sym *src,
15463 void *cdst,
15464 void *shndx)
15465 {
15466 Elf_Internal_Sym newsym;
15467
15468 /* We convert STT_ARM_TFUNC symbols into STT_FUNC with the low bit
15469 of the address set, as per the new EABI. We do this unconditionally
15470 because objcopy does not set the elf header flags until after
15471 it writes out the symbol table. */
15472 if (src->st_target_internal == ST_BRANCH_TO_THUMB)
15473 {
15474 newsym = *src;
15475 if (ELF_ST_TYPE (src->st_info) != STT_GNU_IFUNC)
15476 newsym.st_info = ELF_ST_INFO (ELF_ST_BIND (src->st_info), STT_FUNC);
15477 if (newsym.st_shndx != SHN_UNDEF)
15478 {
15479 /* Do this only for defined symbols. At link time, the static
15480 linker simulates the dynamic linker's work of resolving symbols
15481 and carries over the thumbness of the symbols it finds to the
15482 output symbol table. It is not clear exactly how this happens,
15483 but the thumbness of undefined symbols can well be different at
15484 runtime, and writing '1' for them would be confusing both for
15485 users and possibly for the dynamic linker itself.
15486 */
15487 newsym.st_value |= 1;
15488 }
15489
15490 src = &newsym;
15491 }
15492 bfd_elf32_swap_symbol_out (abfd, src, cdst, shndx);
15493 }
15494
15495 /* Add the PT_ARM_EXIDX program header. */
15496
15497 static bfd_boolean
15498 elf32_arm_modify_segment_map (bfd *abfd,
15499 struct bfd_link_info *info ATTRIBUTE_UNUSED)
15500 {
15501 struct elf_segment_map *m;
15502 asection *sec;
15503
15504 sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
15505 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
15506 {
15507 /* If there is already a PT_ARM_EXIDX header, then we do not
15508 want to add another one. This situation arises when running
15509 "strip"; the input binary already has the header. */
15510 m = elf_tdata (abfd)->segment_map;
15511 while (m && m->p_type != PT_ARM_EXIDX)
15512 m = m->next;
15513 if (!m)
15514 {
15515 m = (struct elf_segment_map *)
15516 bfd_zalloc (abfd, sizeof (struct elf_segment_map));
15517 if (m == NULL)
15518 return FALSE;
15519 m->p_type = PT_ARM_EXIDX;
15520 m->count = 1;
15521 m->sections[0] = sec;
15522
15523 m->next = elf_tdata (abfd)->segment_map;
15524 elf_tdata (abfd)->segment_map = m;
15525 }
15526 }
15527
15528 return TRUE;
15529 }
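
/* The PT_ARM_EXIDX segment is how a runtime unwinder locates the exception
index table (typically via dl_iterate_phdr), so any image that loads an
.ARM.exidx section should carry exactly one such header. */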
15530
15531 /* We may add a PT_ARM_EXIDX program header. */
15532
15533 static int
15534 elf32_arm_additional_program_headers (bfd *abfd,
15535 struct bfd_link_info *info ATTRIBUTE_UNUSED)
15536 {
15537 asection *sec;
15538
15539 sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
15540 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
15541 return 1;
15542 else
15543 return 0;
15544 }
15545
15546 /* Hook called by the linker routine which adds symbols from an object
15547 file. */
15548
15549 static bfd_boolean
15550 elf32_arm_add_symbol_hook (bfd *abfd, struct bfd_link_info *info,
15551 Elf_Internal_Sym *sym, const char **namep,
15552 flagword *flagsp, asection **secp, bfd_vma *valp)
15553 {
15554 if ((abfd->flags & DYNAMIC) == 0
15555 && (ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC
15556 || ELF_ST_BIND (sym->st_info) == STB_GNU_UNIQUE))
15557 elf_tdata (info->output_bfd)->has_gnu_symbols = TRUE;
15558
15559 if (elf32_arm_hash_table (info)->vxworks_p
15560 && !elf_vxworks_add_symbol_hook (abfd, info, sym, namep,
15561 flagsp, secp, valp))
15562 return FALSE;
15563
15564 return TRUE;
15565 }
15566
15567 /* We use this to override swap_symbol_in and swap_symbol_out. */
15568 const struct elf_size_info elf32_arm_size_info =
15569 {
15570 sizeof (Elf32_External_Ehdr),
15571 sizeof (Elf32_External_Phdr),
15572 sizeof (Elf32_External_Shdr),
15573 sizeof (Elf32_External_Rel),
15574 sizeof (Elf32_External_Rela),
15575 sizeof (Elf32_External_Sym),
15576 sizeof (Elf32_External_Dyn),
15577 sizeof (Elf_External_Note),
15578 4,
15579 1,
15580 32, 2,
15581 ELFCLASS32, EV_CURRENT,
15582 bfd_elf32_write_out_phdrs,
15583 bfd_elf32_write_shdrs_and_ehdr,
15584 bfd_elf32_checksum_contents,
15585 bfd_elf32_write_relocs,
15586 elf32_arm_swap_symbol_in,
15587 elf32_arm_swap_symbol_out,
15588 bfd_elf32_slurp_reloc_table,
15589 bfd_elf32_slurp_symbol_table,
15590 bfd_elf32_swap_dyn_in,
15591 bfd_elf32_swap_dyn_out,
15592 bfd_elf32_swap_reloc_in,
15593 bfd_elf32_swap_reloc_out,
15594 bfd_elf32_swap_reloca_in,
15595 bfd_elf32_swap_reloca_out
15596 };
15597
15598 #define ELF_ARCH bfd_arch_arm
15599 #define ELF_TARGET_ID ARM_ELF_DATA
15600 #define ELF_MACHINE_CODE EM_ARM
15601 #ifdef __QNXTARGET__
15602 #define ELF_MAXPAGESIZE 0x1000
15603 #else
15604 #define ELF_MAXPAGESIZE 0x8000
15605 #endif
15606 #define ELF_MINPAGESIZE 0x1000
15607 #define ELF_COMMONPAGESIZE 0x1000
15608
15609 #define bfd_elf32_mkobject elf32_arm_mkobject
15610
15611 #define bfd_elf32_bfd_copy_private_bfd_data elf32_arm_copy_private_bfd_data
15612 #define bfd_elf32_bfd_merge_private_bfd_data elf32_arm_merge_private_bfd_data
15613 #define bfd_elf32_bfd_set_private_flags elf32_arm_set_private_flags
15614 #define bfd_elf32_bfd_print_private_bfd_data elf32_arm_print_private_bfd_data
15615 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_link_hash_table_create
15616 #define bfd_elf32_bfd_link_hash_table_free elf32_arm_hash_table_free
15617 #define bfd_elf32_bfd_reloc_type_lookup elf32_arm_reloc_type_lookup
15618 #define bfd_elf32_bfd_reloc_name_lookup elf32_arm_reloc_name_lookup
15619 #define bfd_elf32_find_nearest_line elf32_arm_find_nearest_line
15620 #define bfd_elf32_find_inliner_info elf32_arm_find_inliner_info
15621 #define bfd_elf32_new_section_hook elf32_arm_new_section_hook
15622 #define bfd_elf32_bfd_is_target_special_symbol elf32_arm_is_target_special_symbol
15623 #define bfd_elf32_bfd_final_link elf32_arm_final_link
15624
15625 #define elf_backend_get_symbol_type elf32_arm_get_symbol_type
15626 #define elf_backend_gc_mark_hook elf32_arm_gc_mark_hook
15627 #define elf_backend_gc_mark_extra_sections elf32_arm_gc_mark_extra_sections
15628 #define elf_backend_gc_sweep_hook elf32_arm_gc_sweep_hook
15629 #define elf_backend_check_relocs elf32_arm_check_relocs
15630 #define elf_backend_relocate_section elf32_arm_relocate_section
15631 #define elf_backend_write_section elf32_arm_write_section
15632 #define elf_backend_adjust_dynamic_symbol elf32_arm_adjust_dynamic_symbol
15633 #define elf_backend_create_dynamic_sections elf32_arm_create_dynamic_sections
15634 #define elf_backend_finish_dynamic_symbol elf32_arm_finish_dynamic_symbol
15635 #define elf_backend_finish_dynamic_sections elf32_arm_finish_dynamic_sections
15636 #define elf_backend_size_dynamic_sections elf32_arm_size_dynamic_sections
15637 #define elf_backend_always_size_sections elf32_arm_always_size_sections
15638 #define elf_backend_init_index_section _bfd_elf_init_2_index_sections
15639 #define elf_backend_post_process_headers elf32_arm_post_process_headers
15640 #define elf_backend_reloc_type_class elf32_arm_reloc_type_class
15641 #define elf_backend_object_p elf32_arm_object_p
15642 #define elf_backend_fake_sections elf32_arm_fake_sections
15643 #define elf_backend_section_from_shdr elf32_arm_section_from_shdr
15644 #define elf_backend_final_write_processing elf32_arm_final_write_processing
15645 #define elf_backend_copy_indirect_symbol elf32_arm_copy_indirect_symbol
15646 #define elf_backend_size_info elf32_arm_size_info
15647 #define elf_backend_modify_segment_map elf32_arm_modify_segment_map
15648 #define elf_backend_additional_program_headers elf32_arm_additional_program_headers
15649 #define elf_backend_output_arch_local_syms elf32_arm_output_arch_local_syms
15650 #define elf_backend_begin_write_processing elf32_arm_begin_write_processing
15651 #define elf_backend_add_symbol_hook elf32_arm_add_symbol_hook
15652
15653 #define elf_backend_can_refcount 1
15654 #define elf_backend_can_gc_sections 1
15655 #define elf_backend_plt_readonly 1
15656 #define elf_backend_want_got_plt 1
15657 #define elf_backend_want_plt_sym 0
15658 #define elf_backend_may_use_rel_p 1
15659 #define elf_backend_may_use_rela_p 0
15660 #define elf_backend_default_use_rela_p 0
15661
15662 #define elf_backend_got_header_size 12
15663
15664 #undef elf_backend_obj_attrs_vendor
15665 #define elf_backend_obj_attrs_vendor "aeabi"
15666 #undef elf_backend_obj_attrs_section
15667 #define elf_backend_obj_attrs_section ".ARM.attributes"
15668 #undef elf_backend_obj_attrs_arg_type
15669 #define elf_backend_obj_attrs_arg_type elf32_arm_obj_attrs_arg_type
15670 #undef elf_backend_obj_attrs_section_type
15671 #define elf_backend_obj_attrs_section_type SHT_ARM_ATTRIBUTES
15672 #define elf_backend_obj_attrs_order elf32_arm_obj_attrs_order
15673 #define elf_backend_obj_attrs_handle_unknown elf32_arm_obj_attrs_handle_unknown
15674
15675 #include "elf32-target.h"
15676
15677 /* Native Client targets. */
15678
15679 #undef TARGET_LITTLE_SYM
15680 #define TARGET_LITTLE_SYM bfd_elf32_littlearm_nacl_vec
15681 #undef TARGET_LITTLE_NAME
15682 #define TARGET_LITTLE_NAME "elf32-littlearm-nacl"
15683 #undef TARGET_BIG_SYM
15684 #define TARGET_BIG_SYM bfd_elf32_bigarm_nacl_vec
15685 #undef TARGET_BIG_NAME
15686 #define TARGET_BIG_NAME "elf32-bigarm-nacl"
15687
15688 /* Like elf32_arm_link_hash_table_create -- but overrides
15689 appropriately for NaCl. */
15690
15691 static struct bfd_link_hash_table *
15692 elf32_arm_nacl_link_hash_table_create (bfd *abfd)
15693 {
15694 struct bfd_link_hash_table *ret;
15695
15696 ret = elf32_arm_link_hash_table_create (abfd);
15697 if (ret)
15698 {
15699 struct elf32_arm_link_hash_table *htab
15700 = (struct elf32_arm_link_hash_table *) ret;
15701
15702 htab->nacl_p = 1;
15703
15704 htab->plt_header_size = 4 * ARRAY_SIZE (elf32_arm_nacl_plt0_entry);
15705 htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_arm_nacl_plt_entry);
15706 }
15707 return ret;
15708 }
15709
15710 /* Since NaCl doesn't use the ARM-specific unwind format, we don't
15711 really need to use elf32_arm_modify_segment_map. But we do it
15712 anyway just to reduce gratuitous differences with the stock ARM backend. */
15713
15714 static bfd_boolean
15715 elf32_arm_nacl_modify_segment_map (bfd *abfd, struct bfd_link_info *info)
15716 {
15717 return (elf32_arm_modify_segment_map (abfd, info)
15718 && nacl_modify_segment_map (abfd, info));
15719 }
15720
15721 #undef elf32_bed
15722 #define elf32_bed elf32_arm_nacl_bed
15723 #undef bfd_elf32_bfd_link_hash_table_create
15724 #define bfd_elf32_bfd_link_hash_table_create \
15725 elf32_arm_nacl_link_hash_table_create
15726 #undef elf_backend_plt_alignment
15727 #define elf_backend_plt_alignment 4
15728 #undef elf_backend_modify_segment_map
15729 #define elf_backend_modify_segment_map elf32_arm_nacl_modify_segment_map
15730 #undef elf_backend_modify_program_headers
15731 #define elf_backend_modify_program_headers nacl_modify_program_headers
15732
15733 #undef ELF_MAXPAGESIZE
15734 #define ELF_MAXPAGESIZE 0x10000
15735
15736 #include "elf32-target.h"
15737
15738 /* Reset to defaults. */
15739 #undef elf_backend_plt_alignment
15740 #undef elf_backend_modify_segment_map
15741 #define elf_backend_modify_segment_map elf32_arm_modify_segment_map
15742 #undef elf_backend_modify_program_headers
15743
15744 /* VxWorks Targets. */
15745
15746 #undef TARGET_LITTLE_SYM
15747 #define TARGET_LITTLE_SYM bfd_elf32_littlearm_vxworks_vec
15748 #undef TARGET_LITTLE_NAME
15749 #define TARGET_LITTLE_NAME "elf32-littlearm-vxworks"
15750 #undef TARGET_BIG_SYM
15751 #define TARGET_BIG_SYM bfd_elf32_bigarm_vxworks_vec
15752 #undef TARGET_BIG_NAME
15753 #define TARGET_BIG_NAME "elf32-bigarm-vxworks"
15754
15755 /* Like elf32_arm_link_hash_table_create -- but overrides
15756 appropriately for VxWorks. */
15757
15758 static struct bfd_link_hash_table *
15759 elf32_arm_vxworks_link_hash_table_create (bfd *abfd)
15760 {
15761 struct bfd_link_hash_table *ret;
15762
15763 ret = elf32_arm_link_hash_table_create (abfd);
15764 if (ret)
15765 {
15766 struct elf32_arm_link_hash_table *htab
15767 = (struct elf32_arm_link_hash_table *) ret;
15768 htab->use_rel = 0;
15769 htab->vxworks_p = 1;
15770 }
15771 return ret;
15772 }
15773
15774 static void
15775 elf32_arm_vxworks_final_write_processing (bfd *abfd, bfd_boolean linker)
15776 {
15777 elf32_arm_final_write_processing (abfd, linker);
15778 elf_vxworks_final_write_processing (abfd, linker);
15779 }
15780
15781 #undef elf32_bed
15782 #define elf32_bed elf32_arm_vxworks_bed
15783
15784 #undef bfd_elf32_bfd_link_hash_table_create
15785 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_vxworks_link_hash_table_create
15786 #undef elf_backend_final_write_processing
15787 #define elf_backend_final_write_processing elf32_arm_vxworks_final_write_processing
15788 #undef elf_backend_emit_relocs
15789 #define elf_backend_emit_relocs elf_vxworks_emit_relocs
15790
15791 #undef elf_backend_may_use_rel_p
15792 #define elf_backend_may_use_rel_p 0
15793 #undef elf_backend_may_use_rela_p
15794 #define elf_backend_may_use_rela_p 1
15795 #undef elf_backend_default_use_rela_p
15796 #define elf_backend_default_use_rela_p 1
15797 #undef elf_backend_want_plt_sym
15798 #define elf_backend_want_plt_sym 1
15799 #undef ELF_MAXPAGESIZE
15800 #define ELF_MAXPAGESIZE 0x1000
15801
15802 #include "elf32-target.h"
15803
15804
15805 /* Merge backend specific data from an object file to the output
15806 object file when linking. */
15807
15808 static bfd_boolean
15809 elf32_arm_merge_private_bfd_data (bfd * ibfd, bfd * obfd)
15810 {
15811 flagword out_flags;
15812 flagword in_flags;
15813 bfd_boolean flags_compatible = TRUE;
15814 asection *sec;
15815
15816 /* Check if we have the same endianness. */
15817 if (! _bfd_generic_verify_endian_match (ibfd, obfd))
15818 return FALSE;
15819
15820 if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
15821 return TRUE;
15822
15823 if (!elf32_arm_merge_eabi_attributes (ibfd, obfd))
15824 return FALSE;
15825
15826 /* The input BFD must have had its flags initialised. */
15827 /* The following seems bogus to me -- The flags are initialized in
15828 the assembler but I don't think an elf_flags_init field is
15829 written into the object. */
15830 /* BFD_ASSERT (elf_flags_init (ibfd)); */
15831
15832 in_flags = elf_elfheader (ibfd)->e_flags;
15833 out_flags = elf_elfheader (obfd)->e_flags;
15834
15835 /* In theory there is no reason why we couldn't handle this. However
15836 in practice it isn't even close to working and there is no real
15837 reason to want it. */
15838 if (EF_ARM_EABI_VERSION (in_flags) >= EF_ARM_EABI_VER4
15839 && !(ibfd->flags & DYNAMIC)
15840 && (in_flags & EF_ARM_BE8))
15841 {
15842 _bfd_error_handler (_("error: %B is already in final BE8 format"),
15843 ibfd);
15844 return FALSE;
15845 }
15846
15847 if (!elf_flags_init (obfd))
15848 {
15849 /* If the input is the default architecture and had the default
15850 flags then do not bother setting the flags for the output
15851 architecture, instead allow future merges to do this. If no
15852 future merges ever set these flags then they will retain their
15853 uninitialised values, which surprise surprise, correspond
15854 to the default values. */
15855 if (bfd_get_arch_info (ibfd)->the_default
15856 && elf_elfheader (ibfd)->e_flags == 0)
15857 return TRUE;
15858
15859 elf_flags_init (obfd) = TRUE;
15860 elf_elfheader (obfd)->e_flags = in_flags;
15861
15862 if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
15863 && bfd_get_arch_info (obfd)->the_default)
15864 return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd), bfd_get_mach (ibfd));
15865
15866 return TRUE;
15867 }
15868
15869 /* Determine what should happen if the input ARM architecture
15870 does not match the output ARM architecture. */
15871 if (! bfd_arm_merge_machines (ibfd, obfd))
15872 return FALSE;
15873
15874 /* Identical flags must be compatible. */
15875 if (in_flags == out_flags)
15876 return TRUE;
15877
15878 /* Check to see if the input BFD actually contains any sections. If
15879 not, its flags may not have been initialised either, but it
15880 cannot actually cause any incompatibility. Do not short-circuit
15881 dynamic objects; their section list may be emptied by
15882 elf_link_add_object_symbols.
15883
15884 Also check to see if there are no code sections in the input.
15885 In this case there is no need to check for code-specific flags.
15886 XXX - do we need to worry about floating-point format compatibility
15887 in data sections? */
15888 if (!(ibfd->flags & DYNAMIC))
15889 {
15890 bfd_boolean null_input_bfd = TRUE;
15891 bfd_boolean only_data_sections = TRUE;
15892
15893 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
15894 {
15895 /* Ignore synthetic glue sections. */
15896 if (strcmp (sec->name, ".glue_7")
15897 && strcmp (sec->name, ".glue_7t"))
15898 {
15899 if ((bfd_get_section_flags (ibfd, sec)
15900 & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
15901 == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
15902 only_data_sections = FALSE;
15903
15904 null_input_bfd = FALSE;
15905 break;
15906 }
15907 }
15908
15909 if (null_input_bfd || only_data_sections)
15910 return TRUE;
15911 }
15912
15913 /* Complain about various flag mismatches. */
15914 if (!elf32_arm_versions_compatible (EF_ARM_EABI_VERSION (in_flags),
15915 EF_ARM_EABI_VERSION (out_flags)))
15916 {
15917 _bfd_error_handler
15918 (_("error: Source object %B has EABI version %d, but target %B has EABI version %d"),
15919 ibfd, obfd,
15920 (in_flags & EF_ARM_EABIMASK) >> 24,
15921 (out_flags & EF_ARM_EABIMASK) >> 24);
15922 return FALSE;
15923 }
15924
15925 /* Not sure what needs to be checked for EABI versions >= 1. */
15926 /* VxWorks libraries do not use these flags. */
15927 if (get_elf_backend_data (obfd) != &elf32_arm_vxworks_bed
15928 && get_elf_backend_data (ibfd) != &elf32_arm_vxworks_bed
15929 && EF_ARM_EABI_VERSION (in_flags) == EF_ARM_EABI_UNKNOWN)
15930 {
15931 if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
15932 {
15933 _bfd_error_handler
15934 (_("error: %B is compiled for APCS-%d, whereas target %B uses APCS-%d"),
15935 ibfd, obfd,
15936 in_flags & EF_ARM_APCS_26 ? 26 : 32,
15937 out_flags & EF_ARM_APCS_26 ? 26 : 32);
15938 flags_compatible = FALSE;
15939 }
15940
15941 if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
15942 {
15943 if (in_flags & EF_ARM_APCS_FLOAT)
15944 _bfd_error_handler
15945 (_("error: %B passes floats in float registers, whereas %B passes them in integer registers"),
15946 ibfd, obfd);
15947 else
15948 _bfd_error_handler
15949 (_("error: %B passes floats in integer registers, whereas %B passes them in float registers"),
15950 ibfd, obfd);
15951
15952 flags_compatible = FALSE;
15953 }
15954
15955 if ((in_flags & EF_ARM_VFP_FLOAT) != (out_flags & EF_ARM_VFP_FLOAT))
15956 {
15957 if (in_flags & EF_ARM_VFP_FLOAT)
15958 _bfd_error_handler
15959 (_("error: %B uses VFP instructions, whereas %B does not"),
15960 ibfd, obfd);
15961 else
15962 _bfd_error_handler
15963 (_("error: %B uses FPA instructions, whereas %B does not"),
15964 ibfd, obfd);
15965
15966 flags_compatible = FALSE;
15967 }
15968
15969 if ((in_flags & EF_ARM_MAVERICK_FLOAT) != (out_flags & EF_ARM_MAVERICK_FLOAT))
15970 {
15971 if (in_flags & EF_ARM_MAVERICK_FLOAT)
15972 _bfd_error_handler
15973 (_("error: %B uses Maverick instructions, whereas %B does not"),
15974 ibfd, obfd);
15975 else
15976 _bfd_error_handler
15977 (_("error: %B does not use Maverick instructions, whereas %B does"),
15978 ibfd, obfd);
15979
15980 flags_compatible = FALSE;
15981 }
15982
15983 #ifdef EF_ARM_SOFT_FLOAT
15984 if ((in_flags & EF_ARM_SOFT_FLOAT) != (out_flags & EF_ARM_SOFT_FLOAT))
15985 {
15986 /* We can allow interworking between code that is VFP format
15987 layout, and uses either soft float or integer regs for
15988 passing floating point arguments and results. We already
15989 know that the APCS_FLOAT flags match; similarly for VFP
15990 flags. */
15991 if ((in_flags & EF_ARM_APCS_FLOAT) != 0
15992 || (in_flags & EF_ARM_VFP_FLOAT) == 0)
15993 {
15994 if (in_flags & EF_ARM_SOFT_FLOAT)
15995 _bfd_error_handler
15996 (_("error: %B uses software FP, whereas %B uses hardware FP"),
15997 ibfd, obfd);
15998 else
15999 _bfd_error_handler
16000 (_("error: %B uses hardware FP, whereas %B uses software FP"),
16001 ibfd, obfd);
16002
16003 flags_compatible = FALSE;
16004 }
16005 }
16006 #endif
16007
16008 /* Interworking mismatch is only a warning. */
16009 if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
16010 {
16011 if (in_flags & EF_ARM_INTERWORK)
16012 {
16013 _bfd_error_handler
16014 (_("Warning: %B supports interworking, whereas %B does not"),
16015 ibfd, obfd);
16016 }
16017 else
16018 {
16019 _bfd_error_handler
16020 (_("Warning: %B does not support interworking, whereas %B does"),
16021 ibfd, obfd);
16022 }
16023 }
16024 }
16025
16026 return flags_compatible;
16027 }
16028
16029
16030 /* Symbian OS Targets. */
16031
16032 #undef TARGET_LITTLE_SYM
16033 #define TARGET_LITTLE_SYM bfd_elf32_littlearm_symbian_vec
16034 #undef TARGET_LITTLE_NAME
16035 #define TARGET_LITTLE_NAME "elf32-littlearm-symbian"
16036 #undef TARGET_BIG_SYM
16037 #define TARGET_BIG_SYM bfd_elf32_bigarm_symbian_vec
16038 #undef TARGET_BIG_NAME
16039 #define TARGET_BIG_NAME "elf32-bigarm-symbian"
16040
16041 /* Like elf32_arm_link_hash_table_create -- but overrides
16042 appropriately for Symbian OS. */
16043
16044 static struct bfd_link_hash_table *
16045 elf32_arm_symbian_link_hash_table_create (bfd *abfd)
16046 {
16047 struct bfd_link_hash_table *ret;
16048
16049 ret = elf32_arm_link_hash_table_create (abfd);
16050 if (ret)
16051 {
16052 struct elf32_arm_link_hash_table *htab
16053 = (struct elf32_arm_link_hash_table *)ret;
16054 /* There is no PLT header for Symbian OS. */
16055 htab->plt_header_size = 0;
16056 /* The PLT entries are each one instruction and one word. */
16057 htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry);
16058 htab->symbian_p = 1;
16059 /* Symbian uses armv5t or above, so use_blx is always true. */
16060 htab->use_blx = 1;
16061 htab->root.is_relocatable_executable = 1;
16062 }
16063 return ret;
16064 }
16065
16066 static const struct bfd_elf_special_section
16067 elf32_arm_symbian_special_sections[] =
16068 {
16069 /* In a BPABI executable, the dynamic linking sections do not go in
16070 the loadable read-only segment. The post-linker may wish to
16071 refer to these sections, but they are not part of the final
16072 program image. */
16073 { STRING_COMMA_LEN (".dynamic"), 0, SHT_DYNAMIC, 0 },
16074 { STRING_COMMA_LEN (".dynstr"), 0, SHT_STRTAB, 0 },
16075 { STRING_COMMA_LEN (".dynsym"), 0, SHT_DYNSYM, 0 },
16076 { STRING_COMMA_LEN (".got"), 0, SHT_PROGBITS, 0 },
16077 { STRING_COMMA_LEN (".hash"), 0, SHT_HASH, 0 },
16078 /* These sections do not need to be writable as the SymbianOS
16079 postlinker will arrange things so that no dynamic relocation is
16080 required. */
16081 { STRING_COMMA_LEN (".init_array"), 0, SHT_INIT_ARRAY, SHF_ALLOC },
16082 { STRING_COMMA_LEN (".fini_array"), 0, SHT_FINI_ARRAY, SHF_ALLOC },
16083 { STRING_COMMA_LEN (".preinit_array"), 0, SHT_PREINIT_ARRAY, SHF_ALLOC },
16084 { NULL, 0, 0, 0, 0 }
16085 };
16086
16087 static void
16088 elf32_arm_symbian_begin_write_processing (bfd *abfd,
16089 struct bfd_link_info *link_info)
16090 {
16091 /* BPABI objects are never loaded directly by an OS kernel; they are
16092 processed by a postlinker first, into an OS-specific format. If
16093 the D_PAGED bit is set on the file, BFD will align segments on
16094 page boundaries, so that an OS can directly map the file. With
16095 BPABI objects, that just results in wasted space. In addition,
16096 because we clear the D_PAGED bit, map_sections_to_segments will
16097 recognize that the program headers should not be mapped into any
16098 loadable segment. */
16099 abfd->flags &= ~D_PAGED;
16100 elf32_arm_begin_write_processing (abfd, link_info);
16101 }
16102
16103 static bfd_boolean
16104 elf32_arm_symbian_modify_segment_map (bfd *abfd,
16105 struct bfd_link_info *info)
16106 {
16107 struct elf_segment_map *m;
16108 asection *dynsec;
16109
16110 /* BPABI shared libraries and executables should have a PT_DYNAMIC
16111 segment. However, because the .dynamic section is not marked
16112 with SEC_LOAD, the generic ELF code will not create such a
16113 segment. */
16114 dynsec = bfd_get_section_by_name (abfd, ".dynamic");
16115 if (dynsec)
16116 {
16117 for (m = elf_tdata (abfd)->segment_map; m != NULL; m = m->next)
16118 if (m->p_type == PT_DYNAMIC)
16119 break;
16120
16121 if (m == NULL)
16122 {
16123 m = _bfd_elf_make_dynamic_segment (abfd, dynsec);
16124 m->next = elf_tdata (abfd)->segment_map;
16125 elf_tdata (abfd)->segment_map = m;
16126 }
16127 }
16128
16129 /* Also call the generic arm routine. */
16130 return elf32_arm_modify_segment_map (abfd, info);
16131 }
16132
16133 /* Return address for Ith PLT stub in section PLT, for relocation REL
16134 or (bfd_vma) -1 if it should not be included. */
16135
16136 static bfd_vma
16137 elf32_arm_symbian_plt_sym_val (bfd_vma i, const asection *plt,
16138 const arelent *rel ATTRIBUTE_UNUSED)
16139 {
16140 return plt->vma + 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry) * i;
16141 }
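
/* This hook is what lets tools such as objdump synthesize <foo@plt> labels
when disassembling: given the index of a PLT relocation it reports the
address of the corresponding Symbian PLT stub, each stub being
ARRAY_SIZE (elf32_arm_symbian_plt_entry) words long. */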
16142
16143
16144 #undef elf32_bed
16145 #define elf32_bed elf32_arm_symbian_bed
16146
16147 /* The dynamic sections are not allocated on SymbianOS; the postlinker
16148 will process them and then discard them. */
16149 #undef ELF_DYNAMIC_SEC_FLAGS
16150 #define ELF_DYNAMIC_SEC_FLAGS \
16151 (SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_LINKER_CREATED)
16152
16153 #undef elf_backend_emit_relocs
16154
16155 #undef bfd_elf32_bfd_link_hash_table_create
16156 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_symbian_link_hash_table_create
16157 #undef elf_backend_special_sections
16158 #define elf_backend_special_sections elf32_arm_symbian_special_sections
16159 #undef elf_backend_begin_write_processing
16160 #define elf_backend_begin_write_processing elf32_arm_symbian_begin_write_processing
16161 #undef elf_backend_final_write_processing
16162 #define elf_backend_final_write_processing elf32_arm_final_write_processing
16163
16164 #undef elf_backend_modify_segment_map
16165 #define elf_backend_modify_segment_map elf32_arm_symbian_modify_segment_map
16166
16167 /* There is no .got section for BPABI objects, and hence no header. */
16168 #undef elf_backend_got_header_size
16169 #define elf_backend_got_header_size 0
16170
16171 /* Similarly, there is no .got.plt section. */
16172 #undef elf_backend_want_got_plt
16173 #define elf_backend_want_got_plt 0
16174
16175 #undef elf_backend_plt_sym_val
16176 #define elf_backend_plt_sym_val elf32_arm_symbian_plt_sym_val
16177
16178 #undef elf_backend_may_use_rel_p
16179 #define elf_backend_may_use_rel_p 1
16180 #undef elf_backend_may_use_rela_p
16181 #define elf_backend_may_use_rela_p 0
16182 #undef elf_backend_default_use_rela_p
16183 #define elf_backend_default_use_rela_p 0
16184 #undef elf_backend_want_plt_sym
16185 #define elf_backend_want_plt_sym 0
16186 #undef ELF_MAXPAGESIZE
16187 #define ELF_MAXPAGESIZE 0x8000
16188
16189 #include "elf32-target.h"