1 /* 32-bit ELF support for ARM
2 Copyright 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007,
3 2008, 2009, 2010 Free Software Foundation, Inc.
4
5 This file is part of BFD, the Binary File Descriptor library.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
20 MA 02110-1301, USA. */
21
22 #include "sysdep.h"
23 #include <limits.h>
24
25 #include "bfd.h"
26 #include "libiberty.h"
27 #include "libbfd.h"
28 #include "elf-bfd.h"
29 #include "elf-vxworks.h"
30 #include "elf/arm.h"
31
 32 /* Return the relocation section associated with NAME. HTAB is the
 33 bfd's elf32_arm_link_hash_table. */
34 #define RELOC_SECTION(HTAB, NAME) \
35 ((HTAB)->use_rel ? ".rel" NAME : ".rela" NAME)
36
 37 /* Return the size of a relocation entry. HTAB is the bfd's
 38 elf32_arm_link_hash_table. */
39 #define RELOC_SIZE(HTAB) \
40 ((HTAB)->use_rel \
41 ? sizeof (Elf32_External_Rel) \
42 : sizeof (Elf32_External_Rela))
43
 44 /* Return the function to swap relocations in. HTAB is the bfd's
 45 elf32_arm_link_hash_table. */
46 #define SWAP_RELOC_IN(HTAB) \
47 ((HTAB)->use_rel \
48 ? bfd_elf32_swap_reloc_in \
49 : bfd_elf32_swap_reloca_in)
50
 51 /* Return the function to swap relocations out. HTAB is the bfd's
 52 elf32_arm_link_hash_table. */
53 #define SWAP_RELOC_OUT(HTAB) \
54 ((HTAB)->use_rel \
55 ? bfd_elf32_swap_reloc_out \
56 : bfd_elf32_swap_reloca_out)
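
/* Illustrative sketch, not part of the original file: how the REL/RELA
   helpers above are typically consulted.  Here "htab" is assumed to be a
   struct elf32_arm_link_hash_table pointer for the output bfd, and "abfd",
   "ext_rel" and "int_rel" are hypothetical locals of the obvious types.

     name = RELOC_SECTION (htab, ".text");     yields ".rel.text" or ".rela.text"
     entsize = RELOC_SIZE (htab);              external reloc entry size in bytes
     SWAP_RELOC_IN (htab) (abfd, ext_rel, &int_rel);
                                               decode one external entry  */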
57
58 #define elf_info_to_howto 0
59 #define elf_info_to_howto_rel elf32_arm_info_to_howto
60
61 #define ARM_ELF_ABI_VERSION 0
62 #define ARM_ELF_OS_ABI_VERSION ELFOSABI_ARM
63
64 static bfd_boolean elf32_arm_write_section (bfd *output_bfd,
65 struct bfd_link_info *link_info,
66 asection *sec,
67 bfd_byte *contents);
68
 69 /* Note: code such as elf32_arm_reloc_type_lookup expects to use e.g.
 70 R_ARM_PC24 as an index into this table, and to find the R_ARM_PC24 HOWTO
 71 in that slot. */
72
73 static reloc_howto_type elf32_arm_howto_table_1[] =
74 {
75 /* No relocation. */
76 HOWTO (R_ARM_NONE, /* type */
77 0, /* rightshift */
78 0, /* size (0 = byte, 1 = short, 2 = long) */
79 0, /* bitsize */
80 FALSE, /* pc_relative */
81 0, /* bitpos */
82 complain_overflow_dont,/* complain_on_overflow */
83 bfd_elf_generic_reloc, /* special_function */
84 "R_ARM_NONE", /* name */
85 FALSE, /* partial_inplace */
86 0, /* src_mask */
87 0, /* dst_mask */
88 FALSE), /* pcrel_offset */
89
90 HOWTO (R_ARM_PC24, /* type */
91 2, /* rightshift */
92 2, /* size (0 = byte, 1 = short, 2 = long) */
93 24, /* bitsize */
94 TRUE, /* pc_relative */
95 0, /* bitpos */
96 complain_overflow_signed,/* complain_on_overflow */
97 bfd_elf_generic_reloc, /* special_function */
98 "R_ARM_PC24", /* name */
99 FALSE, /* partial_inplace */
100 0x00ffffff, /* src_mask */
101 0x00ffffff, /* dst_mask */
102 TRUE), /* pcrel_offset */
103
104 /* 32 bit absolute */
105 HOWTO (R_ARM_ABS32, /* type */
106 0, /* rightshift */
107 2, /* size (0 = byte, 1 = short, 2 = long) */
108 32, /* bitsize */
109 FALSE, /* pc_relative */
110 0, /* bitpos */
111 complain_overflow_bitfield,/* complain_on_overflow */
112 bfd_elf_generic_reloc, /* special_function */
113 "R_ARM_ABS32", /* name */
114 FALSE, /* partial_inplace */
115 0xffffffff, /* src_mask */
116 0xffffffff, /* dst_mask */
117 FALSE), /* pcrel_offset */
118
119 /* standard 32bit pc-relative reloc */
120 HOWTO (R_ARM_REL32, /* type */
121 0, /* rightshift */
122 2, /* size (0 = byte, 1 = short, 2 = long) */
123 32, /* bitsize */
124 TRUE, /* pc_relative */
125 0, /* bitpos */
126 complain_overflow_bitfield,/* complain_on_overflow */
127 bfd_elf_generic_reloc, /* special_function */
128 "R_ARM_REL32", /* name */
129 FALSE, /* partial_inplace */
130 0xffffffff, /* src_mask */
131 0xffffffff, /* dst_mask */
132 TRUE), /* pcrel_offset */
133
 134 /* R_ARM_LDR_PC_G0 in AAELF. */
135 HOWTO (R_ARM_LDR_PC_G0, /* type */
136 0, /* rightshift */
137 0, /* size (0 = byte, 1 = short, 2 = long) */
138 32, /* bitsize */
139 TRUE, /* pc_relative */
140 0, /* bitpos */
141 complain_overflow_dont,/* complain_on_overflow */
142 bfd_elf_generic_reloc, /* special_function */
143 "R_ARM_LDR_PC_G0", /* name */
144 FALSE, /* partial_inplace */
145 0xffffffff, /* src_mask */
146 0xffffffff, /* dst_mask */
147 TRUE), /* pcrel_offset */
148
149 /* 16 bit absolute */
150 HOWTO (R_ARM_ABS16, /* type */
151 0, /* rightshift */
152 1, /* size (0 = byte, 1 = short, 2 = long) */
153 16, /* bitsize */
154 FALSE, /* pc_relative */
155 0, /* bitpos */
156 complain_overflow_bitfield,/* complain_on_overflow */
157 bfd_elf_generic_reloc, /* special_function */
158 "R_ARM_ABS16", /* name */
159 FALSE, /* partial_inplace */
160 0x0000ffff, /* src_mask */
161 0x0000ffff, /* dst_mask */
162 FALSE), /* pcrel_offset */
163
164 /* 12 bit absolute */
165 HOWTO (R_ARM_ABS12, /* type */
166 0, /* rightshift */
167 2, /* size (0 = byte, 1 = short, 2 = long) */
168 12, /* bitsize */
169 FALSE, /* pc_relative */
170 0, /* bitpos */
171 complain_overflow_bitfield,/* complain_on_overflow */
172 bfd_elf_generic_reloc, /* special_function */
173 "R_ARM_ABS12", /* name */
174 FALSE, /* partial_inplace */
175 0x00000fff, /* src_mask */
176 0x00000fff, /* dst_mask */
177 FALSE), /* pcrel_offset */
178
179 HOWTO (R_ARM_THM_ABS5, /* type */
180 6, /* rightshift */
181 1, /* size (0 = byte, 1 = short, 2 = long) */
182 5, /* bitsize */
183 FALSE, /* pc_relative */
184 0, /* bitpos */
185 complain_overflow_bitfield,/* complain_on_overflow */
186 bfd_elf_generic_reloc, /* special_function */
187 "R_ARM_THM_ABS5", /* name */
188 FALSE, /* partial_inplace */
189 0x000007e0, /* src_mask */
190 0x000007e0, /* dst_mask */
191 FALSE), /* pcrel_offset */
192
193 /* 8 bit absolute */
194 HOWTO (R_ARM_ABS8, /* type */
195 0, /* rightshift */
196 0, /* size (0 = byte, 1 = short, 2 = long) */
197 8, /* bitsize */
198 FALSE, /* pc_relative */
199 0, /* bitpos */
200 complain_overflow_bitfield,/* complain_on_overflow */
201 bfd_elf_generic_reloc, /* special_function */
202 "R_ARM_ABS8", /* name */
203 FALSE, /* partial_inplace */
204 0x000000ff, /* src_mask */
205 0x000000ff, /* dst_mask */
206 FALSE), /* pcrel_offset */
207
208 HOWTO (R_ARM_SBREL32, /* type */
209 0, /* rightshift */
210 2, /* size (0 = byte, 1 = short, 2 = long) */
211 32, /* bitsize */
212 FALSE, /* pc_relative */
213 0, /* bitpos */
214 complain_overflow_dont,/* complain_on_overflow */
215 bfd_elf_generic_reloc, /* special_function */
216 "R_ARM_SBREL32", /* name */
217 FALSE, /* partial_inplace */
218 0xffffffff, /* src_mask */
219 0xffffffff, /* dst_mask */
220 FALSE), /* pcrel_offset */
221
222 HOWTO (R_ARM_THM_CALL, /* type */
223 1, /* rightshift */
224 2, /* size (0 = byte, 1 = short, 2 = long) */
225 24, /* bitsize */
226 TRUE, /* pc_relative */
227 0, /* bitpos */
228 complain_overflow_signed,/* complain_on_overflow */
229 bfd_elf_generic_reloc, /* special_function */
230 "R_ARM_THM_CALL", /* name */
231 FALSE, /* partial_inplace */
232 0x07ff07ff, /* src_mask */
233 0x07ff07ff, /* dst_mask */
234 TRUE), /* pcrel_offset */
235
236 HOWTO (R_ARM_THM_PC8, /* type */
237 1, /* rightshift */
238 1, /* size (0 = byte, 1 = short, 2 = long) */
239 8, /* bitsize */
240 TRUE, /* pc_relative */
241 0, /* bitpos */
242 complain_overflow_signed,/* complain_on_overflow */
243 bfd_elf_generic_reloc, /* special_function */
244 "R_ARM_THM_PC8", /* name */
245 FALSE, /* partial_inplace */
246 0x000000ff, /* src_mask */
247 0x000000ff, /* dst_mask */
248 TRUE), /* pcrel_offset */
249
250 HOWTO (R_ARM_BREL_ADJ, /* type */
251 1, /* rightshift */
252 1, /* size (0 = byte, 1 = short, 2 = long) */
253 32, /* bitsize */
254 FALSE, /* pc_relative */
255 0, /* bitpos */
256 complain_overflow_signed,/* complain_on_overflow */
257 bfd_elf_generic_reloc, /* special_function */
258 "R_ARM_BREL_ADJ", /* name */
259 FALSE, /* partial_inplace */
260 0xffffffff, /* src_mask */
261 0xffffffff, /* dst_mask */
262 FALSE), /* pcrel_offset */
263
264 HOWTO (R_ARM_SWI24, /* type */
265 0, /* rightshift */
266 0, /* size (0 = byte, 1 = short, 2 = long) */
267 0, /* bitsize */
268 FALSE, /* pc_relative */
269 0, /* bitpos */
270 complain_overflow_signed,/* complain_on_overflow */
271 bfd_elf_generic_reloc, /* special_function */
272 "R_ARM_SWI24", /* name */
273 FALSE, /* partial_inplace */
274 0x00000000, /* src_mask */
275 0x00000000, /* dst_mask */
276 FALSE), /* pcrel_offset */
277
278 HOWTO (R_ARM_THM_SWI8, /* type */
279 0, /* rightshift */
280 0, /* size (0 = byte, 1 = short, 2 = long) */
281 0, /* bitsize */
282 FALSE, /* pc_relative */
283 0, /* bitpos */
284 complain_overflow_signed,/* complain_on_overflow */
285 bfd_elf_generic_reloc, /* special_function */
 286 "R_ARM_THM_SWI8", /* name */
287 FALSE, /* partial_inplace */
288 0x00000000, /* src_mask */
289 0x00000000, /* dst_mask */
290 FALSE), /* pcrel_offset */
291
292 /* BLX instruction for the ARM. */
293 HOWTO (R_ARM_XPC25, /* type */
294 2, /* rightshift */
295 2, /* size (0 = byte, 1 = short, 2 = long) */
296 25, /* bitsize */
297 TRUE, /* pc_relative */
298 0, /* bitpos */
299 complain_overflow_signed,/* complain_on_overflow */
300 bfd_elf_generic_reloc, /* special_function */
301 "R_ARM_XPC25", /* name */
302 FALSE, /* partial_inplace */
303 0x00ffffff, /* src_mask */
304 0x00ffffff, /* dst_mask */
305 TRUE), /* pcrel_offset */
306
307 /* BLX instruction for the Thumb. */
308 HOWTO (R_ARM_THM_XPC22, /* type */
309 2, /* rightshift */
310 2, /* size (0 = byte, 1 = short, 2 = long) */
311 22, /* bitsize */
312 TRUE, /* pc_relative */
313 0, /* bitpos */
314 complain_overflow_signed,/* complain_on_overflow */
315 bfd_elf_generic_reloc, /* special_function */
316 "R_ARM_THM_XPC22", /* name */
317 FALSE, /* partial_inplace */
318 0x07ff07ff, /* src_mask */
319 0x07ff07ff, /* dst_mask */
320 TRUE), /* pcrel_offset */
321
322 /* Dynamic TLS relocations. */
323
324 HOWTO (R_ARM_TLS_DTPMOD32, /* type */
325 0, /* rightshift */
326 2, /* size (0 = byte, 1 = short, 2 = long) */
327 32, /* bitsize */
328 FALSE, /* pc_relative */
329 0, /* bitpos */
330 complain_overflow_bitfield,/* complain_on_overflow */
331 bfd_elf_generic_reloc, /* special_function */
332 "R_ARM_TLS_DTPMOD32", /* name */
333 TRUE, /* partial_inplace */
334 0xffffffff, /* src_mask */
335 0xffffffff, /* dst_mask */
336 FALSE), /* pcrel_offset */
337
338 HOWTO (R_ARM_TLS_DTPOFF32, /* type */
339 0, /* rightshift */
340 2, /* size (0 = byte, 1 = short, 2 = long) */
341 32, /* bitsize */
342 FALSE, /* pc_relative */
343 0, /* bitpos */
344 complain_overflow_bitfield,/* complain_on_overflow */
345 bfd_elf_generic_reloc, /* special_function */
346 "R_ARM_TLS_DTPOFF32", /* name */
347 TRUE, /* partial_inplace */
348 0xffffffff, /* src_mask */
349 0xffffffff, /* dst_mask */
350 FALSE), /* pcrel_offset */
351
352 HOWTO (R_ARM_TLS_TPOFF32, /* type */
353 0, /* rightshift */
354 2, /* size (0 = byte, 1 = short, 2 = long) */
355 32, /* bitsize */
356 FALSE, /* pc_relative */
357 0, /* bitpos */
358 complain_overflow_bitfield,/* complain_on_overflow */
359 bfd_elf_generic_reloc, /* special_function */
360 "R_ARM_TLS_TPOFF32", /* name */
361 TRUE, /* partial_inplace */
362 0xffffffff, /* src_mask */
363 0xffffffff, /* dst_mask */
364 FALSE), /* pcrel_offset */
365
366 /* Relocs used in ARM Linux */
367
368 HOWTO (R_ARM_COPY, /* type */
369 0, /* rightshift */
370 2, /* size (0 = byte, 1 = short, 2 = long) */
371 32, /* bitsize */
372 FALSE, /* pc_relative */
373 0, /* bitpos */
374 complain_overflow_bitfield,/* complain_on_overflow */
375 bfd_elf_generic_reloc, /* special_function */
376 "R_ARM_COPY", /* name */
377 TRUE, /* partial_inplace */
378 0xffffffff, /* src_mask */
379 0xffffffff, /* dst_mask */
380 FALSE), /* pcrel_offset */
381
382 HOWTO (R_ARM_GLOB_DAT, /* type */
383 0, /* rightshift */
384 2, /* size (0 = byte, 1 = short, 2 = long) */
385 32, /* bitsize */
386 FALSE, /* pc_relative */
387 0, /* bitpos */
388 complain_overflow_bitfield,/* complain_on_overflow */
389 bfd_elf_generic_reloc, /* special_function */
390 "R_ARM_GLOB_DAT", /* name */
391 TRUE, /* partial_inplace */
392 0xffffffff, /* src_mask */
393 0xffffffff, /* dst_mask */
394 FALSE), /* pcrel_offset */
395
396 HOWTO (R_ARM_JUMP_SLOT, /* type */
397 0, /* rightshift */
398 2, /* size (0 = byte, 1 = short, 2 = long) */
399 32, /* bitsize */
400 FALSE, /* pc_relative */
401 0, /* bitpos */
402 complain_overflow_bitfield,/* complain_on_overflow */
403 bfd_elf_generic_reloc, /* special_function */
404 "R_ARM_JUMP_SLOT", /* name */
405 TRUE, /* partial_inplace */
406 0xffffffff, /* src_mask */
407 0xffffffff, /* dst_mask */
408 FALSE), /* pcrel_offset */
409
410 HOWTO (R_ARM_RELATIVE, /* type */
411 0, /* rightshift */
412 2, /* size (0 = byte, 1 = short, 2 = long) */
413 32, /* bitsize */
414 FALSE, /* pc_relative */
415 0, /* bitpos */
416 complain_overflow_bitfield,/* complain_on_overflow */
417 bfd_elf_generic_reloc, /* special_function */
418 "R_ARM_RELATIVE", /* name */
419 TRUE, /* partial_inplace */
420 0xffffffff, /* src_mask */
421 0xffffffff, /* dst_mask */
422 FALSE), /* pcrel_offset */
423
424 HOWTO (R_ARM_GOTOFF32, /* type */
425 0, /* rightshift */
426 2, /* size (0 = byte, 1 = short, 2 = long) */
427 32, /* bitsize */
428 FALSE, /* pc_relative */
429 0, /* bitpos */
430 complain_overflow_bitfield,/* complain_on_overflow */
431 bfd_elf_generic_reloc, /* special_function */
432 "R_ARM_GOTOFF32", /* name */
433 TRUE, /* partial_inplace */
434 0xffffffff, /* src_mask */
435 0xffffffff, /* dst_mask */
436 FALSE), /* pcrel_offset */
437
438 HOWTO (R_ARM_GOTPC, /* type */
439 0, /* rightshift */
440 2, /* size (0 = byte, 1 = short, 2 = long) */
441 32, /* bitsize */
442 TRUE, /* pc_relative */
443 0, /* bitpos */
444 complain_overflow_bitfield,/* complain_on_overflow */
445 bfd_elf_generic_reloc, /* special_function */
446 "R_ARM_GOTPC", /* name */
447 TRUE, /* partial_inplace */
448 0xffffffff, /* src_mask */
449 0xffffffff, /* dst_mask */
450 TRUE), /* pcrel_offset */
451
452 HOWTO (R_ARM_GOT32, /* type */
453 0, /* rightshift */
454 2, /* size (0 = byte, 1 = short, 2 = long) */
455 32, /* bitsize */
456 FALSE, /* pc_relative */
457 0, /* bitpos */
458 complain_overflow_bitfield,/* complain_on_overflow */
459 bfd_elf_generic_reloc, /* special_function */
460 "R_ARM_GOT32", /* name */
461 TRUE, /* partial_inplace */
462 0xffffffff, /* src_mask */
463 0xffffffff, /* dst_mask */
464 FALSE), /* pcrel_offset */
465
466 HOWTO (R_ARM_PLT32, /* type */
467 2, /* rightshift */
468 2, /* size (0 = byte, 1 = short, 2 = long) */
469 24, /* bitsize */
470 TRUE, /* pc_relative */
471 0, /* bitpos */
472 complain_overflow_bitfield,/* complain_on_overflow */
473 bfd_elf_generic_reloc, /* special_function */
474 "R_ARM_PLT32", /* name */
475 FALSE, /* partial_inplace */
476 0x00ffffff, /* src_mask */
477 0x00ffffff, /* dst_mask */
478 TRUE), /* pcrel_offset */
479
480 HOWTO (R_ARM_CALL, /* type */
481 2, /* rightshift */
482 2, /* size (0 = byte, 1 = short, 2 = long) */
483 24, /* bitsize */
484 TRUE, /* pc_relative */
485 0, /* bitpos */
486 complain_overflow_signed,/* complain_on_overflow */
487 bfd_elf_generic_reloc, /* special_function */
488 "R_ARM_CALL", /* name */
489 FALSE, /* partial_inplace */
490 0x00ffffff, /* src_mask */
491 0x00ffffff, /* dst_mask */
492 TRUE), /* pcrel_offset */
493
494 HOWTO (R_ARM_JUMP24, /* type */
495 2, /* rightshift */
496 2, /* size (0 = byte, 1 = short, 2 = long) */
497 24, /* bitsize */
498 TRUE, /* pc_relative */
499 0, /* bitpos */
500 complain_overflow_signed,/* complain_on_overflow */
501 bfd_elf_generic_reloc, /* special_function */
502 "R_ARM_JUMP24", /* name */
503 FALSE, /* partial_inplace */
504 0x00ffffff, /* src_mask */
505 0x00ffffff, /* dst_mask */
506 TRUE), /* pcrel_offset */
507
508 HOWTO (R_ARM_THM_JUMP24, /* type */
509 1, /* rightshift */
510 2, /* size (0 = byte, 1 = short, 2 = long) */
511 24, /* bitsize */
512 TRUE, /* pc_relative */
513 0, /* bitpos */
514 complain_overflow_signed,/* complain_on_overflow */
515 bfd_elf_generic_reloc, /* special_function */
516 "R_ARM_THM_JUMP24", /* name */
517 FALSE, /* partial_inplace */
518 0x07ff2fff, /* src_mask */
519 0x07ff2fff, /* dst_mask */
520 TRUE), /* pcrel_offset */
521
522 HOWTO (R_ARM_BASE_ABS, /* type */
523 0, /* rightshift */
524 2, /* size (0 = byte, 1 = short, 2 = long) */
525 32, /* bitsize */
526 FALSE, /* pc_relative */
527 0, /* bitpos */
528 complain_overflow_dont,/* complain_on_overflow */
529 bfd_elf_generic_reloc, /* special_function */
530 "R_ARM_BASE_ABS", /* name */
531 FALSE, /* partial_inplace */
532 0xffffffff, /* src_mask */
533 0xffffffff, /* dst_mask */
534 FALSE), /* pcrel_offset */
535
536 HOWTO (R_ARM_ALU_PCREL7_0, /* type */
537 0, /* rightshift */
538 2, /* size (0 = byte, 1 = short, 2 = long) */
539 12, /* bitsize */
540 TRUE, /* pc_relative */
541 0, /* bitpos */
542 complain_overflow_dont,/* complain_on_overflow */
543 bfd_elf_generic_reloc, /* special_function */
544 "R_ARM_ALU_PCREL_7_0", /* name */
545 FALSE, /* partial_inplace */
546 0x00000fff, /* src_mask */
547 0x00000fff, /* dst_mask */
548 TRUE), /* pcrel_offset */
549
550 HOWTO (R_ARM_ALU_PCREL15_8, /* type */
551 0, /* rightshift */
552 2, /* size (0 = byte, 1 = short, 2 = long) */
553 12, /* bitsize */
554 TRUE, /* pc_relative */
555 8, /* bitpos */
556 complain_overflow_dont,/* complain_on_overflow */
557 bfd_elf_generic_reloc, /* special_function */
558 "R_ARM_ALU_PCREL_15_8",/* name */
559 FALSE, /* partial_inplace */
560 0x00000fff, /* src_mask */
561 0x00000fff, /* dst_mask */
562 TRUE), /* pcrel_offset */
563
564 HOWTO (R_ARM_ALU_PCREL23_15, /* type */
565 0, /* rightshift */
566 2, /* size (0 = byte, 1 = short, 2 = long) */
567 12, /* bitsize */
568 TRUE, /* pc_relative */
569 16, /* bitpos */
570 complain_overflow_dont,/* complain_on_overflow */
571 bfd_elf_generic_reloc, /* special_function */
572 "R_ARM_ALU_PCREL_23_15",/* name */
573 FALSE, /* partial_inplace */
574 0x00000fff, /* src_mask */
575 0x00000fff, /* dst_mask */
576 TRUE), /* pcrel_offset */
577
578 HOWTO (R_ARM_LDR_SBREL_11_0, /* type */
579 0, /* rightshift */
580 2, /* size (0 = byte, 1 = short, 2 = long) */
581 12, /* bitsize */
582 FALSE, /* pc_relative */
583 0, /* bitpos */
584 complain_overflow_dont,/* complain_on_overflow */
585 bfd_elf_generic_reloc, /* special_function */
586 "R_ARM_LDR_SBREL_11_0",/* name */
587 FALSE, /* partial_inplace */
588 0x00000fff, /* src_mask */
589 0x00000fff, /* dst_mask */
590 FALSE), /* pcrel_offset */
591
592 HOWTO (R_ARM_ALU_SBREL_19_12, /* type */
593 0, /* rightshift */
594 2, /* size (0 = byte, 1 = short, 2 = long) */
595 8, /* bitsize */
596 FALSE, /* pc_relative */
597 12, /* bitpos */
598 complain_overflow_dont,/* complain_on_overflow */
599 bfd_elf_generic_reloc, /* special_function */
600 "R_ARM_ALU_SBREL_19_12",/* name */
601 FALSE, /* partial_inplace */
602 0x000ff000, /* src_mask */
603 0x000ff000, /* dst_mask */
604 FALSE), /* pcrel_offset */
605
606 HOWTO (R_ARM_ALU_SBREL_27_20, /* type */
607 0, /* rightshift */
608 2, /* size (0 = byte, 1 = short, 2 = long) */
609 8, /* bitsize */
610 FALSE, /* pc_relative */
611 20, /* bitpos */
612 complain_overflow_dont,/* complain_on_overflow */
613 bfd_elf_generic_reloc, /* special_function */
614 "R_ARM_ALU_SBREL_27_20",/* name */
615 FALSE, /* partial_inplace */
616 0x0ff00000, /* src_mask */
617 0x0ff00000, /* dst_mask */
618 FALSE), /* pcrel_offset */
619
620 HOWTO (R_ARM_TARGET1, /* type */
621 0, /* rightshift */
622 2, /* size (0 = byte, 1 = short, 2 = long) */
623 32, /* bitsize */
624 FALSE, /* pc_relative */
625 0, /* bitpos */
626 complain_overflow_dont,/* complain_on_overflow */
627 bfd_elf_generic_reloc, /* special_function */
628 "R_ARM_TARGET1", /* name */
629 FALSE, /* partial_inplace */
630 0xffffffff, /* src_mask */
631 0xffffffff, /* dst_mask */
632 FALSE), /* pcrel_offset */
633
634 HOWTO (R_ARM_ROSEGREL32, /* type */
635 0, /* rightshift */
636 2, /* size (0 = byte, 1 = short, 2 = long) */
637 32, /* bitsize */
638 FALSE, /* pc_relative */
639 0, /* bitpos */
640 complain_overflow_dont,/* complain_on_overflow */
641 bfd_elf_generic_reloc, /* special_function */
642 "R_ARM_ROSEGREL32", /* name */
643 FALSE, /* partial_inplace */
644 0xffffffff, /* src_mask */
645 0xffffffff, /* dst_mask */
646 FALSE), /* pcrel_offset */
647
648 HOWTO (R_ARM_V4BX, /* type */
649 0, /* rightshift */
650 2, /* size (0 = byte, 1 = short, 2 = long) */
651 32, /* bitsize */
652 FALSE, /* pc_relative */
653 0, /* bitpos */
654 complain_overflow_dont,/* complain_on_overflow */
655 bfd_elf_generic_reloc, /* special_function */
656 "R_ARM_V4BX", /* name */
657 FALSE, /* partial_inplace */
658 0xffffffff, /* src_mask */
659 0xffffffff, /* dst_mask */
660 FALSE), /* pcrel_offset */
661
662 HOWTO (R_ARM_TARGET2, /* type */
663 0, /* rightshift */
664 2, /* size (0 = byte, 1 = short, 2 = long) */
665 32, /* bitsize */
666 FALSE, /* pc_relative */
667 0, /* bitpos */
668 complain_overflow_signed,/* complain_on_overflow */
669 bfd_elf_generic_reloc, /* special_function */
670 "R_ARM_TARGET2", /* name */
671 FALSE, /* partial_inplace */
672 0xffffffff, /* src_mask */
673 0xffffffff, /* dst_mask */
674 TRUE), /* pcrel_offset */
675
676 HOWTO (R_ARM_PREL31, /* type */
677 0, /* rightshift */
678 2, /* size (0 = byte, 1 = short, 2 = long) */
679 31, /* bitsize */
680 TRUE, /* pc_relative */
681 0, /* bitpos */
682 complain_overflow_signed,/* complain_on_overflow */
683 bfd_elf_generic_reloc, /* special_function */
684 "R_ARM_PREL31", /* name */
685 FALSE, /* partial_inplace */
686 0x7fffffff, /* src_mask */
687 0x7fffffff, /* dst_mask */
688 TRUE), /* pcrel_offset */
689
690 HOWTO (R_ARM_MOVW_ABS_NC, /* type */
691 0, /* rightshift */
692 2, /* size (0 = byte, 1 = short, 2 = long) */
693 16, /* bitsize */
694 FALSE, /* pc_relative */
695 0, /* bitpos */
696 complain_overflow_dont,/* complain_on_overflow */
697 bfd_elf_generic_reloc, /* special_function */
698 "R_ARM_MOVW_ABS_NC", /* name */
699 FALSE, /* partial_inplace */
700 0x000f0fff, /* src_mask */
701 0x000f0fff, /* dst_mask */
702 FALSE), /* pcrel_offset */
703
704 HOWTO (R_ARM_MOVT_ABS, /* type */
705 0, /* rightshift */
706 2, /* size (0 = byte, 1 = short, 2 = long) */
707 16, /* bitsize */
708 FALSE, /* pc_relative */
709 0, /* bitpos */
710 complain_overflow_bitfield,/* complain_on_overflow */
711 bfd_elf_generic_reloc, /* special_function */
712 "R_ARM_MOVT_ABS", /* name */
713 FALSE, /* partial_inplace */
714 0x000f0fff, /* src_mask */
715 0x000f0fff, /* dst_mask */
716 FALSE), /* pcrel_offset */
717
718 HOWTO (R_ARM_MOVW_PREL_NC, /* type */
719 0, /* rightshift */
720 2, /* size (0 = byte, 1 = short, 2 = long) */
721 16, /* bitsize */
722 TRUE, /* pc_relative */
723 0, /* bitpos */
724 complain_overflow_dont,/* complain_on_overflow */
725 bfd_elf_generic_reloc, /* special_function */
726 "R_ARM_MOVW_PREL_NC", /* name */
727 FALSE, /* partial_inplace */
728 0x000f0fff, /* src_mask */
729 0x000f0fff, /* dst_mask */
730 TRUE), /* pcrel_offset */
731
732 HOWTO (R_ARM_MOVT_PREL, /* type */
733 0, /* rightshift */
734 2, /* size (0 = byte, 1 = short, 2 = long) */
735 16, /* bitsize */
736 TRUE, /* pc_relative */
737 0, /* bitpos */
738 complain_overflow_bitfield,/* complain_on_overflow */
739 bfd_elf_generic_reloc, /* special_function */
740 "R_ARM_MOVT_PREL", /* name */
741 FALSE, /* partial_inplace */
742 0x000f0fff, /* src_mask */
743 0x000f0fff, /* dst_mask */
744 TRUE), /* pcrel_offset */
745
746 HOWTO (R_ARM_THM_MOVW_ABS_NC, /* type */
747 0, /* rightshift */
748 2, /* size (0 = byte, 1 = short, 2 = long) */
749 16, /* bitsize */
750 FALSE, /* pc_relative */
751 0, /* bitpos */
752 complain_overflow_dont,/* complain_on_overflow */
753 bfd_elf_generic_reloc, /* special_function */
754 "R_ARM_THM_MOVW_ABS_NC",/* name */
755 FALSE, /* partial_inplace */
756 0x040f70ff, /* src_mask */
757 0x040f70ff, /* dst_mask */
758 FALSE), /* pcrel_offset */
759
760 HOWTO (R_ARM_THM_MOVT_ABS, /* type */
761 0, /* rightshift */
762 2, /* size (0 = byte, 1 = short, 2 = long) */
763 16, /* bitsize */
764 FALSE, /* pc_relative */
765 0, /* bitpos */
766 complain_overflow_bitfield,/* complain_on_overflow */
767 bfd_elf_generic_reloc, /* special_function */
768 "R_ARM_THM_MOVT_ABS", /* name */
769 FALSE, /* partial_inplace */
770 0x040f70ff, /* src_mask */
771 0x040f70ff, /* dst_mask */
772 FALSE), /* pcrel_offset */
773
774 HOWTO (R_ARM_THM_MOVW_PREL_NC,/* type */
775 0, /* rightshift */
776 2, /* size (0 = byte, 1 = short, 2 = long) */
777 16, /* bitsize */
778 TRUE, /* pc_relative */
779 0, /* bitpos */
780 complain_overflow_dont,/* complain_on_overflow */
781 bfd_elf_generic_reloc, /* special_function */
782 "R_ARM_THM_MOVW_PREL_NC",/* name */
783 FALSE, /* partial_inplace */
784 0x040f70ff, /* src_mask */
785 0x040f70ff, /* dst_mask */
786 TRUE), /* pcrel_offset */
787
788 HOWTO (R_ARM_THM_MOVT_PREL, /* type */
789 0, /* rightshift */
790 2, /* size (0 = byte, 1 = short, 2 = long) */
791 16, /* bitsize */
792 TRUE, /* pc_relative */
793 0, /* bitpos */
794 complain_overflow_bitfield,/* complain_on_overflow */
795 bfd_elf_generic_reloc, /* special_function */
796 "R_ARM_THM_MOVT_PREL", /* name */
797 FALSE, /* partial_inplace */
798 0x040f70ff, /* src_mask */
799 0x040f70ff, /* dst_mask */
800 TRUE), /* pcrel_offset */
801
802 HOWTO (R_ARM_THM_JUMP19, /* type */
803 1, /* rightshift */
804 2, /* size (0 = byte, 1 = short, 2 = long) */
805 19, /* bitsize */
806 TRUE, /* pc_relative */
807 0, /* bitpos */
808 complain_overflow_signed,/* complain_on_overflow */
809 bfd_elf_generic_reloc, /* special_function */
810 "R_ARM_THM_JUMP19", /* name */
811 FALSE, /* partial_inplace */
812 0x043f2fff, /* src_mask */
813 0x043f2fff, /* dst_mask */
814 TRUE), /* pcrel_offset */
815
816 HOWTO (R_ARM_THM_JUMP6, /* type */
817 1, /* rightshift */
818 1, /* size (0 = byte, 1 = short, 2 = long) */
819 6, /* bitsize */
820 TRUE, /* pc_relative */
821 0, /* bitpos */
822 complain_overflow_unsigned,/* complain_on_overflow */
823 bfd_elf_generic_reloc, /* special_function */
824 "R_ARM_THM_JUMP6", /* name */
825 FALSE, /* partial_inplace */
826 0x02f8, /* src_mask */
827 0x02f8, /* dst_mask */
828 TRUE), /* pcrel_offset */
829
830 /* These are declared as 13-bit signed relocations because we can
831 address -4095 .. 4095(base) by altering ADDW to SUBW or vice
832 versa. */
833 HOWTO (R_ARM_THM_ALU_PREL_11_0,/* type */
834 0, /* rightshift */
835 2, /* size (0 = byte, 1 = short, 2 = long) */
836 13, /* bitsize */
837 TRUE, /* pc_relative */
838 0, /* bitpos */
839 complain_overflow_dont,/* complain_on_overflow */
840 bfd_elf_generic_reloc, /* special_function */
841 "R_ARM_THM_ALU_PREL_11_0",/* name */
842 FALSE, /* partial_inplace */
843 0xffffffff, /* src_mask */
844 0xffffffff, /* dst_mask */
845 TRUE), /* pcrel_offset */
846
847 HOWTO (R_ARM_THM_PC12, /* type */
848 0, /* rightshift */
849 2, /* size (0 = byte, 1 = short, 2 = long) */
850 13, /* bitsize */
851 TRUE, /* pc_relative */
852 0, /* bitpos */
853 complain_overflow_dont,/* complain_on_overflow */
854 bfd_elf_generic_reloc, /* special_function */
855 "R_ARM_THM_PC12", /* name */
856 FALSE, /* partial_inplace */
857 0xffffffff, /* src_mask */
858 0xffffffff, /* dst_mask */
859 TRUE), /* pcrel_offset */
860
861 HOWTO (R_ARM_ABS32_NOI, /* type */
862 0, /* rightshift */
863 2, /* size (0 = byte, 1 = short, 2 = long) */
864 32, /* bitsize */
865 FALSE, /* pc_relative */
866 0, /* bitpos */
867 complain_overflow_dont,/* complain_on_overflow */
868 bfd_elf_generic_reloc, /* special_function */
869 "R_ARM_ABS32_NOI", /* name */
870 FALSE, /* partial_inplace */
871 0xffffffff, /* src_mask */
872 0xffffffff, /* dst_mask */
873 FALSE), /* pcrel_offset */
874
875 HOWTO (R_ARM_REL32_NOI, /* type */
876 0, /* rightshift */
877 2, /* size (0 = byte, 1 = short, 2 = long) */
878 32, /* bitsize */
879 TRUE, /* pc_relative */
880 0, /* bitpos */
881 complain_overflow_dont,/* complain_on_overflow */
882 bfd_elf_generic_reloc, /* special_function */
883 "R_ARM_REL32_NOI", /* name */
884 FALSE, /* partial_inplace */
885 0xffffffff, /* src_mask */
886 0xffffffff, /* dst_mask */
887 FALSE), /* pcrel_offset */
888
889 /* Group relocations. */
890
891 HOWTO (R_ARM_ALU_PC_G0_NC, /* type */
892 0, /* rightshift */
893 2, /* size (0 = byte, 1 = short, 2 = long) */
894 32, /* bitsize */
895 TRUE, /* pc_relative */
896 0, /* bitpos */
897 complain_overflow_dont,/* complain_on_overflow */
898 bfd_elf_generic_reloc, /* special_function */
899 "R_ARM_ALU_PC_G0_NC", /* name */
900 FALSE, /* partial_inplace */
901 0xffffffff, /* src_mask */
902 0xffffffff, /* dst_mask */
903 TRUE), /* pcrel_offset */
904
905 HOWTO (R_ARM_ALU_PC_G0, /* type */
906 0, /* rightshift */
907 2, /* size (0 = byte, 1 = short, 2 = long) */
908 32, /* bitsize */
909 TRUE, /* pc_relative */
910 0, /* bitpos */
911 complain_overflow_dont,/* complain_on_overflow */
912 bfd_elf_generic_reloc, /* special_function */
913 "R_ARM_ALU_PC_G0", /* name */
914 FALSE, /* partial_inplace */
915 0xffffffff, /* src_mask */
916 0xffffffff, /* dst_mask */
917 TRUE), /* pcrel_offset */
918
919 HOWTO (R_ARM_ALU_PC_G1_NC, /* type */
920 0, /* rightshift */
921 2, /* size (0 = byte, 1 = short, 2 = long) */
922 32, /* bitsize */
923 TRUE, /* pc_relative */
924 0, /* bitpos */
925 complain_overflow_dont,/* complain_on_overflow */
926 bfd_elf_generic_reloc, /* special_function */
927 "R_ARM_ALU_PC_G1_NC", /* name */
928 FALSE, /* partial_inplace */
929 0xffffffff, /* src_mask */
930 0xffffffff, /* dst_mask */
931 TRUE), /* pcrel_offset */
932
933 HOWTO (R_ARM_ALU_PC_G1, /* type */
934 0, /* rightshift */
935 2, /* size (0 = byte, 1 = short, 2 = long) */
936 32, /* bitsize */
937 TRUE, /* pc_relative */
938 0, /* bitpos */
939 complain_overflow_dont,/* complain_on_overflow */
940 bfd_elf_generic_reloc, /* special_function */
941 "R_ARM_ALU_PC_G1", /* name */
942 FALSE, /* partial_inplace */
943 0xffffffff, /* src_mask */
944 0xffffffff, /* dst_mask */
945 TRUE), /* pcrel_offset */
946
947 HOWTO (R_ARM_ALU_PC_G2, /* type */
948 0, /* rightshift */
949 2, /* size (0 = byte, 1 = short, 2 = long) */
950 32, /* bitsize */
951 TRUE, /* pc_relative */
952 0, /* bitpos */
953 complain_overflow_dont,/* complain_on_overflow */
954 bfd_elf_generic_reloc, /* special_function */
955 "R_ARM_ALU_PC_G2", /* name */
956 FALSE, /* partial_inplace */
957 0xffffffff, /* src_mask */
958 0xffffffff, /* dst_mask */
959 TRUE), /* pcrel_offset */
960
961 HOWTO (R_ARM_LDR_PC_G1, /* type */
962 0, /* rightshift */
963 2, /* size (0 = byte, 1 = short, 2 = long) */
964 32, /* bitsize */
965 TRUE, /* pc_relative */
966 0, /* bitpos */
967 complain_overflow_dont,/* complain_on_overflow */
968 bfd_elf_generic_reloc, /* special_function */
969 "R_ARM_LDR_PC_G1", /* name */
970 FALSE, /* partial_inplace */
971 0xffffffff, /* src_mask */
972 0xffffffff, /* dst_mask */
973 TRUE), /* pcrel_offset */
974
975 HOWTO (R_ARM_LDR_PC_G2, /* type */
976 0, /* rightshift */
977 2, /* size (0 = byte, 1 = short, 2 = long) */
978 32, /* bitsize */
979 TRUE, /* pc_relative */
980 0, /* bitpos */
981 complain_overflow_dont,/* complain_on_overflow */
982 bfd_elf_generic_reloc, /* special_function */
983 "R_ARM_LDR_PC_G2", /* name */
984 FALSE, /* partial_inplace */
985 0xffffffff, /* src_mask */
986 0xffffffff, /* dst_mask */
987 TRUE), /* pcrel_offset */
988
989 HOWTO (R_ARM_LDRS_PC_G0, /* type */
990 0, /* rightshift */
991 2, /* size (0 = byte, 1 = short, 2 = long) */
992 32, /* bitsize */
993 TRUE, /* pc_relative */
994 0, /* bitpos */
995 complain_overflow_dont,/* complain_on_overflow */
996 bfd_elf_generic_reloc, /* special_function */
997 "R_ARM_LDRS_PC_G0", /* name */
998 FALSE, /* partial_inplace */
999 0xffffffff, /* src_mask */
1000 0xffffffff, /* dst_mask */
1001 TRUE), /* pcrel_offset */
1002
1003 HOWTO (R_ARM_LDRS_PC_G1, /* type */
1004 0, /* rightshift */
1005 2, /* size (0 = byte, 1 = short, 2 = long) */
1006 32, /* bitsize */
1007 TRUE, /* pc_relative */
1008 0, /* bitpos */
1009 complain_overflow_dont,/* complain_on_overflow */
1010 bfd_elf_generic_reloc, /* special_function */
1011 "R_ARM_LDRS_PC_G1", /* name */
1012 FALSE, /* partial_inplace */
1013 0xffffffff, /* src_mask */
1014 0xffffffff, /* dst_mask */
1015 TRUE), /* pcrel_offset */
1016
1017 HOWTO (R_ARM_LDRS_PC_G2, /* type */
1018 0, /* rightshift */
1019 2, /* size (0 = byte, 1 = short, 2 = long) */
1020 32, /* bitsize */
1021 TRUE, /* pc_relative */
1022 0, /* bitpos */
1023 complain_overflow_dont,/* complain_on_overflow */
1024 bfd_elf_generic_reloc, /* special_function */
1025 "R_ARM_LDRS_PC_G2", /* name */
1026 FALSE, /* partial_inplace */
1027 0xffffffff, /* src_mask */
1028 0xffffffff, /* dst_mask */
1029 TRUE), /* pcrel_offset */
1030
1031 HOWTO (R_ARM_LDC_PC_G0, /* type */
1032 0, /* rightshift */
1033 2, /* size (0 = byte, 1 = short, 2 = long) */
1034 32, /* bitsize */
1035 TRUE, /* pc_relative */
1036 0, /* bitpos */
1037 complain_overflow_dont,/* complain_on_overflow */
1038 bfd_elf_generic_reloc, /* special_function */
1039 "R_ARM_LDC_PC_G0", /* name */
1040 FALSE, /* partial_inplace */
1041 0xffffffff, /* src_mask */
1042 0xffffffff, /* dst_mask */
1043 TRUE), /* pcrel_offset */
1044
1045 HOWTO (R_ARM_LDC_PC_G1, /* type */
1046 0, /* rightshift */
1047 2, /* size (0 = byte, 1 = short, 2 = long) */
1048 32, /* bitsize */
1049 TRUE, /* pc_relative */
1050 0, /* bitpos */
1051 complain_overflow_dont,/* complain_on_overflow */
1052 bfd_elf_generic_reloc, /* special_function */
1053 "R_ARM_LDC_PC_G1", /* name */
1054 FALSE, /* partial_inplace */
1055 0xffffffff, /* src_mask */
1056 0xffffffff, /* dst_mask */
1057 TRUE), /* pcrel_offset */
1058
1059 HOWTO (R_ARM_LDC_PC_G2, /* type */
1060 0, /* rightshift */
1061 2, /* size (0 = byte, 1 = short, 2 = long) */
1062 32, /* bitsize */
1063 TRUE, /* pc_relative */
1064 0, /* bitpos */
1065 complain_overflow_dont,/* complain_on_overflow */
1066 bfd_elf_generic_reloc, /* special_function */
1067 "R_ARM_LDC_PC_G2", /* name */
1068 FALSE, /* partial_inplace */
1069 0xffffffff, /* src_mask */
1070 0xffffffff, /* dst_mask */
1071 TRUE), /* pcrel_offset */
1072
1073 HOWTO (R_ARM_ALU_SB_G0_NC, /* type */
1074 0, /* rightshift */
1075 2, /* size (0 = byte, 1 = short, 2 = long) */
1076 32, /* bitsize */
1077 TRUE, /* pc_relative */
1078 0, /* bitpos */
1079 complain_overflow_dont,/* complain_on_overflow */
1080 bfd_elf_generic_reloc, /* special_function */
1081 "R_ARM_ALU_SB_G0_NC", /* name */
1082 FALSE, /* partial_inplace */
1083 0xffffffff, /* src_mask */
1084 0xffffffff, /* dst_mask */
1085 TRUE), /* pcrel_offset */
1086
1087 HOWTO (R_ARM_ALU_SB_G0, /* type */
1088 0, /* rightshift */
1089 2, /* size (0 = byte, 1 = short, 2 = long) */
1090 32, /* bitsize */
1091 TRUE, /* pc_relative */
1092 0, /* bitpos */
1093 complain_overflow_dont,/* complain_on_overflow */
1094 bfd_elf_generic_reloc, /* special_function */
1095 "R_ARM_ALU_SB_G0", /* name */
1096 FALSE, /* partial_inplace */
1097 0xffffffff, /* src_mask */
1098 0xffffffff, /* dst_mask */
1099 TRUE), /* pcrel_offset */
1100
1101 HOWTO (R_ARM_ALU_SB_G1_NC, /* type */
1102 0, /* rightshift */
1103 2, /* size (0 = byte, 1 = short, 2 = long) */
1104 32, /* bitsize */
1105 TRUE, /* pc_relative */
1106 0, /* bitpos */
1107 complain_overflow_dont,/* complain_on_overflow */
1108 bfd_elf_generic_reloc, /* special_function */
1109 "R_ARM_ALU_SB_G1_NC", /* name */
1110 FALSE, /* partial_inplace */
1111 0xffffffff, /* src_mask */
1112 0xffffffff, /* dst_mask */
1113 TRUE), /* pcrel_offset */
1114
1115 HOWTO (R_ARM_ALU_SB_G1, /* type */
1116 0, /* rightshift */
1117 2, /* size (0 = byte, 1 = short, 2 = long) */
1118 32, /* bitsize */
1119 TRUE, /* pc_relative */
1120 0, /* bitpos */
1121 complain_overflow_dont,/* complain_on_overflow */
1122 bfd_elf_generic_reloc, /* special_function */
1123 "R_ARM_ALU_SB_G1", /* name */
1124 FALSE, /* partial_inplace */
1125 0xffffffff, /* src_mask */
1126 0xffffffff, /* dst_mask */
1127 TRUE), /* pcrel_offset */
1128
1129 HOWTO (R_ARM_ALU_SB_G2, /* type */
1130 0, /* rightshift */
1131 2, /* size (0 = byte, 1 = short, 2 = long) */
1132 32, /* bitsize */
1133 TRUE, /* pc_relative */
1134 0, /* bitpos */
1135 complain_overflow_dont,/* complain_on_overflow */
1136 bfd_elf_generic_reloc, /* special_function */
1137 "R_ARM_ALU_SB_G2", /* name */
1138 FALSE, /* partial_inplace */
1139 0xffffffff, /* src_mask */
1140 0xffffffff, /* dst_mask */
1141 TRUE), /* pcrel_offset */
1142
1143 HOWTO (R_ARM_LDR_SB_G0, /* type */
1144 0, /* rightshift */
1145 2, /* size (0 = byte, 1 = short, 2 = long) */
1146 32, /* bitsize */
1147 TRUE, /* pc_relative */
1148 0, /* bitpos */
1149 complain_overflow_dont,/* complain_on_overflow */
1150 bfd_elf_generic_reloc, /* special_function */
1151 "R_ARM_LDR_SB_G0", /* name */
1152 FALSE, /* partial_inplace */
1153 0xffffffff, /* src_mask */
1154 0xffffffff, /* dst_mask */
1155 TRUE), /* pcrel_offset */
1156
1157 HOWTO (R_ARM_LDR_SB_G1, /* type */
1158 0, /* rightshift */
1159 2, /* size (0 = byte, 1 = short, 2 = long) */
1160 32, /* bitsize */
1161 TRUE, /* pc_relative */
1162 0, /* bitpos */
1163 complain_overflow_dont,/* complain_on_overflow */
1164 bfd_elf_generic_reloc, /* special_function */
1165 "R_ARM_LDR_SB_G1", /* name */
1166 FALSE, /* partial_inplace */
1167 0xffffffff, /* src_mask */
1168 0xffffffff, /* dst_mask */
1169 TRUE), /* pcrel_offset */
1170
1171 HOWTO (R_ARM_LDR_SB_G2, /* type */
1172 0, /* rightshift */
1173 2, /* size (0 = byte, 1 = short, 2 = long) */
1174 32, /* bitsize */
1175 TRUE, /* pc_relative */
1176 0, /* bitpos */
1177 complain_overflow_dont,/* complain_on_overflow */
1178 bfd_elf_generic_reloc, /* special_function */
1179 "R_ARM_LDR_SB_G2", /* name */
1180 FALSE, /* partial_inplace */
1181 0xffffffff, /* src_mask */
1182 0xffffffff, /* dst_mask */
1183 TRUE), /* pcrel_offset */
1184
1185 HOWTO (R_ARM_LDRS_SB_G0, /* type */
1186 0, /* rightshift */
1187 2, /* size (0 = byte, 1 = short, 2 = long) */
1188 32, /* bitsize */
1189 TRUE, /* pc_relative */
1190 0, /* bitpos */
1191 complain_overflow_dont,/* complain_on_overflow */
1192 bfd_elf_generic_reloc, /* special_function */
1193 "R_ARM_LDRS_SB_G0", /* name */
1194 FALSE, /* partial_inplace */
1195 0xffffffff, /* src_mask */
1196 0xffffffff, /* dst_mask */
1197 TRUE), /* pcrel_offset */
1198
1199 HOWTO (R_ARM_LDRS_SB_G1, /* type */
1200 0, /* rightshift */
1201 2, /* size (0 = byte, 1 = short, 2 = long) */
1202 32, /* bitsize */
1203 TRUE, /* pc_relative */
1204 0, /* bitpos */
1205 complain_overflow_dont,/* complain_on_overflow */
1206 bfd_elf_generic_reloc, /* special_function */
1207 "R_ARM_LDRS_SB_G1", /* name */
1208 FALSE, /* partial_inplace */
1209 0xffffffff, /* src_mask */
1210 0xffffffff, /* dst_mask */
1211 TRUE), /* pcrel_offset */
1212
1213 HOWTO (R_ARM_LDRS_SB_G2, /* type */
1214 0, /* rightshift */
1215 2, /* size (0 = byte, 1 = short, 2 = long) */
1216 32, /* bitsize */
1217 TRUE, /* pc_relative */
1218 0, /* bitpos */
1219 complain_overflow_dont,/* complain_on_overflow */
1220 bfd_elf_generic_reloc, /* special_function */
1221 "R_ARM_LDRS_SB_G2", /* name */
1222 FALSE, /* partial_inplace */
1223 0xffffffff, /* src_mask */
1224 0xffffffff, /* dst_mask */
1225 TRUE), /* pcrel_offset */
1226
1227 HOWTO (R_ARM_LDC_SB_G0, /* type */
1228 0, /* rightshift */
1229 2, /* size (0 = byte, 1 = short, 2 = long) */
1230 32, /* bitsize */
1231 TRUE, /* pc_relative */
1232 0, /* bitpos */
1233 complain_overflow_dont,/* complain_on_overflow */
1234 bfd_elf_generic_reloc, /* special_function */
1235 "R_ARM_LDC_SB_G0", /* name */
1236 FALSE, /* partial_inplace */
1237 0xffffffff, /* src_mask */
1238 0xffffffff, /* dst_mask */
1239 TRUE), /* pcrel_offset */
1240
1241 HOWTO (R_ARM_LDC_SB_G1, /* type */
1242 0, /* rightshift */
1243 2, /* size (0 = byte, 1 = short, 2 = long) */
1244 32, /* bitsize */
1245 TRUE, /* pc_relative */
1246 0, /* bitpos */
1247 complain_overflow_dont,/* complain_on_overflow */
1248 bfd_elf_generic_reloc, /* special_function */
1249 "R_ARM_LDC_SB_G1", /* name */
1250 FALSE, /* partial_inplace */
1251 0xffffffff, /* src_mask */
1252 0xffffffff, /* dst_mask */
1253 TRUE), /* pcrel_offset */
1254
1255 HOWTO (R_ARM_LDC_SB_G2, /* type */
1256 0, /* rightshift */
1257 2, /* size (0 = byte, 1 = short, 2 = long) */
1258 32, /* bitsize */
1259 TRUE, /* pc_relative */
1260 0, /* bitpos */
1261 complain_overflow_dont,/* complain_on_overflow */
1262 bfd_elf_generic_reloc, /* special_function */
1263 "R_ARM_LDC_SB_G2", /* name */
1264 FALSE, /* partial_inplace */
1265 0xffffffff, /* src_mask */
1266 0xffffffff, /* dst_mask */
1267 TRUE), /* pcrel_offset */
1268
1269 /* End of group relocations. */
1270
1271 HOWTO (R_ARM_MOVW_BREL_NC, /* type */
1272 0, /* rightshift */
1273 2, /* size (0 = byte, 1 = short, 2 = long) */
1274 16, /* bitsize */
1275 FALSE, /* pc_relative */
1276 0, /* bitpos */
1277 complain_overflow_dont,/* complain_on_overflow */
1278 bfd_elf_generic_reloc, /* special_function */
1279 "R_ARM_MOVW_BREL_NC", /* name */
1280 FALSE, /* partial_inplace */
1281 0x0000ffff, /* src_mask */
1282 0x0000ffff, /* dst_mask */
1283 FALSE), /* pcrel_offset */
1284
1285 HOWTO (R_ARM_MOVT_BREL, /* type */
1286 0, /* rightshift */
1287 2, /* size (0 = byte, 1 = short, 2 = long) */
1288 16, /* bitsize */
1289 FALSE, /* pc_relative */
1290 0, /* bitpos */
1291 complain_overflow_bitfield,/* complain_on_overflow */
1292 bfd_elf_generic_reloc, /* special_function */
1293 "R_ARM_MOVT_BREL", /* name */
1294 FALSE, /* partial_inplace */
1295 0x0000ffff, /* src_mask */
1296 0x0000ffff, /* dst_mask */
1297 FALSE), /* pcrel_offset */
1298
1299 HOWTO (R_ARM_MOVW_BREL, /* type */
1300 0, /* rightshift */
1301 2, /* size (0 = byte, 1 = short, 2 = long) */
1302 16, /* bitsize */
1303 FALSE, /* pc_relative */
1304 0, /* bitpos */
1305 complain_overflow_dont,/* complain_on_overflow */
1306 bfd_elf_generic_reloc, /* special_function */
1307 "R_ARM_MOVW_BREL", /* name */
1308 FALSE, /* partial_inplace */
1309 0x0000ffff, /* src_mask */
1310 0x0000ffff, /* dst_mask */
1311 FALSE), /* pcrel_offset */
1312
1313 HOWTO (R_ARM_THM_MOVW_BREL_NC,/* type */
1314 0, /* rightshift */
1315 2, /* size (0 = byte, 1 = short, 2 = long) */
1316 16, /* bitsize */
1317 FALSE, /* pc_relative */
1318 0, /* bitpos */
1319 complain_overflow_dont,/* complain_on_overflow */
1320 bfd_elf_generic_reloc, /* special_function */
1321 "R_ARM_THM_MOVW_BREL_NC",/* name */
1322 FALSE, /* partial_inplace */
1323 0x040f70ff, /* src_mask */
1324 0x040f70ff, /* dst_mask */
1325 FALSE), /* pcrel_offset */
1326
1327 HOWTO (R_ARM_THM_MOVT_BREL, /* type */
1328 0, /* rightshift */
1329 2, /* size (0 = byte, 1 = short, 2 = long) */
1330 16, /* bitsize */
1331 FALSE, /* pc_relative */
1332 0, /* bitpos */
1333 complain_overflow_bitfield,/* complain_on_overflow */
1334 bfd_elf_generic_reloc, /* special_function */
1335 "R_ARM_THM_MOVT_BREL", /* name */
1336 FALSE, /* partial_inplace */
1337 0x040f70ff, /* src_mask */
1338 0x040f70ff, /* dst_mask */
1339 FALSE), /* pcrel_offset */
1340
1341 HOWTO (R_ARM_THM_MOVW_BREL, /* type */
1342 0, /* rightshift */
1343 2, /* size (0 = byte, 1 = short, 2 = long) */
1344 16, /* bitsize */
1345 FALSE, /* pc_relative */
1346 0, /* bitpos */
1347 complain_overflow_dont,/* complain_on_overflow */
1348 bfd_elf_generic_reloc, /* special_function */
1349 "R_ARM_THM_MOVW_BREL", /* name */
1350 FALSE, /* partial_inplace */
1351 0x040f70ff, /* src_mask */
1352 0x040f70ff, /* dst_mask */
1353 FALSE), /* pcrel_offset */
1354
1355 EMPTY_HOWTO (90), /* Unallocated. */
1356 EMPTY_HOWTO (91),
1357 EMPTY_HOWTO (92),
1358 EMPTY_HOWTO (93),
1359
1360 HOWTO (R_ARM_PLT32_ABS, /* type */
1361 0, /* rightshift */
1362 2, /* size (0 = byte, 1 = short, 2 = long) */
1363 32, /* bitsize */
1364 FALSE, /* pc_relative */
1365 0, /* bitpos */
1366 complain_overflow_dont,/* complain_on_overflow */
1367 bfd_elf_generic_reloc, /* special_function */
1368 "R_ARM_PLT32_ABS", /* name */
1369 FALSE, /* partial_inplace */
1370 0xffffffff, /* src_mask */
1371 0xffffffff, /* dst_mask */
1372 FALSE), /* pcrel_offset */
1373
1374 HOWTO (R_ARM_GOT_ABS, /* type */
1375 0, /* rightshift */
1376 2, /* size (0 = byte, 1 = short, 2 = long) */
1377 32, /* bitsize */
1378 FALSE, /* pc_relative */
1379 0, /* bitpos */
1380 complain_overflow_dont,/* complain_on_overflow */
1381 bfd_elf_generic_reloc, /* special_function */
1382 "R_ARM_GOT_ABS", /* name */
1383 FALSE, /* partial_inplace */
1384 0xffffffff, /* src_mask */
1385 0xffffffff, /* dst_mask */
1386 FALSE), /* pcrel_offset */
1387
1388 HOWTO (R_ARM_GOT_PREL, /* type */
1389 0, /* rightshift */
1390 2, /* size (0 = byte, 1 = short, 2 = long) */
1391 32, /* bitsize */
1392 TRUE, /* pc_relative */
1393 0, /* bitpos */
1394 complain_overflow_dont, /* complain_on_overflow */
1395 bfd_elf_generic_reloc, /* special_function */
1396 "R_ARM_GOT_PREL", /* name */
1397 FALSE, /* partial_inplace */
1398 0xffffffff, /* src_mask */
1399 0xffffffff, /* dst_mask */
1400 TRUE), /* pcrel_offset */
1401
1402 HOWTO (R_ARM_GOT_BREL12, /* type */
1403 0, /* rightshift */
1404 2, /* size (0 = byte, 1 = short, 2 = long) */
1405 12, /* bitsize */
1406 FALSE, /* pc_relative */
1407 0, /* bitpos */
1408 complain_overflow_bitfield,/* complain_on_overflow */
1409 bfd_elf_generic_reloc, /* special_function */
1410 "R_ARM_GOT_BREL12", /* name */
1411 FALSE, /* partial_inplace */
1412 0x00000fff, /* src_mask */
1413 0x00000fff, /* dst_mask */
1414 FALSE), /* pcrel_offset */
1415
1416 HOWTO (R_ARM_GOTOFF12, /* type */
1417 0, /* rightshift */
1418 2, /* size (0 = byte, 1 = short, 2 = long) */
1419 12, /* bitsize */
1420 FALSE, /* pc_relative */
1421 0, /* bitpos */
1422 complain_overflow_bitfield,/* complain_on_overflow */
1423 bfd_elf_generic_reloc, /* special_function */
1424 "R_ARM_GOTOFF12", /* name */
1425 FALSE, /* partial_inplace */
1426 0x00000fff, /* src_mask */
1427 0x00000fff, /* dst_mask */
1428 FALSE), /* pcrel_offset */
1429
1430 EMPTY_HOWTO (R_ARM_GOTRELAX), /* reserved for future GOT-load optimizations */
1431
1432 /* GNU extension to record C++ vtable member usage */
1433 HOWTO (R_ARM_GNU_VTENTRY, /* type */
1434 0, /* rightshift */
1435 2, /* size (0 = byte, 1 = short, 2 = long) */
1436 0, /* bitsize */
1437 FALSE, /* pc_relative */
1438 0, /* bitpos */
1439 complain_overflow_dont, /* complain_on_overflow */
1440 _bfd_elf_rel_vtable_reloc_fn, /* special_function */
1441 "R_ARM_GNU_VTENTRY", /* name */
1442 FALSE, /* partial_inplace */
1443 0, /* src_mask */
1444 0, /* dst_mask */
1445 FALSE), /* pcrel_offset */
1446
1447 /* GNU extension to record C++ vtable hierarchy */
1448 HOWTO (R_ARM_GNU_VTINHERIT, /* type */
1449 0, /* rightshift */
1450 2, /* size (0 = byte, 1 = short, 2 = long) */
1451 0, /* bitsize */
1452 FALSE, /* pc_relative */
1453 0, /* bitpos */
1454 complain_overflow_dont, /* complain_on_overflow */
1455 NULL, /* special_function */
1456 "R_ARM_GNU_VTINHERIT", /* name */
1457 FALSE, /* partial_inplace */
1458 0, /* src_mask */
1459 0, /* dst_mask */
1460 FALSE), /* pcrel_offset */
1461
1462 HOWTO (R_ARM_THM_JUMP11, /* type */
1463 1, /* rightshift */
1464 1, /* size (0 = byte, 1 = short, 2 = long) */
1465 11, /* bitsize */
1466 TRUE, /* pc_relative */
1467 0, /* bitpos */
1468 complain_overflow_signed, /* complain_on_overflow */
1469 bfd_elf_generic_reloc, /* special_function */
1470 "R_ARM_THM_JUMP11", /* name */
1471 FALSE, /* partial_inplace */
1472 0x000007ff, /* src_mask */
1473 0x000007ff, /* dst_mask */
1474 TRUE), /* pcrel_offset */
1475
1476 HOWTO (R_ARM_THM_JUMP8, /* type */
1477 1, /* rightshift */
1478 1, /* size (0 = byte, 1 = short, 2 = long) */
1479 8, /* bitsize */
1480 TRUE, /* pc_relative */
1481 0, /* bitpos */
1482 complain_overflow_signed, /* complain_on_overflow */
1483 bfd_elf_generic_reloc, /* special_function */
1484 "R_ARM_THM_JUMP8", /* name */
1485 FALSE, /* partial_inplace */
1486 0x000000ff, /* src_mask */
1487 0x000000ff, /* dst_mask */
1488 TRUE), /* pcrel_offset */
1489
1490 /* TLS relocations */
1491 HOWTO (R_ARM_TLS_GD32, /* type */
1492 0, /* rightshift */
1493 2, /* size (0 = byte, 1 = short, 2 = long) */
1494 32, /* bitsize */
1495 FALSE, /* pc_relative */
1496 0, /* bitpos */
1497 complain_overflow_bitfield,/* complain_on_overflow */
1498 NULL, /* special_function */
1499 "R_ARM_TLS_GD32", /* name */
1500 TRUE, /* partial_inplace */
1501 0xffffffff, /* src_mask */
1502 0xffffffff, /* dst_mask */
1503 FALSE), /* pcrel_offset */
1504
1505 HOWTO (R_ARM_TLS_LDM32, /* type */
1506 0, /* rightshift */
1507 2, /* size (0 = byte, 1 = short, 2 = long) */
1508 32, /* bitsize */
1509 FALSE, /* pc_relative */
1510 0, /* bitpos */
1511 complain_overflow_bitfield,/* complain_on_overflow */
1512 bfd_elf_generic_reloc, /* special_function */
1513 "R_ARM_TLS_LDM32", /* name */
1514 TRUE, /* partial_inplace */
1515 0xffffffff, /* src_mask */
1516 0xffffffff, /* dst_mask */
1517 FALSE), /* pcrel_offset */
1518
1519 HOWTO (R_ARM_TLS_LDO32, /* type */
1520 0, /* rightshift */
1521 2, /* size (0 = byte, 1 = short, 2 = long) */
1522 32, /* bitsize */
1523 FALSE, /* pc_relative */
1524 0, /* bitpos */
1525 complain_overflow_bitfield,/* complain_on_overflow */
1526 bfd_elf_generic_reloc, /* special_function */
1527 "R_ARM_TLS_LDO32", /* name */
1528 TRUE, /* partial_inplace */
1529 0xffffffff, /* src_mask */
1530 0xffffffff, /* dst_mask */
1531 FALSE), /* pcrel_offset */
1532
1533 HOWTO (R_ARM_TLS_IE32, /* type */
1534 0, /* rightshift */
1535 2, /* size (0 = byte, 1 = short, 2 = long) */
1536 32, /* bitsize */
1537 FALSE, /* pc_relative */
1538 0, /* bitpos */
1539 complain_overflow_bitfield,/* complain_on_overflow */
1540 NULL, /* special_function */
1541 "R_ARM_TLS_IE32", /* name */
1542 TRUE, /* partial_inplace */
1543 0xffffffff, /* src_mask */
1544 0xffffffff, /* dst_mask */
1545 FALSE), /* pcrel_offset */
1546
1547 HOWTO (R_ARM_TLS_LE32, /* type */
1548 0, /* rightshift */
1549 2, /* size (0 = byte, 1 = short, 2 = long) */
1550 32, /* bitsize */
1551 FALSE, /* pc_relative */
1552 0, /* bitpos */
1553 complain_overflow_bitfield,/* complain_on_overflow */
1554 bfd_elf_generic_reloc, /* special_function */
1555 "R_ARM_TLS_LE32", /* name */
1556 TRUE, /* partial_inplace */
1557 0xffffffff, /* src_mask */
1558 0xffffffff, /* dst_mask */
1559 FALSE), /* pcrel_offset */
1560
1561 HOWTO (R_ARM_TLS_LDO12, /* type */
1562 0, /* rightshift */
1563 2, /* size (0 = byte, 1 = short, 2 = long) */
1564 12, /* bitsize */
1565 FALSE, /* pc_relative */
1566 0, /* bitpos */
1567 complain_overflow_bitfield,/* complain_on_overflow */
1568 bfd_elf_generic_reloc, /* special_function */
1569 "R_ARM_TLS_LDO12", /* name */
1570 FALSE, /* partial_inplace */
1571 0x00000fff, /* src_mask */
1572 0x00000fff, /* dst_mask */
1573 FALSE), /* pcrel_offset */
1574
1575 HOWTO (R_ARM_TLS_LE12, /* type */
1576 0, /* rightshift */
1577 2, /* size (0 = byte, 1 = short, 2 = long) */
1578 12, /* bitsize */
1579 FALSE, /* pc_relative */
1580 0, /* bitpos */
1581 complain_overflow_bitfield,/* complain_on_overflow */
1582 bfd_elf_generic_reloc, /* special_function */
1583 "R_ARM_TLS_LE12", /* name */
1584 FALSE, /* partial_inplace */
1585 0x00000fff, /* src_mask */
1586 0x00000fff, /* dst_mask */
1587 FALSE), /* pcrel_offset */
1588
1589 HOWTO (R_ARM_TLS_IE12GP, /* type */
1590 0, /* rightshift */
1591 2, /* size (0 = byte, 1 = short, 2 = long) */
1592 12, /* bitsize */
1593 FALSE, /* pc_relative */
1594 0, /* bitpos */
1595 complain_overflow_bitfield,/* complain_on_overflow */
1596 bfd_elf_generic_reloc, /* special_function */
1597 "R_ARM_TLS_IE12GP", /* name */
1598 FALSE, /* partial_inplace */
1599 0x00000fff, /* src_mask */
1600 0x00000fff, /* dst_mask */
1601 FALSE), /* pcrel_offset */
1602 };
1603
1604 /* 112-127 private relocations
1605 128 R_ARM_ME_TOO, obsolete
1606 129-255 unallocated in AAELF.
1607
1608 249-255 extended, currently unused, relocations: */
1609
1610 static reloc_howto_type elf32_arm_howto_table_2[4] =
1611 {
1612 HOWTO (R_ARM_RREL32, /* type */
1613 0, /* rightshift */
1614 0, /* size (0 = byte, 1 = short, 2 = long) */
1615 0, /* bitsize */
1616 FALSE, /* pc_relative */
1617 0, /* bitpos */
1618 complain_overflow_dont,/* complain_on_overflow */
1619 bfd_elf_generic_reloc, /* special_function */
1620 "R_ARM_RREL32", /* name */
1621 FALSE, /* partial_inplace */
1622 0, /* src_mask */
1623 0, /* dst_mask */
1624 FALSE), /* pcrel_offset */
1625
1626 HOWTO (R_ARM_RABS32, /* type */
1627 0, /* rightshift */
1628 0, /* size (0 = byte, 1 = short, 2 = long) */
1629 0, /* bitsize */
1630 FALSE, /* pc_relative */
1631 0, /* bitpos */
1632 complain_overflow_dont,/* complain_on_overflow */
1633 bfd_elf_generic_reloc, /* special_function */
1634 "R_ARM_RABS32", /* name */
1635 FALSE, /* partial_inplace */
1636 0, /* src_mask */
1637 0, /* dst_mask */
1638 FALSE), /* pcrel_offset */
1639
1640 HOWTO (R_ARM_RPC24, /* type */
1641 0, /* rightshift */
1642 0, /* size (0 = byte, 1 = short, 2 = long) */
1643 0, /* bitsize */
1644 FALSE, /* pc_relative */
1645 0, /* bitpos */
1646 complain_overflow_dont,/* complain_on_overflow */
1647 bfd_elf_generic_reloc, /* special_function */
1648 "R_ARM_RPC24", /* name */
1649 FALSE, /* partial_inplace */
1650 0, /* src_mask */
1651 0, /* dst_mask */
1652 FALSE), /* pcrel_offset */
1653
1654 HOWTO (R_ARM_RBASE, /* type */
1655 0, /* rightshift */
1656 0, /* size (0 = byte, 1 = short, 2 = long) */
1657 0, /* bitsize */
1658 FALSE, /* pc_relative */
1659 0, /* bitpos */
1660 complain_overflow_dont,/* complain_on_overflow */
1661 bfd_elf_generic_reloc, /* special_function */
1662 "R_ARM_RBASE", /* name */
1663 FALSE, /* partial_inplace */
1664 0, /* src_mask */
1665 0, /* dst_mask */
1666 FALSE) /* pcrel_offset */
1667 };
1668
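/* Return the howto describing ARM relocation number R_TYPE, or NULL if
   R_TYPE is not a recognized relocation.  The tables above are indexed by
   relocation number, so for instance elf32_arm_howto_from_type (R_ARM_ABS32)
   returns &elf32_arm_howto_table_1[R_ARM_ABS32].  */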
1669 static reloc_howto_type *
1670 elf32_arm_howto_from_type (unsigned int r_type)
1671 {
1672 if (r_type < ARRAY_SIZE (elf32_arm_howto_table_1))
1673 return &elf32_arm_howto_table_1[r_type];
1674
1675 if (r_type >= R_ARM_RREL32
1676 && r_type < R_ARM_RREL32 + ARRAY_SIZE (elf32_arm_howto_table_2))
1677 return &elf32_arm_howto_table_2[r_type - R_ARM_RREL32];
1678
1679 return NULL;
1680 }
1681
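/* Set the howto pointer of BFD_RELOC from the relocation type encoded in
   ELF_RELOC's r_info field.  */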
1682 static void
1683 elf32_arm_info_to_howto (bfd * abfd ATTRIBUTE_UNUSED, arelent * bfd_reloc,
1684 Elf_Internal_Rela * elf_reloc)
1685 {
1686 unsigned int r_type;
1687
1688 r_type = ELF32_R_TYPE (elf_reloc->r_info);
1689 bfd_reloc->howto = elf32_arm_howto_from_type (r_type);
1690 }
1691
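/* An entry mapping a generic BFD relocation code onto the corresponding
   ARM ELF relocation number.  */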
1692 struct elf32_arm_reloc_map
1693 {
1694 bfd_reloc_code_real_type bfd_reloc_val;
1695 unsigned char elf_reloc_val;
1696 };
1697
 1698 /* All entries in this list must also be present in elf32_arm_howto_table_1. */
1699 static const struct elf32_arm_reloc_map elf32_arm_reloc_map[] =
1700 {
1701 {BFD_RELOC_NONE, R_ARM_NONE},
1702 {BFD_RELOC_ARM_PCREL_BRANCH, R_ARM_PC24},
1703 {BFD_RELOC_ARM_PCREL_CALL, R_ARM_CALL},
1704 {BFD_RELOC_ARM_PCREL_JUMP, R_ARM_JUMP24},
1705 {BFD_RELOC_ARM_PCREL_BLX, R_ARM_XPC25},
1706 {BFD_RELOC_THUMB_PCREL_BLX, R_ARM_THM_XPC22},
1707 {BFD_RELOC_32, R_ARM_ABS32},
1708 {BFD_RELOC_32_PCREL, R_ARM_REL32},
1709 {BFD_RELOC_8, R_ARM_ABS8},
1710 {BFD_RELOC_16, R_ARM_ABS16},
1711 {BFD_RELOC_ARM_OFFSET_IMM, R_ARM_ABS12},
1712 {BFD_RELOC_ARM_THUMB_OFFSET, R_ARM_THM_ABS5},
1713 {BFD_RELOC_THUMB_PCREL_BRANCH25, R_ARM_THM_JUMP24},
1714 {BFD_RELOC_THUMB_PCREL_BRANCH23, R_ARM_THM_CALL},
1715 {BFD_RELOC_THUMB_PCREL_BRANCH12, R_ARM_THM_JUMP11},
1716 {BFD_RELOC_THUMB_PCREL_BRANCH20, R_ARM_THM_JUMP19},
1717 {BFD_RELOC_THUMB_PCREL_BRANCH9, R_ARM_THM_JUMP8},
1718 {BFD_RELOC_THUMB_PCREL_BRANCH7, R_ARM_THM_JUMP6},
1719 {BFD_RELOC_ARM_GLOB_DAT, R_ARM_GLOB_DAT},
1720 {BFD_RELOC_ARM_JUMP_SLOT, R_ARM_JUMP_SLOT},
1721 {BFD_RELOC_ARM_RELATIVE, R_ARM_RELATIVE},
1722 {BFD_RELOC_ARM_GOTOFF, R_ARM_GOTOFF32},
1723 {BFD_RELOC_ARM_GOTPC, R_ARM_GOTPC},
1724 {BFD_RELOC_ARM_GOT_PREL, R_ARM_GOT_PREL},
1725 {BFD_RELOC_ARM_GOT32, R_ARM_GOT32},
1726 {BFD_RELOC_ARM_PLT32, R_ARM_PLT32},
1727 {BFD_RELOC_ARM_TARGET1, R_ARM_TARGET1},
1728 {BFD_RELOC_ARM_ROSEGREL32, R_ARM_ROSEGREL32},
1729 {BFD_RELOC_ARM_SBREL32, R_ARM_SBREL32},
1730 {BFD_RELOC_ARM_PREL31, R_ARM_PREL31},
1731 {BFD_RELOC_ARM_TARGET2, R_ARM_TARGET2},

1733 {BFD_RELOC_ARM_TLS_GD32, R_ARM_TLS_GD32},
1734 {BFD_RELOC_ARM_TLS_LDO32, R_ARM_TLS_LDO32},
1735 {BFD_RELOC_ARM_TLS_LDM32, R_ARM_TLS_LDM32},
1736 {BFD_RELOC_ARM_TLS_DTPMOD32, R_ARM_TLS_DTPMOD32},
1737 {BFD_RELOC_ARM_TLS_DTPOFF32, R_ARM_TLS_DTPOFF32},
1738 {BFD_RELOC_ARM_TLS_TPOFF32, R_ARM_TLS_TPOFF32},
1739 {BFD_RELOC_ARM_TLS_IE32, R_ARM_TLS_IE32},
1740 {BFD_RELOC_ARM_TLS_LE32, R_ARM_TLS_LE32},
1741 {BFD_RELOC_VTABLE_INHERIT, R_ARM_GNU_VTINHERIT},
1742 {BFD_RELOC_VTABLE_ENTRY, R_ARM_GNU_VTENTRY},
1743 {BFD_RELOC_ARM_MOVW, R_ARM_MOVW_ABS_NC},
1744 {BFD_RELOC_ARM_MOVT, R_ARM_MOVT_ABS},
1745 {BFD_RELOC_ARM_MOVW_PCREL, R_ARM_MOVW_PREL_NC},
1746 {BFD_RELOC_ARM_MOVT_PCREL, R_ARM_MOVT_PREL},
1747 {BFD_RELOC_ARM_THUMB_MOVW, R_ARM_THM_MOVW_ABS_NC},
1748 {BFD_RELOC_ARM_THUMB_MOVT, R_ARM_THM_MOVT_ABS},
1749 {BFD_RELOC_ARM_THUMB_MOVW_PCREL, R_ARM_THM_MOVW_PREL_NC},
1750 {BFD_RELOC_ARM_THUMB_MOVT_PCREL, R_ARM_THM_MOVT_PREL},
1751 {BFD_RELOC_ARM_ALU_PC_G0_NC, R_ARM_ALU_PC_G0_NC},
1752 {BFD_RELOC_ARM_ALU_PC_G0, R_ARM_ALU_PC_G0},
1753 {BFD_RELOC_ARM_ALU_PC_G1_NC, R_ARM_ALU_PC_G1_NC},
1754 {BFD_RELOC_ARM_ALU_PC_G1, R_ARM_ALU_PC_G1},
1755 {BFD_RELOC_ARM_ALU_PC_G2, R_ARM_ALU_PC_G2},
1756 {BFD_RELOC_ARM_LDR_PC_G0, R_ARM_LDR_PC_G0},
1757 {BFD_RELOC_ARM_LDR_PC_G1, R_ARM_LDR_PC_G1},
1758 {BFD_RELOC_ARM_LDR_PC_G2, R_ARM_LDR_PC_G2},
1759 {BFD_RELOC_ARM_LDRS_PC_G0, R_ARM_LDRS_PC_G0},
1760 {BFD_RELOC_ARM_LDRS_PC_G1, R_ARM_LDRS_PC_G1},
1761 {BFD_RELOC_ARM_LDRS_PC_G2, R_ARM_LDRS_PC_G2},
1762 {BFD_RELOC_ARM_LDC_PC_G0, R_ARM_LDC_PC_G0},
1763 {BFD_RELOC_ARM_LDC_PC_G1, R_ARM_LDC_PC_G1},
1764 {BFD_RELOC_ARM_LDC_PC_G2, R_ARM_LDC_PC_G2},
1765 {BFD_RELOC_ARM_ALU_SB_G0_NC, R_ARM_ALU_SB_G0_NC},
1766 {BFD_RELOC_ARM_ALU_SB_G0, R_ARM_ALU_SB_G0},
1767 {BFD_RELOC_ARM_ALU_SB_G1_NC, R_ARM_ALU_SB_G1_NC},
1768 {BFD_RELOC_ARM_ALU_SB_G1, R_ARM_ALU_SB_G1},
1769 {BFD_RELOC_ARM_ALU_SB_G2, R_ARM_ALU_SB_G2},
1770 {BFD_RELOC_ARM_LDR_SB_G0, R_ARM_LDR_SB_G0},
1771 {BFD_RELOC_ARM_LDR_SB_G1, R_ARM_LDR_SB_G1},
1772 {BFD_RELOC_ARM_LDR_SB_G2, R_ARM_LDR_SB_G2},
1773 {BFD_RELOC_ARM_LDRS_SB_G0, R_ARM_LDRS_SB_G0},
1774 {BFD_RELOC_ARM_LDRS_SB_G1, R_ARM_LDRS_SB_G1},
1775 {BFD_RELOC_ARM_LDRS_SB_G2, R_ARM_LDRS_SB_G2},
1776 {BFD_RELOC_ARM_LDC_SB_G0, R_ARM_LDC_SB_G0},
1777 {BFD_RELOC_ARM_LDC_SB_G1, R_ARM_LDC_SB_G1},
1778 {BFD_RELOC_ARM_LDC_SB_G2, R_ARM_LDC_SB_G2},
1779 {BFD_RELOC_ARM_V4BX, R_ARM_V4BX}
1780 };
1781
1782 static reloc_howto_type *
1783 elf32_arm_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1784 bfd_reloc_code_real_type code)
1785 {
1786 unsigned int i;
1787
1788 for (i = 0; i < ARRAY_SIZE (elf32_arm_reloc_map); i ++)
1789 if (elf32_arm_reloc_map[i].bfd_reloc_val == code)
1790 return elf32_arm_howto_from_type (elf32_arm_reloc_map[i].elf_reloc_val);
1791
1792 return NULL;
1793 }
1794
1795 static reloc_howto_type *
1796 elf32_arm_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1797 const char *r_name)
1798 {
1799 unsigned int i;
1800
1801 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_1); i++)
1802 if (elf32_arm_howto_table_1[i].name != NULL
1803 && strcasecmp (elf32_arm_howto_table_1[i].name, r_name) == 0)
1804 return &elf32_arm_howto_table_1[i];
1805
1806 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_2); i++)
1807 if (elf32_arm_howto_table_2[i].name != NULL
1808 && strcasecmp (elf32_arm_howto_table_2[i].name, r_name) == 0)
1809 return &elf32_arm_howto_table_2[i];
1810
1811 return NULL;
1812 }
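
/* Illustrative sketch (not compiled) of the two lookup paths above,
   using values that do appear in the tables; "abfd" stands for any bfd:

     reloc_howto_type *by_code, *by_name;

     by_code = elf32_arm_reloc_type_lookup (abfd, BFD_RELOC_32);
     by_name = elf32_arm_reloc_name_lookup (abfd, "r_arm_abs32");

   Both calls should yield the R_ARM_ABS32 howto; the name lookup is
   case-insensitive because it uses strcasecmp.  */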
1813
1814 /* Support for core dump NOTE sections. */
1815
1816 static bfd_boolean
1817 elf32_arm_nabi_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
1818 {
1819 int offset;
1820 size_t size;
1821
1822 switch (note->descsz)
1823 {
1824 default:
1825 return FALSE;
1826
1827 case 148: /* Linux/ARM 32-bit. */
1828 /* pr_cursig */
1829 elf_tdata (abfd)->core_signal = bfd_get_16 (abfd, note->descdata + 12);
1830
1831 /* pr_pid */
1832 elf_tdata (abfd)->core_lwpid = bfd_get_32 (abfd, note->descdata + 24);
1833
1834 /* pr_reg */
1835 offset = 72;
1836 size = 72;
1837
1838 break;
1839 }
1840
1841 /* Make a ".reg/999" section. */
1842 return _bfd_elfcore_make_pseudosection (abfd, ".reg",
1843 size, note->descpos + offset);
1844 }
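
/* For orientation, the 148-byte Linux/ARM note accepted above breaks
   down roughly as follows (only the offsets actually read by the code
   are relied upon; the rest is the usual elf_prstatus layout):

     offset  12   pr_cursig  (16 bits)
     offset  24   pr_pid     (32 bits)
     offset  72   pr_reg     (18 registers * 4 bytes = 72 bytes)

   72 + 72 = 144, leaving 4 bytes of trailer to reach descsz == 148.  */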
1845
1846 static bfd_boolean
1847 elf32_arm_nabi_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
1848 {
1849 switch (note->descsz)
1850 {
1851 default:
1852 return FALSE;
1853
1854 case 124: /* Linux/ARM elf_prpsinfo. */
1855 elf_tdata (abfd)->core_program
1856 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
1857 elf_tdata (abfd)->core_command
1858 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
1859 }
1860
1861 /* Note that for some reason, at least one implementation tacks
1862 a spurious space onto the end of the args, so strip it off
1863 if it exists. */
1864 {
1865 char *command = elf_tdata (abfd)->core_command;
1866 int n = strlen (command);
1867
1868 if (0 < n && command[n - 1] == ' ')
1869 command[n - 1] = '\0';
1870 }
1871
1872 return TRUE;
1873 }
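
/* Similarly, the 124-byte elf_prpsinfo note accepted above is assumed
   to carry its 16-byte program name at offset 28 and its 80-byte
   argument string at offset 44, so 44 + 80 == 124 == descsz.  */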
1874
1875 #define TARGET_LITTLE_SYM bfd_elf32_littlearm_vec
1876 #define TARGET_LITTLE_NAME "elf32-littlearm"
1877 #define TARGET_BIG_SYM bfd_elf32_bigarm_vec
1878 #define TARGET_BIG_NAME "elf32-bigarm"
1879
1880 #define elf_backend_grok_prstatus elf32_arm_nabi_grok_prstatus
1881 #define elf_backend_grok_psinfo elf32_arm_nabi_grok_psinfo
1882
1883 typedef unsigned long int insn32;
1884 typedef unsigned short int insn16;
1885
1886 /* In lieu of proper flags, assume all EABIv4 or later objects are
1887 interworkable. */
1888 #define INTERWORK_FLAG(abfd) \
1889 (EF_ARM_EABI_VERSION (elf_elfheader (abfd)->e_flags) >= EF_ARM_EABI_VER4 \
1890 || (elf_elfheader (abfd)->e_flags & EF_ARM_INTERWORK) \
1891 || ((abfd)->flags & BFD_LINKER_CREATED))
1892
1893 /* The linker script knows the section names for placement.
1894 The entry_names are used to do simple name mangling on the stubs.
1895 Given a function name, and its type, the stub can be found. The
1896 name can be changed. The only requirement is that the %s be present. */
1897 #define THUMB2ARM_GLUE_SECTION_NAME ".glue_7t"
1898 #define THUMB2ARM_GLUE_ENTRY_NAME "__%s_from_thumb"
1899
1900 #define ARM2THUMB_GLUE_SECTION_NAME ".glue_7"
1901 #define ARM2THUMB_GLUE_ENTRY_NAME "__%s_from_arm"
1902
1903 #define VFP11_ERRATUM_VENEER_SECTION_NAME ".vfp11_veneer"
1904 #define VFP11_ERRATUM_VENEER_ENTRY_NAME "__vfp11_veneer_%x"
1905
1906 #define ARM_BX_GLUE_SECTION_NAME ".v4_bx"
1907 #define ARM_BX_GLUE_ENTRY_NAME "__bx_r%d"
1908
1909 #define STUB_ENTRY_NAME "__%s_veneer"
1910
1911 /* The name of the dynamic interpreter. This is put in the .interp
1912 section. */
1913 #define ELF_DYNAMIC_INTERPRETER "/usr/lib/ld.so.1"
1914
1915 #ifdef FOUR_WORD_PLT
1916
1917 /* The first entry in a procedure linkage table looks like
1918 this. It is set up so that any shared library function that is
1919 called before the relocation has been set up calls the dynamic
1920 linker first. */
1921 static const bfd_vma elf32_arm_plt0_entry [] =
1922 {
1923 0xe52de004, /* str lr, [sp, #-4]! */
1924 0xe59fe010, /* ldr lr, [pc, #16] */
1925 0xe08fe00e, /* add lr, pc, lr */
1926 0xe5bef008, /* ldr pc, [lr, #8]! */
1927 };
1928
1929 /* Subsequent entries in a procedure linkage table look like
1930 this. */
1931 static const bfd_vma elf32_arm_plt_entry [] =
1932 {
1933 0xe28fc600, /* add ip, pc, #NN */
1934 0xe28cca00, /* add ip, ip, #NN */
1935 0xe5bcf000, /* ldr pc, [ip, #NN]! */
1936 0x00000000, /* unused */
1937 };
1938
1939 #else
1940
1941 /* The first entry in a procedure linkage table looks like
1942 this. It is set up so that any shared library function that is
1943 called before the relocation has been set up calls the dynamic
1944 linker first. */
1945 static const bfd_vma elf32_arm_plt0_entry [] =
1946 {
1947 0xe52de004, /* str lr, [sp, #-4]! */
1948 0xe59fe004, /* ldr lr, [pc, #4] */
1949 0xe08fe00e, /* add lr, pc, lr */
1950 0xe5bef008, /* ldr pc, [lr, #8]! */
1951 0x00000000, /* &GOT[0] - . */
1952 };
1953
1954 /* Subsequent entries in a procedure linkage table look like
1955 this. */
1956 static const bfd_vma elf32_arm_plt_entry [] =
1957 {
1958 0xe28fc600, /* add ip, pc, #0xNN00000 */
1959 0xe28cca00, /* add ip, ip, #0xNN000 */
1960 0xe5bcf000, /* ldr pc, [ip, #0xNNN]! */
1961 };
1962
1963 #endif
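
/* Illustrative sketch (not part of the build) of how the immediates in
   the three-instruction PLT entry above are expected to be filled in.
   If D is the displacement from the pc seen by the first instruction to
   the symbol's GOT slot, one natural split is:

     first  add: (D & 0x0ff00000) >> 20   -- the #0xNN00000 field
     second add: (D & 0x000ff000) >> 12   -- the #0xNN000 field
     final  ldr: (D & 0x00000fff)         -- the #0xNNN field

   so that ip ends up addressing the GOT slot, the ldr loads its
   contents into pc, and the writeback (!) leaves the slot address in
   ip for the lazy-resolution code reached through PLT entry 0.  The
   actual field insertion is performed elsewhere in this file when the
   PLT is laid out.  */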
1964
1965 /* The format of the first entry in the procedure linkage table
1966 for a VxWorks executable. */
1967 static const bfd_vma elf32_arm_vxworks_exec_plt0_entry[] =
1968 {
1969 0xe52dc008, /* str ip,[sp,#-8]! */
1970 0xe59fc000, /* ldr ip,[pc] */
1971 0xe59cf008, /* ldr pc,[ip,#8] */
1972 0x00000000, /* .long _GLOBAL_OFFSET_TABLE_ */
1973 };
1974
1975 /* The format of subsequent entries in a VxWorks executable. */
1976 static const bfd_vma elf32_arm_vxworks_exec_plt_entry[] =
1977 {
1978 0xe59fc000, /* ldr ip,[pc] */
1979 0xe59cf000, /* ldr pc,[ip] */
1980 0x00000000, /* .long @got */
1981 0xe59fc000, /* ldr ip,[pc] */
1982 0xea000000, /* b _PLT */
1983 0x00000000, /* .long @pltindex*sizeof(Elf32_Rela) */
1984 };
1985
1986 /* The format of entries in a VxWorks shared library. */
1987 static const bfd_vma elf32_arm_vxworks_shared_plt_entry[] =
1988 {
1989 0xe59fc000, /* ldr ip,[pc] */
1990 0xe79cf009, /* ldr pc,[ip,r9] */
1991 0x00000000, /* .long @got */
1992 0xe59fc000, /* ldr ip,[pc] */
1993 0xe599f008, /* ldr pc,[r9,#8] */
1994 0x00000000, /* .long @pltindex*sizeof(Elf32_Rela) */
1995 };
1996
1997 /* An initial stub used if the PLT entry is referenced from Thumb code. */
1998 #define PLT_THUMB_STUB_SIZE 4
1999 static const bfd_vma elf32_arm_plt_thumb_stub [] =
2000 {
2001 0x4778, /* bx pc */
2002 0x46c0 /* nop */
2003 };
2004
2005 /* The entries in a PLT when using a DLL-based target with multiple
2006 address spaces. */
2007 static const bfd_vma elf32_arm_symbian_plt_entry [] =
2008 {
2009 0xe51ff004, /* ldr pc, [pc, #-4] */
2010 0x00000000, /* dcd R_ARM_GLOB_DAT(X) */
2011 };
2012
2013 #define ARM_MAX_FWD_BRANCH_OFFSET ((((1 << 23) - 1) << 2) + 8)
2014 #define ARM_MAX_BWD_BRANCH_OFFSET ((-((1 << 23) << 2)) + 8)
2015 #define THM_MAX_FWD_BRANCH_OFFSET ((1 << 22) -2 + 4)
2016 #define THM_MAX_BWD_BRANCH_OFFSET (-(1 << 22) + 4)
2017 #define THM2_MAX_FWD_BRANCH_OFFSET (((1 << 24) - 2) + 4)
2018 #define THM2_MAX_BWD_BRANCH_OFFSET (-(1 << 24) + 4)
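
/* Worked example of the ARM limits above (arithmetic only): B/BL carry
   a signed 24-bit immediate that is shifted left by two and applied
   relative to a pc that reads 8 bytes beyond the branch, so

     ARM_MAX_FWD_BRANCH_OFFSET = ((8388607) << 2) + 8 =  33554436
     ARM_MAX_BWD_BRANCH_OFFSET = -(8388608  << 2) + 8 = -33554424

   The Thumb and Thumb-2 limits follow the same pattern, with the
   Thumb pc reading 4 bytes beyond the branch.  */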
2019
2020 enum stub_insn_type
2021 {
2022 THUMB16_TYPE = 1,
2023 THUMB32_TYPE,
2024 ARM_TYPE,
2025 DATA_TYPE
2026 };
2027
2028 #define THUMB16_INSN(X) {(X), THUMB16_TYPE, R_ARM_NONE, 0}
2029 /* A bit of a hack. A Thumb conditional branch, in which the proper condition
2030 is inserted in arm_build_one_stub(). */
2031 #define THUMB16_BCOND_INSN(X) {(X), THUMB16_TYPE, R_ARM_NONE, 1}
2032 #define THUMB32_INSN(X) {(X), THUMB32_TYPE, R_ARM_NONE, 0}
2033 #define THUMB32_B_INSN(X, Z) {(X), THUMB32_TYPE, R_ARM_THM_JUMP24, (Z)}
2034 #define ARM_INSN(X) {(X), ARM_TYPE, R_ARM_NONE, 0}
2035 #define ARM_REL_INSN(X, Z) {(X), ARM_TYPE, R_ARM_JUMP24, (Z)}
2036 #define DATA_WORD(X,Y,Z) {(X), DATA_TYPE, (Y), (Z)}
2037
2038 typedef struct
2039 {
2040 bfd_vma data;
2041 enum stub_insn_type type;
2042 unsigned int r_type;
2043 int reloc_addend;
2044 } insn_sequence;
2045
2046 /* Arm/Thumb -> Arm/Thumb long branch stub. On V5T and above, use blx
2047 to reach the stub if necessary. */
2048 static const insn_sequence elf32_arm_stub_long_branch_any_any[] =
2049 {
2050 ARM_INSN(0xe51ff004), /* ldr pc, [pc, #-4] */
2051 DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2052 };
2053
2054 /* V4T Arm -> Thumb long branch stub. Used on V4T where blx is not
2055 available. */
2056 static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb[] =
2057 {
2058 ARM_INSN(0xe59fc000), /* ldr ip, [pc, #0] */
2059 ARM_INSN(0xe12fff1c), /* bx ip */
2060 DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2061 };
2062
2063 /* Thumb -> Thumb long branch stub. Used on M-profile architectures. */
2064 static const insn_sequence elf32_arm_stub_long_branch_thumb_only[] =
2065 {
2066 THUMB16_INSN(0xb401), /* push {r0} */
2067 THUMB16_INSN(0x4802), /* ldr r0, [pc, #8] */
2068 THUMB16_INSN(0x4684), /* mov ip, r0 */
2069 THUMB16_INSN(0xbc01), /* pop {r0} */
2070 THUMB16_INSN(0x4760), /* bx ip */
2071 THUMB16_INSN(0xbf00), /* nop */
2072 DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2073 };
2074
2075 /* V4T Thumb -> Thumb long branch stub. Using the stack is not
2076 allowed. */
2077 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb[] =
2078 {
2079 THUMB16_INSN(0x4778), /* bx pc */
2080 THUMB16_INSN(0x46c0), /* nop */
2081 ARM_INSN(0xe59fc000), /* ldr ip, [pc, #0] */
2082 ARM_INSN(0xe12fff1c), /* bx ip */
2083 DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2084 };
2085
2086 /* V4T Thumb -> ARM long branch stub. Used on V4T where blx is not
2087 available. */
2088 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm[] =
2089 {
2090 THUMB16_INSN(0x4778), /* bx pc */
2091 THUMB16_INSN(0x46c0), /* nop */
2092 ARM_INSN(0xe51ff004), /* ldr pc, [pc, #-4] */
2093 DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2094 };
2095
2096 /* V4T Thumb -> ARM short branch stub. Shorter variant of the above
2097 one, when the destination is close enough. */
2098 static const insn_sequence elf32_arm_stub_short_branch_v4t_thumb_arm[] =
2099 {
2100 THUMB16_INSN(0x4778), /* bx pc */
2101 THUMB16_INSN(0x46c0), /* nop */
2102 ARM_REL_INSN(0xea000000, -8), /* b (X-8) */
2103 };
2104
2105 /* ARM/Thumb -> ARM long branch stub, PIC. On V5T and above, use
2106 blx to reach the stub if necessary. */
2107 static const insn_sequence elf32_arm_stub_long_branch_any_arm_pic[] =
2108 {
2109 ARM_INSN(0xe59fc000), /* ldr ip, [pc] */
2110 ARM_INSN(0xe08ff00c), /* add pc, pc, ip */
2111 DATA_WORD(0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */
2112 };
2113
2114 /* ARM/Thumb -> Thumb long branch stub, PIC. On V5T and above, use
2115 blx to reach the stub if necessary. We cannot add into pc;
2116 adding into pc is not guaranteed to switch mode (the behaviour
2117 differs between ARMv6 and ARMv7). */
2118 static const insn_sequence elf32_arm_stub_long_branch_any_thumb_pic[] =
2119 {
2120 ARM_INSN(0xe59fc004), /* ldr ip, [pc, #4] */
2121 ARM_INSN(0xe08fc00c), /* add ip, pc, ip */
2122 ARM_INSN(0xe12fff1c), /* bx ip */
2123 DATA_WORD(0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2124 };
2125
2126 /* V4T ARM -> Thumb long branch stub, PIC. */
2127 static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb_pic[] =
2128 {
2129 ARM_INSN(0xe59fc004), /* ldr ip, [pc, #4] */
2130 ARM_INSN(0xe08fc00c), /* add ip, pc, ip */
2131 ARM_INSN(0xe12fff1c), /* bx ip */
2132 DATA_WORD(0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2133 };
2134
2135 /* V4T Thumb -> ARM long branch stub, PIC. */
2136 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm_pic[] =
2137 {
2138 THUMB16_INSN(0x4778), /* bx pc */
2139 THUMB16_INSN(0x46c0), /* nop */
2140 ARM_INSN(0xe59fc000), /* ldr ip, [pc, #0] */
2141 ARM_INSN(0xe08cf00f), /* add pc, ip, pc */
2142 DATA_WORD(0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */
2143 };
2144
2145 /* Thumb -> Thumb long branch stub, PIC. Used on M-profile
2146 architectures. */
2147 static const insn_sequence elf32_arm_stub_long_branch_thumb_only_pic[] =
2148 {
2149 THUMB16_INSN(0xb401), /* push {r0} */
2150 THUMB16_INSN(0x4802), /* ldr r0, [pc, #8] */
2151 THUMB16_INSN(0x46fc), /* mov ip, pc */
2152 THUMB16_INSN(0x4484), /* add ip, r0 */
2153 THUMB16_INSN(0xbc01), /* pop {r0} */
2154 THUMB16_INSN(0x4760), /* bx ip */
2155 DATA_WORD(0, R_ARM_REL32, 4), /* dcd R_ARM_REL32(X) */
2156 };
2157
2158 /* V4T Thumb -> Thumb long branch stub, PIC. Using the stack is not
2159 allowed. */
2160 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb_pic[] =
2161 {
2162 THUMB16_INSN(0x4778), /* bx pc */
2163 THUMB16_INSN(0x46c0), /* nop */
2164 ARM_INSN(0xe59fc004), /* ldr ip, [pc, #4] */
2165 ARM_INSN(0xe08fc00c), /* add ip, pc, ip */
2166 ARM_INSN(0xe12fff1c), /* bx ip */
2167 DATA_WORD(0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2168 };
2169
2170 /* Cortex-A8 erratum-workaround stubs. */
2171
2172 /* Stub used for conditional branches (which may be beyond +/-1MB away, so we
2173 can't use a conditional branch to reach this stub). */
2174
2175 static const insn_sequence elf32_arm_stub_a8_veneer_b_cond[] =
2176 {
2177 THUMB16_BCOND_INSN(0xd001), /* b<cond>.n true. */
2178 THUMB32_B_INSN(0xf000b800, -4), /* b.w insn_after_original_branch. */
2179 THUMB32_B_INSN(0xf000b800, -4) /* true: b.w original_branch_dest. */
2180 };
2181
2182 /* Stub used for b.w and bl.w instructions. */
2183
2184 static const insn_sequence elf32_arm_stub_a8_veneer_b[] =
2185 {
2186 THUMB32_B_INSN(0xf000b800, -4) /* b.w original_branch_dest. */
2187 };
2188
2189 static const insn_sequence elf32_arm_stub_a8_veneer_bl[] =
2190 {
2191 THUMB32_B_INSN(0xf000b800, -4) /* b.w original_branch_dest. */
2192 };
2193
2194 /* Stub used for Thumb-2 blx.w instructions. We modified the original blx.w
2195 instruction (which switches to ARM mode) to point to this stub. Jump to the
2196 real destination using an ARM-mode branch. */
2197
2198 static const insn_sequence elf32_arm_stub_a8_veneer_blx[] =
2199 {
2200 ARM_REL_INSN(0xea000000, -8) /* b original_branch_dest. */
2201 };
2202
2203 /* Section name for stubs is the associated section name plus this
2204 string. */
2205 #define STUB_SUFFIX ".stub"
2206
2207 /* One entry per long/short branch stub defined above. */
2208 #define DEF_STUBS \
2209 DEF_STUB(long_branch_any_any) \
2210 DEF_STUB(long_branch_v4t_arm_thumb) \
2211 DEF_STUB(long_branch_thumb_only) \
2212 DEF_STUB(long_branch_v4t_thumb_thumb) \
2213 DEF_STUB(long_branch_v4t_thumb_arm) \
2214 DEF_STUB(short_branch_v4t_thumb_arm) \
2215 DEF_STUB(long_branch_any_arm_pic) \
2216 DEF_STUB(long_branch_any_thumb_pic) \
2217 DEF_STUB(long_branch_v4t_thumb_thumb_pic) \
2218 DEF_STUB(long_branch_v4t_arm_thumb_pic) \
2219 DEF_STUB(long_branch_v4t_thumb_arm_pic) \
2220 DEF_STUB(long_branch_thumb_only_pic) \
2221 DEF_STUB(a8_veneer_b_cond) \
2222 DEF_STUB(a8_veneer_b) \
2223 DEF_STUB(a8_veneer_bl) \
2224 DEF_STUB(a8_veneer_blx)
2225
2226 #define DEF_STUB(x) arm_stub_##x,
2227 enum elf32_arm_stub_type {
2228 arm_stub_none,
2229 DEF_STUBS
2230 /* Note the first a8_veneer type. */
2231 arm_stub_a8_veneer_lwm = arm_stub_a8_veneer_b_cond
2232 };
2233 #undef DEF_STUB
2234
2235 typedef struct
2236 {
2237 const insn_sequence* template_sequence;
2238 int template_size;
2239 } stub_def;
2240
2241 #define DEF_STUB(x) {elf32_arm_stub_##x, ARRAY_SIZE(elf32_arm_stub_##x)},
2242 static const stub_def stub_definitions[] = {
2243 {NULL, 0},
2244 DEF_STUBS
2245 };
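
/* For reference, a sketch of what the X-macro above expands to (the
   enum uses the same DEF_STUBS list, so the indices line up):

     enum elf32_arm_stub_type {
       arm_stub_none,                      -- index 0
       arm_stub_long_branch_any_any,       -- index 1
       ...
       arm_stub_a8_veneer_blx,
       arm_stub_a8_veneer_lwm = arm_stub_a8_veneer_b_cond
     };

     static const stub_def stub_definitions[] = {
       {NULL, 0},                          -- slot for arm_stub_none
       {elf32_arm_stub_long_branch_any_any,
        ARRAY_SIZE (elf32_arm_stub_long_branch_any_any)},
       ...
     };

   so stub_definitions[stub_type] gives the template and its length for
   any valid stub_type.  */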
2246
2247 struct elf32_arm_stub_hash_entry
2248 {
2249 /* Base hash table entry structure. */
2250 struct bfd_hash_entry root;
2251
2252 /* The stub section. */
2253 asection *stub_sec;
2254
2255 /* Offset within stub_sec of the beginning of this stub. */
2256 bfd_vma stub_offset;
2257
2258 /* Given the symbol's value and its section we can determine its final
2259 value when building the stubs (so the stub knows where to jump). */
2260 bfd_vma target_value;
2261 asection *target_section;
2262
2263 /* Offset to apply to relocation referencing target_value. */
2264 bfd_vma target_addend;
2265
2266 /* The instruction which caused this stub to be generated (only valid for
2267 Cortex-A8 erratum workaround stubs at present). */
2268 unsigned long orig_insn;
2269
2270 /* The stub type. */
2271 enum elf32_arm_stub_type stub_type;
2272 /* Its encoding size in bytes. */
2273 int stub_size;
2274 /* Its template. */
2275 const insn_sequence *stub_template;
2276 /* The size of the template (number of entries). */
2277 int stub_template_size;
2278
2279 /* The symbol table entry, if any, that this was derived from. */
2280 struct elf32_arm_link_hash_entry *h;
2281
2282 /* Destination symbol type (STT_ARM_TFUNC, ...) */
2283 unsigned char st_type;
2284
2285 /* Where this stub is being called from, or, in the case of combined
2286 stub sections, the first input section in the group. */
2287 asection *id_sec;
2288
2289 /* The name for the local symbol at the start of this stub. The
2290 stub name in the hash table has to be unique; this does not, so
2291 it can be friendlier. */
2292 char *output_name;
2293 };
2294
2295 /* Used to build a map of a section. This is required for mixed-endian
2296 code/data. */
2297
2298 typedef struct elf32_elf_section_map
2299 {
2300 bfd_vma vma;
2301 char type;
2302 }
2303 elf32_arm_section_map;
2304
2305 /* Information about a VFP11 erratum veneer, or a branch to such a veneer. */
2306
2307 typedef enum
2308 {
2309 VFP11_ERRATUM_BRANCH_TO_ARM_VENEER,
2310 VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER,
2311 VFP11_ERRATUM_ARM_VENEER,
2312 VFP11_ERRATUM_THUMB_VENEER
2313 }
2314 elf32_vfp11_erratum_type;
2315
2316 typedef struct elf32_vfp11_erratum_list
2317 {
2318 struct elf32_vfp11_erratum_list *next;
2319 bfd_vma vma;
2320 union
2321 {
2322 struct
2323 {
2324 struct elf32_vfp11_erratum_list *veneer;
2325 unsigned int vfp_insn;
2326 } b;
2327 struct
2328 {
2329 struct elf32_vfp11_erratum_list *branch;
2330 unsigned int id;
2331 } v;
2332 } u;
2333 elf32_vfp11_erratum_type type;
2334 }
2335 elf32_vfp11_erratum_list;
2336
2337 typedef enum
2338 {
2339 DELETE_EXIDX_ENTRY,
2340 INSERT_EXIDX_CANTUNWIND_AT_END
2341 }
2342 arm_unwind_edit_type;
2343
2344 /* A (sorted) list of edits to apply to an unwind table. */
2345 typedef struct arm_unwind_table_edit
2346 {
2347 arm_unwind_edit_type type;
2348 /* Note: we sometimes want to insert an unwind entry corresponding to a
2349 section different from the one we're currently writing out, so record the
2350 (text) section this edit relates to here. */
2351 asection *linked_section;
2352 unsigned int index;
2353 struct arm_unwind_table_edit *next;
2354 }
2355 arm_unwind_table_edit;
2356
2357 typedef struct _arm_elf_section_data
2358 {
2359 /* Information about mapping symbols. */
2360 struct bfd_elf_section_data elf;
2361 unsigned int mapcount;
2362 unsigned int mapsize;
2363 elf32_arm_section_map *map;
2364 /* Information about CPU errata. */
2365 unsigned int erratumcount;
2366 elf32_vfp11_erratum_list *erratumlist;
2367 /* Information about unwind tables. */
2368 union
2369 {
2370 /* Unwind info attached to a text section. */
2371 struct
2372 {
2373 asection *arm_exidx_sec;
2374 } text;
2375
2376 /* Unwind info attached to an .ARM.exidx section. */
2377 struct
2378 {
2379 arm_unwind_table_edit *unwind_edit_list;
2380 arm_unwind_table_edit *unwind_edit_tail;
2381 } exidx;
2382 } u;
2383 }
2384 _arm_elf_section_data;
2385
2386 #define elf32_arm_section_data(sec) \
2387 ((_arm_elf_section_data *) elf_section_data (sec))
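
/* Typical (sketch) use of the accessor above; "sdata", "sec" and
   "first_vma" are hypothetical locals, and the real bookkeeping lives
   in the mapping-symbol and unwind code elsewhere in this file:

     _arm_elf_section_data *sdata = elf32_arm_section_data (sec);

     if (sdata != NULL && sdata->mapcount > 0)
       first_vma = sdata->map[0].vma;

   The macro merely casts the generic elf_section_data back to the
   ARM-specific structure, assuming the section was created through the
   ARM section hook.  */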
2388
2389 /* A fix which might be required for Cortex-A8 Thumb-2 branch/TLB erratum.
2390 These fixes are subject to a relaxation procedure (in elf32_arm_size_stubs),
2391 so they may be created multiple times: whilst relaxing we keep an array of
2392 these entries, which is easy to refresh, and only create stubs for each
2393 potentially erratum-triggering instruction once we've settled on a solution. */
2394
2395 struct a8_erratum_fix {
2396 bfd *input_bfd;
2397 asection *section;
2398 bfd_vma offset;
2399 bfd_vma addend;
2400 unsigned long orig_insn;
2401 char *stub_name;
2402 enum elf32_arm_stub_type stub_type;
2403 int st_type;
2404 };
2405
2406 /* A table of relocs applied to branches which might trigger the
2407 Cortex-A8 erratum. */
2408
2409 struct a8_erratum_reloc {
2410 bfd_vma from;
2411 bfd_vma destination;
2412 struct elf32_arm_link_hash_entry *hash;
2413 const char *sym_name;
2414 unsigned int r_type;
2415 unsigned char st_type;
2416 bfd_boolean non_a8_stub;
2417 };
2418
2419 /* The size of the thread control block. */
2420 #define TCB_SIZE 8
2421
2422 struct elf_arm_obj_tdata
2423 {
2424 struct elf_obj_tdata root;
2425
2426 /* tls_type for each local got entry. */
2427 char *local_got_tls_type;
2428
2429 /* Zero to warn when linking objects with incompatible enum sizes. */
2430 int no_enum_size_warning;
2431
2432 /* Zero to warn when linking objects with incompatible wchar_t sizes. */
2433 int no_wchar_size_warning;
2434 };
2435
2436 #define elf_arm_tdata(bfd) \
2437 ((struct elf_arm_obj_tdata *) (bfd)->tdata.any)
2438
2439 #define elf32_arm_local_got_tls_type(bfd) \
2440 (elf_arm_tdata (bfd)->local_got_tls_type)
2441
2442 #define is_arm_elf(bfd) \
2443 (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
2444 && elf_tdata (bfd) != NULL \
2445 && elf_object_id (bfd) == ARM_ELF_DATA)
2446
2447 static bfd_boolean
2448 elf32_arm_mkobject (bfd *abfd)
2449 {
2450 return bfd_elf_allocate_object (abfd, sizeof (struct elf_arm_obj_tdata),
2451 ARM_ELF_DATA);
2452 }
2453
2454 /* The ARM linker needs to keep track of the number of relocs that it
2455 decides to copy in check_relocs for each symbol. This is so that
2456 it can discard PC relative relocs if it doesn't need them when
2457 linking with -Bsymbolic. We store the information in a field
2458 extending the regular ELF linker hash table. */
2459
2460 /* This structure keeps track of the number of relocs we have copied
2461 for a given symbol. */
2462 struct elf32_arm_relocs_copied
2463 {
2464 /* Next section. */
2465 struct elf32_arm_relocs_copied * next;
2466 /* A section in dynobj. */
2467 asection * section;
2468 /* Number of relocs copied in this section. */
2469 bfd_size_type count;
2470 /* Number of PC-relative relocs copied in this section. */
2471 bfd_size_type pc_count;
2472 };
2473
2474 #define elf32_arm_hash_entry(ent) ((struct elf32_arm_link_hash_entry *)(ent))
2475
2476 /* Arm ELF linker hash entry. */
2477 struct elf32_arm_link_hash_entry
2478 {
2479 struct elf_link_hash_entry root;
2480
2481 /* Number of PC relative relocs copied for this symbol. */
2482 struct elf32_arm_relocs_copied * relocs_copied;
2483
2484 /* We reference count Thumb references to a PLT entry separately,
2485 so that we can emit the Thumb trampoline only if needed. */
2486 bfd_signed_vma plt_thumb_refcount;
2487
2488 /* Some references from Thumb code may be eliminated by BL->BLX
2489 conversion, so record them separately. */
2490 bfd_signed_vma plt_maybe_thumb_refcount;
2491
2492 /* Since PLT entries have variable size if the Thumb prologue is
2493 used, we need to record the index into .got.plt instead of
2494 recomputing it from the PLT offset. */
2495 bfd_signed_vma plt_got_offset;
2496
2497 #define GOT_UNKNOWN 0
2498 #define GOT_NORMAL 1
2499 #define GOT_TLS_GD 2
2500 #define GOT_TLS_IE 4
2501 unsigned char tls_type;
2502
2503 /* The symbol marking the real symbol location for exported thumb
2504 symbols with Arm stubs. */
2505 struct elf_link_hash_entry *export_glue;
2506
2507 /* A pointer to the most recently used stub hash entry against this
2508 symbol. */
2509 struct elf32_arm_stub_hash_entry *stub_cache;
2510 };
2511
2512 /* Traverse an arm ELF linker hash table. */
2513 #define elf32_arm_link_hash_traverse(table, func, info) \
2514 (elf_link_hash_traverse \
2515 (&(table)->root, \
2516 (bfd_boolean (*) (struct elf_link_hash_entry *, void *)) (func), \
2517 (info)))
2518
2519 /* Get the ARM elf linker hash table from a link_info structure. */
2520 #define elf32_arm_hash_table(info) \
2521 (elf_hash_table_id ((struct elf_link_hash_table *) ((info)->hash)) \
2522 == ARM_ELF_DATA ? ((struct elf32_arm_link_hash_table *) ((info)->hash)) : NULL)
2523
2524 #define arm_stub_hash_lookup(table, string, create, copy) \
2525 ((struct elf32_arm_stub_hash_entry *) \
2526 bfd_hash_lookup ((table), (string), (create), (copy)))
2527
2528 /* Array to keep track of which stub sections have been created, and
2529 information on stub grouping. */
2530 struct map_stub
2531 {
2532 /* This is the section to which stubs in the group will be
2533 attached. */
2534 asection *link_sec;
2535 /* The stub section. */
2536 asection *stub_sec;
2537 };
2538
2539 /* ARM ELF linker hash table. */
2540 struct elf32_arm_link_hash_table
2541 {
2542 /* The main hash table. */
2543 struct elf_link_hash_table root;
2544
2545 /* The size in bytes of the section containing the Thumb-to-ARM glue. */
2546 bfd_size_type thumb_glue_size;
2547
2548 /* The size in bytes of the section containing the ARM-to-Thumb glue. */
2549 bfd_size_type arm_glue_size;
2550
2551 /* The size in bytes of section containing the ARMv4 BX veneers. */
2552 bfd_size_type bx_glue_size;
2553
2554 /* Offsets of ARMv4 BX veneers. Bit1 set if present, and Bit0 set when
2555 veneer has been populated. */
2556 bfd_vma bx_glue_offset[15];
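
/* Sketch of how the two flag bits above are expected to be used by the
   BX glue code elsewhere in this file (reg being the register operand
   of the offending BX; veneers are at least 4-byte aligned, so the low
   two bits of a genuine offset are free to carry flags):

     reserve:   bx_glue_offset[reg] = bx_glue_size | 2;
     populate:  bx_glue_offset[reg] |= 1;
     address:   offset = bx_glue_offset[reg] & ~(bfd_vma) 3;  */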
2557
2558 /* The size in bytes of the section containing glue for VFP11 erratum
2559 veneers. */
2560 bfd_size_type vfp11_erratum_glue_size;
2561
2562 /* A table of fix locations for Cortex-A8 Thumb-2 branch/TLB erratum. This
2563 holds Cortex-A8 erratum fix locations between elf32_arm_size_stubs() and
2564 elf32_arm_write_section(). */
2565 struct a8_erratum_fix *a8_erratum_fixes;
2566 unsigned int num_a8_erratum_fixes;
2567
2568 /* An arbitrary input BFD chosen to hold the glue sections. */
2569 bfd * bfd_of_glue_owner;
2570
2571 /* Nonzero to output a BE8 image. */
2572 int byteswap_code;
2573
2574 /* Zero if R_ARM_TARGET1 means R_ARM_ABS32.
2575 Nonzero if R_ARM_TARGET1 means R_ARM_REL32. */
2576 int target1_is_rel;
2577
2578 /* The relocation to use for R_ARM_TARGET2 relocations. */
2579 int target2_reloc;
2580
2581 /* 0 = Ignore R_ARM_V4BX.
2582 1 = Convert BX to MOV PC.
2583 2 = Generate v4 interworking stubs. */
2584 int fix_v4bx;
2585
2586 /* Whether we should fix the Cortex-A8 Thumb-2 branch/TLB erratum. */
2587 int fix_cortex_a8;
2588
2589 /* Nonzero if the ARM/Thumb BLX instructions are available for use. */
2590 int use_blx;
2591
2592 /* What sort of code sequences we should look for which may trigger the
2593 VFP11 denorm erratum. */
2594 bfd_arm_vfp11_fix vfp11_fix;
2595
2596 /* Global counter for the number of fixes we have emitted. */
2597 int num_vfp11_fixes;
2598
2599 /* Nonzero to force PIC branch veneers. */
2600 int pic_veneer;
2601
2602 /* The number of bytes in the initial entry in the PLT. */
2603 bfd_size_type plt_header_size;
2604
2605 /* The number of bytes in the subsequent PLT entries. */
2606 bfd_size_type plt_entry_size;
2607
2608 /* True if the target system is VxWorks. */
2609 int vxworks_p;
2610
2611 /* True if the target system is Symbian OS. */
2612 int symbian_p;
2613
2614 /* True if the target uses REL relocations. */
2615 int use_rel;
2616
2617 /* Short-cuts to get to dynamic linker sections. */
2618 asection *sgot;
2619 asection *sgotplt;
2620 asection *srelgot;
2621 asection *splt;
2622 asection *srelplt;
2623 asection *sdynbss;
2624 asection *srelbss;
2625
2626 /* The (unloaded but important) VxWorks .rela.plt.unloaded section. */
2627 asection *srelplt2;
2628
2629 /* Data for R_ARM_TLS_LDM32 relocations. */
2630 union
2631 {
2632 bfd_signed_vma refcount;
2633 bfd_vma offset;
2634 } tls_ldm_got;
2635
2636 /* Small local sym cache. */
2637 struct sym_cache sym_cache;
2638
2639 /* For convenience in allocate_dynrelocs. */
2640 bfd * obfd;
2641
2642 /* The stub hash table. */
2643 struct bfd_hash_table stub_hash_table;
2644
2645 /* Linker stub bfd. */
2646 bfd *stub_bfd;
2647
2648 /* Linker call-backs. */
2649 asection * (*add_stub_section) (const char *, asection *);
2650 void (*layout_sections_again) (void);
2651
2652 /* Array to keep track of which stub sections have been created, and
2653 information on stub grouping. */
2654 struct map_stub *stub_group;
2655
2656 /* Number of elements in stub_group. */
2657 int top_id;
2658
2659 /* Assorted information used by elf32_arm_size_stubs. */
2660 unsigned int bfd_count;
2661 int top_index;
2662 asection **input_list;
2663 };
2664
2665 /* Create an entry in an ARM ELF linker hash table. */
2666
2667 static struct bfd_hash_entry *
2668 elf32_arm_link_hash_newfunc (struct bfd_hash_entry * entry,
2669 struct bfd_hash_table * table,
2670 const char * string)
2671 {
2672 struct elf32_arm_link_hash_entry * ret =
2673 (struct elf32_arm_link_hash_entry *) entry;
2674
2675 /* Allocate the structure if it has not already been allocated by a
2676 subclass. */
2677 if (ret == NULL)
2678 ret = (struct elf32_arm_link_hash_entry *)
2679 bfd_hash_allocate (table, sizeof (struct elf32_arm_link_hash_entry));
2680 if (ret == NULL)
2681 return (struct bfd_hash_entry *) ret;
2682
2683 /* Call the allocation method of the superclass. */
2684 ret = ((struct elf32_arm_link_hash_entry *)
2685 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret,
2686 table, string));
2687 if (ret != NULL)
2688 {
2689 ret->relocs_copied = NULL;
2690 ret->tls_type = GOT_UNKNOWN;
2691 ret->plt_thumb_refcount = 0;
2692 ret->plt_maybe_thumb_refcount = 0;
2693 ret->plt_got_offset = -1;
2694 ret->export_glue = NULL;
2695
2696 ret->stub_cache = NULL;
2697 }
2698
2699 return (struct bfd_hash_entry *) ret;
2700 }
2701
2702 /* Initialize an entry in the stub hash table. */
2703
2704 static struct bfd_hash_entry *
2705 stub_hash_newfunc (struct bfd_hash_entry *entry,
2706 struct bfd_hash_table *table,
2707 const char *string)
2708 {
2709 /* Allocate the structure if it has not already been allocated by a
2710 subclass. */
2711 if (entry == NULL)
2712 {
2713 entry = (struct bfd_hash_entry *)
2714 bfd_hash_allocate (table, sizeof (struct elf32_arm_stub_hash_entry));
2715 if (entry == NULL)
2716 return entry;
2717 }
2718
2719 /* Call the allocation method of the superclass. */
2720 entry = bfd_hash_newfunc (entry, table, string);
2721 if (entry != NULL)
2722 {
2723 struct elf32_arm_stub_hash_entry *eh;
2724
2725 /* Initialize the local fields. */
2726 eh = (struct elf32_arm_stub_hash_entry *) entry;
2727 eh->stub_sec = NULL;
2728 eh->stub_offset = 0;
2729 eh->target_value = 0;
2730 eh->target_section = NULL;
2731 eh->target_addend = 0;
2732 eh->orig_insn = 0;
2733 eh->stub_type = arm_stub_none;
2734 eh->stub_size = 0;
2735 eh->stub_template = NULL;
2736 eh->stub_template_size = 0;
2737 eh->h = NULL;
2738 eh->id_sec = NULL;
2739 eh->output_name = NULL;
2740 }
2741
2742 return entry;
2743 }
2744
2745 /* Create .got, .gotplt, and .rel(a).got sections in DYNOBJ, and set up
2746 shortcuts to them in our hash table. */
2747
2748 static bfd_boolean
2749 create_got_section (bfd *dynobj, struct bfd_link_info *info)
2750 {
2751 struct elf32_arm_link_hash_table *htab;
2752
2753 htab = elf32_arm_hash_table (info);
2754 if (htab == NULL)
2755 return FALSE;
2756
2757 /* BPABI objects never have a GOT, or associated sections. */
2758 if (htab->symbian_p)
2759 return TRUE;
2760
2761 if (! _bfd_elf_create_got_section (dynobj, info))
2762 return FALSE;
2763
2764 htab->sgot = bfd_get_section_by_name (dynobj, ".got");
2765 htab->sgotplt = bfd_get_section_by_name (dynobj, ".got.plt");
2766 if (!htab->sgot || !htab->sgotplt)
2767 abort ();
2768
2769 htab->srelgot = bfd_get_section_by_name (dynobj,
2770 RELOC_SECTION (htab, ".got"));
2771 if (htab->srelgot == NULL)
2772 return FALSE;
2773 return TRUE;
2774 }
2775
2776 /* Create .plt, .rel(a).plt, .got, .got.plt, .rel(a).got, .dynbss, and
2777 .rel(a).bss sections in DYNOBJ, and set up shortcuts to them in our
2778 hash table. */
2779
2780 static bfd_boolean
2781 elf32_arm_create_dynamic_sections (bfd *dynobj, struct bfd_link_info *info)
2782 {
2783 struct elf32_arm_link_hash_table *htab;
2784
2785 htab = elf32_arm_hash_table (info);
2786 if (htab == NULL)
2787 return FALSE;
2788
2789 if (!htab->sgot && !create_got_section (dynobj, info))
2790 return FALSE;
2791
2792 if (!_bfd_elf_create_dynamic_sections (dynobj, info))
2793 return FALSE;
2794
2795 htab->splt = bfd_get_section_by_name (dynobj, ".plt");
2796 htab->srelplt = bfd_get_section_by_name (dynobj,
2797 RELOC_SECTION (htab, ".plt"));
2798 htab->sdynbss = bfd_get_section_by_name (dynobj, ".dynbss");
2799 if (!info->shared)
2800 htab->srelbss = bfd_get_section_by_name (dynobj,
2801 RELOC_SECTION (htab, ".bss"));
2802
2803 if (htab->vxworks_p)
2804 {
2805 if (!elf_vxworks_create_dynamic_sections (dynobj, info, &htab->srelplt2))
2806 return FALSE;
2807
2808 if (info->shared)
2809 {
2810 htab->plt_header_size = 0;
2811 htab->plt_entry_size
2812 = 4 * ARRAY_SIZE (elf32_arm_vxworks_shared_plt_entry);
2813 }
2814 else
2815 {
2816 htab->plt_header_size
2817 = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt0_entry);
2818 htab->plt_entry_size
2819 = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt_entry);
2820 }
2821 }
2822
2823 if (!htab->splt
2824 || !htab->srelplt
2825 || !htab->sdynbss
2826 || (!info->shared && !htab->srelbss))
2827 abort ();
2828
2829 return TRUE;
2830 }
2831
2832 /* Copy the extra info we tack onto an elf_link_hash_entry. */
2833
2834 static void
2835 elf32_arm_copy_indirect_symbol (struct bfd_link_info *info,
2836 struct elf_link_hash_entry *dir,
2837 struct elf_link_hash_entry *ind)
2838 {
2839 struct elf32_arm_link_hash_entry *edir, *eind;
2840
2841 edir = (struct elf32_arm_link_hash_entry *) dir;
2842 eind = (struct elf32_arm_link_hash_entry *) ind;
2843
2844 if (eind->relocs_copied != NULL)
2845 {
2846 if (edir->relocs_copied != NULL)
2847 {
2848 struct elf32_arm_relocs_copied **pp;
2849 struct elf32_arm_relocs_copied *p;
2850
2851 /* Add reloc counts against the indirect sym to the direct sym
2852 list. Merge any entries against the same section. */
2853 for (pp = &eind->relocs_copied; (p = *pp) != NULL; )
2854 {
2855 struct elf32_arm_relocs_copied *q;
2856
2857 for (q = edir->relocs_copied; q != NULL; q = q->next)
2858 if (q->section == p->section)
2859 {
2860 q->pc_count += p->pc_count;
2861 q->count += p->count;
2862 *pp = p->next;
2863 break;
2864 }
2865 if (q == NULL)
2866 pp = &p->next;
2867 }
2868 *pp = edir->relocs_copied;
2869 }
2870
2871 edir->relocs_copied = eind->relocs_copied;
2872 eind->relocs_copied = NULL;
2873 }
2874
2875 if (ind->root.type == bfd_link_hash_indirect)
2876 {
2877 /* Copy over PLT info. */
2878 edir->plt_thumb_refcount += eind->plt_thumb_refcount;
2879 eind->plt_thumb_refcount = 0;
2880 edir->plt_maybe_thumb_refcount += eind->plt_maybe_thumb_refcount;
2881 eind->plt_maybe_thumb_refcount = 0;
2882
2883 if (dir->got.refcount <= 0)
2884 {
2885 edir->tls_type = eind->tls_type;
2886 eind->tls_type = GOT_UNKNOWN;
2887 }
2888 }
2889
2890 _bfd_elf_link_hash_copy_indirect (info, dir, ind);
2891 }
2892
2893 /* Create an ARM elf linker hash table. */
2894
2895 static struct bfd_link_hash_table *
2896 elf32_arm_link_hash_table_create (bfd *abfd)
2897 {
2898 struct elf32_arm_link_hash_table *ret;
2899 bfd_size_type amt = sizeof (struct elf32_arm_link_hash_table);
2900
2901 ret = (struct elf32_arm_link_hash_table *) bfd_malloc (amt);
2902 if (ret == NULL)
2903 return NULL;
2904
2905 if (!_bfd_elf_link_hash_table_init (& ret->root, abfd,
2906 elf32_arm_link_hash_newfunc,
2907 sizeof (struct elf32_arm_link_hash_entry),
2908 ARM_ELF_DATA))
2909 {
2910 free (ret);
2911 return NULL;
2912 }
2913
2914 ret->sgot = NULL;
2915 ret->sgotplt = NULL;
2916 ret->srelgot = NULL;
2917 ret->splt = NULL;
2918 ret->srelplt = NULL;
2919 ret->sdynbss = NULL;
2920 ret->srelbss = NULL;
2921 ret->srelplt2 = NULL;
2922 ret->thumb_glue_size = 0;
2923 ret->arm_glue_size = 0;
2924 ret->bx_glue_size = 0;
2925 memset (ret->bx_glue_offset, 0, sizeof (ret->bx_glue_offset));
2926 ret->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
2927 ret->vfp11_erratum_glue_size = 0;
2928 ret->num_vfp11_fixes = 0;
2929 ret->fix_cortex_a8 = 0;
2930 ret->bfd_of_glue_owner = NULL;
2931 ret->byteswap_code = 0;
2932 ret->target1_is_rel = 0;
2933 ret->target2_reloc = R_ARM_NONE;
2934 #ifdef FOUR_WORD_PLT
2935 ret->plt_header_size = 16;
2936 ret->plt_entry_size = 16;
2937 #else
2938 ret->plt_header_size = 20;
2939 ret->plt_entry_size = 12;
2940 #endif
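/* A sketch of where those defaults come from: without FOUR_WORD_PLT
   the initial entry is the five-word elf32_arm_plt0_entry (5 * 4 = 20
   bytes) and each later entry is the three-word elf32_arm_plt_entry
   (3 * 4 = 12 bytes); with FOUR_WORD_PLT both tables hold four words
   (16 bytes).  These values are overridden later for VxWorks.  */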
2941 ret->fix_v4bx = 0;
2942 ret->use_blx = 0;
2943 ret->vxworks_p = 0;
2944 ret->symbian_p = 0;
2945 ret->use_rel = 1;
2946 ret->sym_cache.abfd = NULL;
2947 ret->obfd = abfd;
2948 ret->tls_ldm_got.refcount = 0;
2949 ret->stub_bfd = NULL;
2950 ret->add_stub_section = NULL;
2951 ret->layout_sections_again = NULL;
2952 ret->stub_group = NULL;
2953 ret->top_id = 0;
2954 ret->bfd_count = 0;
2955 ret->top_index = 0;
2956 ret->input_list = NULL;
2957
2958 if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc,
2959 sizeof (struct elf32_arm_stub_hash_entry)))
2960 {
2961 free (ret);
2962 return NULL;
2963 }
2964
2965 return &ret->root.root;
2966 }
2967
2968 /* Free the derived linker hash table. */
2969
2970 static void
2971 elf32_arm_hash_table_free (struct bfd_link_hash_table *hash)
2972 {
2973 struct elf32_arm_link_hash_table *ret
2974 = (struct elf32_arm_link_hash_table *) hash;
2975
2976 bfd_hash_table_free (&ret->stub_hash_table);
2977 _bfd_generic_link_hash_table_free (hash);
2978 }
2979
2980 /* Determine if we're dealing with a Thumb only architecture. */
2981
2982 static bfd_boolean
2983 using_thumb_only (struct elf32_arm_link_hash_table *globals)
2984 {
2985 int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
2986 Tag_CPU_arch);
2987 int profile;
2988
2989 if (arch == TAG_CPU_ARCH_V6_M || arch == TAG_CPU_ARCH_V6S_M)
2990 return TRUE;
2991
2992 if (arch != TAG_CPU_ARCH_V7 && arch != TAG_CPU_ARCH_V7E_M)
2993 return FALSE;
2994
2995 profile = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
2996 Tag_CPU_arch_profile);
2997
2998 return profile == 'M';
2999 }
3000
3001 /* Determine if we're dealing with a Thumb-2 object. */
3002
3003 static bfd_boolean
3004 using_thumb2 (struct elf32_arm_link_hash_table *globals)
3005 {
3006 int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3007 Tag_CPU_arch);
3008 return arch == TAG_CPU_ARCH_V6T2 || arch >= TAG_CPU_ARCH_V7;
3009 }
3010
3011 /* Determine what kind of NOPs are available. */
3012
3013 static bfd_boolean
3014 arch_has_arm_nop (struct elf32_arm_link_hash_table *globals)
3015 {
3016 const int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3017 Tag_CPU_arch);
3018 return arch == TAG_CPU_ARCH_V6T2
3019 || arch == TAG_CPU_ARCH_V6K
3020 || arch == TAG_CPU_ARCH_V7
3021 || arch == TAG_CPU_ARCH_V7E_M;
3022 }
3023
3024 static bfd_boolean
3025 arch_has_thumb2_nop (struct elf32_arm_link_hash_table *globals)
3026 {
3027 const int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3028 Tag_CPU_arch);
3029 return (arch == TAG_CPU_ARCH_V6T2 || arch == TAG_CPU_ARCH_V7
3030 || arch == TAG_CPU_ARCH_V7E_M);
3031 }
3032
3033 static bfd_boolean
3034 arm_stub_is_thumb (enum elf32_arm_stub_type stub_type)
3035 {
3036 switch (stub_type)
3037 {
3038 case arm_stub_long_branch_thumb_only:
3039 case arm_stub_long_branch_v4t_thumb_arm:
3040 case arm_stub_short_branch_v4t_thumb_arm:
3041 case arm_stub_long_branch_v4t_thumb_arm_pic:
3042 case arm_stub_long_branch_thumb_only_pic:
3043 return TRUE;
3044 case arm_stub_none:
3045 BFD_FAIL ();
3046 return FALSE;
3048 default:
3049 return FALSE;
3050 }
3051 }
3052
3053 /* Determine the type of stub needed, if any, for a call. */
3054
3055 static enum elf32_arm_stub_type
3056 arm_type_of_stub (struct bfd_link_info *info,
3057 asection *input_sec,
3058 const Elf_Internal_Rela *rel,
3059 int *actual_st_type,
3060 struct elf32_arm_link_hash_entry *hash,
3061 bfd_vma destination,
3062 asection *sym_sec,
3063 bfd *input_bfd,
3064 const char *name)
3065 {
3066 bfd_vma location;
3067 bfd_signed_vma branch_offset;
3068 unsigned int r_type;
3069 struct elf32_arm_link_hash_table * globals;
3070 int thumb2;
3071 int thumb_only;
3072 enum elf32_arm_stub_type stub_type = arm_stub_none;
3073 int use_plt = 0;
3074 int st_type = *actual_st_type;
3075
3076 /* We don't know the actual type of destination in case it is of
3077 type STT_SECTION: give up. */
3078 if (st_type == STT_SECTION)
3079 return stub_type;
3080
3081 globals = elf32_arm_hash_table (info);
3082 if (globals == NULL)
3083 return stub_type;
3084
3085 thumb_only = using_thumb_only (globals);
3086
3087 thumb2 = using_thumb2 (globals);
3088
3089 /* Determine where the call point is. */
3090 location = (input_sec->output_offset
3091 + input_sec->output_section->vma
3092 + rel->r_offset);
3093
3094 r_type = ELF32_R_TYPE (rel->r_info);
3095
3096 /* Keep a simpler condition, for the sake of clarity. */
3097 if (globals->splt != NULL
3098 && hash != NULL
3099 && hash->root.plt.offset != (bfd_vma) -1)
3100 {
3101 use_plt = 1;
3102
3103 /* Note when dealing with PLT entries: the main PLT stub is in
3104 ARM mode, so if the branch is in Thumb mode, another
3105 Thumb->ARM stub will be inserted later just before the ARM
3106 PLT stub. We don't take this extra distance into account
3107 here, because if a long branch stub is needed, we'll add a
3108 Thumb->Arm one and branch directly to the ARM PLT entry
3109 because it avoids spreading offset corrections in several
3110 places. */
3111
3112 destination = (globals->splt->output_section->vma
3113 + globals->splt->output_offset
3114 + hash->root.plt.offset);
3115 st_type = STT_FUNC;
3116 }
3117
3118 branch_offset = (bfd_signed_vma)(destination - location);
3119
3120 if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24)
3121 {
3122 /* Handle cases where:
3123 - this call goes too far (different Thumb/Thumb2 max
3124 distance)
3125 - it's a Thumb->Arm call and blx is not available, or it's a
3126 Thumb->Arm branch (not bl). A stub is needed in this case,
3127 but only if this call is not through a PLT entry. Indeed,
3128 PLT stubs handle mode switching already.
3129 */
3130 if ((!thumb2
3131 && (branch_offset > THM_MAX_FWD_BRANCH_OFFSET
3132 || (branch_offset < THM_MAX_BWD_BRANCH_OFFSET)))
3133 || (thumb2
3134 && (branch_offset > THM2_MAX_FWD_BRANCH_OFFSET
3135 || (branch_offset < THM2_MAX_BWD_BRANCH_OFFSET)))
3136 || ((st_type != STT_ARM_TFUNC)
3137 && (((r_type == R_ARM_THM_CALL) && !globals->use_blx)
3138 || (r_type == R_ARM_THM_JUMP24))
3139 && !use_plt))
3140 {
3141 if (st_type == STT_ARM_TFUNC)
3142 {
3143 /* Thumb to thumb. */
3144 if (!thumb_only)
3145 {
3146 stub_type = (info->shared | globals->pic_veneer)
3147 /* PIC stubs. */
3148 ? ((globals->use_blx
3149 && (r_type ==R_ARM_THM_CALL))
3150 /* V5T and above. Stub starts with ARM code, so
3151 we must be able to switch mode before
3152 reaching it, which is only possible for 'bl'
3153 (ie R_ARM_THM_CALL relocation). */
3154 ? arm_stub_long_branch_any_thumb_pic
3155 /* On V4T, use Thumb code only. */
3156 : arm_stub_long_branch_v4t_thumb_thumb_pic)
3157
3158 /* non-PIC stubs. */
3159 : ((globals->use_blx
3160 && (r_type ==R_ARM_THM_CALL))
3161 /* V5T and above. */
3162 ? arm_stub_long_branch_any_any
3163 /* V4T. */
3164 : arm_stub_long_branch_v4t_thumb_thumb);
3165 }
3166 else
3167 {
3168 stub_type = (info->shared | globals->pic_veneer)
3169 /* PIC stub. */
3170 ? arm_stub_long_branch_thumb_only_pic
3171 /* non-PIC stub. */
3172 : arm_stub_long_branch_thumb_only;
3173 }
3174 }
3175 else
3176 {
3177 /* Thumb to arm. */
3178 if (sym_sec != NULL
3179 && sym_sec->owner != NULL
3180 && !INTERWORK_FLAG (sym_sec->owner))
3181 {
3182 (*_bfd_error_handler)
3183 (_("%B(%s): warning: interworking not enabled.\n"
3184 " first occurrence: %B: Thumb call to ARM"),
3185 sym_sec->owner, input_bfd, name);
3186 }
3187
3188 stub_type = (info->shared | globals->pic_veneer)
3189 /* PIC stubs. */
3190 ? ((globals->use_blx
3191 && (r_type ==R_ARM_THM_CALL))
3192 /* V5T and above. */
3193 ? arm_stub_long_branch_any_arm_pic
3194 /* V4T PIC stub. */
3195 : arm_stub_long_branch_v4t_thumb_arm_pic)
3196
3197 /* non-PIC stubs. */
3198 : ((globals->use_blx
3199 && (r_type ==R_ARM_THM_CALL))
3200 /* V5T and above. */
3201 ? arm_stub_long_branch_any_any
3202 /* V4T. */
3203 : arm_stub_long_branch_v4t_thumb_arm);
3204
3205 /* Handle v4t short branches. */
3206 if ((stub_type == arm_stub_long_branch_v4t_thumb_arm)
3207 && (branch_offset <= THM_MAX_FWD_BRANCH_OFFSET)
3208 && (branch_offset >= THM_MAX_BWD_BRANCH_OFFSET))
3209 stub_type = arm_stub_short_branch_v4t_thumb_arm;
3210 }
3211 }
3212 }
3213 else if (r_type == R_ARM_CALL
3214 || r_type == R_ARM_JUMP24
3215 || r_type == R_ARM_PLT32)
3216 {
3217 if (st_type == STT_ARM_TFUNC)
3218 {
3219 /* Arm to thumb. */
3220
3221 if (sym_sec != NULL
3222 && sym_sec->owner != NULL
3223 && !INTERWORK_FLAG (sym_sec->owner))
3224 {
3225 (*_bfd_error_handler)
3226 (_("%B(%s): warning: interworking not enabled.\n"
3227 " first occurrence: %B: ARM call to Thumb"),
3228 sym_sec->owner, input_bfd, name);
3229 }
3230
3231 /* We have an extra 2 bytes of reach because of
3232 the mode change (bit 24 (H) of the BLX encoding). */
3233 if (branch_offset > (ARM_MAX_FWD_BRANCH_OFFSET + 2)
3234 || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET)
3235 || ((r_type == R_ARM_CALL) && !globals->use_blx)
3236 || (r_type == R_ARM_JUMP24)
3237 || (r_type == R_ARM_PLT32))
3238 {
3239 stub_type = (info->shared | globals->pic_veneer)
3240 /* PIC stubs. */
3241 ? ((globals->use_blx)
3242 /* V5T and above. */
3243 ? arm_stub_long_branch_any_thumb_pic
3244 /* V4T stub. */
3245 : arm_stub_long_branch_v4t_arm_thumb_pic)
3246
3247 /* non-PIC stubs. */
3248 : ((globals->use_blx)
3249 /* V5T and above. */
3250 ? arm_stub_long_branch_any_any
3251 /* V4T. */
3252 : arm_stub_long_branch_v4t_arm_thumb);
3253 }
3254 }
3255 else
3256 {
3257 /* Arm to arm. */
3258 if (branch_offset > ARM_MAX_FWD_BRANCH_OFFSET
3259 || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET))
3260 {
3261 stub_type = (info->shared | globals->pic_veneer)
3262 /* PIC stubs. */
3263 ? arm_stub_long_branch_any_arm_pic
3264 /* non-PIC stubs. */
3265 : arm_stub_long_branch_any_any;
3266 }
3267 }
3268 }
3269
3270 /* If a stub is needed, record the actual destination type. */
3271 if (stub_type != arm_stub_none)
3272 *actual_st_type = st_type;
3273
3274 return stub_type;
3275 }
3276
3277 /* Build a name for an entry in the stub hash table. */
3278
3279 static char *
3280 elf32_arm_stub_name (const asection *input_section,
3281 const asection *sym_sec,
3282 const struct elf32_arm_link_hash_entry *hash,
3283 const Elf_Internal_Rela *rel,
3284 enum elf32_arm_stub_type stub_type)
3285 {
3286 char *stub_name;
3287 bfd_size_type len;
3288
3289 if (hash)
3290 {
3291 len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 8 + 1 + 2 + 1;
3292 stub_name = (char *) bfd_malloc (len);
3293 if (stub_name != NULL)
3294 sprintf (stub_name, "%08x_%s+%x_%d",
3295 input_section->id & 0xffffffff,
3296 hash->root.root.root.string,
3297 (int) rel->r_addend & 0xffffffff,
3298 (int) stub_type);
3299 }
3300 else
3301 {
3302 len = 8 + 1 + 8 + 1 + 8 + 1 + 8 + 1 + 2 + 1;
3303 stub_name = (char *) bfd_malloc (len);
3304 if (stub_name != NULL)
3305 sprintf (stub_name, "%08x_%x:%x+%x_%d",
3306 input_section->id & 0xffffffff,
3307 sym_sec->id & 0xffffffff,
3308 (int) ELF32_R_SYM (rel->r_info) & 0xffffffff,
3309 (int) rel->r_addend & 0xffffffff,
3310 (int) stub_type);
3311 }
3312
3313 return stub_name;
3314 }
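
/* Example of the names produced above, with purely illustrative
   values: a call to "printf" from the section with id 0x25, addend 0,
   and stub type 1 yields

     "00000025_printf+0_1"

   while a reference to a local symbol uses the section-id/symbol-index
   form, e.g. "00000025_1a:7+0_1".  The trailing number is the stub
   type, which keeps different kinds of stub to the same destination
   distinct in the hash table.  */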
3315
3316 /* Look up an entry in the stub hash. Stub entries are cached because
3317 creating the stub name takes a bit of time. */
3318
3319 static struct elf32_arm_stub_hash_entry *
3320 elf32_arm_get_stub_entry (const asection *input_section,
3321 const asection *sym_sec,
3322 struct elf_link_hash_entry *hash,
3323 const Elf_Internal_Rela *rel,
3324 struct elf32_arm_link_hash_table *htab,
3325 enum elf32_arm_stub_type stub_type)
3326 {
3327 struct elf32_arm_stub_hash_entry *stub_entry;
3328 struct elf32_arm_link_hash_entry *h = (struct elf32_arm_link_hash_entry *) hash;
3329 const asection *id_sec;
3330
3331 if ((input_section->flags & SEC_CODE) == 0)
3332 return NULL;
3333
3334 /* If this input section is part of a group of sections sharing one
3335 stub section, then use the id of the first section in the group.
3336 Stub names need to include a section id, as there may well be
3337 more than one stub used to reach say, printf, and we need to
3338 distinguish between them. */
3339 id_sec = htab->stub_group[input_section->id].link_sec;
3340
3341 if (h != NULL && h->stub_cache != NULL
3342 && h->stub_cache->h == h
3343 && h->stub_cache->id_sec == id_sec
3344 && h->stub_cache->stub_type == stub_type)
3345 {
3346 stub_entry = h->stub_cache;
3347 }
3348 else
3349 {
3350 char *stub_name;
3351
3352 stub_name = elf32_arm_stub_name (id_sec, sym_sec, h, rel, stub_type);
3353 if (stub_name == NULL)
3354 return NULL;
3355
3356 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table,
3357 stub_name, FALSE, FALSE);
3358 if (h != NULL)
3359 h->stub_cache = stub_entry;
3360
3361 free (stub_name);
3362 }
3363
3364 return stub_entry;
3365 }
3366
3367 /* Find or create a stub section. Returns a pointer to the stub section, and
3368 the section to which the stub section will be attached (in *LINK_SEC_P).
3369 LINK_SEC_P may be NULL. */
3370
3371 static asection *
3372 elf32_arm_create_or_find_stub_sec (asection **link_sec_p, asection *section,
3373 struct elf32_arm_link_hash_table *htab)
3374 {
3375 asection *link_sec;
3376 asection *stub_sec;
3377
3378 link_sec = htab->stub_group[section->id].link_sec;
3379 stub_sec = htab->stub_group[section->id].stub_sec;
3380 if (stub_sec == NULL)
3381 {
3382 stub_sec = htab->stub_group[link_sec->id].stub_sec;
3383 if (stub_sec == NULL)
3384 {
3385 size_t namelen;
3386 bfd_size_type len;
3387 char *s_name;
3388
3389 namelen = strlen (link_sec->name);
3390 len = namelen + sizeof (STUB_SUFFIX);
3391 s_name = (char *) bfd_alloc (htab->stub_bfd, len);
3392 if (s_name == NULL)
3393 return NULL;
3394
3395 memcpy (s_name, link_sec->name, namelen);
3396 memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
3397 stub_sec = (*htab->add_stub_section) (s_name, link_sec);
3398 if (stub_sec == NULL)
3399 return NULL;
3400 htab->stub_group[link_sec->id].stub_sec = stub_sec;
3401 }
3402 htab->stub_group[section->id].stub_sec = stub_sec;
3403 }
3404
3405 if (link_sec_p)
3406 *link_sec_p = link_sec;
3407
3408 return stub_sec;
3409 }
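
/* Worked example of the naming above: if the group leader is an input
   section called ".text", then

     namelen = strlen (".text")            -- 5
     len     = namelen + sizeof (".stub")  -- 5 + 6 = 11 (includes NUL)

   and the stub section is created as ".text.stub", shared by every
   member of that group via stub_group[].stub_sec.  */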
3410
3411 /* Add a new stub entry to the stub hash. Not all fields of the new
3412 stub entry are initialised. */
3413
3414 static struct elf32_arm_stub_hash_entry *
3415 elf32_arm_add_stub (const char *stub_name,
3416 asection *section,
3417 struct elf32_arm_link_hash_table *htab)
3418 {
3419 asection *link_sec;
3420 asection *stub_sec;
3421 struct elf32_arm_stub_hash_entry *stub_entry;
3422
3423 stub_sec = elf32_arm_create_or_find_stub_sec (&link_sec, section, htab);
3424 if (stub_sec == NULL)
3425 return NULL;
3426
3427 /* Enter this entry into the linker stub hash table. */
3428 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
3429 TRUE, FALSE);
3430 if (stub_entry == NULL)
3431 {
3432 (*_bfd_error_handler) (_("%B: cannot create stub entry %s"),
3433 section->owner,
3434 stub_name);
3435 return NULL;
3436 }
3437
3438 stub_entry->stub_sec = stub_sec;
3439 stub_entry->stub_offset = 0;
3440 stub_entry->id_sec = link_sec;
3441
3442 return stub_entry;
3443 }
3444
3445 /* Store an Arm insn into an output section not processed by
3446 elf32_arm_write_section. */
3447
3448 static void
3449 put_arm_insn (struct elf32_arm_link_hash_table * htab,
3450 bfd * output_bfd, bfd_vma val, void * ptr)
3451 {
3452 if (htab->byteswap_code != bfd_little_endian (output_bfd))
3453 bfd_putl32 (val, ptr);
3454 else
3455 bfd_putb32 (val, ptr);
3456 }
3457
3458 /* Store a 16-bit Thumb insn into an output section not processed by
3459 elf32_arm_write_section. */
3460
3461 static void
3462 put_thumb_insn (struct elf32_arm_link_hash_table * htab,
3463 bfd * output_bfd, bfd_vma val, void * ptr)
3464 {
3465 if (htab->byteswap_code != bfd_little_endian (output_bfd))
3466 bfd_putl16 (val, ptr);
3467 else
3468 bfd_putb16 (val, ptr);
3469 }
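
/* Worked example of the rule used by the two helpers above: when
   writing a BE8 image (byteswap_code nonzero) to a big-endian output
   bfd, instructions must be emitted little-endian while data stays
   big-endian, so

     put_arm_insn (htab, output_bfd, 0xe12fff1c, ptr);   -- bx ip

   stores the bytes 1c ff 2f e1 when htab->byteswap_code differs from
   bfd_little_endian (output_bfd) (the BE8 case), and e1 2f ff 1c
   otherwise.  */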
3470
3471 static bfd_reloc_status_type elf32_arm_final_link_relocate
3472 (reloc_howto_type *, bfd *, bfd *, asection *, bfd_byte *,
3473 Elf_Internal_Rela *, bfd_vma, struct bfd_link_info *, asection *,
3474 const char *, int, struct elf_link_hash_entry *, bfd_boolean *, char **);
3475
3476 static unsigned int
3477 arm_stub_required_alignment (enum elf32_arm_stub_type stub_type)
3478 {
3479 switch (stub_type)
3480 {
3481 case arm_stub_a8_veneer_b_cond:
3482 case arm_stub_a8_veneer_b:
3483 case arm_stub_a8_veneer_bl:
3484 return 2;
3485
3486 case arm_stub_long_branch_any_any:
3487 case arm_stub_long_branch_v4t_arm_thumb:
3488 case arm_stub_long_branch_thumb_only:
3489 case arm_stub_long_branch_v4t_thumb_thumb:
3490 case arm_stub_long_branch_v4t_thumb_arm:
3491 case arm_stub_short_branch_v4t_thumb_arm:
3492 case arm_stub_long_branch_any_arm_pic:
3493 case arm_stub_long_branch_any_thumb_pic:
3494 case arm_stub_long_branch_v4t_thumb_thumb_pic:
3495 case arm_stub_long_branch_v4t_arm_thumb_pic:
3496 case arm_stub_long_branch_v4t_thumb_arm_pic:
3497 case arm_stub_long_branch_thumb_only_pic:
3498 case arm_stub_a8_veneer_blx:
3499 return 4;
3500
3501 default:
3502 abort (); /* Should be unreachable. */
3503 }
3504 }
3505
3506 static bfd_boolean
3507 arm_build_one_stub (struct bfd_hash_entry *gen_entry,
3508 void * in_arg)
3509 {
3510 #define MAXRELOCS 2
3511 struct elf32_arm_stub_hash_entry *stub_entry;
3512 struct elf32_arm_link_hash_table *globals;
3513 struct bfd_link_info *info;
3514 asection *stub_sec;
3515 bfd *stub_bfd;
3516 bfd_byte *loc;
3517 bfd_vma sym_value;
3518 int template_size;
3519 int size;
3520 const insn_sequence *template_sequence;
3521 int i;
3522 int stub_reloc_idx[MAXRELOCS] = {-1, -1};
3523 int stub_reloc_offset[MAXRELOCS] = {0, 0};
3524 int nrelocs = 0;
3525
3526 /* Massage our args to the form they really have. */
3527 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
3528 info = (struct bfd_link_info *) in_arg;
3529
3530 globals = elf32_arm_hash_table (info);
3531 if (globals == NULL)
3532 return FALSE;
3533
3534 stub_sec = stub_entry->stub_sec;
3535
3536 if ((globals->fix_cortex_a8 < 0)
3537 != (arm_stub_required_alignment (stub_entry->stub_type) == 2))
3538 /* We have to do less-strictly-aligned fixes last. */
3539 return TRUE;
3540
3541 /* Make a note of the offset within the stubs for this entry. */
3542 stub_entry->stub_offset = stub_sec->size;
3543 loc = stub_sec->contents + stub_entry->stub_offset;
3544
3545 stub_bfd = stub_sec->owner;
3546
3547 /* This is the address of the stub destination. */
3548 sym_value = (stub_entry->target_value
3549 + stub_entry->target_section->output_offset
3550 + stub_entry->target_section->output_section->vma);
3551
3552 template_sequence = stub_entry->stub_template;
3553 template_size = stub_entry->stub_template_size;
3554
3555 size = 0;
3556 for (i = 0; i < template_size; i++)
3557 {
3558 switch (template_sequence[i].type)
3559 {
3560 case THUMB16_TYPE:
3561 {
3562 bfd_vma data = (bfd_vma) template_sequence[i].data;
3563 if (template_sequence[i].reloc_addend != 0)
3564 {
3565 /* We've borrowed the reloc_addend field to mean we should
3566 insert a condition code into this (Thumb-1 branch)
3567 instruction. See THUMB16_BCOND_INSN. */
3568 BFD_ASSERT ((data & 0xff00) == 0xd000);
3569 data |= ((stub_entry->orig_insn >> 22) & 0xf) << 8;
3570 }
3571 bfd_put_16 (stub_bfd, data, loc + size);
3572 size += 2;
3573 }
3574 break;
3575
3576 case THUMB32_TYPE:
3577 bfd_put_16 (stub_bfd,
3578 (template_sequence[i].data >> 16) & 0xffff,
3579 loc + size);
3580 bfd_put_16 (stub_bfd, template_sequence[i].data & 0xffff,
3581 loc + size + 2);
3582 if (template_sequence[i].r_type != R_ARM_NONE)
3583 {
3584 stub_reloc_idx[nrelocs] = i;
3585 stub_reloc_offset[nrelocs++] = size;
3586 }
3587 size += 4;
3588 break;
3589
3590 case ARM_TYPE:
3591 bfd_put_32 (stub_bfd, template_sequence[i].data,
3592 loc + size);
3593 /* Handle cases where the target is encoded within the
3594 instruction. */
3595 if (template_sequence[i].r_type == R_ARM_JUMP24)
3596 {
3597 stub_reloc_idx[nrelocs] = i;
3598 stub_reloc_offset[nrelocs++] = size;
3599 }
3600 size += 4;
3601 break;
3602
3603 case DATA_TYPE:
3604 bfd_put_32 (stub_bfd, template_sequence[i].data, loc + size);
3605 stub_reloc_idx[nrelocs] = i;
3606 stub_reloc_offset[nrelocs++] = size;
3607 size += 4;
3608 break;
3609
3610 default:
3611 BFD_FAIL ();
3612 return FALSE;
3613 }
3614 }
3615
3616 stub_sec->size += size;
3617
3618 /* Stub size has already been computed in arm_size_one_stub. Check
3619 consistency. */
3620 BFD_ASSERT (size == stub_entry->stub_size);
3621
3622 /* Destination is Thumb. Force bit 0 to 1 to reflect this. */
3623 if (stub_entry->st_type == STT_ARM_TFUNC)
3624 sym_value |= 1;
3625
3626 /* Assume each stub has at least one and at most MAXRELOCS entries
3627 to relocate. */
3628 BFD_ASSERT (nrelocs != 0 && nrelocs <= MAXRELOCS);
3629
3630 for (i = 0; i < nrelocs; i++)
3631 if (template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_JUMP24
3632 || template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_JUMP19
3633 || template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_CALL
3634 || template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_XPC22)
3635 {
3636 Elf_Internal_Rela rel;
3637 bfd_boolean unresolved_reloc;
3638 char *error_message;
3639 int sym_flags
3640 = (template_sequence[stub_reloc_idx[i]].r_type != R_ARM_THM_XPC22)
3641 ? STT_ARM_TFUNC : 0;
3642 bfd_vma points_to = sym_value + stub_entry->target_addend;
3643
3644 rel.r_offset = stub_entry->stub_offset + stub_reloc_offset[i];
3645 rel.r_info = ELF32_R_INFO (0,
3646 template_sequence[stub_reloc_idx[i]].r_type);
3647 rel.r_addend = template_sequence[stub_reloc_idx[i]].reloc_addend;
3648
3649 if (stub_entry->stub_type == arm_stub_a8_veneer_b_cond && i == 0)
3650 /* The first relocation in the elf32_arm_stub_a8_veneer_b_cond[]
3651 template should refer back to the instruction after the original
3652 branch. */
3653 points_to = sym_value;
3654
3655 /* There may be unintended consequences if this is not true. */
3656 BFD_ASSERT (stub_entry->h == NULL);
3657
3658 /* Note: _bfd_final_link_relocate doesn't handle these relocations
3659 properly. We should probably use this function unconditionally,
3660 rather than only for certain relocations listed in the enclosing
3661 conditional, for the sake of consistency. */
3662 elf32_arm_final_link_relocate (elf32_arm_howto_from_type
3663 (template_sequence[stub_reloc_idx[i]].r_type),
3664 stub_bfd, info->output_bfd, stub_sec, stub_sec->contents, &rel,
3665 points_to, info, stub_entry->target_section, "", sym_flags,
3666 (struct elf_link_hash_entry *) stub_entry->h, &unresolved_reloc,
3667 &error_message);
3668 }
3669 else
3670 {
3671 Elf_Internal_Rela rel;
3672 bfd_boolean unresolved_reloc;
3673 char *error_message;
3674 bfd_vma points_to = sym_value + stub_entry->target_addend
3675 + template_sequence[stub_reloc_idx[i]].reloc_addend;
3676
3677 rel.r_offset = stub_entry->stub_offset + stub_reloc_offset[i];
3678 rel.r_info = ELF32_R_INFO (0,
3679 template_sequence[stub_reloc_idx[i]].r_type);
3680 rel.r_addend = 0;
3681
3682 elf32_arm_final_link_relocate (elf32_arm_howto_from_type
3683 (template_sequence[stub_reloc_idx[i]].r_type),
3684 stub_bfd, info->output_bfd, stub_sec, stub_sec->contents, &rel,
3685 points_to, info, stub_entry->target_section, "", stub_entry->st_type,
3686 (struct elf_link_hash_entry *) stub_entry->h, &unresolved_reloc,
3687 &error_message);
3688 }
3689
3690 return TRUE;
3691 #undef MAXRELOCS
3692 }
3693
3694 /* Look up the template and template size for a stub. The return value
3695 is the size of the stub's instructions in bytes. */
3696
3697 static unsigned int
3698 find_stub_size_and_template (enum elf32_arm_stub_type stub_type,
3699 const insn_sequence **stub_template,
3700 int *stub_template_size)
3701 {
3702 const insn_sequence *template_sequence = NULL;
3703 int template_size = 0, i;
3704 unsigned int size;
3705
3706 template_sequence = stub_definitions[stub_type].template_sequence;
3707 if (stub_template)
3708 *stub_template = template_sequence;
3709
3710 template_size = stub_definitions[stub_type].template_size;
3711 if (stub_template_size)
3712 *stub_template_size = template_size;
3713
3714 size = 0;
3715 for (i = 0; i < template_size; i++)
3716 {
3717 switch (template_sequence[i].type)
3718 {
3719 case THUMB16_TYPE:
3720 size += 2;
3721 break;
3722
3723 case ARM_TYPE:
3724 case THUMB32_TYPE:
3725 case DATA_TYPE:
3726 size += 4;
3727 break;
3728
3729 default:
3730 BFD_FAIL ();
3731 return 0;
3732 }
3733 }
3734
3735 return size;
3736 }
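/* A minimal usage sketch (illustrative only, not called from anywhere in
   this file): querying the byte size of one of the stub templates defined
   earlier.  Both out-parameters may be NULL when only the size is wanted,
   as the call in elf32_arm_size_stubs below shows.  */

static unsigned int ATTRIBUTE_UNUSED
example_stub_size_query (void)
{
  /* This is the unpadded size; arm_size_one_stub below rounds each
     stub's contribution up to a multiple of 8 bytes when sizing the
     stub sections.  */
  return find_stub_size_and_template (arm_stub_long_branch_any_any,
                                      NULL, NULL);
}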
3737
3738 /* As above, but don't actually build the stub. Just bump the size of
3739 its stub section so we know how much space the stubs will need. */
3740
3741 static bfd_boolean
3742 arm_size_one_stub (struct bfd_hash_entry *gen_entry,
3743 void *in_arg ATTRIBUTE_UNUSED)
3744 {
3745 struct elf32_arm_stub_hash_entry *stub_entry;
3746 const insn_sequence *template_sequence;
3747 int template_size, size;
3748
3749 /* Massage our args to the form they really have. */
3750 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
3751
3752 BFD_ASSERT((stub_entry->stub_type > arm_stub_none)
3753 && stub_entry->stub_type < ARRAY_SIZE(stub_definitions));
3754
3755 size = find_stub_size_and_template (stub_entry->stub_type, &template_sequence,
3756 &template_size);
3757
3758 stub_entry->stub_size = size;
3759 stub_entry->stub_template = template_sequence;
3760 stub_entry->stub_template_size = template_size;
3761
3762 size = (size + 7) & ~7;
3763 stub_entry->stub_sec->size += size;
3764
3765 return TRUE;
3766 }
3767
3768 /* External entry points for sizing and building linker stubs. */
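/* The expected calling sequence (driven from the ARM emulation code in
   ld) is broadly: elf32_arm_setup_section_lists, then
   elf32_arm_next_input_section for each input section, then
   elf32_arm_size_stubs, and finally elf32_arm_build_stubs once the
   section layout has settled.  */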
3769
3770 /* Set up various things so that we can make a list of input sections
3771 for each output section included in the link. Returns -1 on error,
3772 0 when no stubs will be needed, and 1 on success. */
3773
3774 int
3775 elf32_arm_setup_section_lists (bfd *output_bfd,
3776 struct bfd_link_info *info)
3777 {
3778 bfd *input_bfd;
3779 unsigned int bfd_count;
3780 int top_id, top_index;
3781 asection *section;
3782 asection **input_list, **list;
3783 bfd_size_type amt;
3784 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
3785
3786 if (htab == NULL)
3787 return 0;
3788 if (! is_elf_hash_table (htab))
3789 return 0;
3790
3791 /* Count the number of input BFDs and find the top input section id. */
3792 for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0;
3793 input_bfd != NULL;
3794 input_bfd = input_bfd->link_next)
3795 {
3796 bfd_count += 1;
3797 for (section = input_bfd->sections;
3798 section != NULL;
3799 section = section->next)
3800 {
3801 if (top_id < section->id)
3802 top_id = section->id;
3803 }
3804 }
3805 htab->bfd_count = bfd_count;
3806
3807 amt = sizeof (struct map_stub) * (top_id + 1);
3808 htab->stub_group = (struct map_stub *) bfd_zmalloc (amt);
3809 if (htab->stub_group == NULL)
3810 return -1;
3811 htab->top_id = top_id;
3812
3813 /* We can't use output_bfd->section_count here to find the top output
3814 section index as some sections may have been removed, and
3815 _bfd_strip_section_from_output doesn't renumber the indices. */
3816 for (section = output_bfd->sections, top_index = 0;
3817 section != NULL;
3818 section = section->next)
3819 {
3820 if (top_index < section->index)
3821 top_index = section->index;
3822 }
3823
3824 htab->top_index = top_index;
3825 amt = sizeof (asection *) * (top_index + 1);
3826 input_list = (asection **) bfd_malloc (amt);
3827 htab->input_list = input_list;
3828 if (input_list == NULL)
3829 return -1;
3830
3831 /* For sections we aren't interested in, mark their entries with a
3832 value we can check later. */
3833 list = input_list + top_index;
3834 do
3835 *list = bfd_abs_section_ptr;
3836 while (list-- != input_list);
3837
3838 for (section = output_bfd->sections;
3839 section != NULL;
3840 section = section->next)
3841 {
3842 if ((section->flags & SEC_CODE) != 0)
3843 input_list[section->index] = NULL;
3844 }
3845
3846 return 1;
3847 }
3848
3849 /* The linker repeatedly calls this function for each input section,
3850 in the order that input sections are linked into output sections.
3851 Build lists of input sections to determine groupings between which
3852 we may insert linker stubs. */
3853
3854 void
3855 elf32_arm_next_input_section (struct bfd_link_info *info,
3856 asection *isec)
3857 {
3858 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
3859
3860 if (htab == NULL)
3861 return;
3862
3863 if (isec->output_section->index <= htab->top_index)
3864 {
3865 asection **list = htab->input_list + isec->output_section->index;
3866
3867 if (*list != bfd_abs_section_ptr && (isec->flags & SEC_CODE) != 0)
3868 {
3869 /* Steal the link_sec pointer for our list. */
3870 #define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
3871 /* This happens to make the list in reverse order,
3872 which we reverse later. */
3873 PREV_SEC (isec) = *list;
3874 *list = isec;
3875 }
3876 }
3877 }
3878
3879 /* See whether we can group stub sections together. Grouping stub
3880 sections may result in fewer stubs. More importantly, we need to
3881 put all .init* and .fini* stubs at the end of the .init or
3882 .fini output sections respectively, because glibc splits the
3883 _init and _fini functions into multiple parts. Putting a stub in
3884 the middle of a function is not a good idea. */
3885
3886 static void
3887 group_sections (struct elf32_arm_link_hash_table *htab,
3888 bfd_size_type stub_group_size,
3889 bfd_boolean stubs_always_after_branch)
3890 {
3891 asection **list = htab->input_list;
3892
3893 do
3894 {
3895 asection *tail = *list;
3896 asection *head;
3897
3898 if (tail == bfd_abs_section_ptr)
3899 continue;
3900
3901 /* Reverse the list: we must avoid placing stubs at the
3902 beginning of the section because the beginning of the text
3903 section may be required for an interrupt vector in bare metal
3904 code. */
3905 #define NEXT_SEC PREV_SEC
3906 head = NULL;
3907 while (tail != NULL)
3908 {
3909 /* Pop from tail. */
3910 asection *item = tail;
3911 tail = PREV_SEC (item);
3912
3913 /* Push on head. */
3914 NEXT_SEC (item) = head;
3915 head = item;
3916 }
3917
3918 while (head != NULL)
3919 {
3920 asection *curr;
3921 asection *next;
3922 bfd_vma stub_group_start = head->output_offset;
3923 bfd_vma end_of_next;
3924
3925 curr = head;
3926 while (NEXT_SEC (curr) != NULL)
3927 {
3928 next = NEXT_SEC (curr);
3929 end_of_next = next->output_offset + next->size;
3930 if (end_of_next - stub_group_start >= stub_group_size)
3931 /* End of NEXT is too far from start, so stop. */
3932 break;
3933 /* Add NEXT to the group. */
3934 curr = next;
3935 }
3936
3937 /* OK, the size from the start of the group to the end of CURR
3938 is less than stub_group_size and thus can be handled by one
3939 stub section. (Or the head section is itself larger than
3940 stub_group_size, in which case we may be toast.)
3941 We should really be keeping track of the total size of
3942 stubs added here, as stubs contribute to the final output
3943 section size. */
3944 do
3945 {
3946 next = NEXT_SEC (head);
3947 /* Set up this stub group. */
3948 htab->stub_group[head->id].link_sec = curr;
3949 }
3950 while (head != curr && (head = next) != NULL);
3951
3952 /* But wait, there's more! Input sections up to stub_group_size
3953 bytes after the stub section can be handled by it too. */
3954 if (!stubs_always_after_branch)
3955 {
3956 stub_group_start = curr->output_offset + curr->size;
3957
3958 while (next != NULL)
3959 {
3960 end_of_next = next->output_offset + next->size;
3961 if (end_of_next - stub_group_start >= stub_group_size)
3962 /* End of NEXT is too far from stubs, so stop. */
3963 break;
3964 /* Add NEXT to the stub group. */
3965 head = next;
3966 next = NEXT_SEC (head);
3967 htab->stub_group[head->id].link_sec = curr;
3968 }
3969 }
3970 head = next;
3971 }
3972 }
3973 while (list++ != htab->input_list + htab->top_index);
3974
3975 free (htab->input_list);
3976 #undef PREV_SEC
3977 #undef NEXT_SEC
3978 }
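/* For example, with a 64 KB stub_group_size and consecutive input sections
   A, B and C of 40 KB, 30 KB and 50 KB, the first pass groups only A
   (A plus B would span about 70 KB), but because B still ends within
   64 KB of A's end it is attached to A's stub section as well (assuming
   stubs are not forced to come after the branch), while C starts a group
   of its own.  */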
3979
3980 /* Comparison function for sorting/searching relocations relating to Cortex-A8
3981 erratum fix. */
3982
3983 static int
3984 a8_reloc_compare (const void *a, const void *b)
3985 {
3986 const struct a8_erratum_reloc *ra = (const struct a8_erratum_reloc *) a;
3987 const struct a8_erratum_reloc *rb = (const struct a8_erratum_reloc *) b;
3988
3989 if (ra->from < rb->from)
3990 return -1;
3991 else if (ra->from > rb->from)
3992 return 1;
3993 else
3994 return 0;
3995 }
3996
3997 static struct elf_link_hash_entry *find_thumb_glue (struct bfd_link_info *,
3998 const char *, char **);
3999
4000 /* Helper function to scan code for sequences which might trigger the Cortex-A8
4001 branch/TLB erratum. Fill in the table described by A8_FIXES_P,
4002 NUM_A8_FIXES_P, A8_FIX_TABLE_SIZE_P. Returns true if an error occurs, false
4003 otherwise. */
4004
4005 static bfd_boolean
4006 cortex_a8_erratum_scan (bfd *input_bfd,
4007 struct bfd_link_info *info,
4008 struct a8_erratum_fix **a8_fixes_p,
4009 unsigned int *num_a8_fixes_p,
4010 unsigned int *a8_fix_table_size_p,
4011 struct a8_erratum_reloc *a8_relocs,
4012 unsigned int num_a8_relocs,
4013 unsigned prev_num_a8_fixes,
4014 bfd_boolean *stub_changed_p)
4015 {
4016 asection *section;
4017 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
4018 struct a8_erratum_fix *a8_fixes = *a8_fixes_p;
4019 unsigned int num_a8_fixes = *num_a8_fixes_p;
4020 unsigned int a8_fix_table_size = *a8_fix_table_size_p;
4021
4022 if (htab == NULL)
4023 return FALSE;
4024
4025 for (section = input_bfd->sections;
4026 section != NULL;
4027 section = section->next)
4028 {
4029 bfd_byte *contents = NULL;
4030 struct _arm_elf_section_data *sec_data;
4031 unsigned int span;
4032 bfd_vma base_vma;
4033
4034 if (elf_section_type (section) != SHT_PROGBITS
4035 || (elf_section_flags (section) & SHF_EXECINSTR) == 0
4036 || (section->flags & SEC_EXCLUDE) != 0
4037 || (section->sec_info_type == ELF_INFO_TYPE_JUST_SYMS)
4038 || (section->output_section == bfd_abs_section_ptr))
4039 continue;
4040
4041 base_vma = section->output_section->vma + section->output_offset;
4042
4043 if (elf_section_data (section)->this_hdr.contents != NULL)
4044 contents = elf_section_data (section)->this_hdr.contents;
4045 else if (! bfd_malloc_and_get_section (input_bfd, section, &contents))
4046 return TRUE;
4047
4048 sec_data = elf32_arm_section_data (section);
4049
4050 for (span = 0; span < sec_data->mapcount; span++)
4051 {
4052 unsigned int span_start = sec_data->map[span].vma;
4053 unsigned int span_end = (span == sec_data->mapcount - 1)
4054 ? section->size : sec_data->map[span + 1].vma;
4055 unsigned int i;
4056 char span_type = sec_data->map[span].type;
4057 bfd_boolean last_was_32bit = FALSE, last_was_branch = FALSE;
4058
4059 if (span_type != 't')
4060 continue;
4061
4062 /* Span is entirely within a single 4KB region: skip scanning. */
4063 if (((base_vma + span_start) & ~0xfff)
4064 == ((base_vma + span_end) & ~0xfff))
4065 continue;
4066
4067 /* Scan for 32-bit Thumb-2 branches which span two 4K regions, where:
4068
4069 * The opcode is BLX.W, BL.W, B.W, Bcc.W
4070 * The branch target is in the same 4KB region as the
4071 first half of the branch.
4072 * The instruction before the branch is a 32-bit
4073 length non-branch instruction. */
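/* For example, a 32-bit branch whose first halfword sits at an address
   ending in 0xffe has its two halfwords in different 4KB pages; if the
   branch target also lies in the first of those pages, the sequence is
   a potential erratum trigger (see the 0xffe and page-mask checks
   below).  */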
4074 for (i = span_start; i < span_end;)
4075 {
4076 unsigned int insn = bfd_getl16 (&contents[i]);
4077 bfd_boolean insn_32bit = FALSE, is_blx = FALSE, is_b = FALSE;
4078 bfd_boolean is_bl = FALSE, is_bcc = FALSE, is_32bit_branch;
4079
4080 if ((insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000)
4081 insn_32bit = TRUE;
4082
4083 if (insn_32bit)
4084 {
4085 /* Load the rest of the insn (in manual-friendly order). */
4086 insn = (insn << 16) | bfd_getl16 (&contents[i + 2]);
4087
4088 /* Encoding T4: B<c>.W. */
4089 is_b = (insn & 0xf800d000) == 0xf0009000;
4090 /* Encoding T1: BL<c>.W. */
4091 is_bl = (insn & 0xf800d000) == 0xf000d000;
4092 /* Encoding T2: BLX<c>.W. */
4093 is_blx = (insn & 0xf800d000) == 0xf000c000;
4094 /* Encoding T3: B<c>.W (not permitted in IT block). */
4095 is_bcc = (insn & 0xf800d000) == 0xf0008000
4096 && (insn & 0x07f00000) != 0x03800000;
4097 }
4098
4099 is_32bit_branch = is_b || is_bl || is_blx || is_bcc;
4100
4101 if (((base_vma + i) & 0xfff) == 0xffe
4102 && insn_32bit
4103 && is_32bit_branch
4104 && last_was_32bit
4105 && ! last_was_branch)
4106 {
4107 bfd_signed_vma offset = 0;
4108 bfd_boolean force_target_arm = FALSE;
4109 bfd_boolean force_target_thumb = FALSE;
4110 bfd_vma target;
4111 enum elf32_arm_stub_type stub_type = arm_stub_none;
4112 struct a8_erratum_reloc key, *found;
4113
4114 key.from = base_vma + i;
4115 found = (struct a8_erratum_reloc *)
4116 bsearch (&key, a8_relocs, num_a8_relocs,
4117 sizeof (struct a8_erratum_reloc),
4118 &a8_reloc_compare);
4119
4120 if (found)
4121 {
4122 char *error_message = NULL;
4123 struct elf_link_hash_entry *entry;
4124 bfd_boolean use_plt = FALSE;
4125
4126 /* We don't care about the error returned from this
4127 function, only whether there is glue or not. */
4128 entry = find_thumb_glue (info, found->sym_name,
4129 &error_message);
4130
4131 if (entry)
4132 found->non_a8_stub = TRUE;
4133
4134 /* Keep a simpler condition, for the sake of clarity. */
4135 if (htab->splt != NULL && found->hash != NULL
4136 && found->hash->root.plt.offset != (bfd_vma) -1)
4137 use_plt = TRUE;
4138
4139 if (found->r_type == R_ARM_THM_CALL)
4140 {
4141 if (found->st_type != STT_ARM_TFUNC || use_plt)
4142 force_target_arm = TRUE;
4143 else
4144 force_target_thumb = TRUE;
4145 }
4146 }
4147
4148 /* Check if we have an offending branch instruction. */
4149
4150 if (found && found->non_a8_stub)
4151 /* We've already made a stub for this instruction, e.g.
4152 it's a long branch or a Thumb->ARM stub. Assume that
4153 stub will suffice to work around the A8 erratum (see
4154 setting of always_after_branch above). */
4155 ;
4156 else if (is_bcc)
4157 {
4158 offset = (insn & 0x7ff) << 1;
4159 offset |= (insn & 0x3f0000) >> 4;
4160 offset |= (insn & 0x2000) ? 0x40000 : 0;
4161 offset |= (insn & 0x800) ? 0x80000 : 0;
4162 offset |= (insn & 0x4000000) ? 0x100000 : 0;
4163 if (offset & 0x100000)
4164 offset |= ~ ((bfd_signed_vma) 0xfffff);
4165 stub_type = arm_stub_a8_veneer_b_cond;
4166 }
4167 else if (is_b || is_bl || is_blx)
4168 {
4169 int s = (insn & 0x4000000) != 0;
4170 int j1 = (insn & 0x2000) != 0;
4171 int j2 = (insn & 0x800) != 0;
4172 int i1 = !(j1 ^ s);
4173 int i2 = !(j2 ^ s);
4174
4175 offset = (insn & 0x7ff) << 1;
4176 offset |= (insn & 0x3ff0000) >> 4;
4177 offset |= i2 << 22;
4178 offset |= i1 << 23;
4179 offset |= s << 24;
4180 if (offset & 0x1000000)
4181 offset |= ~ ((bfd_signed_vma) 0xffffff);
4182
4183 if (is_blx)
4184 offset &= ~ ((bfd_signed_vma) 3);
4185
4186 stub_type = is_blx ? arm_stub_a8_veneer_blx :
4187 is_bl ? arm_stub_a8_veneer_bl : arm_stub_a8_veneer_b;
4188 }
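/* The arithmetic above reassembles the signed 25-bit branch offset from
   the split S/J1/J2/imm10/imm11 fields of the 32-bit Thumb encoding;
   for instance 0xf000f800 (a BL with zero displacement) decodes to an
   offset of 0.  */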
4189
4190 if (stub_type != arm_stub_none)
4191 {
4192 bfd_vma pc_for_insn = base_vma + i + 4;
4193
4194 /* The original instruction is a BL, but the target is
4195 an ARM instruction. If we were not making a stub,
4196 the BL would have been converted to a BLX. Use the
4197 BLX stub instead in that case. */
4198 if (htab->use_blx && force_target_arm
4199 && stub_type == arm_stub_a8_veneer_bl)
4200 {
4201 stub_type = arm_stub_a8_veneer_blx;
4202 is_blx = TRUE;
4203 is_bl = FALSE;
4204 }
4205 /* Conversely, if the original instruction was
4206 BLX but the target is Thumb mode, use the BL
4207 stub. */
4208 else if (force_target_thumb
4209 && stub_type == arm_stub_a8_veneer_blx)
4210 {
4211 stub_type = arm_stub_a8_veneer_bl;
4212 is_blx = FALSE;
4213 is_bl = TRUE;
4214 }
4215
4216 if (is_blx)
4217 pc_for_insn &= ~ ((bfd_vma) 3);
4218
4219 /* If we found a relocation, use the proper destination,
4220 not the offset in the (unrelocated) instruction.
4221 Note this is always done if we switched the stub type
4222 above. */
4223 if (found)
4224 offset =
4225 (bfd_signed_vma) (found->destination - pc_for_insn);
4226
4227 target = pc_for_insn + offset;
4228
4229 /* The BLX stub is ARM-mode code. Adjust the offset to
4230 take the different PC value (+8 instead of +4) into
4231 account. */
4232 if (stub_type == arm_stub_a8_veneer_blx)
4233 offset += 4;
4234
4235 if (((base_vma + i) & ~0xfff) == (target & ~0xfff))
4236 {
4237 char *stub_name = NULL;
4238
4239 if (num_a8_fixes == a8_fix_table_size)
4240 {
4241 a8_fix_table_size *= 2;
4242 a8_fixes = (struct a8_erratum_fix *)
4243 bfd_realloc (a8_fixes,
4244 sizeof (struct a8_erratum_fix)
4245 * a8_fix_table_size);
4246 }
4247
4248 if (num_a8_fixes < prev_num_a8_fixes)
4249 {
4250 /* If we're doing a subsequent scan,
4251 check if we've found the same fix as
4252 before, and try and reuse the stub
4253 name. */
4254 stub_name = a8_fixes[num_a8_fixes].stub_name;
4255 if ((a8_fixes[num_a8_fixes].section != section)
4256 || (a8_fixes[num_a8_fixes].offset != i))
4257 {
4258 free (stub_name);
4259 stub_name = NULL;
4260 *stub_changed_p = TRUE;
4261 }
4262 }
4263
4264 if (!stub_name)
4265 {
4266 stub_name = (char *) bfd_malloc (8 + 1 + 8 + 1);
4267 if (stub_name != NULL)
4268 sprintf (stub_name, "%x:%x", section->id, i);
4269 }
4270
4271 a8_fixes[num_a8_fixes].input_bfd = input_bfd;
4272 a8_fixes[num_a8_fixes].section = section;
4273 a8_fixes[num_a8_fixes].offset = i;
4274 a8_fixes[num_a8_fixes].addend = offset;
4275 a8_fixes[num_a8_fixes].orig_insn = insn;
4276 a8_fixes[num_a8_fixes].stub_name = stub_name;
4277 a8_fixes[num_a8_fixes].stub_type = stub_type;
4278 a8_fixes[num_a8_fixes].st_type =
4279 is_blx ? STT_FUNC : STT_ARM_TFUNC;
4280
4281 num_a8_fixes++;
4282 }
4283 }
4284 }
4285
4286 i += insn_32bit ? 4 : 2;
4287 last_was_32bit = insn_32bit;
4288 last_was_branch = is_32bit_branch;
4289 }
4290 }
4291
4292 if (elf_section_data (section)->this_hdr.contents == NULL)
4293 free (contents);
4294 }
4295
4296 *a8_fixes_p = a8_fixes;
4297 *num_a8_fixes_p = num_a8_fixes;
4298 *a8_fix_table_size_p = a8_fix_table_size;
4299
4300 return FALSE;
4301 }
4302
4303 /* Determine and set the size of the stub section for a final link.
4304
4305 The basic idea here is to examine all the relocations looking for
4306 PC-relative calls to a target that is unreachable with a "bl"
4307 instruction. */
4308
4309 bfd_boolean
4310 elf32_arm_size_stubs (bfd *output_bfd,
4311 bfd *stub_bfd,
4312 struct bfd_link_info *info,
4313 bfd_signed_vma group_size,
4314 asection * (*add_stub_section) (const char *, asection *),
4315 void (*layout_sections_again) (void))
4316 {
4317 bfd_size_type stub_group_size;
4318 bfd_boolean stubs_always_after_branch;
4319 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
4320 struct a8_erratum_fix *a8_fixes = NULL;
4321 unsigned int num_a8_fixes = 0, a8_fix_table_size = 10;
4322 struct a8_erratum_reloc *a8_relocs = NULL;
4323 unsigned int num_a8_relocs = 0, a8_reloc_table_size = 10, i;
4324
4325 if (htab == NULL)
4326 return FALSE;
4327
4328 if (htab->fix_cortex_a8)
4329 {
4330 a8_fixes = (struct a8_erratum_fix *)
4331 bfd_zmalloc (sizeof (struct a8_erratum_fix) * a8_fix_table_size);
4332 a8_relocs = (struct a8_erratum_reloc *)
4333 bfd_zmalloc (sizeof (struct a8_erratum_reloc) * a8_reloc_table_size);
4334 }
4335
4336 /* Propagate mach to stub bfd, because it may not have been
4337 finalized when we created stub_bfd. */
4338 bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
4339 bfd_get_mach (output_bfd));
4340
4341 /* Stash our params away. */
4342 htab->stub_bfd = stub_bfd;
4343 htab->add_stub_section = add_stub_section;
4344 htab->layout_sections_again = layout_sections_again;
4345 stubs_always_after_branch = group_size < 0;
4346
4347 /* The Cortex-A8 erratum fix depends on stubs not being in the same 4K page
4348 as the first half of a 32-bit branch straddling two 4K pages. This is a
4349 crude way of enforcing that. */
4350 if (htab->fix_cortex_a8)
4351 stubs_always_after_branch = 1;
4352
4353 if (group_size < 0)
4354 stub_group_size = -group_size;
4355 else
4356 stub_group_size = group_size;
4357
4358 if (stub_group_size == 1)
4359 {
4360 /* Default values. */
4361 /* The Thumb branch range of +-4MB has to be used as the default
4362 maximum group size (a given section can contain both ARM and Thumb
4363 code, so the worst case has to be taken into account).
4364
4365 This value is 24K less than that, which allows for 2025
4366 12-byte stubs. If we exceed that, then we will fail to link.
4367 The user will have to relink with an explicit group size
4368 option. */
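/* 4 MB is 4194304 bytes, so the value below leaves 4194304 - 4170000
   = 24304 bytes of headroom, i.e. roughly 2025 twelve-byte stubs.  */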
4369 stub_group_size = 4170000;
4370 }
4371
4372 group_sections (htab, stub_group_size, stubs_always_after_branch);
4373
4374 /* If we're applying the cortex A8 fix, we need to determine the
4375 program header size now, because we cannot change it later --
4376 that could alter section placements. Notice the A8 erratum fix
4377 ends up requiring the section addresses to remain unchanged
4378 modulo the page size. That's something we cannot represent
4379 inside BFD, and we don't want to force the section alignment to
4380 be the page size. */
4381 if (htab->fix_cortex_a8)
4382 (*htab->layout_sections_again) ();
4383
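/* Iterate: add or resize stubs, re-run section layout and rescan, until
   a pass makes no further change to the set of stubs (and, when the
   Cortex-A8 workaround is enabled, to the set of erratum fixes).  */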
4384 while (1)
4385 {
4386 bfd *input_bfd;
4387 unsigned int bfd_indx;
4388 asection *stub_sec;
4389 bfd_boolean stub_changed = FALSE;
4390 unsigned prev_num_a8_fixes = num_a8_fixes;
4391
4392 num_a8_fixes = 0;
4393 for (input_bfd = info->input_bfds, bfd_indx = 0;
4394 input_bfd != NULL;
4395 input_bfd = input_bfd->link_next, bfd_indx++)
4396 {
4397 Elf_Internal_Shdr *symtab_hdr;
4398 asection *section;
4399 Elf_Internal_Sym *local_syms = NULL;
4400
4401 num_a8_relocs = 0;
4402
4403 /* We'll need the symbol table in a second. */
4404 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
4405 if (symtab_hdr->sh_info == 0)
4406 continue;
4407
4408 /* Walk over each section attached to the input bfd. */
4409 for (section = input_bfd->sections;
4410 section != NULL;
4411 section = section->next)
4412 {
4413 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
4414
4415 /* If there aren't any relocs, then there's nothing more
4416 to do. */
4417 if ((section->flags & SEC_RELOC) == 0
4418 || section->reloc_count == 0
4419 || (section->flags & SEC_CODE) == 0)
4420 continue;
4421
4422 /* If this section is a link-once section that will be
4423 discarded, then don't create any stubs. */
4424 if (section->output_section == NULL
4425 || section->output_section->owner != output_bfd)
4426 continue;
4427
4428 /* Get the relocs. */
4429 internal_relocs
4430 = _bfd_elf_link_read_relocs (input_bfd, section, NULL,
4431 NULL, info->keep_memory);
4432 if (internal_relocs == NULL)
4433 goto error_ret_free_local;
4434
4435 /* Now examine each relocation. */
4436 irela = internal_relocs;
4437 irelaend = irela + section->reloc_count;
4438 for (; irela < irelaend; irela++)
4439 {
4440 unsigned int r_type, r_indx;
4441 enum elf32_arm_stub_type stub_type;
4442 struct elf32_arm_stub_hash_entry *stub_entry;
4443 asection *sym_sec;
4444 bfd_vma sym_value;
4445 bfd_vma destination;
4446 struct elf32_arm_link_hash_entry *hash;
4447 const char *sym_name;
4448 char *stub_name;
4449 const asection *id_sec;
4450 int st_type;
4451 bfd_boolean created_stub = FALSE;
4452
4453 r_type = ELF32_R_TYPE (irela->r_info);
4454 r_indx = ELF32_R_SYM (irela->r_info);
4455
4456 if (r_type >= (unsigned int) R_ARM_max)
4457 {
4458 bfd_set_error (bfd_error_bad_value);
4459 error_ret_free_internal:
4460 if (elf_section_data (section)->relocs == NULL)
4461 free (internal_relocs);
4462 goto error_ret_free_local;
4463 }
4464
4465 /* Only look for stubs on branch instructions. */
4466 if ((r_type != (unsigned int) R_ARM_CALL)
4467 && (r_type != (unsigned int) R_ARM_THM_CALL)
4468 && (r_type != (unsigned int) R_ARM_JUMP24)
4469 && (r_type != (unsigned int) R_ARM_THM_JUMP19)
4470 && (r_type != (unsigned int) R_ARM_THM_XPC22)
4471 && (r_type != (unsigned int) R_ARM_THM_JUMP24)
4472 && (r_type != (unsigned int) R_ARM_PLT32))
4473 continue;
4474
4475 /* Now determine the call target, its name, value,
4476 section. */
4477 sym_sec = NULL;
4478 sym_value = 0;
4479 destination = 0;
4480 hash = NULL;
4481 sym_name = NULL;
4482 if (r_indx < symtab_hdr->sh_info)
4483 {
4484 /* It's a local symbol. */
4485 Elf_Internal_Sym *sym;
4486
4487 if (local_syms == NULL)
4488 {
4489 local_syms
4490 = (Elf_Internal_Sym *) symtab_hdr->contents;
4491 if (local_syms == NULL)
4492 local_syms
4493 = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
4494 symtab_hdr->sh_info, 0,
4495 NULL, NULL, NULL);
4496 if (local_syms == NULL)
4497 goto error_ret_free_internal;
4498 }
4499
4500 sym = local_syms + r_indx;
4501 if (sym->st_shndx == SHN_UNDEF)
4502 sym_sec = bfd_und_section_ptr;
4503 else if (sym->st_shndx == SHN_ABS)
4504 sym_sec = bfd_abs_section_ptr;
4505 else if (sym->st_shndx == SHN_COMMON)
4506 sym_sec = bfd_com_section_ptr;
4507 else
4508 sym_sec =
4509 bfd_section_from_elf_index (input_bfd, sym->st_shndx);
4510
4511 if (!sym_sec)
4512 /* This is an undefined symbol. It can never
4513 be resolved. */
4514 continue;
4515
4516 if (ELF_ST_TYPE (sym->st_info) != STT_SECTION)
4517 sym_value = sym->st_value;
4518 destination = (sym_value + irela->r_addend
4519 + sym_sec->output_offset
4520 + sym_sec->output_section->vma);
4521 st_type = ELF_ST_TYPE (sym->st_info);
4522 sym_name
4523 = bfd_elf_string_from_elf_section (input_bfd,
4524 symtab_hdr->sh_link,
4525 sym->st_name);
4526 }
4527 else
4528 {
4529 /* It's an external symbol. */
4530 int e_indx;
4531
4532 e_indx = r_indx - symtab_hdr->sh_info;
4533 hash = ((struct elf32_arm_link_hash_entry *)
4534 elf_sym_hashes (input_bfd)[e_indx]);
4535
4536 while (hash->root.root.type == bfd_link_hash_indirect
4537 || hash->root.root.type == bfd_link_hash_warning)
4538 hash = ((struct elf32_arm_link_hash_entry *)
4539 hash->root.root.u.i.link);
4540
4541 if (hash->root.root.type == bfd_link_hash_defined
4542 || hash->root.root.type == bfd_link_hash_defweak)
4543 {
4544 struct elf32_arm_link_hash_table *globals =
4545 elf32_arm_hash_table (info);
4546
4547 sym_sec = hash->root.root.u.def.section;
4548 sym_value = hash->root.root.u.def.value;
4549
4550 /* For a destination in a shared library,
4551 use the PLT stub as target address to
4552 decide whether a branch stub is
4553 needed. */
4554 if (globals != NULL
4555 && globals->splt != NULL
4556 && hash != NULL
4557 && hash->root.plt.offset != (bfd_vma) -1)
4558 {
4559 sym_sec = globals->splt;
4560 sym_value = hash->root.plt.offset;
4561 if (sym_sec->output_section != NULL)
4562 destination = (sym_value
4563 + sym_sec->output_offset
4564 + sym_sec->output_section->vma);
4565 }
4566 else if (sym_sec->output_section != NULL)
4567 destination = (sym_value + irela->r_addend
4568 + sym_sec->output_offset
4569 + sym_sec->output_section->vma);
4570 }
4571 else if ((hash->root.root.type == bfd_link_hash_undefined)
4572 || (hash->root.root.type == bfd_link_hash_undefweak))
4573 {
4574 /* For a shared library, use the PLT stub as
4575 target address to decide whether a long
4576 branch stub is needed.
4577 For absolute code, they cannot be handled. */
4578 struct elf32_arm_link_hash_table *globals =
4579 elf32_arm_hash_table (info);
4580
4581 if (globals != NULL
4582 && globals->splt != NULL
4583 && hash != NULL
4584 && hash->root.plt.offset != (bfd_vma) -1)
4585 {
4586 sym_sec = globals->splt;
4587 sym_value = hash->root.plt.offset;
4588 if (sym_sec->output_section != NULL)
4589 destination = (sym_value
4590 + sym_sec->output_offset
4591 + sym_sec->output_section->vma);
4592 }
4593 else
4594 continue;
4595 }
4596 else
4597 {
4598 bfd_set_error (bfd_error_bad_value);
4599 goto error_ret_free_internal;
4600 }
4601 st_type = ELF_ST_TYPE (hash->root.type);
4602 sym_name = hash->root.root.root.string;
4603 }
4604
4605 do
4606 {
4607 /* Determine what (if any) linker stub is needed. */
4608 stub_type = arm_type_of_stub (info, section, irela,
4609 &st_type, hash,
4610 destination, sym_sec,
4611 input_bfd, sym_name);
4612 if (stub_type == arm_stub_none)
4613 break;
4614
4615 /* Support for grouping stub sections. */
4616 id_sec = htab->stub_group[section->id].link_sec;
4617
4618 /* Get the name of this stub. */
4619 stub_name = elf32_arm_stub_name (id_sec, sym_sec, hash,
4620 irela, stub_type);
4621 if (!stub_name)
4622 goto error_ret_free_internal;
4623
4624 /* We've either created a stub for this reloc already,
4625 or we are about to. */
4626 created_stub = TRUE;
4627
4628 stub_entry = arm_stub_hash_lookup
4629 (&htab->stub_hash_table, stub_name,
4630 FALSE, FALSE);
4631 if (stub_entry != NULL)
4632 {
4633 /* The proper stub has already been created. */
4634 free (stub_name);
4635 stub_entry->target_value = sym_value;
4636 break;
4637 }
4638
4639 stub_entry = elf32_arm_add_stub (stub_name, section,
4640 htab);
4641 if (stub_entry == NULL)
4642 {
4643 free (stub_name);
4644 goto error_ret_free_internal;
4645 }
4646
4647 stub_entry->target_value = sym_value;
4648 stub_entry->target_section = sym_sec;
4649 stub_entry->stub_type = stub_type;
4650 stub_entry->h = hash;
4651 stub_entry->st_type = st_type;
4652
4653 if (sym_name == NULL)
4654 sym_name = "unnamed";
4655 stub_entry->output_name = (char *)
4656 bfd_alloc (htab->stub_bfd,
4657 sizeof (THUMB2ARM_GLUE_ENTRY_NAME)
4658 + strlen (sym_name));
4659 if (stub_entry->output_name == NULL)
4660 {
4661 free (stub_name);
4662 goto error_ret_free_internal;
4663 }
4664
4665 /* For historical reasons, use the existing names for
4666 ARM-to-Thumb and Thumb-to-ARM stubs. */
4667 if ( ((r_type == (unsigned int) R_ARM_THM_CALL)
4668 || (r_type == (unsigned int) R_ARM_THM_JUMP24))
4669 && st_type != STT_ARM_TFUNC)
4670 sprintf (stub_entry->output_name,
4671 THUMB2ARM_GLUE_ENTRY_NAME, sym_name);
4672 else if ( ((r_type == (unsigned int) R_ARM_CALL)
4673 || (r_type == (unsigned int) R_ARM_JUMP24))
4674 && st_type == STT_ARM_TFUNC)
4675 sprintf (stub_entry->output_name,
4676 ARM2THUMB_GLUE_ENTRY_NAME, sym_name);
4677 else
4678 sprintf (stub_entry->output_name, STUB_ENTRY_NAME,
4679 sym_name);
4680
4681 stub_changed = TRUE;
4682 }
4683 while (0);
4684
4685 /* Look for relocations which might trigger Cortex-A8
4686 erratum. */
4687 if (htab->fix_cortex_a8
4688 && (r_type == (unsigned int) R_ARM_THM_JUMP24
4689 || r_type == (unsigned int) R_ARM_THM_JUMP19
4690 || r_type == (unsigned int) R_ARM_THM_CALL
4691 || r_type == (unsigned int) R_ARM_THM_XPC22))
4692 {
4693 bfd_vma from = section->output_section->vma
4694 + section->output_offset
4695 + irela->r_offset;
4696
4697 if ((from & 0xfff) == 0xffe)
4698 {
4699 /* Found a candidate. Note we haven't checked the
4700 destination is within 4K here: if we do so (and
4701 don't create an entry in a8_relocs) we can't tell
4702 that a branch should have been relocated when
4703 scanning later. */
4704 if (num_a8_relocs == a8_reloc_table_size)
4705 {
4706 a8_reloc_table_size *= 2;
4707 a8_relocs = (struct a8_erratum_reloc *)
4708 bfd_realloc (a8_relocs,
4709 sizeof (struct a8_erratum_reloc)
4710 * a8_reloc_table_size);
4711 }
4712
4713 a8_relocs[num_a8_relocs].from = from;
4714 a8_relocs[num_a8_relocs].destination = destination;
4715 a8_relocs[num_a8_relocs].r_type = r_type;
4716 a8_relocs[num_a8_relocs].st_type = st_type;
4717 a8_relocs[num_a8_relocs].sym_name = sym_name;
4718 a8_relocs[num_a8_relocs].non_a8_stub = created_stub;
4719 a8_relocs[num_a8_relocs].hash = hash;
4720
4721 num_a8_relocs++;
4722 }
4723 }
4724 }
4725
4726 /* We're done with the internal relocs, free them. */
4727 if (elf_section_data (section)->relocs == NULL)
4728 free (internal_relocs);
4729 }
4730
4731 if (htab->fix_cortex_a8)
4732 {
4733 /* Sort relocs which might apply to Cortex-A8 erratum. */
4734 qsort (a8_relocs, num_a8_relocs,
4735 sizeof (struct a8_erratum_reloc),
4736 &a8_reloc_compare);
4737
4738 /* Scan for branches which might trigger Cortex-A8 erratum. */
4739 if (cortex_a8_erratum_scan (input_bfd, info, &a8_fixes,
4740 &num_a8_fixes, &a8_fix_table_size,
4741 a8_relocs, num_a8_relocs,
4742 prev_num_a8_fixes, &stub_changed)
4743 != 0)
4744 goto error_ret_free_local;
4745 }
4746 }
4747
4748 if (prev_num_a8_fixes != num_a8_fixes)
4749 stub_changed = TRUE;
4750
4751 if (!stub_changed)
4752 break;
4753
4754 /* OK, we've added some stubs. Find out the new size of the
4755 stub sections. */
4756 for (stub_sec = htab->stub_bfd->sections;
4757 stub_sec != NULL;
4758 stub_sec = stub_sec->next)
4759 {
4760 /* Ignore non-stub sections. */
4761 if (!strstr (stub_sec->name, STUB_SUFFIX))
4762 continue;
4763
4764 stub_sec->size = 0;
4765 }
4766
4767 bfd_hash_traverse (&htab->stub_hash_table, arm_size_one_stub, htab);
4768
4769 /* Add Cortex-A8 erratum veneers to stub section sizes too. */
4770 if (htab->fix_cortex_a8)
4771 for (i = 0; i < num_a8_fixes; i++)
4772 {
4773 stub_sec = elf32_arm_create_or_find_stub_sec (NULL,
4774 a8_fixes[i].section, htab);
4775
4776 if (stub_sec == NULL)
4777 goto error_ret_free_local;
4778
4779 stub_sec->size
4780 += find_stub_size_and_template (a8_fixes[i].stub_type, NULL,
4781 NULL);
4782 }
4783
4784
4785 /* Ask the linker to do its stuff. */
4786 (*htab->layout_sections_again) ();
4787 }
4788
4789 /* Add stubs for Cortex-A8 erratum fixes now. */
4790 if (htab->fix_cortex_a8)
4791 {
4792 for (i = 0; i < num_a8_fixes; i++)
4793 {
4794 struct elf32_arm_stub_hash_entry *stub_entry;
4795 char *stub_name = a8_fixes[i].stub_name;
4796 asection *section = a8_fixes[i].section;
4797 unsigned int section_id = a8_fixes[i].section->id;
4798 asection *link_sec = htab->stub_group[section_id].link_sec;
4799 asection *stub_sec = htab->stub_group[section_id].stub_sec;
4800 const insn_sequence *template_sequence;
4801 int template_size, size = 0;
4802
4803 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
4804 TRUE, FALSE);
4805 if (stub_entry == NULL)
4806 {
4807 (*_bfd_error_handler) (_("%B: cannot create stub entry %s"),
4808 section->owner,
4809 stub_name);
4810 return FALSE;
4811 }
4812
4813 stub_entry->stub_sec = stub_sec;
4814 stub_entry->stub_offset = 0;
4815 stub_entry->id_sec = link_sec;
4816 stub_entry->stub_type = a8_fixes[i].stub_type;
4817 stub_entry->target_section = a8_fixes[i].section;
4818 stub_entry->target_value = a8_fixes[i].offset;
4819 stub_entry->target_addend = a8_fixes[i].addend;
4820 stub_entry->orig_insn = a8_fixes[i].orig_insn;
4821 stub_entry->st_type = a8_fixes[i].st_type;
4822
4823 size = find_stub_size_and_template (a8_fixes[i].stub_type,
4824 &template_sequence,
4825 &template_size);
4826
4827 stub_entry->stub_size = size;
4828 stub_entry->stub_template = template_sequence;
4829 stub_entry->stub_template_size = template_size;
4830 }
4831
4832 /* Stash the Cortex-A8 erratum fix array for use later in
4833 elf32_arm_write_section(). */
4834 htab->a8_erratum_fixes = a8_fixes;
4835 htab->num_a8_erratum_fixes = num_a8_fixes;
4836 }
4837 else
4838 {
4839 htab->a8_erratum_fixes = NULL;
4840 htab->num_a8_erratum_fixes = 0;
4841 }
4842 return TRUE;
4843
4844 error_ret_free_local:
4845 return FALSE;
4846 }
4847
4848 /* Build all the stubs associated with the current output file. The
4849 stubs are kept in a hash table attached to the main linker hash
4850 table. We also set up the .plt entries for statically linked PIC
4851 functions here. This function is called via arm_elf_finish in the
4852 linker. */
4853
4854 bfd_boolean
4855 elf32_arm_build_stubs (struct bfd_link_info *info)
4856 {
4857 asection *stub_sec;
4858 struct bfd_hash_table *table;
4859 struct elf32_arm_link_hash_table *htab;
4860
4861 htab = elf32_arm_hash_table (info);
4862 if (htab == NULL)
4863 return FALSE;
4864
4865 for (stub_sec = htab->stub_bfd->sections;
4866 stub_sec != NULL;
4867 stub_sec = stub_sec->next)
4868 {
4869 bfd_size_type size;
4870
4871 /* Ignore non-stub sections. */
4872 if (!strstr (stub_sec->name, STUB_SUFFIX))
4873 continue;
4874
4875 /* Allocate memory to hold the linker stubs. */
4876 size = stub_sec->size;
4877 stub_sec->contents = (unsigned char *) bfd_zalloc (htab->stub_bfd, size);
4878 if (stub_sec->contents == NULL && size != 0)
4879 return FALSE;
4880 stub_sec->size = 0;
4881 }
4882
4883 /* Build the stubs as directed by the stub hash table. */
4884 table = &htab->stub_hash_table;
4885 bfd_hash_traverse (table, arm_build_one_stub, info);
4886 if (htab->fix_cortex_a8)
4887 {
4888 /* Place the cortex a8 stubs last. */
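/* Making fix_cortex_a8 negative tells arm_build_one_stub to emit, on
   this second traversal, only the 2-byte-aligned Cortex-A8 veneers it
   deliberately skipped on the first pass.  */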
4889 htab->fix_cortex_a8 = -1;
4890 bfd_hash_traverse (table, arm_build_one_stub, info);
4891 }
4892
4893 return TRUE;
4894 }
4895
4896 /* Locate the Thumb encoded calling stub for NAME. */
4897
4898 static struct elf_link_hash_entry *
4899 find_thumb_glue (struct bfd_link_info *link_info,
4900 const char *name,
4901 char **error_message)
4902 {
4903 char *tmp_name;
4904 struct elf_link_hash_entry *hash;
4905 struct elf32_arm_link_hash_table *hash_table;
4906
4907 /* We need a pointer to the armelf specific hash table. */
4908 hash_table = elf32_arm_hash_table (link_info);
4909 if (hash_table == NULL)
4910 return NULL;
4911
4912 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
4913 + strlen (THUMB2ARM_GLUE_ENTRY_NAME) + 1);
4914
4915 BFD_ASSERT (tmp_name);
4916
4917 sprintf (tmp_name, THUMB2ARM_GLUE_ENTRY_NAME, name);
4918
4919 hash = elf_link_hash_lookup
4920 (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
4921
4922 if (hash == NULL
4923 && asprintf (error_message, _("unable to find THUMB glue '%s' for '%s'"),
4924 tmp_name, name) == -1)
4925 *error_message = (char *) bfd_errmsg (bfd_error_system_call);
4926
4927 free (tmp_name);
4928
4929 return hash;
4930 }
4931
4932 /* Locate the ARM encoded calling stub for NAME. */
4933
4934 static struct elf_link_hash_entry *
4935 find_arm_glue (struct bfd_link_info *link_info,
4936 const char *name,
4937 char **error_message)
4938 {
4939 char *tmp_name;
4940 struct elf_link_hash_entry *myh;
4941 struct elf32_arm_link_hash_table *hash_table;
4942
4943 /* We need a pointer to the elfarm specific hash table. */
4944 hash_table = elf32_arm_hash_table (link_info);
4945 if (hash_table == NULL)
4946 return NULL;
4947
4948 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
4949 + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
4950
4951 BFD_ASSERT (tmp_name);
4952
4953 sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);
4954
4955 myh = elf_link_hash_lookup
4956 (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
4957
4958 if (myh == NULL
4959 && asprintf (error_message, _("unable to find ARM glue '%s' for '%s'"),
4960 tmp_name, name) == -1)
4961 *error_message = (char *) bfd_errmsg (bfd_error_system_call);
4962
4963 free (tmp_name);
4964
4965 return myh;
4966 }
4967
4968 /* ARM->Thumb glue (static images):
4969
4970 .arm
4971 __func_from_arm:
4972 ldr r12, __func_addr
4973 bx r12
4974 __func_addr:
4975 .word func @ behave as if you saw an ARM_32 reloc.
4976
4977 (v5t static images)
4978 .arm
4979 __func_from_arm:
4980 ldr pc, __func_addr
4981 __func_addr:
4982 .word func @ behave as if you saw an ARM_32 reloc.
4983
4984 (relocatable images)
4985 .arm
4986 __func_from_arm:
4987 ldr r12, __func_offset
4988 add r12, r12, pc
4989 bx r12
4990 __func_offset:
4991 .word func - . */
4992
4993 #define ARM2THUMB_STATIC_GLUE_SIZE 12
4994 static const insn32 a2t1_ldr_insn = 0xe59fc000;
4995 static const insn32 a2t2_bx_r12_insn = 0xe12fff1c;
4996 static const insn32 a2t3_func_addr_insn = 0x00000001;
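/* For reference: 0xe59fc000 is "ldr r12, [pc, #0]" (which loads
   __func_addr), 0xe12fff1c is "bx r12", and 0x00000001 seeds the
   .word slot for the target address, its low bit marking a Thumb
   destination.  */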
4997
4998 #define ARM2THUMB_V5_STATIC_GLUE_SIZE 8
4999 static const insn32 a2t1v5_ldr_insn = 0xe51ff004;
5000 static const insn32 a2t2v5_func_addr_insn = 0x00000001;
5001
5002 #define ARM2THUMB_PIC_GLUE_SIZE 16
5003 static const insn32 a2t1p_ldr_insn = 0xe59fc004;
5004 static const insn32 a2t2p_add_pc_insn = 0xe08cc00f;
5005 static const insn32 a2t3p_bx_r12_insn = 0xe12fff1c;
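/* Likewise: 0xe51ff004 is "ldr pc, [pc, #-4]" (jumping via __func_addr
   in the v5 sequence), 0xe59fc004 is "ldr r12, [pc, #4]" (loading
   __func_offset in the PIC sequence) and 0xe08cc00f is
   "add r12, r12, pc".  */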
5006
5007 /* Thumb->ARM:                        Thumb->(non-interworking aware) ARM
5008
5009      .thumb                           .thumb
5010      .align 2                         .align 2
5011  __func_from_thumb:                __func_from_thumb:
5012      bx pc                            push {r6, lr}
5013      nop                              ldr r6, __func_addr
5014      .arm                             mov lr, pc
5015      b func                           bx r6
5016                                       .arm
5017                                    ;; back_to_thumb
5018                                       ldmia r13! {r6, lr}
5019                                       bx lr
5020                                    __func_addr:
5021                                       .word func */
5022
5023 #define THUMB2ARM_GLUE_SIZE 8
5024 static const insn16 t2a1_bx_pc_insn = 0x4778;
5025 static const insn16 t2a2_noop_insn = 0x46c0;
5026 static const insn32 t2a3_b_insn = 0xea000000;
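/* For reference: 0x4778 is the Thumb "bx pc" (the PC reads as the
   instruction address plus 4, so this lands on the ARM branch below in
   ARM state), 0x46c0 is the Thumb nop "mov r8, r8" used as padding,
   and 0xea000000 is an ARM "b" whose 24-bit offset field is filled in
   to reach the target.  */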
5027
5028 #define VFP11_ERRATUM_VENEER_SIZE 8
5029
5030 #define ARM_BX_VENEER_SIZE 12
5031 static const insn32 armbx1_tst_insn = 0xe3100001;
5032 static const insn32 armbx2_moveq_insn = 0x01a0f000;
5033 static const insn32 armbx3_bx_insn = 0xe12fff10;
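/* For reference: 0xe3100001 is "tst rN, #1" (the register field is
   patched in per veneer), 0x01a0f000 is "moveq pc, rN" and 0xe12fff10
   is "bx rN"; a destination with bit 0 clear (ARM code) thus returns
   via the plain mov to pc, while a Thumb destination falls through to
   the BX.  */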
5034
5035 #ifndef ELFARM_NABI_C_INCLUDED
5036 static void
5037 arm_allocate_glue_section_space (bfd * abfd, bfd_size_type size, const char * name)
5038 {
5039 asection * s;
5040 bfd_byte * contents;
5041
5042 if (size == 0)
5043 {
5044 /* Do not include empty glue sections in the output. */
5045 if (abfd != NULL)
5046 {
5047 s = bfd_get_section_by_name (abfd, name);
5048 if (s != NULL)
5049 s->flags |= SEC_EXCLUDE;
5050 }
5051 return;
5052 }
5053
5054 BFD_ASSERT (abfd != NULL);
5055
5056 s = bfd_get_section_by_name (abfd, name);
5057 BFD_ASSERT (s != NULL);
5058
5059 contents = (bfd_byte *) bfd_alloc (abfd, size);
5060
5061 BFD_ASSERT (s->size == size);
5062 s->contents = contents;
5063 }
5064
5065 bfd_boolean
5066 bfd_elf32_arm_allocate_interworking_sections (struct bfd_link_info * info)
5067 {
5068 struct elf32_arm_link_hash_table * globals;
5069
5070 globals = elf32_arm_hash_table (info);
5071 BFD_ASSERT (globals != NULL);
5072
5073 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
5074 globals->arm_glue_size,
5075 ARM2THUMB_GLUE_SECTION_NAME);
5076
5077 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
5078 globals->thumb_glue_size,
5079 THUMB2ARM_GLUE_SECTION_NAME);
5080
5081 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
5082 globals->vfp11_erratum_glue_size,
5083 VFP11_ERRATUM_VENEER_SECTION_NAME);
5084
5085 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
5086 globals->bx_glue_size,
5087 ARM_BX_GLUE_SECTION_NAME);
5088
5089 return TRUE;
5090 }
5091
5092 /* Allocate space and symbols for calling a Thumb function from ARM mode.
5093 Returns the symbol identifying the stub. */
5094
5095 static struct elf_link_hash_entry *
5096 record_arm_to_thumb_glue (struct bfd_link_info * link_info,
5097 struct elf_link_hash_entry * h)
5098 {
5099 const char * name = h->root.root.string;
5100 asection * s;
5101 char * tmp_name;
5102 struct elf_link_hash_entry * myh;
5103 struct bfd_link_hash_entry * bh;
5104 struct elf32_arm_link_hash_table * globals;
5105 bfd_vma val;
5106 bfd_size_type size;
5107
5108 globals = elf32_arm_hash_table (link_info);
5109 BFD_ASSERT (globals != NULL);
5110 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
5111
5112 s = bfd_get_section_by_name
5113 (globals->bfd_of_glue_owner, ARM2THUMB_GLUE_SECTION_NAME);
5114
5115 BFD_ASSERT (s != NULL);
5116
5117 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
5118 + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
5119
5120 BFD_ASSERT (tmp_name);
5121
5122 sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);
5123
5124 myh = elf_link_hash_lookup
5125 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
5126
5127 if (myh != NULL)
5128 {
5129 /* We've already seen this guy. */
5130 free (tmp_name);
5131 return myh;
5132 }
5133
5134 /* The only trick here is using globals->arm_glue_size as the value.
5135 Even though the section isn't allocated yet, this is where we will be
5136 putting it. The +1 on the value marks that the stub has not been
5137 output yet - not that it is a Thumb function. */
5138 bh = NULL;
5139 val = globals->arm_glue_size + 1;
5140 _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
5141 tmp_name, BSF_GLOBAL, s, val,
5142 NULL, TRUE, FALSE, &bh);
5143
5144 myh = (struct elf_link_hash_entry *) bh;
5145 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5146 myh->forced_local = 1;
5147
5148 free (tmp_name);
5149
5150 if (link_info->shared || globals->root.is_relocatable_executable
5151 || globals->pic_veneer)
5152 size = ARM2THUMB_PIC_GLUE_SIZE;
5153 else if (globals->use_blx)
5154 size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
5155 else
5156 size = ARM2THUMB_STATIC_GLUE_SIZE;
5157
5158 s->size += size;
5159 globals->arm_glue_size += size;
5160
5161 return myh;
5162 }
5163
5164 /* Allocate space for ARMv4 BX veneers. */
5165
5166 static void
5167 record_arm_bx_glue (struct bfd_link_info * link_info, int reg)
5168 {
5169 asection * s;
5170 struct elf32_arm_link_hash_table *globals;
5171 char *tmp_name;
5172 struct elf_link_hash_entry *myh;
5173 struct bfd_link_hash_entry *bh;
5174 bfd_vma val;
5175
5176 /* BX PC does not need a veneer. */
5177 if (reg == 15)
5178 return;
5179
5180 globals = elf32_arm_hash_table (link_info);
5181 BFD_ASSERT (globals != NULL);
5182 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
5183
5184 /* Check if this veneer has already been allocated. */
5185 if (globals->bx_glue_offset[reg])
5186 return;
5187
5188 s = bfd_get_section_by_name
5189 (globals->bfd_of_glue_owner, ARM_BX_GLUE_SECTION_NAME);
5190
5191 BFD_ASSERT (s != NULL);
5192
5193 /* Add symbol for veneer. */
5194 tmp_name = (char *)
5195 bfd_malloc ((bfd_size_type) strlen (ARM_BX_GLUE_ENTRY_NAME) + 1);
5196
5197 BFD_ASSERT (tmp_name);
5198
5199 sprintf (tmp_name, ARM_BX_GLUE_ENTRY_NAME, reg);
5200
5201 myh = elf_link_hash_lookup
5202 (&(globals)->root, tmp_name, FALSE, FALSE, FALSE);
5203
5204 BFD_ASSERT (myh == NULL);
5205
5206 bh = NULL;
5207 val = globals->bx_glue_size;
5208 _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
5209 tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
5210 NULL, TRUE, FALSE, &bh);
5211
5212 myh = (struct elf_link_hash_entry *) bh;
5213 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5214 myh->forced_local = 1;
5215
5216 s->size += ARM_BX_VENEER_SIZE;
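/* ORing in 2 keeps the recorded value non-zero even for a veneer at
   offset 0, so the check above can treat zero as "not yet allocated";
   the low bits are masked off again wherever the offset proper is
   needed.  */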
5217 globals->bx_glue_offset[reg] = globals->bx_glue_size | 2;
5218 globals->bx_glue_size += ARM_BX_VENEER_SIZE;
5219 }
5220
5221
5222 /* Add an entry to the code/data map for section SEC. */
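/* TYPE is the mapping symbol class for the new entry: 'a' for ARM code,
   't' for Thumb code and 'd' for data, mirroring the $a/$t/$d mapping
   symbols defined by the ARM ELF specification.  */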
5223
5224 static void
5225 elf32_arm_section_map_add (asection *sec, char type, bfd_vma vma)
5226 {
5227 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
5228 unsigned int newidx;
5229
5230 if (sec_data->map == NULL)
5231 {
5232 sec_data->map = (elf32_arm_section_map *)
5233 bfd_malloc (sizeof (elf32_arm_section_map));
5234 sec_data->mapcount = 0;
5235 sec_data->mapsize = 1;
5236 }
5237
5238 newidx = sec_data->mapcount++;
5239
5240 if (sec_data->mapcount > sec_data->mapsize)
5241 {
5242 sec_data->mapsize *= 2;
5243 sec_data->map = (elf32_arm_section_map *)
5244 bfd_realloc_or_free (sec_data->map, sec_data->mapsize
5245 * sizeof (elf32_arm_section_map));
5246 }
5247
5248 if (sec_data->map)
5249 {
5250 sec_data->map[newidx].vma = vma;
5251 sec_data->map[newidx].type = type;
5252 }
5253 }
5254
5255
5256 /* Record information about a VFP11 denorm-erratum veneer. Only ARM-mode
5257 veneers are handled for now. */
5258
5259 static bfd_vma
5260 record_vfp11_erratum_veneer (struct bfd_link_info *link_info,
5261 elf32_vfp11_erratum_list *branch,
5262 bfd *branch_bfd,
5263 asection *branch_sec,
5264 unsigned int offset)
5265 {
5266 asection *s;
5267 struct elf32_arm_link_hash_table *hash_table;
5268 char *tmp_name;
5269 struct elf_link_hash_entry *myh;
5270 struct bfd_link_hash_entry *bh;
5271 bfd_vma val;
5272 struct _arm_elf_section_data *sec_data;
5273 elf32_vfp11_erratum_list *newerr;
5274
5275 hash_table = elf32_arm_hash_table (link_info);
5276 BFD_ASSERT (hash_table != NULL);
5277 BFD_ASSERT (hash_table->bfd_of_glue_owner != NULL);
5278
5279 s = bfd_get_section_by_name
5280 (hash_table->bfd_of_glue_owner, VFP11_ERRATUM_VENEER_SECTION_NAME);
5281
5282 sec_data = elf32_arm_section_data (s);
5283
5284 BFD_ASSERT (s != NULL);
5285
5286 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
5287 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
5288
5289 BFD_ASSERT (tmp_name);
5290
5291 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
5292 hash_table->num_vfp11_fixes);
5293
5294 myh = elf_link_hash_lookup
5295 (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
5296
5297 BFD_ASSERT (myh == NULL);
5298
5299 bh = NULL;
5300 val = hash_table->vfp11_erratum_glue_size;
5301 _bfd_generic_link_add_one_symbol (link_info, hash_table->bfd_of_glue_owner,
5302 tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
5303 NULL, TRUE, FALSE, &bh);
5304
5305 myh = (struct elf_link_hash_entry *) bh;
5306 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5307 myh->forced_local = 1;
5308
5309 /* Link veneer back to calling location. */
5310 sec_data->erratumcount += 1;
5311 newerr = (elf32_vfp11_erratum_list *)
5312 bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
5313
5314 newerr->type = VFP11_ERRATUM_ARM_VENEER;
5315 newerr->vma = -1;
5316 newerr->u.v.branch = branch;
5317 newerr->u.v.id = hash_table->num_vfp11_fixes;
5318 branch->u.b.veneer = newerr;
5319
5320 newerr->next = sec_data->erratumlist;
5321 sec_data->erratumlist = newerr;
5322
5323 /* A symbol for the return from the veneer. */
5324 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
5325 hash_table->num_vfp11_fixes);
5326
5327 myh = elf_link_hash_lookup
5328 (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
5329
5330 if (myh != NULL)
5331 abort ();
5332
5333 bh = NULL;
5334 val = offset + 4;
5335 _bfd_generic_link_add_one_symbol (link_info, branch_bfd, tmp_name, BSF_LOCAL,
5336 branch_sec, val, NULL, TRUE, FALSE, &bh);
5337
5338 myh = (struct elf_link_hash_entry *) bh;
5339 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5340 myh->forced_local = 1;
5341
5342 free (tmp_name);
5343
5344 /* Generate a mapping symbol for the veneer section, and explicitly add an
5345 entry for that symbol to the code/data map for the section. */
5346 if (hash_table->vfp11_erratum_glue_size == 0)
5347 {
5348 bh = NULL;
5349 /* FIXME: Creates an ARM symbol. Thumb mode will need attention if it
5350 ever requires this erratum fix. */
5351 _bfd_generic_link_add_one_symbol (link_info,
5352 hash_table->bfd_of_glue_owner, "$a",
5353 BSF_LOCAL, s, 0, NULL,
5354 TRUE, FALSE, &bh);
5355
5356 myh = (struct elf_link_hash_entry *) bh;
5357 myh->type = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
5358 myh->forced_local = 1;
5359
5360 /* The elf32_arm_init_maps function only cares about symbols from input
5361 BFDs. We must make a note of this generated mapping symbol
5362 ourselves so that code byteswapping works properly in
5363 elf32_arm_write_section. */
5364 elf32_arm_section_map_add (s, 'a', 0);
5365 }
5366
5367 s->size += VFP11_ERRATUM_VENEER_SIZE;
5368 hash_table->vfp11_erratum_glue_size += VFP11_ERRATUM_VENEER_SIZE;
5369 hash_table->num_vfp11_fixes++;
5370
 5371   /* The offset of the veneer's return location within BRANCH_SEC. */
5372 return val;
5373 }
5374
5375 #define ARM_GLUE_SECTION_FLAGS \
5376 (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_CODE \
5377 | SEC_READONLY | SEC_LINKER_CREATED)
5378
5379 /* Create a fake section for use by the ARM backend of the linker. */
5380
5381 static bfd_boolean
5382 arm_make_glue_section (bfd * abfd, const char * name)
5383 {
5384 asection * sec;
5385
5386 sec = bfd_get_section_by_name (abfd, name);
5387 if (sec != NULL)
5388 /* Already made. */
5389 return TRUE;
5390
5391 sec = bfd_make_section_with_flags (abfd, name, ARM_GLUE_SECTION_FLAGS);
5392
5393 if (sec == NULL
5394 || !bfd_set_section_alignment (abfd, sec, 2))
5395 return FALSE;
5396
5397 /* Set the gc mark to prevent the section from being removed by garbage
5398 collection, despite the fact that no relocs refer to this section. */
5399 sec->gc_mark = 1;
5400
5401 return TRUE;
5402 }
5403
5404 /* Add the glue sections to ABFD. This function is called from the
5405 linker scripts in ld/emultempl/{armelf}.em. */
5406
5407 bfd_boolean
5408 bfd_elf32_arm_add_glue_sections_to_bfd (bfd *abfd,
5409 struct bfd_link_info *info)
5410 {
5411 /* If we are only performing a partial
5412 link do not bother adding the glue. */
5413 if (info->relocatable)
5414 return TRUE;
5415
5416 return arm_make_glue_section (abfd, ARM2THUMB_GLUE_SECTION_NAME)
5417 && arm_make_glue_section (abfd, THUMB2ARM_GLUE_SECTION_NAME)
5418 && arm_make_glue_section (abfd, VFP11_ERRATUM_VENEER_SECTION_NAME)
5419 && arm_make_glue_section (abfd, ARM_BX_GLUE_SECTION_NAME);
5420 }
5421
5422 /* Select a BFD to be used to hold the sections used by the glue code.
5423 This function is called from the linker scripts in ld/emultempl/
5424 {armelf/pe}.em. */
5425
5426 bfd_boolean
5427 bfd_elf32_arm_get_bfd_for_interworking (bfd *abfd, struct bfd_link_info *info)
5428 {
5429 struct elf32_arm_link_hash_table *globals;
5430
5431 /* If we are only performing a partial link
5432 do not bother getting a bfd to hold the glue. */
5433 if (info->relocatable)
5434 return TRUE;
5435
5436 /* Make sure we don't attach the glue sections to a dynamic object. */
5437 BFD_ASSERT (!(abfd->flags & DYNAMIC));
5438
5439 globals = elf32_arm_hash_table (info);
5440 BFD_ASSERT (globals != NULL);
5441
5442 if (globals->bfd_of_glue_owner != NULL)
5443 return TRUE;
5444
5445 /* Save the bfd for later use. */
5446 globals->bfd_of_glue_owner = abfd;
5447
5448 return TRUE;
5449 }
5450
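/* Decide whether interworking veneers may use the BLX instruction.
   Tag_CPU_arch values above 2 (ARMv4T) correspond to ARMv5T or later,
   all of which provide BLX. */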
5451 static void
5452 check_use_blx (struct elf32_arm_link_hash_table *globals)
5453 {
5454 if (bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
5455 Tag_CPU_arch) > 2)
5456 globals->use_blx = 1;
5457 }
5458
5459 bfd_boolean
5460 bfd_elf32_arm_process_before_allocation (bfd *abfd,
5461 struct bfd_link_info *link_info)
5462 {
5463 Elf_Internal_Shdr *symtab_hdr;
5464 Elf_Internal_Rela *internal_relocs = NULL;
5465 Elf_Internal_Rela *irel, *irelend;
5466 bfd_byte *contents = NULL;
5467
5468 asection *sec;
5469 struct elf32_arm_link_hash_table *globals;
5470
5471 /* If we are only performing a partial link do not bother
5472 to construct any glue. */
5473 if (link_info->relocatable)
5474 return TRUE;
5475
 5476   /* Here we have a bfd that is to be included in the link. We have a
5477 hook to do reloc rummaging, before section sizes are nailed down. */
5478 globals = elf32_arm_hash_table (link_info);
5479 BFD_ASSERT (globals != NULL);
5480
5481 check_use_blx (globals);
5482
5483 if (globals->byteswap_code && !bfd_big_endian (abfd))
5484 {
5485 _bfd_error_handler (_("%B: BE8 images only valid in big-endian mode."),
5486 abfd);
5487 return FALSE;
5488 }
5489
5490 /* PR 5398: If we have not decided to include any loadable sections in
5491 the output then we will not have a glue owner bfd. This is OK, it
5492 just means that there is nothing else for us to do here. */
5493 if (globals->bfd_of_glue_owner == NULL)
5494 return TRUE;
5495
5496 /* Rummage around all the relocs and map the glue vectors. */
5497 sec = abfd->sections;
5498
5499 if (sec == NULL)
5500 return TRUE;
5501
5502 for (; sec != NULL; sec = sec->next)
5503 {
5504 if (sec->reloc_count == 0)
5505 continue;
5506
5507 if ((sec->flags & SEC_EXCLUDE) != 0)
5508 continue;
5509
5510 symtab_hdr = & elf_symtab_hdr (abfd);
5511
5512 /* Load the relocs. */
5513 internal_relocs
5514 = _bfd_elf_link_read_relocs (abfd, sec, NULL, NULL, FALSE);
5515
5516 if (internal_relocs == NULL)
5517 goto error_return;
5518
5519 irelend = internal_relocs + sec->reloc_count;
5520 for (irel = internal_relocs; irel < irelend; irel++)
5521 {
5522 long r_type;
5523 unsigned long r_index;
5524
5525 struct elf_link_hash_entry *h;
5526
5527 r_type = ELF32_R_TYPE (irel->r_info);
5528 r_index = ELF32_R_SYM (irel->r_info);
5529
5530 /* These are the only relocation types we care about. */
5531 if ( r_type != R_ARM_PC24
5532 && (r_type != R_ARM_V4BX || globals->fix_v4bx < 2))
5533 continue;
5534
5535 /* Get the section contents if we haven't done so already. */
5536 if (contents == NULL)
5537 {
5538 /* Get cached copy if it exists. */
5539 if (elf_section_data (sec)->this_hdr.contents != NULL)
5540 contents = elf_section_data (sec)->this_hdr.contents;
5541 else
5542 {
5543 /* Go get them off disk. */
5544 if (! bfd_malloc_and_get_section (abfd, sec, &contents))
5545 goto error_return;
5546 }
5547 }
5548
5549 if (r_type == R_ARM_V4BX)
5550 {
5551 int reg;
5552
5553 reg = bfd_get_32 (abfd, contents + irel->r_offset) & 0xf;
5554 record_arm_bx_glue (link_info, reg);
5555 continue;
5556 }
5557
5558 /* If the relocation is not against a symbol it cannot concern us. */
5559 h = NULL;
5560
5561 /* We don't care about local symbols. */
5562 if (r_index < symtab_hdr->sh_info)
5563 continue;
5564
5565 /* This is an external symbol. */
5566 r_index -= symtab_hdr->sh_info;
5567 h = (struct elf_link_hash_entry *)
5568 elf_sym_hashes (abfd)[r_index];
5569
5570 /* If the relocation is against a static symbol it must be within
5571 the current section and so cannot be a cross ARM/Thumb relocation. */
5572 if (h == NULL)
5573 continue;
5574
5575 /* If the call will go through a PLT entry then we do not need
5576 glue. */
5577 if (globals->splt != NULL && h->plt.offset != (bfd_vma) -1)
5578 continue;
5579
5580 switch (r_type)
5581 {
5582 case R_ARM_PC24:
5583 /* This one is a call from arm code. We need to look up
5584 the target of the call. If it is a thumb target, we
5585 insert glue. */
5586 if (ELF_ST_TYPE (h->type) == STT_ARM_TFUNC)
5587 record_arm_to_thumb_glue (link_info, h);
5588 break;
5589
5590 default:
5591 abort ();
5592 }
5593 }
5594
5595 if (contents != NULL
5596 && elf_section_data (sec)->this_hdr.contents != contents)
5597 free (contents);
5598 contents = NULL;
5599
5600 if (internal_relocs != NULL
5601 && elf_section_data (sec)->relocs != internal_relocs)
5602 free (internal_relocs);
5603 internal_relocs = NULL;
5604 }
5605
5606 return TRUE;
5607
5608 error_return:
5609 if (contents != NULL
5610 && elf_section_data (sec)->this_hdr.contents != contents)
5611 free (contents);
5612 if (internal_relocs != NULL
5613 && elf_section_data (sec)->relocs != internal_relocs)
5614 free (internal_relocs);
5615
5616 return FALSE;
5617 }
5618 #endif
5619
5620
5621 /* Initialise maps of ARM/Thumb/data for input BFDs. */
5622
5623 void
5624 bfd_elf32_arm_init_maps (bfd *abfd)
5625 {
5626 Elf_Internal_Sym *isymbuf;
5627 Elf_Internal_Shdr *hdr;
5628 unsigned int i, localsyms;
5629
5630 /* PR 7093: Make sure that we are dealing with an arm elf binary. */
5631 if (! is_arm_elf (abfd))
5632 return;
5633
5634 if ((abfd->flags & DYNAMIC) != 0)
5635 return;
5636
5637 hdr = & elf_symtab_hdr (abfd);
5638 localsyms = hdr->sh_info;
5639
5640 /* Obtain a buffer full of symbols for this BFD. The hdr->sh_info field
5641 should contain the number of local symbols, which should come before any
5642 global symbols. Mapping symbols are always local. */
5643 isymbuf = bfd_elf_get_elf_syms (abfd, hdr, localsyms, 0, NULL, NULL,
5644 NULL);
5645
5646 /* No internal symbols read? Skip this BFD. */
5647 if (isymbuf == NULL)
5648 return;
5649
5650 for (i = 0; i < localsyms; i++)
5651 {
5652 Elf_Internal_Sym *isym = &isymbuf[i];
5653 asection *sec = bfd_section_from_elf_index (abfd, isym->st_shndx);
5654 const char *name;
5655
5656 if (sec != NULL
5657 && ELF_ST_BIND (isym->st_info) == STB_LOCAL)
5658 {
5659 name = bfd_elf_string_from_elf_section (abfd,
5660 hdr->sh_link, isym->st_name);
5661
5662 if (bfd_is_arm_special_symbol_name (name,
5663 BFD_ARM_SPECIAL_SYM_TYPE_MAP))
5664 elf32_arm_section_map_add (sec, name[1], isym->st_value);
5665 }
5666 }
5667 }
5668
5669
5670 /* Auto-select enabling of Cortex-A8 erratum fix if the user didn't explicitly
5671 say what they wanted. */
5672
5673 void
5674 bfd_elf32_arm_set_cortex_a8_fix (bfd *obfd, struct bfd_link_info *link_info)
5675 {
5676 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
5677 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
5678
5679 if (globals == NULL)
5680 return;
5681
5682 if (globals->fix_cortex_a8 == -1)
5683 {
5684 /* Turn on Cortex-A8 erratum workaround for ARMv7-A. */
5685 if (out_attr[Tag_CPU_arch].i == TAG_CPU_ARCH_V7
5686 && (out_attr[Tag_CPU_arch_profile].i == 'A'
5687 || out_attr[Tag_CPU_arch_profile].i == 0))
5688 globals->fix_cortex_a8 = 1;
5689 else
5690 globals->fix_cortex_a8 = 0;
5691 }
5692 }
5693
5694
5695 void
5696 bfd_elf32_arm_set_vfp11_fix (bfd *obfd, struct bfd_link_info *link_info)
5697 {
5698 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
5699 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
5700
5701 if (globals == NULL)
5702 return;
5703 /* We assume that ARMv7+ does not need the VFP11 denorm erratum fix. */
5704 if (out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V7)
5705 {
5706 switch (globals->vfp11_fix)
5707 {
5708 case BFD_ARM_VFP11_FIX_DEFAULT:
5709 case BFD_ARM_VFP11_FIX_NONE:
5710 globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
5711 break;
5712
5713 default:
5714 /* Give a warning, but do as the user requests anyway. */
5715 (*_bfd_error_handler) (_("%B: warning: selected VFP11 erratum "
5716 "workaround is not necessary for target architecture"), obfd);
5717 }
5718 }
5719 else if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_DEFAULT)
5720 /* For earlier architectures, we might need the workaround, but do not
 5721      enable it by default. If the user is running on broken hardware, they
5722 must enable the erratum fix explicitly. */
5723 globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
5724 }
5725
5726
5727 enum bfd_arm_vfp11_pipe
5728 {
5729 VFP11_FMAC,
5730 VFP11_LS,
5731 VFP11_DS,
5732 VFP11_BAD
5733 };
5734
5735 /* Return a VFP register number. This is encoded as RX:X for single-precision
5736 registers, or X:RX for double-precision registers, where RX is the group of
5737 four bits in the instruction encoding and X is the single extension bit.
5738 RX and X fields are specified using their lowest (starting) bit. The return
5739 value is:
5740
5741 0...31: single-precision registers s0...s31
5742 32...63: double-precision registers d0...d31.
5743
5744 Although X should be zero for VFP11 (encoding d0...d15 only), we might
5745 encounter VFP3 instructions, so we allow the full range for DP registers. */
5746
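/* For example, with RX at bit 0 and X at bit 5 (the Fm operand), insn
   bits 3:0 = 0x3 together with bit 5 = 1 encode s7 in single precision,
   or d19 (returned as 32 + 19 = 51) in double precision. */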
5747 static unsigned int
5748 bfd_arm_vfp11_regno (unsigned int insn, bfd_boolean is_double, unsigned int rx,
5749 unsigned int x)
5750 {
5751 if (is_double)
5752 return (((insn >> rx) & 0xf) | (((insn >> x) & 1) << 4)) + 32;
5753 else
5754 return (((insn >> rx) & 0xf) << 1) | ((insn >> x) & 1);
5755 }
5756
5757 /* Set bits in *WMASK according to a register number REG as encoded by
5758 bfd_arm_vfp11_regno(). Ignore d16-d31. */
5759
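/* For example, s5 sets bit 5 of the mask, while d1 (register number 33)
   sets bits 2 and 3, i.e. its two single-precision halves s2 and s3. */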
5760 static void
5761 bfd_arm_vfp11_write_mask (unsigned int *wmask, unsigned int reg)
5762 {
5763 if (reg < 32)
5764 *wmask |= 1 << reg;
5765 else if (reg < 48)
5766 *wmask |= 3 << ((reg - 32) * 2);
5767 }
5768
5769 /* Return TRUE if WMASK overwrites anything in REGS. */
5770
5771 static bfd_boolean
5772 bfd_arm_vfp11_antidependency (unsigned int wmask, int *regs, int numregs)
5773 {
5774 int i;
5775
5776 for (i = 0; i < numregs; i++)
5777 {
5778 unsigned int reg = regs[i];
5779
5780 if (reg < 32 && (wmask & (1 << reg)) != 0)
5781 return TRUE;
5782
5783 reg -= 32;
5784
5785 if (reg >= 16)
5786 continue;
5787
5788 if ((wmask & (3 << (reg * 2))) != 0)
5789 return TRUE;
5790 }
5791
5792 return FALSE;
5793 }
5794
5795 /* In this function, we're interested in two things: finding input registers
5796 for VFP data-processing instructions, and finding the set of registers which
5797 arbitrary VFP instructions may write to. We use a 32-bit unsigned int to
5798 hold the written set, so FLDM etc. are easy to deal with (we're only
 5799    interested in 32 SP registers or 16 DP registers, due to the VFP version
 5800    implemented by the chip in question). DP registers are marked by setting
 5801    both SP registers in the write mask. */
5802
5803 static enum bfd_arm_vfp11_pipe
5804 bfd_arm_vfp11_insn_decode (unsigned int insn, unsigned int *destmask, int *regs,
5805 int *numregs)
5806 {
5807 enum bfd_arm_vfp11_pipe vpipe = VFP11_BAD;
5808 bfd_boolean is_double = ((insn & 0xf00) == 0xb00) ? 1 : 0;
5809
5810 if ((insn & 0x0f000e10) == 0x0e000a00) /* A data-processing insn. */
5811 {
5812 unsigned int pqrs;
5813 unsigned int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
5814 unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);
5815
5816 pqrs = ((insn & 0x00800000) >> 20)
5817 | ((insn & 0x00300000) >> 19)
5818 | ((insn & 0x00000040) >> 6);
5819
5820 switch (pqrs)
5821 {
5822 case 0: /* fmac[sd]. */
5823 case 1: /* fnmac[sd]. */
5824 case 2: /* fmsc[sd]. */
5825 case 3: /* fnmsc[sd]. */
5826 vpipe = VFP11_FMAC;
5827 bfd_arm_vfp11_write_mask (destmask, fd);
5828 regs[0] = fd;
5829 regs[1] = bfd_arm_vfp11_regno (insn, is_double, 16, 7); /* Fn. */
5830 regs[2] = fm;
5831 *numregs = 3;
5832 break;
5833
5834 case 4: /* fmul[sd]. */
5835 case 5: /* fnmul[sd]. */
5836 case 6: /* fadd[sd]. */
5837 case 7: /* fsub[sd]. */
5838 vpipe = VFP11_FMAC;
5839 goto vfp_binop;
5840
5841 case 8: /* fdiv[sd]. */
5842 vpipe = VFP11_DS;
5843 vfp_binop:
5844 bfd_arm_vfp11_write_mask (destmask, fd);
5845 regs[0] = bfd_arm_vfp11_regno (insn, is_double, 16, 7); /* Fn. */
5846 regs[1] = fm;
5847 *numregs = 2;
5848 break;
5849
5850 case 15: /* extended opcode. */
5851 {
5852 unsigned int extn = ((insn >> 15) & 0x1e)
5853 | ((insn >> 7) & 1);
5854
5855 switch (extn)
5856 {
5857 case 0: /* fcpy[sd]. */
5858 case 1: /* fabs[sd]. */
5859 case 2: /* fneg[sd]. */
5860 case 8: /* fcmp[sd]. */
5861 case 9: /* fcmpe[sd]. */
5862 case 10: /* fcmpz[sd]. */
5863 case 11: /* fcmpez[sd]. */
5864 case 16: /* fuito[sd]. */
5865 case 17: /* fsito[sd]. */
5866 case 24: /* ftoui[sd]. */
5867 case 25: /* ftouiz[sd]. */
5868 case 26: /* ftosi[sd]. */
5869 case 27: /* ftosiz[sd]. */
5870 /* These instructions will not bounce due to underflow. */
5871 *numregs = 0;
5872 vpipe = VFP11_FMAC;
5873 break;
5874
5875 case 3: /* fsqrt[sd]. */
5876 /* fsqrt cannot underflow, but it can (perhaps) overwrite
5877 registers to cause the erratum in previous instructions. */
5878 bfd_arm_vfp11_write_mask (destmask, fd);
5879 vpipe = VFP11_DS;
5880 break;
5881
5882 case 15: /* fcvt{ds,sd}. */
5883 {
5884 int rnum = 0;
5885
5886 bfd_arm_vfp11_write_mask (destmask, fd);
5887
5888 /* Only FCVTSD can underflow. */
5889 if ((insn & 0x100) != 0)
5890 regs[rnum++] = fm;
5891
5892 *numregs = rnum;
5893
5894 vpipe = VFP11_FMAC;
5895 }
5896 break;
5897
5898 default:
5899 return VFP11_BAD;
5900 }
5901 }
5902 break;
5903
5904 default:
5905 return VFP11_BAD;
5906 }
5907 }
5908 /* Two-register transfer. */
5909 else if ((insn & 0x0fe00ed0) == 0x0c400a10)
5910 {
5911 unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);
5912
5913 if ((insn & 0x100000) == 0)
5914 {
5915 if (is_double)
5916 bfd_arm_vfp11_write_mask (destmask, fm);
5917 else
5918 {
5919 bfd_arm_vfp11_write_mask (destmask, fm);
5920 bfd_arm_vfp11_write_mask (destmask, fm + 1);
5921 }
5922 }
5923
5924 vpipe = VFP11_LS;
5925 }
5926 else if ((insn & 0x0e100e00) == 0x0c100a00) /* A load insn. */
5927 {
5928 int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
5929 unsigned int puw = ((insn >> 21) & 0x1) | (((insn >> 23) & 3) << 1);
5930
5931 switch (puw)
5932 {
5933 case 0: /* Two-reg transfer. We should catch these above. */
5934 abort ();
5935
5936 case 2: /* fldm[sdx]. */
5937 case 3:
5938 case 5:
5939 {
5940 unsigned int i, offset = insn & 0xff;
5941
5942 if (is_double)
5943 offset >>= 1;
5944
5945 for (i = fd; i < fd + offset; i++)
5946 bfd_arm_vfp11_write_mask (destmask, i);
5947 }
5948 break;
5949
5950 case 4: /* fld[sd]. */
5951 case 6:
5952 bfd_arm_vfp11_write_mask (destmask, fd);
5953 break;
5954
5955 default:
5956 return VFP11_BAD;
5957 }
5958
5959 vpipe = VFP11_LS;
5960 }
5961 /* Single-register transfer. Note L==0. */
5962 else if ((insn & 0x0f100e10) == 0x0e000a10)
5963 {
5964 unsigned int opcode = (insn >> 21) & 7;
5965 unsigned int fn = bfd_arm_vfp11_regno (insn, is_double, 16, 7);
5966
5967 switch (opcode)
5968 {
5969 case 0: /* fmsr/fmdlr. */
5970 case 1: /* fmdhr. */
5971 /* Mark fmdhr and fmdlr as writing to the whole of the DP
5972 destination register. I don't know if this is exactly right,
5973 but it is the conservative choice. */
5974 bfd_arm_vfp11_write_mask (destmask, fn);
5975 break;
5976
5977 case 7: /* fmxr. */
5978 break;
5979 }
5980
5981 vpipe = VFP11_LS;
5982 }
5983
5984 return vpipe;
5985 }
5986
5987
5988 static int elf32_arm_compare_mapping (const void * a, const void * b);
5989
5990
5991 /* Look for potentially-troublesome code sequences which might trigger the
5992 VFP11 denormal/antidependency erratum. See, e.g., the ARM1136 errata sheet
5993 (available from ARM) for details of the erratum. A short version is
5994 described in ld.texinfo. */
5995
5996 bfd_boolean
5997 bfd_elf32_arm_vfp11_erratum_scan (bfd *abfd, struct bfd_link_info *link_info)
5998 {
5999 asection *sec;
6000 bfd_byte *contents = NULL;
6001 int state = 0;
6002 int regs[3], numregs = 0;
6003 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
 6004   int use_vector = (globals != NULL && globals->vfp11_fix == BFD_ARM_VFP11_FIX_VECTOR);
6005
6006 if (globals == NULL)
6007 return FALSE;
6008
6009 /* We use a simple FSM to match troublesome VFP11 instruction sequences.
6010 The states transition as follows:
6011
6012 0 -> 1 (vector) or 0 -> 2 (scalar)
6013 A VFP FMAC-pipeline instruction has been seen. Fill
6014 regs[0]..regs[numregs-1] with its input operands. Remember this
6015 instruction in 'first_fmac'.
6016
6017 1 -> 2
6018 Any instruction, except for a VFP instruction which overwrites
6019 regs[*].
6020
6021 1 -> 3 [ -> 0 ] or
6022 2 -> 3 [ -> 0 ]
6023 A VFP instruction has been seen which overwrites any of regs[*].
6024 We must make a veneer! Reset state to 0 before examining next
6025 instruction.
6026
6027 2 -> 0
6028 If we fail to match anything in state 2, reset to state 0 and reset
6029 the instruction pointer to the instruction after 'first_fmac'.
6030
6031 If the VFP11 vector mode is in use, there must be at least two unrelated
6032 instructions between anti-dependent VFP11 instructions to properly avoid
6033 triggering the erratum, hence the use of the extra state 1. */
6034
6035 /* If we are only performing a partial link do not bother
6036 to construct any glue. */
6037 if (link_info->relocatable)
6038 return TRUE;
6039
6040 /* Skip if this bfd does not correspond to an ELF image. */
6041 if (! is_arm_elf (abfd))
6042 return TRUE;
6043
6044 /* We should have chosen a fix type by the time we get here. */
6045 BFD_ASSERT (globals->vfp11_fix != BFD_ARM_VFP11_FIX_DEFAULT);
6046
6047 if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_NONE)
6048 return TRUE;
6049
6050 /* Skip this BFD if it corresponds to an executable or dynamic object. */
6051 if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
6052 return TRUE;
6053
6054 for (sec = abfd->sections; sec != NULL; sec = sec->next)
6055 {
6056 unsigned int i, span, first_fmac = 0, veneer_of_insn = 0;
6057 struct _arm_elf_section_data *sec_data;
6058
6059 /* If we don't have executable progbits, we're not interested in this
6060 section. Also skip if section is to be excluded. */
6061 if (elf_section_type (sec) != SHT_PROGBITS
6062 || (elf_section_flags (sec) & SHF_EXECINSTR) == 0
6063 || (sec->flags & SEC_EXCLUDE) != 0
6064 || sec->sec_info_type == ELF_INFO_TYPE_JUST_SYMS
6065 || sec->output_section == bfd_abs_section_ptr
6066 || strcmp (sec->name, VFP11_ERRATUM_VENEER_SECTION_NAME) == 0)
6067 continue;
6068
6069 sec_data = elf32_arm_section_data (sec);
6070
6071 if (sec_data->mapcount == 0)
6072 continue;
6073
6074 if (elf_section_data (sec)->this_hdr.contents != NULL)
6075 contents = elf_section_data (sec)->this_hdr.contents;
6076 else if (! bfd_malloc_and_get_section (abfd, sec, &contents))
6077 goto error_return;
6078
6079 qsort (sec_data->map, sec_data->mapcount, sizeof (elf32_arm_section_map),
6080 elf32_arm_compare_mapping);
6081
6082 for (span = 0; span < sec_data->mapcount; span++)
6083 {
6084 unsigned int span_start = sec_data->map[span].vma;
6085 unsigned int span_end = (span == sec_data->mapcount - 1)
6086 ? sec->size : sec_data->map[span + 1].vma;
6087 char span_type = sec_data->map[span].type;
6088
6089 /* FIXME: Only ARM mode is supported at present. We may need to
6090 support Thumb-2 mode also at some point. */
6091 if (span_type != 'a')
6092 continue;
6093
6094 for (i = span_start; i < span_end;)
6095 {
6096 unsigned int next_i = i + 4;
6097 unsigned int insn = bfd_big_endian (abfd)
6098 ? (contents[i] << 24)
6099 | (contents[i + 1] << 16)
6100 | (contents[i + 2] << 8)
6101 | contents[i + 3]
6102 : (contents[i + 3] << 24)
6103 | (contents[i + 2] << 16)
6104 | (contents[i + 1] << 8)
6105 | contents[i];
6106 unsigned int writemask = 0;
6107 enum bfd_arm_vfp11_pipe vpipe;
6108
6109 switch (state)
6110 {
6111 case 0:
6112 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask, regs,
6113 &numregs);
6114 /* I'm assuming the VFP11 erratum can trigger with denorm
6115 operands on either the FMAC or the DS pipeline. This might
6116 lead to slightly overenthusiastic veneer insertion. */
6117 if (vpipe == VFP11_FMAC || vpipe == VFP11_DS)
6118 {
6119 state = use_vector ? 1 : 2;
6120 first_fmac = i;
6121 veneer_of_insn = insn;
6122 }
6123 break;
6124
6125 case 1:
6126 {
6127 int other_regs[3], other_numregs;
6128 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
6129 other_regs,
6130 &other_numregs);
6131 if (vpipe != VFP11_BAD
6132 && bfd_arm_vfp11_antidependency (writemask, regs,
6133 numregs))
6134 state = 3;
6135 else
6136 state = 2;
6137 }
6138 break;
6139
6140 case 2:
6141 {
6142 int other_regs[3], other_numregs;
6143 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
6144 other_regs,
6145 &other_numregs);
6146 if (vpipe != VFP11_BAD
6147 && bfd_arm_vfp11_antidependency (writemask, regs,
6148 numregs))
6149 state = 3;
6150 else
6151 {
6152 state = 0;
6153 next_i = first_fmac + 4;
6154 }
6155 }
6156 break;
6157
6158 case 3:
6159 abort (); /* Should be unreachable. */
6160 }
6161
6162 if (state == 3)
6163 {
 6164               elf32_vfp11_erratum_list *newerr = (elf32_vfp11_erratum_list *)
6165 bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
6166
6167 elf32_arm_section_data (sec)->erratumcount += 1;
6168
6169 newerr->u.b.vfp_insn = veneer_of_insn;
6170
6171 switch (span_type)
6172 {
6173 case 'a':
6174 newerr->type = VFP11_ERRATUM_BRANCH_TO_ARM_VENEER;
6175 break;
6176
6177 default:
6178 abort ();
6179 }
6180
6181 record_vfp11_erratum_veneer (link_info, newerr, abfd, sec,
6182 first_fmac);
6183
6184 newerr->vma = -1;
6185
6186 newerr->next = sec_data->erratumlist;
6187 sec_data->erratumlist = newerr;
6188
6189 state = 0;
6190 }
6191
6192 i = next_i;
6193 }
6194 }
6195
6196 if (contents != NULL
6197 && elf_section_data (sec)->this_hdr.contents != contents)
6198 free (contents);
6199 contents = NULL;
6200 }
6201
6202 return TRUE;
6203
6204 error_return:
6205 if (contents != NULL
6206 && elf_section_data (sec)->this_hdr.contents != contents)
6207 free (contents);
6208
6209 return FALSE;
6210 }
6211
6212 /* Find virtual-memory addresses for VFP11 erratum veneers and return locations
6213 after sections have been laid out, using specially-named symbols. */
6214
6215 void
6216 bfd_elf32_arm_vfp11_fix_veneer_locations (bfd *abfd,
6217 struct bfd_link_info *link_info)
6218 {
6219 asection *sec;
6220 struct elf32_arm_link_hash_table *globals;
6221 char *tmp_name;
6222
6223 if (link_info->relocatable)
6224 return;
6225
6226 /* Skip if this bfd does not correspond to an ELF image. */
6227 if (! is_arm_elf (abfd))
6228 return;
6229
6230 globals = elf32_arm_hash_table (link_info);
6231 if (globals == NULL)
6232 return;
6233
6234 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
6235 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
6236
6237 for (sec = abfd->sections; sec != NULL; sec = sec->next)
6238 {
6239 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
6240 elf32_vfp11_erratum_list *errnode = sec_data->erratumlist;
6241
6242 for (; errnode != NULL; errnode = errnode->next)
6243 {
6244 struct elf_link_hash_entry *myh;
6245 bfd_vma vma;
6246
6247 switch (errnode->type)
6248 {
6249 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
6250 case VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER:
6251 /* Find veneer symbol. */
6252 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
6253 errnode->u.b.veneer->u.v.id);
6254
6255 myh = elf_link_hash_lookup
6256 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
6257
6258 if (myh == NULL)
6259 (*_bfd_error_handler) (_("%B: unable to find VFP11 veneer "
6260 "`%s'"), abfd, tmp_name);
6261
6262 vma = myh->root.u.def.section->output_section->vma
6263 + myh->root.u.def.section->output_offset
6264 + myh->root.u.def.value;
6265
6266 errnode->u.b.veneer->vma = vma;
6267 break;
6268
6269 case VFP11_ERRATUM_ARM_VENEER:
6270 case VFP11_ERRATUM_THUMB_VENEER:
6271 /* Find return location. */
6272 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
6273 errnode->u.v.id);
6274
6275 myh = elf_link_hash_lookup
6276 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
6277
6278 if (myh == NULL)
6279 (*_bfd_error_handler) (_("%B: unable to find VFP11 veneer "
6280 "`%s'"), abfd, tmp_name);
6281
6282 vma = myh->root.u.def.section->output_section->vma
6283 + myh->root.u.def.section->output_offset
6284 + myh->root.u.def.value;
6285
6286 errnode->u.v.branch->vma = vma;
6287 break;
6288
6289 default:
6290 abort ();
6291 }
6292 }
6293 }
6294
6295 free (tmp_name);
6296 }
6297
6298
6299 /* Set target relocation values needed during linking. */
6300
6301 void
6302 bfd_elf32_arm_set_target_relocs (struct bfd *output_bfd,
6303 struct bfd_link_info *link_info,
6304 int target1_is_rel,
6305 char * target2_type,
6306 int fix_v4bx,
6307 int use_blx,
6308 bfd_arm_vfp11_fix vfp11_fix,
6309 int no_enum_warn, int no_wchar_warn,
6310 int pic_veneer, int fix_cortex_a8)
6311 {
6312 struct elf32_arm_link_hash_table *globals;
6313
6314 globals = elf32_arm_hash_table (link_info);
6315 if (globals == NULL)
6316 return;
6317
6318 globals->target1_is_rel = target1_is_rel;
6319 if (strcmp (target2_type, "rel") == 0)
6320 globals->target2_reloc = R_ARM_REL32;
6321 else if (strcmp (target2_type, "abs") == 0)
6322 globals->target2_reloc = R_ARM_ABS32;
6323 else if (strcmp (target2_type, "got-rel") == 0)
6324 globals->target2_reloc = R_ARM_GOT_PREL;
6325 else
6326 {
6327 _bfd_error_handler (_("Invalid TARGET2 relocation type '%s'."),
6328 target2_type);
6329 }
6330 globals->fix_v4bx = fix_v4bx;
6331 globals->use_blx |= use_blx;
6332 globals->vfp11_fix = vfp11_fix;
6333 globals->pic_veneer = pic_veneer;
6334 globals->fix_cortex_a8 = fix_cortex_a8;
6335
6336 BFD_ASSERT (is_arm_elf (output_bfd));
6337 elf_arm_tdata (output_bfd)->no_enum_size_warning = no_enum_warn;
6338 elf_arm_tdata (output_bfd)->no_wchar_size_warning = no_wchar_warn;
6339 }
6340
6341 /* Replace the target offset of a Thumb bl or b.w instruction. */
6342
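/* The 32-bit Thumb branch encoding spreads the offset over both
   halfwords: the sign bit and imm10 go in the first halfword, while
   J1/J2 (each the inverse of the corresponding offset bit, XORed with
   the sign) and imm11 go in the second. */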
6343 static void
6344 insert_thumb_branch (bfd *abfd, long int offset, bfd_byte *insn)
6345 {
6346 bfd_vma upper;
6347 bfd_vma lower;
6348 int reloc_sign;
6349
6350 BFD_ASSERT ((offset & 1) == 0);
6351
6352 upper = bfd_get_16 (abfd, insn);
6353 lower = bfd_get_16 (abfd, insn + 2);
6354 reloc_sign = (offset < 0) ? 1 : 0;
6355 upper = (upper & ~(bfd_vma) 0x7ff)
6356 | ((offset >> 12) & 0x3ff)
6357 | (reloc_sign << 10);
6358 lower = (lower & ~(bfd_vma) 0x2fff)
6359 | (((!((offset >> 23) & 1)) ^ reloc_sign) << 13)
6360 | (((!((offset >> 22) & 1)) ^ reloc_sign) << 11)
6361 | ((offset >> 1) & 0x7ff);
6362 bfd_put_16 (abfd, upper, insn);
6363 bfd_put_16 (abfd, lower, insn + 2);
6364 }
6365
6366 /* Thumb code calling an ARM function. */
6367
6368 static int
6369 elf32_thumb_to_arm_stub (struct bfd_link_info * info,
6370 const char * name,
6371 bfd * input_bfd,
6372 bfd * output_bfd,
6373 asection * input_section,
6374 bfd_byte * hit_data,
6375 asection * sym_sec,
6376 bfd_vma offset,
6377 bfd_signed_vma addend,
6378 bfd_vma val,
6379 char **error_message)
6380 {
6381 asection * s = 0;
6382 bfd_vma my_offset;
6383 long int ret_offset;
6384 struct elf_link_hash_entry * myh;
6385 struct elf32_arm_link_hash_table * globals;
6386
6387 myh = find_thumb_glue (info, name, error_message);
6388 if (myh == NULL)
6389 return FALSE;
6390
6391 globals = elf32_arm_hash_table (info);
6392 BFD_ASSERT (globals != NULL);
6393 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6394
6395 my_offset = myh->root.u.def.value;
6396
6397 s = bfd_get_section_by_name (globals->bfd_of_glue_owner,
6398 THUMB2ARM_GLUE_SECTION_NAME);
6399
6400 BFD_ASSERT (s != NULL);
6401 BFD_ASSERT (s->contents != NULL);
6402 BFD_ASSERT (s->output_section != NULL);
6403
6404 if ((my_offset & 0x01) == 0x01)
6405 {
6406 if (sym_sec != NULL
6407 && sym_sec->owner != NULL
6408 && !INTERWORK_FLAG (sym_sec->owner))
6409 {
6410 (*_bfd_error_handler)
6411 (_("%B(%s): warning: interworking not enabled.\n"
6412 " first occurrence: %B: thumb call to arm"),
6413 sym_sec->owner, input_bfd, name);
6414
6415 return FALSE;
6416 }
6417
6418 --my_offset;
6419 myh->root.u.def.value = my_offset;
6420
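      /* The stub is "bx pc" followed by a padding nop: executing the bx
	 in Thumb state transfers to the ARM-mode branch placed 4 bytes
	 below, which then jumps to the real destination. */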
6421 put_thumb_insn (globals, output_bfd, (bfd_vma) t2a1_bx_pc_insn,
6422 s->contents + my_offset);
6423
6424 put_thumb_insn (globals, output_bfd, (bfd_vma) t2a2_noop_insn,
6425 s->contents + my_offset + 2);
6426
6427 ret_offset =
6428 /* Address of destination of the stub. */
6429 ((bfd_signed_vma) val)
6430 - ((bfd_signed_vma)
6431 /* Offset from the start of the current section
6432 to the start of the stubs. */
6433 (s->output_offset
6434 /* Offset of the start of this stub from the start of the stubs. */
6435 + my_offset
6436 /* Address of the start of the current section. */
6437 + s->output_section->vma)
6438 /* The branch instruction is 4 bytes into the stub. */
6439 + 4
6440 /* ARM branches work from the pc of the instruction + 8. */
6441 + 8);
6442
6443 put_arm_insn (globals, output_bfd,
6444 (bfd_vma) t2a3_b_insn | ((ret_offset >> 2) & 0x00FFFFFF),
6445 s->contents + my_offset + 4);
6446 }
6447
6448 BFD_ASSERT (my_offset <= globals->thumb_glue_size);
6449
6450 /* Now go back and fix up the original BL insn to point to here. */
6451 ret_offset =
6452 /* Address of where the stub is located. */
6453 (s->output_section->vma + s->output_offset + my_offset)
6454 /* Address of where the BL is located. */
6455 - (input_section->output_section->vma + input_section->output_offset
6456 + offset)
6457 /* Addend in the relocation. */
6458 - addend
6459 /* Biassing for PC-relative addressing. */
6460 - 8;
6461
6462 insert_thumb_branch (input_bfd, ret_offset, hit_data - input_section->vma);
6463
6464 return TRUE;
6465 }
6466
6467 /* Populate an Arm to Thumb stub. Returns the stub symbol. */
6468
6469 static struct elf_link_hash_entry *
6470 elf32_arm_create_thumb_stub (struct bfd_link_info * info,
6471 const char * name,
6472 bfd * input_bfd,
6473 bfd * output_bfd,
6474 asection * sym_sec,
6475 bfd_vma val,
6476 asection * s,
6477 char ** error_message)
6478 {
6479 bfd_vma my_offset;
6480 long int ret_offset;
6481 struct elf_link_hash_entry * myh;
6482 struct elf32_arm_link_hash_table * globals;
6483
6484 myh = find_arm_glue (info, name, error_message);
6485 if (myh == NULL)
6486 return NULL;
6487
6488 globals = elf32_arm_hash_table (info);
6489 BFD_ASSERT (globals != NULL);
6490 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6491
6492 my_offset = myh->root.u.def.value;
6493
6494 if ((my_offset & 0x01) == 0x01)
6495 {
6496 if (sym_sec != NULL
6497 && sym_sec->owner != NULL
6498 && !INTERWORK_FLAG (sym_sec->owner))
6499 {
6500 (*_bfd_error_handler)
6501 (_("%B(%s): warning: interworking not enabled.\n"
6502 " first occurrence: %B: arm call to thumb"),
6503 sym_sec->owner, input_bfd, name);
6504 }
6505
6506 --my_offset;
6507 myh->root.u.def.value = my_offset;
6508
6509 if (info->shared || globals->root.is_relocatable_executable
6510 || globals->pic_veneer)
6511 {
6512 /* For relocatable objects we can't use absolute addresses,
6513 so construct the address from a relative offset. */
6514 /* TODO: If the offset is small it's probably worth
6515 constructing the address with adds. */
6516 put_arm_insn (globals, output_bfd, (bfd_vma) a2t1p_ldr_insn,
6517 s->contents + my_offset);
6518 put_arm_insn (globals, output_bfd, (bfd_vma) a2t2p_add_pc_insn,
6519 s->contents + my_offset + 4);
6520 put_arm_insn (globals, output_bfd, (bfd_vma) a2t3p_bx_r12_insn,
6521 s->contents + my_offset + 8);
6522 /* Adjust the offset by 4 for the position of the add,
6523 and 8 for the pipeline offset. */
6524 ret_offset = (val - (s->output_offset
6525 + s->output_section->vma
6526 + my_offset + 12))
6527 | 1;
6528 bfd_put_32 (output_bfd, ret_offset,
6529 s->contents + my_offset + 12);
6530 }
6531 else if (globals->use_blx)
6532 {
6533 put_arm_insn (globals, output_bfd, (bfd_vma) a2t1v5_ldr_insn,
6534 s->contents + my_offset);
6535
6536 /* It's a thumb address. Add the low order bit. */
6537 bfd_put_32 (output_bfd, val | a2t2v5_func_addr_insn,
6538 s->contents + my_offset + 4);
6539 }
6540 else
6541 {
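	  /* Classic ARMv4T sequence: ldr ip, [pc] / bx ip / .word target,
	     where the literal word holds the Thumb destination with
	     bit 0 set. */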
6542 put_arm_insn (globals, output_bfd, (bfd_vma) a2t1_ldr_insn,
6543 s->contents + my_offset);
6544
6545 put_arm_insn (globals, output_bfd, (bfd_vma) a2t2_bx_r12_insn,
6546 s->contents + my_offset + 4);
6547
6548 /* It's a thumb address. Add the low order bit. */
6549 bfd_put_32 (output_bfd, val | a2t3_func_addr_insn,
6550 s->contents + my_offset + 8);
6551
6552 my_offset += 12;
6553 }
6554 }
6555
6556 BFD_ASSERT (my_offset <= globals->arm_glue_size);
6557
6558 return myh;
6559 }
6560
6561 /* Arm code calling a Thumb function. */
6562
6563 static int
6564 elf32_arm_to_thumb_stub (struct bfd_link_info * info,
6565 const char * name,
6566 bfd * input_bfd,
6567 bfd * output_bfd,
6568 asection * input_section,
6569 bfd_byte * hit_data,
6570 asection * sym_sec,
6571 bfd_vma offset,
6572 bfd_signed_vma addend,
6573 bfd_vma val,
6574 char **error_message)
6575 {
6576 unsigned long int tmp;
6577 bfd_vma my_offset;
6578 asection * s;
6579 long int ret_offset;
6580 struct elf_link_hash_entry * myh;
6581 struct elf32_arm_link_hash_table * globals;
6582
6583 globals = elf32_arm_hash_table (info);
6584 BFD_ASSERT (globals != NULL);
6585 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6586
6587 s = bfd_get_section_by_name (globals->bfd_of_glue_owner,
6588 ARM2THUMB_GLUE_SECTION_NAME);
6589 BFD_ASSERT (s != NULL);
6590 BFD_ASSERT (s->contents != NULL);
6591 BFD_ASSERT (s->output_section != NULL);
6592
6593 myh = elf32_arm_create_thumb_stub (info, name, input_bfd, output_bfd,
6594 sym_sec, val, s, error_message);
6595 if (!myh)
6596 return FALSE;
6597
6598 my_offset = myh->root.u.def.value;
6599 tmp = bfd_get_32 (input_bfd, hit_data);
6600 tmp = tmp & 0xFF000000;
6601
6602 /* Somehow these are both 4 too far, so subtract 8. */
6603 ret_offset = (s->output_offset
6604 + my_offset
6605 + s->output_section->vma
6606 - (input_section->output_offset
6607 + input_section->output_section->vma
6608 + offset + addend)
6609 - 8);
6610
6611 tmp = tmp | ((ret_offset >> 2) & 0x00FFFFFF);
6612
6613 bfd_put_32 (output_bfd, (bfd_vma) tmp, hit_data - input_section->vma);
6614
6615 return TRUE;
6616 }
6617
6618 /* Populate Arm stub for an exported Thumb function. */
6619
6620 static bfd_boolean
6621 elf32_arm_to_thumb_export_stub (struct elf_link_hash_entry *h, void * inf)
6622 {
6623 struct bfd_link_info * info = (struct bfd_link_info *) inf;
6624 asection * s;
6625 struct elf_link_hash_entry * myh;
6626 struct elf32_arm_link_hash_entry *eh;
6627 struct elf32_arm_link_hash_table * globals;
6628 asection *sec;
6629 bfd_vma val;
6630 char *error_message;
6631
6632 eh = elf32_arm_hash_entry (h);
6633 /* Allocate stubs for exported Thumb functions on v4t. */
6634 if (eh->export_glue == NULL)
6635 return TRUE;
6636
6637 globals = elf32_arm_hash_table (info);
6638 BFD_ASSERT (globals != NULL);
6639 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6640
6641 s = bfd_get_section_by_name (globals->bfd_of_glue_owner,
6642 ARM2THUMB_GLUE_SECTION_NAME);
6643 BFD_ASSERT (s != NULL);
6644 BFD_ASSERT (s->contents != NULL);
6645 BFD_ASSERT (s->output_section != NULL);
6646
6647 sec = eh->export_glue->root.u.def.section;
6648
6649 BFD_ASSERT (sec->output_section != NULL);
6650
6651 val = eh->export_glue->root.u.def.value + sec->output_offset
6652 + sec->output_section->vma;
6653
6654 myh = elf32_arm_create_thumb_stub (info, h->root.root.string,
6655 h->root.u.def.section->owner,
6656 globals->obfd, sec, val, s,
6657 &error_message);
6658 BFD_ASSERT (myh);
6659 return TRUE;
6660 }
6661
 6662 /* Populate ARMv4 BX veneers. Returns the absolute address of the veneer. */
6663
6664 static bfd_vma
6665 elf32_arm_bx_glue (struct bfd_link_info * info, int reg)
6666 {
6667 bfd_byte *p;
6668 bfd_vma glue_addr;
6669 asection *s;
6670 struct elf32_arm_link_hash_table *globals;
6671
6672 globals = elf32_arm_hash_table (info);
6673 BFD_ASSERT (globals != NULL);
6674 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6675
6676 s = bfd_get_section_by_name (globals->bfd_of_glue_owner,
6677 ARM_BX_GLUE_SECTION_NAME);
6678 BFD_ASSERT (s != NULL);
6679 BFD_ASSERT (s->contents != NULL);
6680 BFD_ASSERT (s->output_section != NULL);
6681
6682 BFD_ASSERT (globals->bx_glue_offset[reg] & 2);
6683
6684 glue_addr = globals->bx_glue_offset[reg] & ~(bfd_vma)3;
6685
6686 if ((globals->bx_glue_offset[reg] & 1) == 0)
6687 {
6688 p = s->contents + glue_addr;
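      /* The veneer is "tst rN, #1; moveq pc, rN; bx rN": ARM-state
	 targets (bit 0 clear) are reached through the moveq, so the bx
	 is only executed for Thumb-state targets. */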
6689 bfd_put_32 (globals->obfd, armbx1_tst_insn + (reg << 16), p);
6690 bfd_put_32 (globals->obfd, armbx2_moveq_insn + reg, p + 4);
6691 bfd_put_32 (globals->obfd, armbx3_bx_insn + reg, p + 8);
6692 globals->bx_glue_offset[reg] |= 1;
6693 }
6694
6695 return glue_addr + s->output_section->vma + s->output_offset;
6696 }
6697
6698 /* Generate Arm stubs for exported Thumb symbols. */
6699 static void
6700 elf32_arm_begin_write_processing (bfd *abfd ATTRIBUTE_UNUSED,
6701 struct bfd_link_info *link_info)
6702 {
6703 struct elf32_arm_link_hash_table * globals;
6704
6705 if (link_info == NULL)
6706 /* Ignore this if we are not called by the ELF backend linker. */
6707 return;
6708
6709 globals = elf32_arm_hash_table (link_info);
6710 if (globals == NULL)
6711 return;
6712
6713 /* If blx is available then exported Thumb symbols are OK and there is
6714 nothing to do. */
6715 if (globals->use_blx)
6716 return;
6717
6718 elf_link_hash_traverse (&globals->root, elf32_arm_to_thumb_export_stub,
6719 link_info);
6720 }
6721
6722 /* Some relocations map to different relocations depending on the
6723 target. Return the real relocation. */
6724
6725 static int
6726 arm_real_reloc_type (struct elf32_arm_link_hash_table * globals,
6727 int r_type)
6728 {
6729 switch (r_type)
6730 {
6731 case R_ARM_TARGET1:
6732 if (globals->target1_is_rel)
6733 return R_ARM_REL32;
6734 else
6735 return R_ARM_ABS32;
6736
6737 case R_ARM_TARGET2:
6738 return globals->target2_reloc;
6739
6740 default:
6741 return r_type;
6742 }
6743 }
6744
6745 /* Return the base VMA address which should be subtracted from real addresses
6746 when resolving @dtpoff relocation.
6747 This is PT_TLS segment p_vaddr. */
6748
6749 static bfd_vma
6750 dtpoff_base (struct bfd_link_info *info)
6751 {
6752 /* If tls_sec is NULL, we should have signalled an error already. */
6753 if (elf_hash_table (info)->tls_sec == NULL)
6754 return 0;
6755 return elf_hash_table (info)->tls_sec->vma;
6756 }
6757
6758 /* Return the relocation value for @tpoff relocation
6759 if STT_TLS virtual address is ADDRESS. */
6760
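/* ARM uses TLS variant 1: the thread pointer addresses an 8-byte TCB
   (TCB_SIZE) and the static TLS block follows it, suitably aligned,
   which is why the offset below is biased by the aligned TCB size. */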
6761 static bfd_vma
6762 tpoff (struct bfd_link_info *info, bfd_vma address)
6763 {
6764 struct elf_link_hash_table *htab = elf_hash_table (info);
6765 bfd_vma base;
6766
6767 /* If tls_sec is NULL, we should have signalled an error already. */
6768 if (htab->tls_sec == NULL)
6769 return 0;
6770 base = align_power ((bfd_vma) TCB_SIZE, htab->tls_sec->alignment_power);
6771 return address - htab->tls_sec->vma + base;
6772 }
6773
6774 /* Perform an R_ARM_ABS12 relocation on the field pointed to by DATA.
6775 VALUE is the relocation value. */
6776
6777 static bfd_reloc_status_type
6778 elf32_arm_abs12_reloc (bfd *abfd, void *data, bfd_vma value)
6779 {
6780 if (value > 0xfff)
6781 return bfd_reloc_overflow;
6782
6783 value |= bfd_get_32 (abfd, data) & 0xfffff000;
6784 bfd_put_32 (abfd, value, data);
6785 return bfd_reloc_ok;
6786 }
6787
6788 /* For a given value of n, calculate the value of G_n as required to
6789 deal with group relocations. We return it in the form of an
6790 encoded constant-and-rotation, together with the final residual. If n is
6791 specified as less than zero, then final_residual is filled with the
6792 input value and no further action is performed. */
6793
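/* As an illustration (values chosen arbitrarily): for VALUE 0x12345678
   and N = 0, the first group is G_0 = 0x12000000, returned in encoded
   form as 0x548 (constant 0x48 with rotation field 5, i.e. 0x48 rotated
   right by 10 bits), and *FINAL_RESIDUAL is set to 0x00345678. */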
6794 static bfd_vma
6795 calculate_group_reloc_mask (bfd_vma value, int n, bfd_vma *final_residual)
6796 {
6797 int current_n;
6798 bfd_vma g_n;
6799 bfd_vma encoded_g_n = 0;
6800 bfd_vma residual = value; /* Also known as Y_n. */
6801
6802 for (current_n = 0; current_n <= n; current_n++)
6803 {
6804 int shift;
6805
6806 /* Calculate which part of the value to mask. */
6807 if (residual == 0)
6808 shift = 0;
6809 else
6810 {
6811 int msb;
6812
6813 /* Determine the most significant bit in the residual and
6814 align the resulting value to a 2-bit boundary. */
6815 for (msb = 30; msb >= 0; msb -= 2)
6816 if (residual & (3 << msb))
6817 break;
6818
6819 /* The desired shift is now (msb - 6), or zero, whichever
6820 is the greater. */
6821 shift = msb - 6;
6822 if (shift < 0)
6823 shift = 0;
6824 }
6825
6826 /* Calculate g_n in 32-bit as well as encoded constant+rotation form. */
6827 g_n = residual & (0xff << shift);
6828 encoded_g_n = (g_n >> shift)
6829 | ((g_n <= 0xff ? 0 : (32 - shift) / 2) << 8);
6830
6831 /* Calculate the residual for the next time around. */
6832 residual &= ~g_n;
6833 }
6834
6835 *final_residual = residual;
6836
6837 return encoded_g_n;
6838 }
6839
6840 /* Given an ARM instruction, determine whether it is an ADD or a SUB.
6841 Returns 1 if it is an ADD, -1 if it is a SUB, and 0 otherwise. */
6842
6843 static int
6844 identify_add_or_sub (bfd_vma insn)
6845 {
6846 int opcode = insn & 0x1e00000;
6847
6848 if (opcode == 1 << 23) /* ADD */
6849 return 1;
6850
6851 if (opcode == 1 << 22) /* SUB */
6852 return -1;
6853
6854 return 0;
6855 }
6856
6857 /* Perform a relocation as part of a final link. */
6858
6859 static bfd_reloc_status_type
6860 elf32_arm_final_link_relocate (reloc_howto_type * howto,
6861 bfd * input_bfd,
6862 bfd * output_bfd,
6863 asection * input_section,
6864 bfd_byte * contents,
6865 Elf_Internal_Rela * rel,
6866 bfd_vma value,
6867 struct bfd_link_info * info,
6868 asection * sym_sec,
6869 const char * sym_name,
6870 int sym_flags,
6871 struct elf_link_hash_entry * h,
6872 bfd_boolean * unresolved_reloc_p,
6873 char ** error_message)
6874 {
6875 unsigned long r_type = howto->type;
6876 unsigned long r_symndx;
6877 bfd_byte * hit_data = contents + rel->r_offset;
6878 bfd * dynobj = NULL;
6879 bfd_vma * local_got_offsets;
6880 asection * sgot = NULL;
6881 asection * splt = NULL;
6882 asection * sreloc = NULL;
6883 bfd_vma addend;
6884 bfd_signed_vma signed_addend;
6885 struct elf32_arm_link_hash_table * globals;
6886
6887 globals = elf32_arm_hash_table (info);
6888 if (globals == NULL)
6889 return bfd_reloc_notsupported;
6890
6891 BFD_ASSERT (is_arm_elf (input_bfd));
6892
6893 /* Some relocation types map to different relocations depending on the
6894 target. We pick the right one here. */
6895 r_type = arm_real_reloc_type (globals, r_type);
6896 if (r_type != howto->type)
6897 howto = elf32_arm_howto_from_type (r_type);
6898
6899 /* If the start address has been set, then set the EF_ARM_HASENTRY
6900 flag. Setting this more than once is redundant, but the cost is
6901 not too high, and it keeps the code simple.
6902
6903 The test is done here, rather than somewhere else, because the
6904 start address is only set just before the final link commences.
6905
6906 Note - if the user deliberately sets a start address of 0, the
6907 flag will not be set. */
6908 if (bfd_get_start_address (output_bfd) != 0)
6909 elf_elfheader (output_bfd)->e_flags |= EF_ARM_HASENTRY;
6910
6911 dynobj = elf_hash_table (info)->dynobj;
6912 if (dynobj)
6913 {
6914 sgot = bfd_get_section_by_name (dynobj, ".got");
6915 splt = bfd_get_section_by_name (dynobj, ".plt");
6916 }
6917 local_got_offsets = elf_local_got_offsets (input_bfd);
6918 r_symndx = ELF32_R_SYM (rel->r_info);
6919
6920 if (globals->use_rel)
6921 {
6922 addend = bfd_get_32 (input_bfd, hit_data) & howto->src_mask;
6923
6924 if (addend & ((howto->src_mask + 1) >> 1))
6925 {
6926 signed_addend = -1;
6927 signed_addend &= ~ howto->src_mask;
6928 signed_addend |= addend;
6929 }
6930 else
6931 signed_addend = addend;
6932 }
6933 else
6934 addend = signed_addend = rel->r_addend;
6935
6936 switch (r_type)
6937 {
6938 case R_ARM_NONE:
6939 /* We don't need to find a value for this symbol. It's just a
6940 marker. */
6941 *unresolved_reloc_p = FALSE;
6942 return bfd_reloc_ok;
6943
6944 case R_ARM_ABS12:
6945 if (!globals->vxworks_p)
6946 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
6947
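      /* Fall through: on VxWorks, R_ARM_ABS12 is handled together with
	 the relocations below so that a dynamic relocation can be
	 emitted when one is needed. */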
6948 case R_ARM_PC24:
6949 case R_ARM_ABS32:
6950 case R_ARM_ABS32_NOI:
6951 case R_ARM_REL32:
6952 case R_ARM_REL32_NOI:
6953 case R_ARM_CALL:
6954 case R_ARM_JUMP24:
6955 case R_ARM_XPC25:
6956 case R_ARM_PREL31:
6957 case R_ARM_PLT32:
6958 /* Handle relocations which should use the PLT entry. ABS32/REL32
6959 will use the symbol's value, which may point to a PLT entry, but we
6960 don't need to handle that here. If we created a PLT entry, all
6961 branches in this object should go to it, except if the PLT is too
6962 far away, in which case a long branch stub should be inserted. */
6963 if ((r_type != R_ARM_ABS32 && r_type != R_ARM_REL32
6964 && r_type != R_ARM_ABS32_NOI && r_type != R_ARM_REL32_NOI
6965 && r_type != R_ARM_CALL
6966 && r_type != R_ARM_JUMP24
6967 && r_type != R_ARM_PLT32)
6968 && h != NULL
6969 && splt != NULL
6970 && h->plt.offset != (bfd_vma) -1)
6971 {
6972 /* If we've created a .plt section, and assigned a PLT entry to
6973 this function, it should not be known to bind locally. If
6974 it were, we would have cleared the PLT entry. */
6975 BFD_ASSERT (!SYMBOL_CALLS_LOCAL (info, h));
6976
6977 value = (splt->output_section->vma
6978 + splt->output_offset
6979 + h->plt.offset);
6980 *unresolved_reloc_p = FALSE;
6981 return _bfd_final_link_relocate (howto, input_bfd, input_section,
6982 contents, rel->r_offset, value,
6983 rel->r_addend);
6984 }
6985
6986 /* When generating a shared object or relocatable executable, these
6987 relocations are copied into the output file to be resolved at
6988 run time. */
6989 if ((info->shared || globals->root.is_relocatable_executable)
6990 && (input_section->flags & SEC_ALLOC)
6991 && !(globals->vxworks_p
6992 && strcmp (input_section->output_section->name,
6993 ".tls_vars") == 0)
6994 && ((r_type != R_ARM_REL32 && r_type != R_ARM_REL32_NOI)
6995 || !SYMBOL_CALLS_LOCAL (info, h))
6996 && (!strstr (input_section->name, STUB_SUFFIX))
6997 && (h == NULL
6998 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
6999 || h->root.type != bfd_link_hash_undefweak)
7000 && r_type != R_ARM_PC24
7001 && r_type != R_ARM_CALL
7002 && r_type != R_ARM_JUMP24
7003 && r_type != R_ARM_PREL31
7004 && r_type != R_ARM_PLT32)
7005 {
7006 Elf_Internal_Rela outrel;
7007 bfd_byte *loc;
7008 bfd_boolean skip, relocate;
7009
7010 *unresolved_reloc_p = FALSE;
7011
7012 if (sreloc == NULL)
7013 {
7014 sreloc = _bfd_elf_get_dynamic_reloc_section (input_bfd, input_section,
7015 ! globals->use_rel);
7016
7017 if (sreloc == NULL)
7018 return bfd_reloc_notsupported;
7019 }
7020
7021 skip = FALSE;
7022 relocate = FALSE;
7023
7024 outrel.r_addend = addend;
7025 outrel.r_offset =
7026 _bfd_elf_section_offset (output_bfd, info, input_section,
7027 rel->r_offset);
7028 if (outrel.r_offset == (bfd_vma) -1)
7029 skip = TRUE;
7030 else if (outrel.r_offset == (bfd_vma) -2)
7031 skip = TRUE, relocate = TRUE;
7032 outrel.r_offset += (input_section->output_section->vma
7033 + input_section->output_offset);
7034
7035 if (skip)
7036 memset (&outrel, 0, sizeof outrel);
7037 else if (h != NULL
7038 && h->dynindx != -1
7039 && (!info->shared
7040 || !info->symbolic
7041 || !h->def_regular))
7042 outrel.r_info = ELF32_R_INFO (h->dynindx, r_type);
7043 else
7044 {
7045 int symbol;
7046
7047 /* This symbol is local, or marked to become local. */
7048 if (sym_flags == STT_ARM_TFUNC)
7049 value |= 1;
7050 if (globals->symbian_p)
7051 {
7052 asection *osec;
7053
 7054             /* On Symbian OS, the data segment and text segment
7055 can be relocated independently. Therefore, we
7056 must indicate the segment to which this
7057 relocation is relative. The BPABI allows us to
7058 use any symbol in the right segment; we just use
7059 the section symbol as it is convenient. (We
7060 cannot use the symbol given by "h" directly as it
7061 will not appear in the dynamic symbol table.)
7062
7063 Note that the dynamic linker ignores the section
7064 symbol value, so we don't subtract osec->vma
7065 from the emitted reloc addend. */
7066 if (sym_sec)
7067 osec = sym_sec->output_section;
7068 else
7069 osec = input_section->output_section;
7070 symbol = elf_section_data (osec)->dynindx;
7071 if (symbol == 0)
7072 {
7073 struct elf_link_hash_table *htab = elf_hash_table (info);
7074
7075 if ((osec->flags & SEC_READONLY) == 0
7076 && htab->data_index_section != NULL)
7077 osec = htab->data_index_section;
7078 else
7079 osec = htab->text_index_section;
7080 symbol = elf_section_data (osec)->dynindx;
7081 }
7082 BFD_ASSERT (symbol != 0);
7083 }
7084 else
7085 /* On SVR4-ish systems, the dynamic loader cannot
7086 relocate the text and data segments independently,
7087 so the symbol does not matter. */
7088 symbol = 0;
7089 outrel.r_info = ELF32_R_INFO (symbol, R_ARM_RELATIVE);
7090 if (globals->use_rel)
7091 relocate = TRUE;
7092 else
7093 outrel.r_addend += value;
7094 }
7095
7096 loc = sreloc->contents;
7097 loc += sreloc->reloc_count++ * RELOC_SIZE (globals);
7098 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
7099
7100 /* If this reloc is against an external symbol, we do not want to
7101 fiddle with the addend. Otherwise, we need to include the symbol
7102 value so that it becomes an addend for the dynamic reloc. */
7103 if (! relocate)
7104 return bfd_reloc_ok;
7105
7106 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7107 contents, rel->r_offset, value,
7108 (bfd_vma) 0);
7109 }
7110 else switch (r_type)
7111 {
7112 case R_ARM_ABS12:
7113 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
7114
7115 case R_ARM_XPC25: /* Arm BLX instruction. */
7116 case R_ARM_CALL:
7117 case R_ARM_JUMP24:
7118 case R_ARM_PC24: /* Arm B/BL instruction. */
7119 case R_ARM_PLT32:
7120 {
7121 struct elf32_arm_stub_hash_entry *stub_entry = NULL;
7122
7123 if (r_type == R_ARM_XPC25)
7124 {
7125 /* Check for Arm calling Arm function. */
7126 /* FIXME: Should we translate the instruction into a BL
7127 instruction instead ? */
7128 if (sym_flags != STT_ARM_TFUNC)
7129 (*_bfd_error_handler)
7130 (_("\%B: Warning: Arm BLX instruction targets Arm function '%s'."),
7131 input_bfd,
7132 h ? h->root.root.string : "(local)");
7133 }
7134 else if (r_type == R_ARM_PC24)
7135 {
7136 /* Check for Arm calling Thumb function. */
7137 if (sym_flags == STT_ARM_TFUNC)
7138 {
7139 if (elf32_arm_to_thumb_stub (info, sym_name, input_bfd,
7140 output_bfd, input_section,
7141 hit_data, sym_sec, rel->r_offset,
7142 signed_addend, value,
7143 error_message))
7144 return bfd_reloc_ok;
7145 else
7146 return bfd_reloc_dangerous;
7147 }
7148 }
7149
7150 /* Check if a stub has to be inserted because the
7151 destination is too far or we are changing mode. */
7152 if ( r_type == R_ARM_CALL
7153 || r_type == R_ARM_JUMP24
7154 || r_type == R_ARM_PLT32)
7155 {
7156 enum elf32_arm_stub_type stub_type = arm_stub_none;
7157 struct elf32_arm_link_hash_entry *hash;
7158
7159 hash = (struct elf32_arm_link_hash_entry *) h;
7160 stub_type = arm_type_of_stub (info, input_section, rel,
7161 &sym_flags, hash,
7162 value, sym_sec,
7163 input_bfd, sym_name);
7164
7165 if (stub_type != arm_stub_none)
7166 {
7167 /* The target is out of reach, so redirect the
7168 branch to the local stub for this function. */
7169
7170 stub_entry = elf32_arm_get_stub_entry (input_section,
7171 sym_sec, h,
7172 rel, globals,
7173 stub_type);
7174 if (stub_entry != NULL)
7175 value = (stub_entry->stub_offset
7176 + stub_entry->stub_sec->output_offset
7177 + stub_entry->stub_sec->output_section->vma);
7178 }
7179 else
7180 {
7181 /* If the call goes through a PLT entry, make sure to
7182 check distance to the right destination address. */
7183 if (h != NULL
7184 && splt != NULL
7185 && h->plt.offset != (bfd_vma) -1)
7186 {
7187 value = (splt->output_section->vma
7188 + splt->output_offset
7189 + h->plt.offset);
7190 *unresolved_reloc_p = FALSE;
7191 /* The PLT entry is in ARM mode, regardless of the
7192 target function. */
7193 sym_flags = STT_FUNC;
7194 }
7195 }
7196 }
7197
7198 /* The ARM ELF ABI says that this reloc is computed as: S - P + A
7199 where:
7200 S is the address of the symbol in the relocation.
7201 P is address of the instruction being relocated.
7202 A is the addend (extracted from the instruction) in bytes.
7203
7204 S is held in 'value'.
7205 P is the base address of the section containing the
7206 instruction plus the offset of the reloc into that
7207 section, ie:
7208 (input_section->output_section->vma +
7209 input_section->output_offset +
7210 rel->r_offset).
7211 A is the addend, converted into bytes, ie:
7212 (signed_addend * 4)
7213
7214 Note: None of these operations have knowledge of the pipeline
7215 size of the processor, thus it is up to the assembler to
7216 encode this information into the addend. */
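	   /* As an illustration (hypothetical numbers): a BL at
	      P = 0x8000 targeting S = 0x9000 with A = -8 gives
	      value = 0x0ff8, which becomes 0x3fe in the 24-bit branch
	      field after the shift below. */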
7217 value -= (input_section->output_section->vma
7218 + input_section->output_offset);
7219 value -= rel->r_offset;
7220 if (globals->use_rel)
7221 value += (signed_addend << howto->size);
7222 else
7223 /* RELA addends do not have to be adjusted by howto->size. */
7224 value += signed_addend;
7225
7226 signed_addend = value;
7227 signed_addend >>= howto->rightshift;
7228
7229 /* A branch to an undefined weak symbol is turned into a jump to
7230 the next instruction unless a PLT entry will be created.
7231 Do the same for local undefined symbols (but not for STN_UNDEF).
7232 The jump to the next instruction is optimized as a NOP depending
7233 on the architecture. */
7234 if (h ? (h->root.type == bfd_link_hash_undefweak
7235 && !(splt != NULL && h->plt.offset != (bfd_vma) -1))
7236 : r_symndx != STN_UNDEF && bfd_is_und_section (sym_sec))
7237 {
7238 value = (bfd_get_32 (input_bfd, hit_data) & 0xf0000000);
7239
7240 if (arch_has_arm_nop (globals))
7241 value |= 0x0320f000;
7242 else
7243 value |= 0x01a00000; /* Using pre-UAL nop: mov r0, r0. */
7244 }
7245 else
7246 {
7247 /* Perform a signed range check. */
7248 if ( signed_addend > ((bfd_signed_vma) (howto->dst_mask >> 1))
7249 || signed_addend < - ((bfd_signed_vma) ((howto->dst_mask + 1) >> 1)))
7250 return bfd_reloc_overflow;
7251
7252 addend = (value & 2);
7253
7254 value = (signed_addend & howto->dst_mask)
7255 | (bfd_get_32 (input_bfd, hit_data) & (~ howto->dst_mask));
7256
7257 if (r_type == R_ARM_CALL)
7258 {
7259 /* Set the H bit in the BLX instruction. */
7260 if (sym_flags == STT_ARM_TFUNC)
7261 {
7262 if (addend)
7263 value |= (1 << 24);
7264 else
7265 value &= ~(bfd_vma)(1 << 24);
7266 }
7267
7268 /* Select the correct instruction (BL or BLX). */
7269 /* Only if we are not handling a BL to a stub. In this
7270 case, mode switching is performed by the stub. */
7271 if (sym_flags == STT_ARM_TFUNC && !stub_entry)
7272 value |= (1 << 28);
7273 else
7274 {
7275 value &= ~(bfd_vma)(1 << 28);
7276 value |= (1 << 24);
7277 }
7278 }
7279 }
7280 }
7281 break;
7282
7283 case R_ARM_ABS32:
7284 value += addend;
7285 if (sym_flags == STT_ARM_TFUNC)
7286 value |= 1;
7287 break;
7288
7289 case R_ARM_ABS32_NOI:
7290 value += addend;
7291 break;
7292
7293 case R_ARM_REL32:
7294 value += addend;
7295 if (sym_flags == STT_ARM_TFUNC)
7296 value |= 1;
7297 value -= (input_section->output_section->vma
7298 + input_section->output_offset + rel->r_offset);
7299 break;
7300
7301 case R_ARM_REL32_NOI:
7302 value += addend;
7303 value -= (input_section->output_section->vma
7304 + input_section->output_offset + rel->r_offset);
7305 break;
7306
7307 case R_ARM_PREL31:
7308 value -= (input_section->output_section->vma
7309 + input_section->output_offset + rel->r_offset);
7310 value += signed_addend;
7311 if (! h || h->root.type != bfd_link_hash_undefweak)
7312 {
7313 /* Check for overflow. */
7314 if ((value ^ (value >> 1)) & (1 << 30))
7315 return bfd_reloc_overflow;
7316 }
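 /* The test above relies on bits 30 and 31 of a value that fits in a
    signed 31-bit field being equal: value ^ (value >> 1) has bit 30
    set exactly when they differ.  For example (illustrative),
    0x40000000 trips the check, while 0xc0000000 (-0x40000000, the
    most negative representable offset) does not.  */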
7317 value &= 0x7fffffff;
7318 value |= (bfd_get_32 (input_bfd, hit_data) & 0x80000000);
7319 if (sym_flags == STT_ARM_TFUNC)
7320 value |= 1;
7321 break;
7322 }
7323
7324 bfd_put_32 (input_bfd, value, hit_data);
7325 return bfd_reloc_ok;
7326
7327 case R_ARM_ABS8:
7328 value += addend;
7329
7330 /* There is no way to tell whether the user intended to use a signed or
7331 unsigned addend. When checking for overflow we accept either,
7332 as specified by the AAELF. */
7333 if ((long) value > 0xff || (long) value < -0x80)
7334 return bfd_reloc_overflow;
7335
7336 bfd_put_8 (input_bfd, value, hit_data);
7337 return bfd_reloc_ok;
7338
7339 case R_ARM_ABS16:
7340 value += addend;
7341
7342 /* See comment for R_ARM_ABS8. */
7343 if ((long) value > 0xffff || (long) value < -0x8000)
7344 return bfd_reloc_overflow;
7345
7346 bfd_put_16 (input_bfd, value, hit_data);
7347 return bfd_reloc_ok;
7348
7349 case R_ARM_THM_ABS5:
7350 /* Support ldr and str instructions for the thumb. */
7351 if (globals->use_rel)
7352 {
7353 /* Need to refetch addend. */
7354 addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
7355 /* ??? Need to determine shift amount from operand size. */
7356 addend >>= howto->rightshift;
7357 }
7358 value += addend;
7359
7360 /* ??? Isn't value unsigned? */
7361 if ((long) value > 0x1f || (long) value < -0x10)
7362 return bfd_reloc_overflow;
7363
7364 /* ??? Value needs to be properly shifted into place first. */
7365 value |= bfd_get_16 (input_bfd, hit_data) & 0xf83f;
7366 bfd_put_16 (input_bfd, value, hit_data);
7367 return bfd_reloc_ok;
7368
7369 case R_ARM_THM_ALU_PREL_11_0:
7370 /* Corresponds to: addw reg, pc, #offset (and similarly for subw). */
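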
7371 {
7372 bfd_vma insn;
7373 bfd_signed_vma relocation;
7374
7375 insn = (bfd_get_16 (input_bfd, hit_data) << 16)
7376 | bfd_get_16 (input_bfd, hit_data + 2);
7377
7378 if (globals->use_rel)
7379 {
7380 signed_addend = (insn & 0xff) | ((insn & 0x7000) >> 4)
7381 | ((insn & (1 << 26)) >> 15);
7382 if (insn & 0xf00000)
7383 signed_addend = -signed_addend;
7384 }
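 /* Illustrative decode: for insn 0xf20f3c12 (addw r12, pc, #0x312) the
    three fields above contribute 0x12, 0x300 and 0 respectively, giving
    signed_addend = 0x312; a SUBW (bits 20-23 non-zero) would negate it.  */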
7385
7386 relocation = value + signed_addend;
7387 relocation -= (input_section->output_section->vma
7388 + input_section->output_offset
7389 + rel->r_offset);
7390
7391 value = abs (relocation);
7392
7393 if (value >= 0x1000)
7394 return bfd_reloc_overflow;
7395
7396 insn = (insn & 0xfb0f8f00) | (value & 0xff)
7397 | ((value & 0x700) << 4)
7398 | ((value & 0x800) << 15);
7399 if (relocation < 0)
7400 insn |= 0xa00000;
7401
7402 bfd_put_16 (input_bfd, insn >> 16, hit_data);
7403 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
7404
7405 return bfd_reloc_ok;
7406 }
7407
7408 case R_ARM_THM_PC8:
7409 /* PR 10073: This reloc is not generated by the GNU toolchain,
7410 but it is supported for compatibility with third party libraries
7411 generated by other compilers, specifically the ARM and IAR toolchains. */
7412 {
7413 bfd_vma insn;
7414 bfd_signed_vma relocation;
7415
7416 insn = bfd_get_16 (input_bfd, hit_data);
7417
7418 if (globals->use_rel)
7419 addend = (insn & 0x00ff) << 2;
7420
7421 relocation = value + addend;
7422 relocation -= (input_section->output_section->vma
7423 + input_section->output_offset
7424 + rel->r_offset);
7425
7426 value = abs (relocation);
7427
7428 /* We do not check for overflow of this reloc. Although strictly
7429 speaking this is incorrect, it appears to be necessary in order
7430 to work with IAR generated relocs. Since GCC and GAS do not
7431 generate R_ARM_THM_PC8 relocs, the lack of a check should not be
7432 a problem for them. */
7433 value &= 0x3fc;
7434
7435 insn = (insn & 0xff00) | (value >> 2);
7436
7437 bfd_put_16 (input_bfd, insn, hit_data);
7438
7439 return bfd_reloc_ok;
7440 }
7441
7442 case R_ARM_THM_PC12:
7443 /* Corresponds to: ldr.w reg, [pc, #offset]. */
7444 {
7445 bfd_vma insn;
7446 bfd_signed_vma relocation;
7447
7448 insn = (bfd_get_16 (input_bfd, hit_data) << 16)
7449 | bfd_get_16 (input_bfd, hit_data + 2);
7450
7451 if (globals->use_rel)
7452 {
7453 signed_addend = insn & 0xfff;
7454 if (!(insn & (1 << 23)))
7455 signed_addend = -signed_addend;
7456 }
7457
7458 relocation = value + signed_addend;
7459 relocation -= (input_section->output_section->vma
7460 + input_section->output_offset
7461 + rel->r_offset);
7462
7463 value = abs (relocation);
7464
7465 if (value >= 0x1000)
7466 return bfd_reloc_overflow;
7467
7468 insn = (insn & 0xff7ff000) | value;
7469 if (relocation >= 0)
7470 insn |= (1 << 23);
7471
7472 bfd_put_16 (input_bfd, insn >> 16, hit_data);
7473 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
7474
7475 return bfd_reloc_ok;
7476 }
7477
7478 case R_ARM_THM_XPC22:
7479 case R_ARM_THM_CALL:
7480 case R_ARM_THM_JUMP24:
7481 /* Thumb BL (branch long instruction). */
7482 {
7483 bfd_vma relocation;
7484 bfd_vma reloc_sign;
7485 bfd_boolean overflow = FALSE;
7486 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
7487 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
7488 bfd_signed_vma reloc_signed_max;
7489 bfd_signed_vma reloc_signed_min;
7490 bfd_vma check;
7491 bfd_signed_vma signed_check;
7492 int bitsize;
7493 const int thumb2 = using_thumb2 (globals);
7494
7495 /* A branch to an undefined weak symbol is turned into a jump to
7496 the next instruction unless a PLT entry will be created.
7497 The jump to the next instruction is optimized as a NOP.W for
7498 Thumb-2 enabled architectures. */
7499 if (h && h->root.type == bfd_link_hash_undefweak
7500 && !(splt != NULL && h->plt.offset != (bfd_vma) -1))
7501 {
7502 if (arch_has_thumb2_nop (globals))
7503 {
7504 bfd_put_16 (input_bfd, 0xf3af, hit_data);
7505 bfd_put_16 (input_bfd, 0x8000, hit_data + 2);
7506 }
7507 else
7508 {
7509 bfd_put_16 (input_bfd, 0xe000, hit_data);
7510 bfd_put_16 (input_bfd, 0xbf00, hit_data + 2);
7511 }
7512 return bfd_reloc_ok;
7513 }
7514
7515 /* Fetch the addend. We use the Thumb-2 encoding (backwards compatible
7516 with Thumb-1) involving the J1 and J2 bits. */
7517 if (globals->use_rel)
7518 {
7519 bfd_vma s = (upper_insn & (1 << 10)) >> 10;
7520 bfd_vma upper = upper_insn & 0x3ff;
7521 bfd_vma lower = lower_insn & 0x7ff;
7522 bfd_vma j1 = (lower_insn & (1 << 13)) >> 13;
7523 bfd_vma j2 = (lower_insn & (1 << 11)) >> 11;
7524 bfd_vma i1 = j1 ^ s ? 0 : 1;
7525 bfd_vma i2 = j2 ^ s ? 0 : 1;
7526
7527 addend = (i1 << 23) | (i2 << 22) | (upper << 12) | (lower << 1);
7528 /* Sign extend. */
7529 addend = (addend | ((s ? 0 : 1) << 24)) - (1 << 24);
7530
7531 signed_addend = addend;
7532 }
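 /* Illustrative decode: upper_insn 0xf7ff, lower_insn 0xfffe gives
    S = 1, imm10 = 0x3ff, imm11 = 0x7fe and J1 = J2 = 1, hence
    I1 = I2 = 1 and addend = 0xfffffc before sign extension, i.e.
    signed_addend = -4: the usual PC bias stored by the assembler
    in REL objects.  */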
7533
7534 if (r_type == R_ARM_THM_XPC22)
7535 {
7536 /* Check for Thumb to Thumb call. */
7537 /* FIXME: Should we translate the instruction into a BL
7538 instruction instead ? */
7539 if (sym_flags == STT_ARM_TFUNC)
7540 (*_bfd_error_handler)
7541 (_("%B: Warning: Thumb BLX instruction targets thumb function '%s'."),
7542 input_bfd,
7543 h ? h->root.root.string : "(local)");
7544 }
7545 else
7546 {
7547 /* If it is not a call to Thumb, assume call to Arm.
7548 If it is a call relative to a section name, then it is not a
7549 function call at all, but rather a long jump. Calls through
7550 the PLT do not require stubs. */
7551 if (sym_flags != STT_ARM_TFUNC && sym_flags != STT_SECTION
7552 && (h == NULL || splt == NULL
7553 || h->plt.offset == (bfd_vma) -1))
7554 {
7555 if (globals->use_blx && r_type == R_ARM_THM_CALL)
7556 {
7557 /* Convert BL to BLX. */
7558 lower_insn = (lower_insn & ~0x1000) | 0x0800;
7559 }
7560 else if (( r_type != R_ARM_THM_CALL)
7561 && (r_type != R_ARM_THM_JUMP24))
7562 {
7563 if (elf32_thumb_to_arm_stub
7564 (info, sym_name, input_bfd, output_bfd, input_section,
7565 hit_data, sym_sec, rel->r_offset, signed_addend, value,
7566 error_message))
7567 return bfd_reloc_ok;
7568 else
7569 return bfd_reloc_dangerous;
7570 }
7571 }
7572 else if (sym_flags == STT_ARM_TFUNC && globals->use_blx
7573 && r_type == R_ARM_THM_CALL)
7574 {
7575 /* Make sure this is a BL. */
7576 lower_insn |= 0x1800;
7577 }
7578 }
7579
7580 enum elf32_arm_stub_type stub_type = arm_stub_none;
7581 if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24)
7582 {
7583 /* Check if a stub has to be inserted because the destination
7584 is too far. */
7585 struct elf32_arm_stub_hash_entry *stub_entry;
7586 struct elf32_arm_link_hash_entry *hash;
7587
7588 hash = (struct elf32_arm_link_hash_entry *) h;
7589
7590 stub_type = arm_type_of_stub (info, input_section, rel,
7591 &sym_flags, hash, value, sym_sec,
7592 input_bfd, sym_name);
7593
7594 if (stub_type != arm_stub_none)
7595 {
7596 /* The target is out of reach or we are changing modes, so
7597 redirect the branch to the local stub for this
7598 function. */
7599 stub_entry = elf32_arm_get_stub_entry (input_section,
7600 sym_sec, h,
7601 rel, globals,
7602 stub_type);
7603 if (stub_entry != NULL)
7604 value = (stub_entry->stub_offset
7605 + stub_entry->stub_sec->output_offset
7606 + stub_entry->stub_sec->output_section->vma);
7607
7608 /* If this call becomes a call to Arm, force BLX. */
7609 if (globals->use_blx && (r_type == R_ARM_THM_CALL))
7610 {
7611 if ((stub_entry
7612 && !arm_stub_is_thumb (stub_entry->stub_type))
7613 || (sym_flags != STT_ARM_TFUNC))
7614 lower_insn = (lower_insn & ~0x1000) | 0x0800;
7615 }
7616 }
7617 }
7618
7619 /* Handle calls via the PLT. */
7620 if (stub_type == arm_stub_none
7621 && h != NULL
7622 && splt != NULL
7623 && h->plt.offset != (bfd_vma) -1)
7624 {
7625 value = (splt->output_section->vma
7626 + splt->output_offset
7627 + h->plt.offset);
7628
7629 if (globals->use_blx && r_type == R_ARM_THM_CALL)
7630 {
7631 /* If the Thumb BLX instruction is available, convert
7632 the BL to a BLX instruction to call the ARM-mode
7633 PLT entry. */
7634 lower_insn = (lower_insn & ~0x1000) | 0x0800;
7635 sym_flags = STT_FUNC;
7636 }
7637 else
7638 {
7639 /* Target the Thumb stub before the ARM PLT entry. */
7640 value -= PLT_THUMB_STUB_SIZE;
7641 sym_flags = STT_ARM_TFUNC;
7642 }
7643 *unresolved_reloc_p = FALSE;
7644 }
7645
7646 relocation = value + signed_addend;
7647
7648 relocation -= (input_section->output_section->vma
7649 + input_section->output_offset
7650 + rel->r_offset);
7651
7652 check = relocation >> howto->rightshift;
7653
7654 /* If this is a signed value, the rightshift just dropped
7655 leading 1 bits (assuming twos complement). */
7656 if ((bfd_signed_vma) relocation >= 0)
7657 signed_check = check;
7658 else
7659 signed_check = check | ~((bfd_vma) -1 >> howto->rightshift);
7660
7661 /* Calculate the permissible maximum and minimum values for
7662 this relocation according to whether we're relocating for
7663 Thumb-2 or not. */
7664 bitsize = howto->bitsize;
7665 if (!thumb2)
7666 bitsize -= 2;
7667 reloc_signed_max = (1 << (bitsize - 1)) - 1;
7668 reloc_signed_min = ~reloc_signed_max;
7669
7670 /* Assumes two's complement. */
7671 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
7672 overflow = TRUE;
7673
7674 if ((lower_insn & 0x5000) == 0x4000)
7675 /* For a BLX instruction, make sure that the relocation is rounded up
7676 to a word boundary. This follows the semantics of the instruction
7677 which specifies that bit 1 of the target address will come from bit
7678 1 of the base address. */
7679 relocation = (relocation + 2) & ~ 3;
7680
7681 /* Put RELOCATION back into the insn. Assumes two's complement.
7682 We use the Thumb-2 encoding, which is safe even if dealing with
7683 a Thumb-1 instruction by virtue of our overflow check above. */
7684 reloc_sign = (signed_check < 0) ? 1 : 0;
7685 upper_insn = (upper_insn & ~(bfd_vma) 0x7ff)
7686 | ((relocation >> 12) & 0x3ff)
7687 | (reloc_sign << 10);
7688 lower_insn = (lower_insn & ~(bfd_vma) 0x2fff)
7689 | (((!((relocation >> 23) & 1)) ^ reloc_sign) << 13)
7690 | (((!((relocation >> 22) & 1)) ^ reloc_sign) << 11)
7691 | ((relocation >> 1) & 0x7ff);
7692
7693 /* Put the relocated value back in the object file: */
7694 bfd_put_16 (input_bfd, upper_insn, hit_data);
7695 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
7696
7697 return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
7698 }
7699 break;
7700
7701 case R_ARM_THM_JUMP19:
7702 /* Thumb32 conditional branch instruction. */
7703 {
7704 bfd_vma relocation;
7705 bfd_boolean overflow = FALSE;
7706 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
7707 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
7708 bfd_signed_vma reloc_signed_max = 0xffffe;
7709 bfd_signed_vma reloc_signed_min = -0x100000;
7710 bfd_signed_vma signed_check;
7711
7712 /* Need to refetch the addend, reconstruct the top three bits,
7713 and squish the two 11 bit pieces together. */
7714 if (globals->use_rel)
7715 {
7716 bfd_vma S = (upper_insn & 0x0400) >> 10;
7717 bfd_vma upper = (upper_insn & 0x003f);
7718 bfd_vma J1 = (lower_insn & 0x2000) >> 13;
7719 bfd_vma J2 = (lower_insn & 0x0800) >> 11;
7720 bfd_vma lower = (lower_insn & 0x07ff);
7721
7722 upper |= J1 << 6;
7723 upper |= J2 << 7;
7724 upper |= (!S) << 8;
7725 upper -= 0x0100; /* Sign extend. */
7726
7727 addend = (upper << 12) | (lower << 1);
7728 signed_addend = addend;
7729 }
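 /* Illustrative decode: upper_insn 0xf43f, lower_insn 0xaffe (a beq.w)
    reconstructs S = 1, J1 = J2 = 1, imm6 = 0x3f and imm11 = 0x7fe, so
    signed_addend = -4, the usual PC bias.  */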
7730
7731 /* Handle calls via the PLT. */
7732 if (h != NULL && splt != NULL && h->plt.offset != (bfd_vma) -1)
7733 {
7734 value = (splt->output_section->vma
7735 + splt->output_offset
7736 + h->plt.offset);
7737 /* Target the Thumb stub before the ARM PLT entry. */
7738 value -= PLT_THUMB_STUB_SIZE;
7739 *unresolved_reloc_p = FALSE;
7740 }
7741
7742 /* ??? Should handle interworking? GCC might someday try to
7743 use this for tail calls. */
7744
7745 relocation = value + signed_addend;
7746 relocation -= (input_section->output_section->vma
7747 + input_section->output_offset
7748 + rel->r_offset);
7749 signed_check = (bfd_signed_vma) relocation;
7750
7751 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
7752 overflow = TRUE;
7753
7754 /* Put RELOCATION back into the insn. */
7755 {
7756 bfd_vma S = (relocation & 0x00100000) >> 20;
7757 bfd_vma J2 = (relocation & 0x00080000) >> 19;
7758 bfd_vma J1 = (relocation & 0x00040000) >> 18;
7759 bfd_vma hi = (relocation & 0x0003f000) >> 12;
7760 bfd_vma lo = (relocation & 0x00000ffe) >> 1;
7761
7762 upper_insn = (upper_insn & 0xfbc0) | (S << 10) | hi;
7763 lower_insn = (lower_insn & 0xd000) | (J1 << 13) | (J2 << 11) | lo;
7764 }
7765
7766 /* Put the relocated value back in the object file: */
7767 bfd_put_16 (input_bfd, upper_insn, hit_data);
7768 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
7769
7770 return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
7771 }
7772
7773 case R_ARM_THM_JUMP11:
7774 case R_ARM_THM_JUMP8:
7775 case R_ARM_THM_JUMP6:
7776 /* Thumb B (branch) instruction. */
7777 {
7778 bfd_signed_vma relocation;
7779 bfd_signed_vma reloc_signed_max = (1 << (howto->bitsize - 1)) - 1;
7780 bfd_signed_vma reloc_signed_min = ~ reloc_signed_max;
7781 bfd_signed_vma signed_check;
7782
7783 /* CBZ cannot jump backward. */
7784 if (r_type == R_ARM_THM_JUMP6)
7785 reloc_signed_min = 0;
7786
7787 if (globals->use_rel)
7788 {
7789 /* Need to refetch addend. */
7790 addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
7791 if (addend & ((howto->src_mask + 1) >> 1))
7792 {
7793 signed_addend = -1;
7794 signed_addend &= ~ howto->src_mask;
7795 signed_addend |= addend;
7796 }
7797 else
7798 signed_addend = addend;
7799 /* The value in the insn has been right shifted. We need to
7800 undo this, so that we can perform the address calculation
7801 in terms of bytes. */
7802 signed_addend <<= howto->rightshift;
7803 }
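 /* Illustrative decode for R_ARM_THM_JUMP11 (b.n): a stored offset
    field of 0x7fe has its top bit set, sign-extends to -2 and, after
    the shift above, becomes the usual -4 byte addend.  */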
7804 relocation = value + signed_addend;
7805
7806 relocation -= (input_section->output_section->vma
7807 + input_section->output_offset
7808 + rel->r_offset);
7809
7810 relocation >>= howto->rightshift;
7811 signed_check = relocation;
7812
7813 if (r_type == R_ARM_THM_JUMP6)
7814 relocation = ((relocation & 0x0020) << 4) | ((relocation & 0x001f) << 3);
7815 else
7816 relocation &= howto->dst_mask;
7817 relocation |= (bfd_get_16 (input_bfd, hit_data) & (~ howto->dst_mask));
7818
7819 bfd_put_16 (input_bfd, relocation, hit_data);
7820
7821 /* Assumes two's complement. */
7822 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
7823 return bfd_reloc_overflow;
7824
7825 return bfd_reloc_ok;
7826 }
7827
7828 case R_ARM_ALU_PCREL7_0:
7829 case R_ARM_ALU_PCREL15_8:
7830 case R_ARM_ALU_PCREL23_15:
7831 {
7832 bfd_vma insn;
7833 bfd_vma relocation;
7834
7835 insn = bfd_get_32 (input_bfd, hit_data);
7836 if (globals->use_rel)
7837 {
7838 /* Extract the addend. */
7839 addend = (insn & 0xff) << ((insn & 0xf00) >> 7);
7840 signed_addend = addend;
7841 }
7842 relocation = value + signed_addend;
7843
7844 relocation -= (input_section->output_section->vma
7845 + input_section->output_offset
7846 + rel->r_offset);
7847 insn = (insn & ~0xfff)
7848 | ((howto->bitpos << 7) & 0xf00)
7849 | ((relocation >> howto->bitpos) & 0xff);
7850 bfd_put_32 (input_bfd, insn, hit_data);
7851 }
7852 return bfd_reloc_ok;
7853
7854 case R_ARM_GNU_VTINHERIT:
7855 case R_ARM_GNU_VTENTRY:
7856 return bfd_reloc_ok;
7857
7858 case R_ARM_GOTOFF32:
7859 /* Relocation is relative to the start of the
7860 global offset table. */
7861
7862 BFD_ASSERT (sgot != NULL);
7863 if (sgot == NULL)
7864 return bfd_reloc_notsupported;
7865
7866 /* If we are addressing a Thumb function, we need to adjust the
7867 address by one, so that attempts to call the function pointer will
7868 correctly interpret it as Thumb code. */
7869 if (sym_flags == STT_ARM_TFUNC)
7870 value += 1;
7871
7872 /* Note that sgot->output_offset is not involved in this
7873 calculation. We always want the start of .got. If we
7874 define _GLOBAL_OFFSET_TABLE in a different way, as is
7875 permitted by the ABI, we might have to change this
7876 calculation. */
7877 value -= sgot->output_section->vma;
7878 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7879 contents, rel->r_offset, value,
7880 rel->r_addend);
7881
7882 case R_ARM_GOTPC:
7883 /* Use global offset table as symbol value. */
7884 BFD_ASSERT (sgot != NULL);
7885
7886 if (sgot == NULL)
7887 return bfd_reloc_notsupported;
7888
7889 *unresolved_reloc_p = FALSE;
7890 value = sgot->output_section->vma;
7891 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7892 contents, rel->r_offset, value,
7893 rel->r_addend);
7894
7895 case R_ARM_GOT32:
7896 case R_ARM_GOT_PREL:
7897 /* Relocation is to the entry for this symbol in the
7898 global offset table. */
7899 if (sgot == NULL)
7900 return bfd_reloc_notsupported;
7901
7902 if (h != NULL)
7903 {
7904 bfd_vma off;
7905 bfd_boolean dyn;
7906
7907 off = h->got.offset;
7908 BFD_ASSERT (off != (bfd_vma) -1);
7909 dyn = globals->root.dynamic_sections_created;
7910
7911 if (! WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
7912 || (info->shared
7913 && SYMBOL_REFERENCES_LOCAL (info, h))
7914 || (ELF_ST_VISIBILITY (h->other)
7915 && h->root.type == bfd_link_hash_undefweak))
7916 {
7917 /* This is actually a static link, or it is a -Bsymbolic link
7918 and the symbol is defined locally. We must initialize this
7919 entry in the global offset table. Since the offset must
7920 always be a multiple of 4, we use the least significant bit
7921 to record whether we have initialized it already.
7922
7923 When doing a dynamic link, we create a .rel(a).got relocation
7924 entry to initialize the value. This is done in the
7925 finish_dynamic_symbol routine. */
7926 if ((off & 1) != 0)
7927 off &= ~1;
7928 else
7929 {
7930 /* If we are addressing a Thumb function, we need to
7931 adjust the address by one, so that attempts to
7932 call the function pointer will correctly
7933 interpret it as Thumb code. */
7934 if (sym_flags == STT_ARM_TFUNC)
7935 value |= 1;
7936
7937 bfd_put_32 (output_bfd, value, sgot->contents + off);
7938 h->got.offset |= 1;
7939 }
7940 }
7941 else
7942 *unresolved_reloc_p = FALSE;
7943
7944 value = sgot->output_offset + off;
7945 }
7946 else
7947 {
7948 bfd_vma off;
7949
7950 BFD_ASSERT (local_got_offsets != NULL &&
7951 local_got_offsets[r_symndx] != (bfd_vma) -1);
7952
7953 off = local_got_offsets[r_symndx];
7954
7955 /* The offset must always be a multiple of 4. We use the
7956 least significant bit to record whether we have already
7957 generated the necessary reloc. */
7958 if ((off & 1) != 0)
7959 off &= ~1;
7960 else
7961 {
7962 /* If we are addressing a Thumb function, we need to
7963 adjust the address by one, so that attempts to
7964 call the function pointer will correctly
7965 interpret it as Thumb code. */
7966 if (sym_flags == STT_ARM_TFUNC)
7967 value |= 1;
7968
7969 if (globals->use_rel)
7970 bfd_put_32 (output_bfd, value, sgot->contents + off);
7971
7972 if (info->shared)
7973 {
7974 asection * srelgot;
7975 Elf_Internal_Rela outrel;
7976 bfd_byte *loc;
7977
7978 srelgot = (bfd_get_section_by_name
7979 (dynobj, RELOC_SECTION (globals, ".got")));
7980 BFD_ASSERT (srelgot != NULL);
7981
7982 outrel.r_addend = addend + value;
7983 outrel.r_offset = (sgot->output_section->vma
7984 + sgot->output_offset
7985 + off);
7986 outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
7987 loc = srelgot->contents;
7988 loc += srelgot->reloc_count++ * RELOC_SIZE (globals);
7989 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
7990 }
7991
7992 local_got_offsets[r_symndx] |= 1;
7993 }
7994
7995 value = sgot->output_offset + off;
7996 }
7997 if (r_type != R_ARM_GOT32)
7998 value += sgot->output_section->vma;
7999
8000 return _bfd_final_link_relocate (howto, input_bfd, input_section,
8001 contents, rel->r_offset, value,
8002 rel->r_addend);
8003
8004 case R_ARM_TLS_LDO32:
8005 value = value - dtpoff_base (info);
8006
8007 return _bfd_final_link_relocate (howto, input_bfd, input_section,
8008 contents, rel->r_offset, value,
8009 rel->r_addend);
8010
8011 case R_ARM_TLS_LDM32:
8012 {
8013 bfd_vma off;
8014
8015 if (globals->sgot == NULL)
8016 abort ();
8017
8018 off = globals->tls_ldm_got.offset;
8019
8020 if ((off & 1) != 0)
8021 off &= ~1;
8022 else
8023 {
8024 /* If we don't know the module number, create a relocation
8025 for it. */
8026 if (info->shared)
8027 {
8028 Elf_Internal_Rela outrel;
8029 bfd_byte *loc;
8030
8031 if (globals->srelgot == NULL)
8032 abort ();
8033
8034 outrel.r_addend = 0;
8035 outrel.r_offset = (globals->sgot->output_section->vma
8036 + globals->sgot->output_offset + off);
8037 outrel.r_info = ELF32_R_INFO (0, R_ARM_TLS_DTPMOD32);
8038
8039 if (globals->use_rel)
8040 bfd_put_32 (output_bfd, outrel.r_addend,
8041 globals->sgot->contents + off);
8042
8043 loc = globals->srelgot->contents;
8044 loc += globals->srelgot->reloc_count++ * RELOC_SIZE (globals);
8045 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
8046 }
8047 else
8048 bfd_put_32 (output_bfd, 1, globals->sgot->contents + off);
8049
8050 globals->tls_ldm_got.offset |= 1;
8051 }
8052
8053 value = globals->sgot->output_section->vma + globals->sgot->output_offset + off
8054 - (input_section->output_section->vma + input_section->output_offset + rel->r_offset);
8055
8056 return _bfd_final_link_relocate (howto, input_bfd, input_section,
8057 contents, rel->r_offset, value,
8058 rel->r_addend);
8059 }
8060
8061 case R_ARM_TLS_GD32:
8062 case R_ARM_TLS_IE32:
8063 {
8064 bfd_vma off;
8065 int indx;
8066 char tls_type;
8067
8068 if (globals->sgot == NULL)
8069 abort ();
8070
8071 indx = 0;
8072 if (h != NULL)
8073 {
8074 bfd_boolean dyn;
8075 dyn = globals->root.dynamic_sections_created;
8076 if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
8077 && (!info->shared
8078 || !SYMBOL_REFERENCES_LOCAL (info, h)))
8079 {
8080 *unresolved_reloc_p = FALSE;
8081 indx = h->dynindx;
8082 }
8083 off = h->got.offset;
8084 tls_type = ((struct elf32_arm_link_hash_entry *) h)->tls_type;
8085 }
8086 else
8087 {
8088 if (local_got_offsets == NULL)
8089 abort ();
8090 off = local_got_offsets[r_symndx];
8091 tls_type = elf32_arm_local_got_tls_type (input_bfd)[r_symndx];
8092 }
8093
8094 if (tls_type == GOT_UNKNOWN)
8095 abort ();
8096
8097 if ((off & 1) != 0)
8098 off &= ~1;
8099 else
8100 {
8101 bfd_boolean need_relocs = FALSE;
8102 Elf_Internal_Rela outrel;
8103 bfd_byte *loc = NULL;
8104 int cur_off = off;
8105
8106 /* The GOT entries have not been initialized yet. Do it
8107 now, and emit any relocations. If both an IE GOT and a
8108 GD GOT are necessary, we emit the GD first. */
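 /* Note on GOT layout: a GD reference owns two consecutive GOT words
    (module index, then offset within the module), which is why
    cur_off advances by 8 below; an IE reference owns a single
    R_ARM_TLS_TPOFF32 word, advancing cur_off by 4.  */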
8109
8110 if ((info->shared || indx != 0)
8111 && (h == NULL
8112 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
8113 || h->root.type != bfd_link_hash_undefweak))
8114 {
8115 need_relocs = TRUE;
8116 if (globals->srelgot == NULL)
8117 abort ();
8118 loc = globals->srelgot->contents;
8119 loc += globals->srelgot->reloc_count * RELOC_SIZE (globals);
8120 }
8121
8122 if (tls_type & GOT_TLS_GD)
8123 {
8124 if (need_relocs)
8125 {
8126 outrel.r_addend = 0;
8127 outrel.r_offset = (globals->sgot->output_section->vma
8128 + globals->sgot->output_offset
8129 + cur_off);
8130 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_DTPMOD32);
8131
8132 if (globals->use_rel)
8133 bfd_put_32 (output_bfd, outrel.r_addend,
8134 globals->sgot->contents + cur_off);
8135
8136 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
8137 globals->srelgot->reloc_count++;
8138 loc += RELOC_SIZE (globals);
8139
8140 if (indx == 0)
8141 bfd_put_32 (output_bfd, value - dtpoff_base (info),
8142 globals->sgot->contents + cur_off + 4);
8143 else
8144 {
8145 outrel.r_addend = 0;
8146 outrel.r_info = ELF32_R_INFO (indx,
8147 R_ARM_TLS_DTPOFF32);
8148 outrel.r_offset += 4;
8149
8150 if (globals->use_rel)
8151 bfd_put_32 (output_bfd, outrel.r_addend,
8152 globals->sgot->contents + cur_off + 4);
8153
8154
8155 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
8156 globals->srelgot->reloc_count++;
8157 loc += RELOC_SIZE (globals);
8158 }
8159 }
8160 else
8161 {
8162 /* If we are not emitting relocations for a
8163 general dynamic reference, then we must be in a
8164 static link or an executable link with the
8165 symbol binding locally. Mark it as belonging
8166 to module 1, the executable. */
8167 bfd_put_32 (output_bfd, 1,
8168 globals->sgot->contents + cur_off);
8169 bfd_put_32 (output_bfd, value - dtpoff_base (info),
8170 globals->sgot->contents + cur_off + 4);
8171 }
8172
8173 cur_off += 8;
8174 }
8175
8176 if (tls_type & GOT_TLS_IE)
8177 {
8178 if (need_relocs)
8179 {
8180 if (indx == 0)
8181 outrel.r_addend = value - dtpoff_base (info);
8182 else
8183 outrel.r_addend = 0;
8184 outrel.r_offset = (globals->sgot->output_section->vma
8185 + globals->sgot->output_offset
8186 + cur_off);
8187 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_TPOFF32);
8188
8189 if (globals->use_rel)
8190 bfd_put_32 (output_bfd, outrel.r_addend,
8191 globals->sgot->contents + cur_off);
8192
8193 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
8194 globals->srelgot->reloc_count++;
8195 loc += RELOC_SIZE (globals);
8196 }
8197 else
8198 bfd_put_32 (output_bfd, tpoff (info, value),
8199 globals->sgot->contents + cur_off);
8200 cur_off += 4;
8201 }
8202
8203 if (h != NULL)
8204 h->got.offset |= 1;
8205 else
8206 local_got_offsets[r_symndx] |= 1;
8207 }
8208
8209 if ((tls_type & GOT_TLS_GD) && r_type != R_ARM_TLS_GD32)
8210 off += 8;
8211 value = globals->sgot->output_section->vma + globals->sgot->output_offset + off
8212 - (input_section->output_section->vma + input_section->output_offset + rel->r_offset);
8213
8214 return _bfd_final_link_relocate (howto, input_bfd, input_section,
8215 contents, rel->r_offset, value,
8216 rel->r_addend);
8217 }
8218
8219 case R_ARM_TLS_LE32:
8220 if (info->shared)
8221 {
8222 (*_bfd_error_handler)
8223 (_("%B(%A+0x%lx): R_ARM_TLS_LE32 relocation not permitted in shared object"),
8224 input_bfd, input_section,
8225 (long) rel->r_offset, howto->name);
8226 return bfd_reloc_notsupported;
8227 }
8228 else
8229 value = tpoff (info, value);
8230
8231 return _bfd_final_link_relocate (howto, input_bfd, input_section,
8232 contents, rel->r_offset, value,
8233 rel->r_addend);
8234
8235 case R_ARM_V4BX:
8236 if (globals->fix_v4bx)
8237 {
8238 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8239
8240 /* Ensure that we have a BX instruction. */
8241 BFD_ASSERT ((insn & 0x0ffffff0) == 0x012fff10);
8242
8243 if (globals->fix_v4bx == 2 && (insn & 0xf) != 0xf)
8244 {
8245 /* Branch to veneer. */
8246 bfd_vma glue_addr;
8247 glue_addr = elf32_arm_bx_glue (info, insn & 0xf);
8248 glue_addr -= input_section->output_section->vma
8249 + input_section->output_offset
8250 + rel->r_offset + 8;
8251 insn = (insn & 0xf0000000) | 0x0a000000
8252 | ((glue_addr >> 2) & 0x00ffffff);
8253 }
8254 else
8255 {
8256 /* Preserve Rm (lowest four bits) and the condition code
8257 (highest four bits). Other bits encode MOV PC,Rm. */
8258 insn = (insn & 0xf000000f) | 0x01a0f000;
8259 }
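 /* Illustrative rewrite: 0xe12fff13 (bx r3) becomes
    0xe1a0f003 (mov pc, r3), keeping Rm and the condition field.  */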
8260
8261 bfd_put_32 (input_bfd, insn, hit_data);
8262 }
8263 return bfd_reloc_ok;
8264
8265 case R_ARM_MOVW_ABS_NC:
8266 case R_ARM_MOVT_ABS:
8267 case R_ARM_MOVW_PREL_NC:
8268 case R_ARM_MOVT_PREL:
8269 /* Until we properly support segment-base-relative addressing, we
8270 assume the segment base to be zero, as for the group relocations.
8271 Thus R_ARM_MOVW_BREL_NC has the same semantics as R_ARM_MOVW_ABS_NC
8272 and R_ARM_MOVT_BREL has the same semantics as R_ARM_MOVT_ABS. */
8273 case R_ARM_MOVW_BREL_NC:
8274 case R_ARM_MOVW_BREL:
8275 case R_ARM_MOVT_BREL:
8276 {
8277 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8278
8279 if (globals->use_rel)
8280 {
8281 addend = ((insn >> 4) & 0xf000) | (insn & 0xfff);
8282 signed_addend = (addend ^ 0x8000) - 0x8000;
8283 }
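 /* Illustrative decode: 0xe30f0ffc (movw r0, #0xfffc) gives
    addend = 0xfffc, which the xor/subtract above sign-extends
    to signed_addend = -4.  */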
8284
8285 value += signed_addend;
8286
8287 if (r_type == R_ARM_MOVW_PREL_NC || r_type == R_ARM_MOVT_PREL)
8288 value -= (input_section->output_section->vma
8289 + input_section->output_offset + rel->r_offset);
8290
8291 if (r_type == R_ARM_MOVW_BREL && value >= 0x10000)
8292 return bfd_reloc_overflow;
8293
8294 if (sym_flags == STT_ARM_TFUNC)
8295 value |= 1;
8296
8297 if (r_type == R_ARM_MOVT_ABS || r_type == R_ARM_MOVT_PREL
8298 || r_type == R_ARM_MOVT_BREL)
8299 value >>= 16;
8300
8301 insn &= 0xfff0f000;
8302 insn |= value & 0xfff;
8303 insn |= (value & 0xf000) << 4;
8304 bfd_put_32 (input_bfd, insn, hit_data);
8305 }
8306 return bfd_reloc_ok;
8307
8308 case R_ARM_THM_MOVW_ABS_NC:
8309 case R_ARM_THM_MOVT_ABS:
8310 case R_ARM_THM_MOVW_PREL_NC:
8311 case R_ARM_THM_MOVT_PREL:
8312 /* Until we properly support segment-base-relative addressing, we
8313 assume the segment base to be zero, as for the above relocations.
8314 Thus R_ARM_THM_MOVW_BREL_NC has the same semantics as
8315 R_ARM_THM_MOVW_ABS_NC and R_ARM_THM_MOVT_BREL has the same semantics
8316 as R_ARM_THM_MOVT_ABS. */
8317 case R_ARM_THM_MOVW_BREL_NC:
8318 case R_ARM_THM_MOVW_BREL:
8319 case R_ARM_THM_MOVT_BREL:
8320 {
8321 bfd_vma insn;
8322
8323 insn = bfd_get_16 (input_bfd, hit_data) << 16;
8324 insn |= bfd_get_16 (input_bfd, hit_data + 2);
8325
8326 if (globals->use_rel)
8327 {
8328 addend = ((insn >> 4) & 0xf000)
8329 | ((insn >> 15) & 0x0800)
8330 | ((insn >> 4) & 0x0700)
8331 | (insn & 0x00ff);
8332 signed_addend = (addend ^ 0x8000) - 0x8000;
8333 }
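 /* Illustrative decode: 0xf64f70fc (movw r0, #0xfffc, T3 encoding)
    reassembles imm4:i:imm3:imm8 into addend = 0xfffc, sign-extended
    to -4 as in the ARM case above.  */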
8334
8335 value += signed_addend;
8336
8337 if (r_type == R_ARM_THM_MOVW_PREL_NC || r_type == R_ARM_THM_MOVT_PREL)
8338 value -= (input_section->output_section->vma
8339 + input_section->output_offset + rel->r_offset);
8340
8341 if (r_type == R_ARM_THM_MOVW_BREL && value >= 0x10000)
8342 return bfd_reloc_overflow;
8343
8344 if (sym_flags == STT_ARM_TFUNC)
8345 value |= 1;
8346
8347 if (r_type == R_ARM_THM_MOVT_ABS || r_type == R_ARM_THM_MOVT_PREL
8348 || r_type == R_ARM_THM_MOVT_BREL)
8349 value >>= 16;
8350
8351 insn &= 0xfbf08f00;
8352 insn |= (value & 0xf000) << 4;
8353 insn |= (value & 0x0800) << 15;
8354 insn |= (value & 0x0700) << 4;
8355 insn |= (value & 0x00ff);
8356
8357 bfd_put_16 (input_bfd, insn >> 16, hit_data);
8358 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
8359 }
8360 return bfd_reloc_ok;
8361
8362 case R_ARM_ALU_PC_G0_NC:
8363 case R_ARM_ALU_PC_G1_NC:
8364 case R_ARM_ALU_PC_G0:
8365 case R_ARM_ALU_PC_G1:
8366 case R_ARM_ALU_PC_G2:
8367 case R_ARM_ALU_SB_G0_NC:
8368 case R_ARM_ALU_SB_G1_NC:
8369 case R_ARM_ALU_SB_G0:
8370 case R_ARM_ALU_SB_G1:
8371 case R_ARM_ALU_SB_G2:
8372 {
8373 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8374 bfd_vma pc = input_section->output_section->vma
8375 + input_section->output_offset + rel->r_offset;
8376 /* sb should be the origin of the *segment* containing the symbol.
8377 It is not clear how to obtain this OS-dependent value, so we
8378 make an arbitrary choice of zero. */
8379 bfd_vma sb = 0;
8380 bfd_vma residual;
8381 bfd_vma g_n;
8382 bfd_signed_vma signed_value;
8383 int group = 0;
8384
8385 /* Determine which group of bits to select. */
8386 switch (r_type)
8387 {
8388 case R_ARM_ALU_PC_G0_NC:
8389 case R_ARM_ALU_PC_G0:
8390 case R_ARM_ALU_SB_G0_NC:
8391 case R_ARM_ALU_SB_G0:
8392 group = 0;
8393 break;
8394
8395 case R_ARM_ALU_PC_G1_NC:
8396 case R_ARM_ALU_PC_G1:
8397 case R_ARM_ALU_SB_G1_NC:
8398 case R_ARM_ALU_SB_G1:
8399 group = 1;
8400 break;
8401
8402 case R_ARM_ALU_PC_G2:
8403 case R_ARM_ALU_SB_G2:
8404 group = 2;
8405 break;
8406
8407 default:
8408 abort ();
8409 }
8410
8411 /* If REL, extract the addend from the insn. If RELA, it will
8412 have already been fetched for us. */
8413 if (globals->use_rel)
8414 {
8415 int negative;
8416 bfd_vma constant = insn & 0xff;
8417 bfd_vma rotation = (insn & 0xf00) >> 8;
8418
8419 if (rotation == 0)
8420 signed_addend = constant;
8421 else
8422 {
8423 /* Compensate for the fact that in the instruction, the
8424 rotation is stored in multiples of 2 bits. */
8425 rotation *= 2;
8426
8427 /* Rotate "constant" right by "rotation" bits. */
8428 signed_addend = (constant >> rotation) |
8429 (constant << (8 * sizeof (bfd_vma) - rotation));
8430 }
8431
8432 /* Determine if the instruction is an ADD or a SUB.
8433 (For REL, this determines the sign of the addend.) */
8434 negative = identify_add_or_sub (insn);
8435 if (negative == 0)
8436 {
8437 (*_bfd_error_handler)
8438 (_("%B(%A+0x%lx): Only ADD or SUB instructions are allowed for ALU group relocations"),
8439 input_bfd, input_section,
8440 (long) rel->r_offset, howto->name);
8441 return bfd_reloc_overflow;
8442 }
8443
8444 signed_addend *= negative;
8445 }
8446
8447 /* Compute the value (X) to go in the place. */
8448 if (r_type == R_ARM_ALU_PC_G0_NC
8449 || r_type == R_ARM_ALU_PC_G1_NC
8450 || r_type == R_ARM_ALU_PC_G0
8451 || r_type == R_ARM_ALU_PC_G1
8452 || r_type == R_ARM_ALU_PC_G2)
8453 /* PC relative. */
8454 signed_value = value - pc + signed_addend;
8455 else
8456 /* Section base relative. */
8457 signed_value = value - sb + signed_addend;
8458
8459 /* If the target symbol is a Thumb function, then set the
8460 Thumb bit in the address. */
8461 if (sym_flags == STT_ARM_TFUNC)
8462 signed_value |= 1;
8463
8464 /* Calculate the value of the relevant G_n, in encoded
8465 constant-with-rotation format. */
8466 g_n = calculate_group_reloc_mask (abs (signed_value), group,
8467 &residual);
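 /* Illustrative split (values chosen arbitrarily): for
    signed_value = 0x1234, group 0 selects the chunk 0x1200, leaving
    a residual of 0x34; a subsequent G1 relocation would then encode
    0x34 with a residual of zero.  */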
8468
8469 /* Check for overflow if required. */
8470 if ((r_type == R_ARM_ALU_PC_G0
8471 || r_type == R_ARM_ALU_PC_G1
8472 || r_type == R_ARM_ALU_PC_G2
8473 || r_type == R_ARM_ALU_SB_G0
8474 || r_type == R_ARM_ALU_SB_G1
8475 || r_type == R_ARM_ALU_SB_G2) && residual != 0)
8476 {
8477 (*_bfd_error_handler)
8478 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
8479 input_bfd, input_section,
8480 (long) rel->r_offset, abs (signed_value), howto->name);
8481 return bfd_reloc_overflow;
8482 }
8483
8484 /* Mask out the value and the ADD/SUB part of the opcode; take care
8485 not to destroy the S bit. */
8486 insn &= 0xff1ff000;
8487
8488 /* Set the opcode according to whether the value to go in the
8489 place is negative. */
8490 if (signed_value < 0)
8491 insn |= 1 << 22;
8492 else
8493 insn |= 1 << 23;
8494
8495 /* Encode the offset. */
8496 insn |= g_n;
8497
8498 bfd_put_32 (input_bfd, insn, hit_data);
8499 }
8500 return bfd_reloc_ok;
8501
8502 case R_ARM_LDR_PC_G0:
8503 case R_ARM_LDR_PC_G1:
8504 case R_ARM_LDR_PC_G2:
8505 case R_ARM_LDR_SB_G0:
8506 case R_ARM_LDR_SB_G1:
8507 case R_ARM_LDR_SB_G2:
8508 {
8509 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8510 bfd_vma pc = input_section->output_section->vma
8511 + input_section->output_offset + rel->r_offset;
8512 bfd_vma sb = 0; /* See note above. */
8513 bfd_vma residual;
8514 bfd_signed_vma signed_value;
8515 int group = 0;
8516
8517 /* Determine which groups of bits to calculate. */
8518 switch (r_type)
8519 {
8520 case R_ARM_LDR_PC_G0:
8521 case R_ARM_LDR_SB_G0:
8522 group = 0;
8523 break;
8524
8525 case R_ARM_LDR_PC_G1:
8526 case R_ARM_LDR_SB_G1:
8527 group = 1;
8528 break;
8529
8530 case R_ARM_LDR_PC_G2:
8531 case R_ARM_LDR_SB_G2:
8532 group = 2;
8533 break;
8534
8535 default:
8536 abort ();
8537 }
8538
8539 /* If REL, extract the addend from the insn. If RELA, it will
8540 have already been fetched for us. */
8541 if (globals->use_rel)
8542 {
8543 int negative = (insn & (1 << 23)) ? 1 : -1;
8544 signed_addend = negative * (insn & 0xfff);
8545 }
8546
8547 /* Compute the value (X) to go in the place. */
8548 if (r_type == R_ARM_LDR_PC_G0
8549 || r_type == R_ARM_LDR_PC_G1
8550 || r_type == R_ARM_LDR_PC_G2)
8551 /* PC relative. */
8552 signed_value = value - pc + signed_addend;
8553 else
8554 /* Section base relative. */
8555 signed_value = value - sb + signed_addend;
8556
8557 /* Calculate the value of the relevant G_{n-1} to obtain
8558 the residual at that stage. */
8559 calculate_group_reloc_mask (abs (signed_value), group - 1, &residual);
8560
8561 /* Check for overflow. */
8562 if (residual >= 0x1000)
8563 {
8564 (*_bfd_error_handler)
8565 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
8566 input_bfd, input_section,
8567 (long) rel->r_offset, abs (signed_value), howto->name);
8568 return bfd_reloc_overflow;
8569 }
8570
8571 /* Mask out the value and U bit. */
8572 insn &= 0xff7ff000;
8573
8574 /* Set the U bit if the value to go in the place is non-negative. */
8575 if (signed_value >= 0)
8576 insn |= 1 << 23;
8577
8578 /* Encode the offset. */
8579 insn |= residual;
8580
8581 bfd_put_32 (input_bfd, insn, hit_data);
8582 }
8583 return bfd_reloc_ok;
8584
8585 case R_ARM_LDRS_PC_G0:
8586 case R_ARM_LDRS_PC_G1:
8587 case R_ARM_LDRS_PC_G2:
8588 case R_ARM_LDRS_SB_G0:
8589 case R_ARM_LDRS_SB_G1:
8590 case R_ARM_LDRS_SB_G2:
8591 {
8592 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8593 bfd_vma pc = input_section->output_section->vma
8594 + input_section->output_offset + rel->r_offset;
8595 bfd_vma sb = 0; /* See note above. */
8596 bfd_vma residual;
8597 bfd_signed_vma signed_value;
8598 int group = 0;
8599
8600 /* Determine which groups of bits to calculate. */
8601 switch (r_type)
8602 {
8603 case R_ARM_LDRS_PC_G0:
8604 case R_ARM_LDRS_SB_G0:
8605 group = 0;
8606 break;
8607
8608 case R_ARM_LDRS_PC_G1:
8609 case R_ARM_LDRS_SB_G1:
8610 group = 1;
8611 break;
8612
8613 case R_ARM_LDRS_PC_G2:
8614 case R_ARM_LDRS_SB_G2:
8615 group = 2;
8616 break;
8617
8618 default:
8619 abort ();
8620 }
8621
8622 /* If REL, extract the addend from the insn. If RELA, it will
8623 have already been fetched for us. */
8624 if (globals->use_rel)
8625 {
8626 int negative = (insn & (1 << 23)) ? 1 : -1;
8627 signed_addend = negative * (((insn & 0xf00) >> 4) + (insn & 0xf));
8628 }
8629
8630 /* Compute the value (X) to go in the place. */
8631 if (r_type == R_ARM_LDRS_PC_G0
8632 || r_type == R_ARM_LDRS_PC_G1
8633 || r_type == R_ARM_LDRS_PC_G2)
8634 /* PC relative. */
8635 signed_value = value - pc + signed_addend;
8636 else
8637 /* Section base relative. */
8638 signed_value = value - sb + signed_addend;
8639
8640 /* Calculate the value of the relevant G_{n-1} to obtain
8641 the residual at that stage. */
8642 calculate_group_reloc_mask (abs (signed_value), group - 1, &residual);
8643
8644 /* Check for overflow. */
8645 if (residual >= 0x100)
8646 {
8647 (*_bfd_error_handler)
8648 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
8649 input_bfd, input_section,
8650 (long) rel->r_offset, abs (signed_value), howto->name);
8651 return bfd_reloc_overflow;
8652 }
8653
8654 /* Mask out the value and U bit. */
8655 insn &= 0xff7ff0f0;
8656
8657 /* Set the U bit if the value to go in the place is non-negative. */
8658 if (signed_value >= 0)
8659 insn |= 1 << 23;
8660
8661 /* Encode the offset. */
8662 insn |= ((residual & 0xf0) << 4) | (residual & 0xf);
8663
8664 bfd_put_32 (input_bfd, insn, hit_data);
8665 }
8666 return bfd_reloc_ok;
8667
8668 case R_ARM_LDC_PC_G0:
8669 case R_ARM_LDC_PC_G1:
8670 case R_ARM_LDC_PC_G2:
8671 case R_ARM_LDC_SB_G0:
8672 case R_ARM_LDC_SB_G1:
8673 case R_ARM_LDC_SB_G2:
8674 {
8675 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8676 bfd_vma pc = input_section->output_section->vma
8677 + input_section->output_offset + rel->r_offset;
8678 bfd_vma sb = 0; /* See note above. */
8679 bfd_vma residual;
8680 bfd_signed_vma signed_value;
8681 int group = 0;
8682
8683 /* Determine which groups of bits to calculate. */
8684 switch (r_type)
8685 {
8686 case R_ARM_LDC_PC_G0:
8687 case R_ARM_LDC_SB_G0:
8688 group = 0;
8689 break;
8690
8691 case R_ARM_LDC_PC_G1:
8692 case R_ARM_LDC_SB_G1:
8693 group = 1;
8694 break;
8695
8696 case R_ARM_LDC_PC_G2:
8697 case R_ARM_LDC_SB_G2:
8698 group = 2;
8699 break;
8700
8701 default:
8702 abort ();
8703 }
8704
8705 /* If REL, extract the addend from the insn. If RELA, it will
8706 have already been fetched for us. */
8707 if (globals->use_rel)
8708 {
8709 int negative = (insn & (1 << 23)) ? 1 : -1;
8710 signed_addend = negative * ((insn & 0xff) << 2);
8711 }
8712
8713 /* Compute the value (X) to go in the place. */
8714 if (r_type == R_ARM_LDC_PC_G0
8715 || r_type == R_ARM_LDC_PC_G1
8716 || r_type == R_ARM_LDC_PC_G2)
8717 /* PC relative. */
8718 signed_value = value - pc + signed_addend;
8719 else
8720 /* Section base relative. */
8721 signed_value = value - sb + signed_addend;
8722
8723 /* Calculate the value of the relevant G_{n-1} to obtain
8724 the residual at that stage. */
8725 calculate_group_reloc_mask (abs (signed_value), group - 1, &residual);
8726
8727 /* Check for overflow. (The absolute value to go in the place must be
8728 divisible by four and, after having been divided by four, must
8729 fit in eight bits.) */
8730 if ((residual & 0x3) != 0 || residual >= 0x400)
8731 {
8732 (*_bfd_error_handler)
8733 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
8734 input_bfd, input_section,
8735 (long) rel->r_offset, abs (signed_value), howto->name);
8736 return bfd_reloc_overflow;
8737 }
8738
8739 /* Mask out the value and U bit. */
8740 insn &= 0xff7fff00;
8741
8742 /* Set the U bit if the value to go in the place is non-negative. */
8743 if (signed_value >= 0)
8744 insn |= 1 << 23;
8745
8746 /* Encode the offset. */
8747 insn |= residual >> 2;
8748
8749 bfd_put_32 (input_bfd, insn, hit_data);
8750 }
8751 return bfd_reloc_ok;
8752
8753 default:
8754 return bfd_reloc_notsupported;
8755 }
8756 }
8757
8758 /* Add INCREMENT to the reloc (of type HOWTO) at ADDRESS. */
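 /* For example (purely illustrative), adding an increment of 0x100 to an
    R_ARM_PC24 whose 24-bit field currently holds 0x36 proceeds as
    0x36 << 2 = 0xd8, plus 0x100 = 0x1d8, then >> 2 = 0x76, which is
    masked back into the instruction.  */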
8759 static void
8760 arm_add_to_rel (bfd * abfd,
8761 bfd_byte * address,
8762 reloc_howto_type * howto,
8763 bfd_signed_vma increment)
8764 {
8765 bfd_signed_vma addend;
8766
8767 if (howto->type == R_ARM_THM_CALL
8768 || howto->type == R_ARM_THM_JUMP24)
8769 {
8770 int upper_insn, lower_insn;
8771 int upper, lower;
8772
8773 upper_insn = bfd_get_16 (abfd, address);
8774 lower_insn = bfd_get_16 (abfd, address + 2);
8775 upper = upper_insn & 0x7ff;
8776 lower = lower_insn & 0x7ff;
8777
8778 addend = (upper << 12) | (lower << 1);
8779 addend += increment;
8780 addend >>= 1;
8781
8782 upper_insn = (upper_insn & 0xf800) | ((addend >> 11) & 0x7ff);
8783 lower_insn = (lower_insn & 0xf800) | (addend & 0x7ff);
8784
8785 bfd_put_16 (abfd, (bfd_vma) upper_insn, address);
8786 bfd_put_16 (abfd, (bfd_vma) lower_insn, address + 2);
8787 }
8788 else
8789 {
8790 bfd_vma contents;
8791
8792 contents = bfd_get_32 (abfd, address);
8793
8794 /* Get the (signed) value from the instruction. */
8795 addend = contents & howto->src_mask;
8796 if (addend & ((howto->src_mask + 1) >> 1))
8797 {
8798 bfd_signed_vma mask;
8799
8800 mask = -1;
8801 mask &= ~ howto->src_mask;
8802 addend |= mask;
8803 }
8804
8805 /* Add in the increment (which is a byte value). */
8806 switch (howto->type)
8807 {
8808 default:
8809 addend += increment;
8810 break;
8811
8812 case R_ARM_PC24:
8813 case R_ARM_PLT32:
8814 case R_ARM_CALL:
8815 case R_ARM_JUMP24:
8816 addend <<= howto->size;
8817 addend += increment;
8818
8819 /* Should we check for overflow here ? */
8820
8821 /* Drop any undesired bits. */
8822 addend >>= howto->rightshift;
8823 break;
8824 }
8825
8826 contents = (contents & ~ howto->dst_mask) | (addend & howto->dst_mask);
8827
8828 bfd_put_32 (abfd, contents, address);
8829 }
8830 }
8831
8832 #define IS_ARM_TLS_RELOC(R_TYPE) \
8833 ((R_TYPE) == R_ARM_TLS_GD32 \
8834 || (R_TYPE) == R_ARM_TLS_LDO32 \
8835 || (R_TYPE) == R_ARM_TLS_LDM32 \
8836 || (R_TYPE) == R_ARM_TLS_DTPOFF32 \
8837 || (R_TYPE) == R_ARM_TLS_DTPMOD32 \
8838 || (R_TYPE) == R_ARM_TLS_TPOFF32 \
8839 || (R_TYPE) == R_ARM_TLS_LE32 \
8840 || (R_TYPE) == R_ARM_TLS_IE32)
8841
8842 /* Relocate an ARM ELF section. */
8843
8844 static bfd_boolean
8845 elf32_arm_relocate_section (bfd * output_bfd,
8846 struct bfd_link_info * info,
8847 bfd * input_bfd,
8848 asection * input_section,
8849 bfd_byte * contents,
8850 Elf_Internal_Rela * relocs,
8851 Elf_Internal_Sym * local_syms,
8852 asection ** local_sections)
8853 {
8854 Elf_Internal_Shdr *symtab_hdr;
8855 struct elf_link_hash_entry **sym_hashes;
8856 Elf_Internal_Rela *rel;
8857 Elf_Internal_Rela *relend;
8858 const char *name;
8859 struct elf32_arm_link_hash_table * globals;
8860
8861 globals = elf32_arm_hash_table (info);
8862 if (globals == NULL)
8863 return FALSE;
8864
8865 symtab_hdr = & elf_symtab_hdr (input_bfd);
8866 sym_hashes = elf_sym_hashes (input_bfd);
8867
8868 rel = relocs;
8869 relend = relocs + input_section->reloc_count;
8870 for (; rel < relend; rel++)
8871 {
8872 int r_type;
8873 reloc_howto_type * howto;
8874 unsigned long r_symndx;
8875 Elf_Internal_Sym * sym;
8876 asection * sec;
8877 struct elf_link_hash_entry * h;
8878 bfd_vma relocation;
8879 bfd_reloc_status_type r;
8880 arelent bfd_reloc;
8881 char sym_type;
8882 bfd_boolean unresolved_reloc = FALSE;
8883 char *error_message = NULL;
8884
8885 r_symndx = ELF32_R_SYM (rel->r_info);
8886 r_type = ELF32_R_TYPE (rel->r_info);
8887 r_type = arm_real_reloc_type (globals, r_type);
8888
8889 if ( r_type == R_ARM_GNU_VTENTRY
8890 || r_type == R_ARM_GNU_VTINHERIT)
8891 continue;
8892
8893 bfd_reloc.howto = elf32_arm_howto_from_type (r_type);
8894 howto = bfd_reloc.howto;
8895
8896 h = NULL;
8897 sym = NULL;
8898 sec = NULL;
8899
8900 if (r_symndx < symtab_hdr->sh_info)
8901 {
8902 sym = local_syms + r_symndx;
8903 sym_type = ELF32_ST_TYPE (sym->st_info);
8904 sec = local_sections[r_symndx];
8905
8906 /* An object file might have a reference to a local
8907 undefined symbol. This is a daft object file, but we
8908 should at least do something about it. V4BX & NONE
8909 relocations do not use the symbol and are explicitly
8910 allowed to use the undefined symbol, so allow those.
8911 Likewise for relocations against STN_UNDEF. */
8912 if (r_type != R_ARM_V4BX
8913 && r_type != R_ARM_NONE
8914 && r_symndx != STN_UNDEF
8915 && bfd_is_und_section (sec)
8916 && ELF_ST_BIND (sym->st_info) != STB_WEAK)
8917 {
8918 if (!info->callbacks->undefined_symbol
8919 (info, bfd_elf_string_from_elf_section
8920 (input_bfd, symtab_hdr->sh_link, sym->st_name),
8921 input_bfd, input_section,
8922 rel->r_offset, TRUE))
8923 return FALSE;
8924 }
8925
8926 if (globals->use_rel)
8927 {
8928 relocation = (sec->output_section->vma
8929 + sec->output_offset
8930 + sym->st_value);
8931 if (!info->relocatable
8932 && (sec->flags & SEC_MERGE)
8933 && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
8934 {
8935 asection *msec;
8936 bfd_vma addend, value;
8937
8938 switch (r_type)
8939 {
8940 case R_ARM_MOVW_ABS_NC:
8941 case R_ARM_MOVT_ABS:
8942 value = bfd_get_32 (input_bfd, contents + rel->r_offset);
8943 addend = ((value & 0xf0000) >> 4) | (value & 0xfff);
8944 addend = (addend ^ 0x8000) - 0x8000;
8945 break;
8946
8947 case R_ARM_THM_MOVW_ABS_NC:
8948 case R_ARM_THM_MOVT_ABS:
8949 value = bfd_get_16 (input_bfd, contents + rel->r_offset)
8950 << 16;
8951 value |= bfd_get_16 (input_bfd,
8952 contents + rel->r_offset + 2);
8953 addend = ((value & 0xf7000) >> 4) | (value & 0xff)
8954 | ((value & 0x04000000) >> 15);
8955 addend = (addend ^ 0x8000) - 0x8000;
8956 break;
8957
8958 default:
8959 if (howto->rightshift
8960 || (howto->src_mask & (howto->src_mask + 1)))
8961 {
8962 (*_bfd_error_handler)
8963 (_("%B(%A+0x%lx): %s relocation against SEC_MERGE section"),
8964 input_bfd, input_section,
8965 (long) rel->r_offset, howto->name);
8966 return FALSE;
8967 }
8968
8969 value = bfd_get_32 (input_bfd, contents + rel->r_offset);
8970
8971 /* Get the (signed) value from the instruction. */
8972 addend = value & howto->src_mask;
8973 if (addend & ((howto->src_mask + 1) >> 1))
8974 {
8975 bfd_signed_vma mask;
8976
8977 mask = -1;
8978 mask &= ~ howto->src_mask;
8979 addend |= mask;
8980 }
8981 break;
8982 }
8983
8984 msec = sec;
8985 addend =
8986 _bfd_elf_rel_local_sym (output_bfd, sym, &msec, addend)
8987 - relocation;
8988 addend += msec->output_section->vma + msec->output_offset;
8989
8990 /* Cases here must match those in the preceding
8991 switch statement. */
8992 switch (r_type)
8993 {
8994 case R_ARM_MOVW_ABS_NC:
8995 case R_ARM_MOVT_ABS:
8996 value = (value & 0xfff0f000) | ((addend & 0xf000) << 4)
8997 | (addend & 0xfff);
8998 bfd_put_32 (input_bfd, value, contents + rel->r_offset);
8999 break;
9000
9001 case R_ARM_THM_MOVW_ABS_NC:
9002 case R_ARM_THM_MOVT_ABS:
9003 value = (value & 0xfbf08f00) | ((addend & 0xf700) << 4)
9004 | (addend & 0xff) | ((addend & 0x0800) << 15);
9005 bfd_put_16 (input_bfd, value >> 16,
9006 contents + rel->r_offset);
9007 bfd_put_16 (input_bfd, value,
9008 contents + rel->r_offset + 2);
9009 break;
9010
9011 default:
9012 value = (value & ~ howto->dst_mask)
9013 | (addend & howto->dst_mask);
9014 bfd_put_32 (input_bfd, value, contents + rel->r_offset);
9015 break;
9016 }
9017 }
9018 }
9019 else
9020 relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
9021 }
9022 else
9023 {
9024 bfd_boolean warned;
9025
9026 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
9027 r_symndx, symtab_hdr, sym_hashes,
9028 h, sec, relocation,
9029 unresolved_reloc, warned);
9030
9031 sym_type = h->type;
9032 }
9033
9034 if (sec != NULL && elf_discarded_section (sec))
9035 {
9036 /* For relocs against symbols from removed linkonce sections,
9037 or sections discarded by a linker script, we just want the
9038 section contents zeroed. Avoid any special processing. */
9039 _bfd_clear_contents (howto, input_bfd, contents + rel->r_offset);
9040 rel->r_info = 0;
9041 rel->r_addend = 0;
9042 continue;
9043 }
9044
9045 if (info->relocatable)
9046 {
9047 /* This is a relocatable link. We don't have to change
9048 anything, unless the reloc is against a section symbol,
9049 in which case we have to adjust according to where the
9050 section symbol winds up in the output section. */
9051 if (sym != NULL && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
9052 {
9053 if (globals->use_rel)
9054 arm_add_to_rel (input_bfd, contents + rel->r_offset,
9055 howto, (bfd_signed_vma) sec->output_offset);
9056 else
9057 rel->r_addend += sec->output_offset;
9058 }
9059 continue;
9060 }
9061
9062 if (h != NULL)
9063 name = h->root.root.string;
9064 else
9065 {
9066 name = (bfd_elf_string_from_elf_section
9067 (input_bfd, symtab_hdr->sh_link, sym->st_name));
9068 if (name == NULL || *name == '\0')
9069 name = bfd_section_name (input_bfd, sec);
9070 }
9071
9072 if (r_symndx != STN_UNDEF
9073 && r_type != R_ARM_NONE
9074 && (h == NULL
9075 || h->root.type == bfd_link_hash_defined
9076 || h->root.type == bfd_link_hash_defweak)
9077 && IS_ARM_TLS_RELOC (r_type) != (sym_type == STT_TLS))
9078 {
9079 (*_bfd_error_handler)
9080 ((sym_type == STT_TLS
9081 ? _("%B(%A+0x%lx): %s used with TLS symbol %s")
9082 : _("%B(%A+0x%lx): %s used with non-TLS symbol %s")),
9083 input_bfd,
9084 input_section,
9085 (long) rel->r_offset,
9086 howto->name,
9087 name);
9088 }
9089
9090 r = elf32_arm_final_link_relocate (howto, input_bfd, output_bfd,
9091 input_section, contents, rel,
9092 relocation, info, sec, name,
9093 (h ? ELF_ST_TYPE (h->type) :
9094 ELF_ST_TYPE (sym->st_info)), h,
9095 &unresolved_reloc, &error_message);
9096
9097 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
9098 because such sections are not SEC_ALLOC and thus ld.so will
9099 not process them. */
9100 if (unresolved_reloc
9101 && !((input_section->flags & SEC_DEBUGGING) != 0
9102 && h->def_dynamic))
9103 {
9104 (*_bfd_error_handler)
9105 (_("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"),
9106 input_bfd,
9107 input_section,
9108 (long) rel->r_offset,
9109 howto->name,
9110 h->root.root.string);
9111 return FALSE;
9112 }
9113
9114 if (r != bfd_reloc_ok)
9115 {
9116 switch (r)
9117 {
9118 case bfd_reloc_overflow:
9119 /* If the overflowing reloc was to an undefined symbol,
9120 we have already printed one error message and there
9121 is no point complaining again. */
9122 if ((! h ||
9123 h->root.type != bfd_link_hash_undefined)
9124 && (!((*info->callbacks->reloc_overflow)
9125 (info, (h ? &h->root : NULL), name, howto->name,
9126 (bfd_vma) 0, input_bfd, input_section,
9127 rel->r_offset))))
9128 return FALSE;
9129 break;
9130
9131 case bfd_reloc_undefined:
9132 if (!((*info->callbacks->undefined_symbol)
9133 (info, name, input_bfd, input_section,
9134 rel->r_offset, TRUE)))
9135 return FALSE;
9136 break;
9137
9138 case bfd_reloc_outofrange:
9139 error_message = _("out of range");
9140 goto common_error;
9141
9142 case bfd_reloc_notsupported:
9143 error_message = _("unsupported relocation");
9144 goto common_error;
9145
9146 case bfd_reloc_dangerous:
9147 /* error_message should already be set. */
9148 goto common_error;
9149
9150 default:
9151 error_message = _("unknown error");
9152 /* Fall through. */
9153
9154 common_error:
9155 BFD_ASSERT (error_message != NULL);
9156 if (!((*info->callbacks->reloc_dangerous)
9157 (info, error_message, input_bfd, input_section,
9158 rel->r_offset)))
9159 return FALSE;
9160 break;
9161 }
9162 }
9163 }
9164
9165 return TRUE;
9166 }
9167
9168 /* Add a new unwind edit to the list described by HEAD, TAIL. If TINDEX is zero,
9169 adds the edit to the start of the list. (The list must be built in order of
9170 ascending TINDEX: the function's callers are primarily responsible for
9171 maintaining that condition). */
9172
9173 static void
9174 add_unwind_table_edit (arm_unwind_table_edit **head,
9175 arm_unwind_table_edit **tail,
9176 arm_unwind_edit_type type,
9177 asection *linked_section,
9178 unsigned int tindex)
9179 {
9180 arm_unwind_table_edit *new_edit = (arm_unwind_table_edit *)
9181 xmalloc (sizeof (arm_unwind_table_edit));
9182
9183 new_edit->type = type;
9184 new_edit->linked_section = linked_section;
9185 new_edit->index = tindex;
9186
9187 if (tindex > 0)
9188 {
9189 new_edit->next = NULL;
9190
9191 if (*tail)
9192 (*tail)->next = new_edit;
9193
9194 (*tail) = new_edit;
9195
9196 if (!*head)
9197 (*head) = new_edit;
9198 }
9199 else
9200 {
9201 new_edit->next = *head;
9202
9203 if (!*tail)
9204 *tail = new_edit;
9205
9206 *head = new_edit;
9207 }
9208 }
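/* Two kinds of edit are recorded through this helper: DELETE_EXIDX_ENTRY
   edits, whose TINDEX identifies the 8-byte .ARM.exidx entry to remove, and
   INSERT_EXIDX_CANTUNWIND_AT_END edits, which insert_cantunwind_after below
   queues with TINDEX == UINT_MAX so that they sort after every real entry. */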
9209
9210 static _arm_elf_section_data *get_arm_elf_section_data (asection *);
9211
9212 /* Increase the size of EXIDX_SEC by ADJUST bytes.  ADJUST may be negative. */
9213 static void
9214 adjust_exidx_size (asection *exidx_sec, int adjust)
9215 {
9216 asection *out_sec;
9217
9218 if (!exidx_sec->rawsize)
9219 exidx_sec->rawsize = exidx_sec->size;
9220
9221 bfd_set_section_size (exidx_sec->owner, exidx_sec, exidx_sec->size + adjust);
9222 out_sec = exidx_sec->output_section;
9223 /* Adjust size of output section. */
9224 bfd_set_section_size (out_sec->owner, out_sec, out_sec->size + adjust);
9225 }
9226
9227 /* Insert an EXIDX_CANTUNWIND marker at the end of a section. */
9228 static void
9229 insert_cantunwind_after (asection *text_sec, asection *exidx_sec)
9230 {
9231 struct _arm_elf_section_data *exidx_arm_data;
9232
9233 exidx_arm_data = get_arm_elf_section_data (exidx_sec);
9234 add_unwind_table_edit (
9235 &exidx_arm_data->u.exidx.unwind_edit_list,
9236 &exidx_arm_data->u.exidx.unwind_edit_tail,
9237 INSERT_EXIDX_CANTUNWIND_AT_END, text_sec, UINT_MAX);
9238
9239 adjust_exidx_size (exidx_sec, 8);
9240 }
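/* Each .ARM.exidx entry occupies eight bytes: a word referring to the code it
   covers, followed by a second word that is either the value 1
   (EXIDX_CANTUNWIND), an inlined unwind sequence (top bit set), or a
   reference to an unwind table entry elsewhere.  The scan below classifies
   entries purely from that second word. */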
9241
9242 /* Scan .ARM.exidx tables, and create a list describing edits which should be
9243 made to those tables, such that:
9244
9245 1. Regions without unwind data are marked with EXIDX_CANTUNWIND entries.
9246 2. Duplicate entries are merged together (EXIDX_CANTUNWIND, or unwind
9247 codes which have been inlined into the index).
9248
9249 If MERGE_EXIDX_ENTRIES is false, duplicate entries are not merged.
9250
9251 The edits are applied when the tables are written
9252 (in elf32_arm_write_section).
9253 */
9254
9255 bfd_boolean
9256 elf32_arm_fix_exidx_coverage (asection **text_section_order,
9257 unsigned int num_text_sections,
9258 struct bfd_link_info *info,
9259 bfd_boolean merge_exidx_entries)
9260 {
9261 bfd *inp;
9262 unsigned int last_second_word = 0, i;
9263 asection *last_exidx_sec = NULL;
9264 asection *last_text_sec = NULL;
9265 int last_unwind_type = -1;
9266
9267 /* Walk over all EXIDX sections, and create backlinks from the corresponding
9268 text sections. */
9269 for (inp = info->input_bfds; inp != NULL; inp = inp->link_next)
9270 {
9271 asection *sec;
9272
9273 for (sec = inp->sections; sec != NULL; sec = sec->next)
9274 {
9275 struct bfd_elf_section_data *elf_sec = elf_section_data (sec);
9276 Elf_Internal_Shdr *hdr = &elf_sec->this_hdr;
9277
9278 if (!hdr || hdr->sh_type != SHT_ARM_EXIDX)
9279 continue;
9280
9281 if (elf_sec->linked_to)
9282 {
9283 Elf_Internal_Shdr *linked_hdr
9284 = &elf_section_data (elf_sec->linked_to)->this_hdr;
9285 struct _arm_elf_section_data *linked_sec_arm_data
9286 = get_arm_elf_section_data (linked_hdr->bfd_section);
9287
9288 if (linked_sec_arm_data == NULL)
9289 continue;
9290
9291 /* Link this .ARM.exidx section back from the text section it
9292 describes. */
9293 linked_sec_arm_data->u.text.arm_exidx_sec = sec;
9294 }
9295 }
9296 }
9297
9298 /* Walk all text sections in order of increasing VMA.  Eliminate duplicate
9299 index table entries (EXIDX_CANTUNWIND and inlined unwind opcodes),
9300 and add EXIDX_CANTUNWIND entries for sections with no unwind table data. */
9301
9302 for (i = 0; i < num_text_sections; i++)
9303 {
9304 asection *sec = text_section_order[i];
9305 asection *exidx_sec;
9306 struct _arm_elf_section_data *arm_data = get_arm_elf_section_data (sec);
9307 struct _arm_elf_section_data *exidx_arm_data;
9308 bfd_byte *contents = NULL;
9309 int deleted_exidx_bytes = 0;
9310 bfd_vma j;
9311 arm_unwind_table_edit *unwind_edit_head = NULL;
9312 arm_unwind_table_edit *unwind_edit_tail = NULL;
9313 Elf_Internal_Shdr *hdr;
9314 bfd *ibfd;
9315
9316 if (arm_data == NULL)
9317 continue;
9318
9319 exidx_sec = arm_data->u.text.arm_exidx_sec;
9320 if (exidx_sec == NULL)
9321 {
9322 /* Section has no unwind data. */
9323 if (last_unwind_type == 0 || !last_exidx_sec)
9324 continue;
9325
9326 /* Ignore zero sized sections. */
9327 if (sec->size == 0)
9328 continue;
9329
9330 insert_cantunwind_after (last_text_sec, last_exidx_sec);
9331 last_unwind_type = 0;
9332 continue;
9333 }
9334
9335 /* Skip /DISCARD/ sections. */
9336 if (bfd_is_abs_section (exidx_sec->output_section))
9337 continue;
9338
9339 hdr = &elf_section_data (exidx_sec)->this_hdr;
9340 if (hdr->sh_type != SHT_ARM_EXIDX)
9341 continue;
9342
9343 exidx_arm_data = get_arm_elf_section_data (exidx_sec);
9344 if (exidx_arm_data == NULL)
9345 continue;
9346
9347 ibfd = exidx_sec->owner;
9348
9349 if (hdr->contents != NULL)
9350 contents = hdr->contents;
9351 else if (! bfd_malloc_and_get_section (ibfd, exidx_sec, &contents))
9352 /* An error? */
9353 continue;
9354
9355 for (j = 0; j < hdr->sh_size; j += 8)
9356 {
9357 unsigned int second_word = bfd_get_32 (ibfd, contents + j + 4);
9358 int unwind_type;
9359 int elide = 0;
9360
9361 /* An EXIDX_CANTUNWIND entry. */
9362 if (second_word == 1)
9363 {
9364 if (last_unwind_type == 0)
9365 elide = 1;
9366 unwind_type = 0;
9367 }
9368 /* Inlined unwinding data. Merge if equal to previous. */
9369 else if ((second_word & 0x80000000) != 0)
9370 {
9371 if (merge_exidx_entries
9372 && last_second_word == second_word && last_unwind_type == 1)
9373 elide = 1;
9374 unwind_type = 1;
9375 last_second_word = second_word;
9376 }
9377 /* Normal table entry. In theory we could merge these too,
9378 but duplicate entries are likely to be much less common. */
9379 else
9380 unwind_type = 2;
9381
9382 if (elide)
9383 {
9384 add_unwind_table_edit (&unwind_edit_head, &unwind_edit_tail,
9385 DELETE_EXIDX_ENTRY, NULL, j / 8);
9386
9387 deleted_exidx_bytes += 8;
9388 }
9389
9390 last_unwind_type = unwind_type;
9391 }
9392
9393 /* Free contents if we allocated it ourselves. */
9394 if (contents != hdr->contents)
9395 free (contents);
9396
9397 /* Record edits to be applied later (in elf32_arm_write_section). */
9398 exidx_arm_data->u.exidx.unwind_edit_list = unwind_edit_head;
9399 exidx_arm_data->u.exidx.unwind_edit_tail = unwind_edit_tail;
9400
9401 if (deleted_exidx_bytes > 0)
9402 adjust_exidx_size (exidx_sec, -deleted_exidx_bytes);
9403
9404 last_exidx_sec = exidx_sec;
9405 last_text_sec = sec;
9406 }
9407
9408 /* Add terminating CANTUNWIND entry. */
9409 if (last_exidx_sec && last_unwind_type != 0)
9410 insert_cantunwind_after (last_text_sec, last_exidx_sec);
9411
9412 return TRUE;
9413 }
9414
9415 static bfd_boolean
9416 elf32_arm_output_glue_section (struct bfd_link_info *info, bfd *obfd,
9417 bfd *ibfd, const char *name)
9418 {
9419 asection *sec, *osec;
9420
9421 sec = bfd_get_section_by_name (ibfd, name);
9422 if (sec == NULL || (sec->flags & SEC_EXCLUDE) != 0)
9423 return TRUE;
9424
9425 osec = sec->output_section;
9426 if (elf32_arm_write_section (obfd, info, sec, sec->contents))
9427 return TRUE;
9428
9429 if (! bfd_set_section_contents (obfd, osec, sec->contents,
9430 sec->output_offset, sec->size))
9431 return FALSE;
9432
9433 return TRUE;
9434 }
9435
9436 static bfd_boolean
9437 elf32_arm_final_link (bfd *abfd, struct bfd_link_info *info)
9438 {
9439 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
9440 asection *sec, *osec;
9441
9442 if (globals == NULL)
9443 return FALSE;
9444
9445 /* Invoke the regular ELF backend linker to do all the work. */
9446 if (!bfd_elf_final_link (abfd, info))
9447 return FALSE;
9448
9449 /* Process stub sections (e.g. BE8 encoding, ...). */
9450 int i;
9451
9452 for (i = 0; i < globals->top_id; i++)
9453 {
9454 sec = globals->stub_group[i].stub_sec;
9455 /* Only process it once, in its link_sec slot. */
9456 if (sec && i == globals->stub_group[i].link_sec->id)
9457 {
9458 osec = sec->output_section;
9459 elf32_arm_write_section (abfd, info, sec, sec->contents);
9460 if (! bfd_set_section_contents (abfd, osec, sec->contents,
9461 sec->output_offset, sec->size))
9462 return FALSE;
9463 }
9464 }
9465
9466 /* Write out any glue sections now that we have created all the
9467 stubs. */
9468 if (globals->bfd_of_glue_owner != NULL)
9469 {
9470 if (! elf32_arm_output_glue_section (info, abfd,
9471 globals->bfd_of_glue_owner,
9472 ARM2THUMB_GLUE_SECTION_NAME))
9473 return FALSE;
9474
9475 if (! elf32_arm_output_glue_section (info, abfd,
9476 globals->bfd_of_glue_owner,
9477 THUMB2ARM_GLUE_SECTION_NAME))
9478 return FALSE;
9479
9480 if (! elf32_arm_output_glue_section (info, abfd,
9481 globals->bfd_of_glue_owner,
9482 VFP11_ERRATUM_VENEER_SECTION_NAME))
9483 return FALSE;
9484
9485 if (! elf32_arm_output_glue_section (info, abfd,
9486 globals->bfd_of_glue_owner,
9487 ARM_BX_GLUE_SECTION_NAME))
9488 return FALSE;
9489 }
9490
9491 return TRUE;
9492 }
9493
9494 /* Set the right machine number. */
9495
9496 static bfd_boolean
9497 elf32_arm_object_p (bfd *abfd)
9498 {
9499 unsigned int mach;
9500
9501 mach = bfd_arm_get_mach_from_notes (abfd, ARM_NOTE_SECTION);
9502
9503 if (mach != bfd_mach_arm_unknown)
9504 bfd_default_set_arch_mach (abfd, bfd_arch_arm, mach);
9505
9506 else if (elf_elfheader (abfd)->e_flags & EF_ARM_MAVERICK_FLOAT)
9507 bfd_default_set_arch_mach (abfd, bfd_arch_arm, bfd_mach_arm_ep9312);
9508
9509 else
9510 bfd_default_set_arch_mach (abfd, bfd_arch_arm, mach);
9511
9512 return TRUE;
9513 }
9514
9515 /* Function to keep ARM specific flags in the ELF header. */
9516
9517 static bfd_boolean
9518 elf32_arm_set_private_flags (bfd *abfd, flagword flags)
9519 {
9520 if (elf_flags_init (abfd)
9521 && elf_elfheader (abfd)->e_flags != flags)
9522 {
9523 if (EF_ARM_EABI_VERSION (flags) == EF_ARM_EABI_UNKNOWN)
9524 {
9525 if (flags & EF_ARM_INTERWORK)
9526 (*_bfd_error_handler)
9527 (_("Warning: Not setting interworking flag of %B since it has already been specified as non-interworking"),
9528 abfd);
9529 else
9530 _bfd_error_handler
9531 (_("Warning: Clearing the interworking flag of %B due to outside request"),
9532 abfd);
9533 }
9534 }
9535 else
9536 {
9537 elf_elfheader (abfd)->e_flags = flags;
9538 elf_flags_init (abfd) = TRUE;
9539 }
9540
9541 return TRUE;
9542 }
9543
9544 /* Copy backend specific data from one object module to another. */
9545
9546 static bfd_boolean
9547 elf32_arm_copy_private_bfd_data (bfd *ibfd, bfd *obfd)
9548 {
9549 flagword in_flags;
9550 flagword out_flags;
9551
9552 if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
9553 return TRUE;
9554
9555 in_flags = elf_elfheader (ibfd)->e_flags;
9556 out_flags = elf_elfheader (obfd)->e_flags;
9557
9558 if (elf_flags_init (obfd)
9559 && EF_ARM_EABI_VERSION (out_flags) == EF_ARM_EABI_UNKNOWN
9560 && in_flags != out_flags)
9561 {
9562 /* Cannot mix APCS26 and APCS32 code. */
9563 if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
9564 return FALSE;
9565
9566 /* Cannot mix float APCS and non-float APCS code. */
9567 if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
9568 return FALSE;
9569
9570 /* If the src and dest have different interworking flags
9571 then turn off the interworking bit. */
9572 if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
9573 {
9574 if (out_flags & EF_ARM_INTERWORK)
9575 _bfd_error_handler
9576 (_("Warning: Clearing the interworking flag of %B because non-interworking code in %B has been linked with it"),
9577 obfd, ibfd);
9578
9579 in_flags &= ~EF_ARM_INTERWORK;
9580 }
9581
9582 /* Likewise for PIC, though don't warn for this case. */
9583 if ((in_flags & EF_ARM_PIC) != (out_flags & EF_ARM_PIC))
9584 in_flags &= ~EF_ARM_PIC;
9585 }
9586
9587 elf_elfheader (obfd)->e_flags = in_flags;
9588 elf_flags_init (obfd) = TRUE;
9589
9590 /* Also copy the EI_OSABI field. */
9591 elf_elfheader (obfd)->e_ident[EI_OSABI] =
9592 elf_elfheader (ibfd)->e_ident[EI_OSABI];
9593
9594 /* Copy object attributes. */
9595 _bfd_elf_copy_obj_attributes (ibfd, obfd);
9596
9597 return TRUE;
9598 }
9599
9600 /* Values for Tag_ABI_PCS_R9_use. */
9601 enum
9602 {
9603 AEABI_R9_V6,
9604 AEABI_R9_SB,
9605 AEABI_R9_TLS,
9606 AEABI_R9_unused
9607 };
9608
9609 /* Values for Tag_ABI_PCS_RW_data. */
9610 enum
9611 {
9612 AEABI_PCS_RW_data_absolute,
9613 AEABI_PCS_RW_data_PCrel,
9614 AEABI_PCS_RW_data_SBrel,
9615 AEABI_PCS_RW_data_unused
9616 };
9617
9618 /* Values for Tag_ABI_enum_size. */
9619 enum
9620 {
9621 AEABI_enum_unused,
9622 AEABI_enum_short,
9623 AEABI_enum_wide,
9624 AEABI_enum_forced_wide
9625 };
9626
9627 /* Determine whether an object attribute tag takes an integer, a
9628 string or both. */
9629
9630 static int
9631 elf32_arm_obj_attrs_arg_type (int tag)
9632 {
9633 if (tag == Tag_compatibility)
9634 return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_STR_VAL;
9635 else if (tag == Tag_nodefaults)
9636 return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_NO_DEFAULT;
9637 else if (tag == Tag_CPU_raw_name || tag == Tag_CPU_name)
9638 return ATTR_TYPE_FLAG_STR_VAL;
9639 else if (tag < 32)
9640 return ATTR_TYPE_FLAG_INT_VAL;
9641 else
9642 return (tag & 1) != 0 ? ATTR_TYPE_FLAG_STR_VAL : ATTR_TYPE_FLAG_INT_VAL;
9643 }
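/* For example, Tag_CPU_raw_name and Tag_CPU_name take strings even though
   they are numbered below 32, while unknown tags numbered 32 and above follow
   the even-is-integer, odd-is-string convention. */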
9644
9645 /* The ABI defines that Tag_conformance should be emitted first, and that
9646 Tag_nodefaults should be second (if either is defined). This sets those
9647 two positions, and bumps up the position of all the remaining tags to
9648 compensate. */
9649 static int
9650 elf32_arm_obj_attrs_order (int num)
9651 {
9652 if (num == LEAST_KNOWN_OBJ_ATTRIBUTE)
9653 return Tag_conformance;
9654 if (num == LEAST_KNOWN_OBJ_ATTRIBUTE + 1)
9655 return Tag_nodefaults;
9656 if ((num - 2) < Tag_nodefaults)
9657 return num - 2;
9658 if ((num - 1) < Tag_conformance)
9659 return num - 1;
9660 return num;
9661 }
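/* With this ordering the first two positions are occupied by Tag_conformance
   and Tag_nodefaults, and every other known tag is shifted up by the one or
   two slots needed to make room, so each attribute is still visited exactly
   once. */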
9662
9663 /* Read the architecture from the Tag_also_compatible_with attribute, if any.
9664 Returns -1 if no architecture could be read. */
9665
9666 static int
9667 get_secondary_compatible_arch (bfd *abfd)
9668 {
9669 obj_attribute *attr =
9670 &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
9671
9672 /* Note: the tag and its argument below are uleb128 values, though
9673 currently-defined values fit in one byte for each. */
9674 if (attr->s
9675 && attr->s[0] == Tag_CPU_arch
9676 && (attr->s[1] & 128) != 128
9677 && attr->s[2] == 0)
9678 return attr->s[1];
9679
9680 /* This tag is "safely ignorable", so don't complain if it looks funny. */
9681 return -1;
9682 }
9683
9684 /* Set, or unset, the architecture of the Tag_also_compatible_with attribute.
9685 The tag is removed if ARCH is -1. */
9686
9687 static void
9688 set_secondary_compatible_arch (bfd *abfd, int arch)
9689 {
9690 obj_attribute *attr =
9691 &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
9692
9693 if (arch == -1)
9694 {
9695 attr->s = NULL;
9696 return;
9697 }
9698
9699 /* Note: the tag and its argument below are uleb128 values, though
9700 currently-defined values fit in one byte for each. */
9701 if (!attr->s)
9702 attr->s = (char *) bfd_alloc (abfd, 3);
9703 attr->s[0] = Tag_CPU_arch;
9704 attr->s[1] = arch;
9705 attr->s[2] = '\0';
9706 }
9707
9708 /* Combine two values for Tag_CPU_arch, taking secondary compatibility tags
9709 into account. */
9710
9711 static int
9712 tag_cpu_arch_combine (bfd *ibfd, int oldtag, int *secondary_compat_out,
9713 int newtag, int secondary_compat)
9714 {
9715 #define T(X) TAG_CPU_ARCH_##X
9716 int tagl, tagh, result;
9717 const int v6t2[] =
9718 {
9719 T(V6T2), /* PRE_V4. */
9720 T(V6T2), /* V4. */
9721 T(V6T2), /* V4T. */
9722 T(V6T2), /* V5T. */
9723 T(V6T2), /* V5TE. */
9724 T(V6T2), /* V5TEJ. */
9725 T(V6T2), /* V6. */
9726 T(V7), /* V6KZ. */
9727 T(V6T2) /* V6T2. */
9728 };
9729 const int v6k[] =
9730 {
9731 T(V6K), /* PRE_V4. */
9732 T(V6K), /* V4. */
9733 T(V6K), /* V4T. */
9734 T(V6K), /* V5T. */
9735 T(V6K), /* V5TE. */
9736 T(V6K), /* V5TEJ. */
9737 T(V6K), /* V6. */
9738 T(V6KZ), /* V6KZ. */
9739 T(V7), /* V6T2. */
9740 T(V6K) /* V6K. */
9741 };
9742 const int v7[] =
9743 {
9744 T(V7), /* PRE_V4. */
9745 T(V7), /* V4. */
9746 T(V7), /* V4T. */
9747 T(V7), /* V5T. */
9748 T(V7), /* V5TE. */
9749 T(V7), /* V5TEJ. */
9750 T(V7), /* V6. */
9751 T(V7), /* V6KZ. */
9752 T(V7), /* V6T2. */
9753 T(V7), /* V6K. */
9754 T(V7) /* V7. */
9755 };
9756 const int v6_m[] =
9757 {
9758 -1, /* PRE_V4. */
9759 -1, /* V4. */
9760 T(V6K), /* V4T. */
9761 T(V6K), /* V5T. */
9762 T(V6K), /* V5TE. */
9763 T(V6K), /* V5TEJ. */
9764 T(V6K), /* V6. */
9765 T(V6KZ), /* V6KZ. */
9766 T(V7), /* V6T2. */
9767 T(V6K), /* V6K. */
9768 T(V7), /* V7. */
9769 T(V6_M) /* V6_M. */
9770 };
9771 const int v6s_m[] =
9772 {
9773 -1, /* PRE_V4. */
9774 -1, /* V4. */
9775 T(V6K), /* V4T. */
9776 T(V6K), /* V5T. */
9777 T(V6K), /* V5TE. */
9778 T(V6K), /* V5TEJ. */
9779 T(V6K), /* V6. */
9780 T(V6KZ), /* V6KZ. */
9781 T(V7), /* V6T2. */
9782 T(V6K), /* V6K. */
9783 T(V7), /* V7. */
9784 T(V6S_M), /* V6_M. */
9785 T(V6S_M) /* V6S_M. */
9786 };
9787 const int v7e_m[] =
9788 {
9789 -1, /* PRE_V4. */
9790 -1, /* V4. */
9791 T(V7E_M), /* V4T. */
9792 T(V7E_M), /* V5T. */
9793 T(V7E_M), /* V5TE. */
9794 T(V7E_M), /* V5TEJ. */
9795 T(V7E_M), /* V6. */
9796 T(V7E_M), /* V6KZ. */
9797 T(V7E_M), /* V6T2. */
9798 T(V7E_M), /* V6K. */
9799 T(V7E_M), /* V7. */
9800 T(V7E_M), /* V6_M. */
9801 T(V7E_M), /* V6S_M. */
9802 T(V7E_M) /* V7E_M. */
9803 };
9804 const int v4t_plus_v6_m[] =
9805 {
9806 -1, /* PRE_V4. */
9807 -1, /* V4. */
9808 T(V4T), /* V4T. */
9809 T(V5T), /* V5T. */
9810 T(V5TE), /* V5TE. */
9811 T(V5TEJ), /* V5TEJ. */
9812 T(V6), /* V6. */
9813 T(V6KZ), /* V6KZ. */
9814 T(V6T2), /* V6T2. */
9815 T(V6K), /* V6K. */
9816 T(V7), /* V7. */
9817 T(V6_M), /* V6_M. */
9818 T(V6S_M), /* V6S_M. */
9819 T(V7E_M), /* V7E_M. */
9820 T(V4T_PLUS_V6_M) /* V4T plus V6_M. */
9821 };
9822 const int *comb[] =
9823 {
9824 v6t2,
9825 v6k,
9826 v7,
9827 v6_m,
9828 v6s_m,
9829 v7e_m,
9830 /* Pseudo-architecture. */
9831 v4t_plus_v6_m
9832 };
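/* For example, combining V6K with V6T2 selects the v6k row and the V6T2
   column, giving T(V7): the earliest entry in the tables above that is a
   superset of both architectures. */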
9833
9834 /* Check we've not got a higher architecture than we know about. */
9835
9836 if (oldtag > MAX_TAG_CPU_ARCH || newtag > MAX_TAG_CPU_ARCH)
9837 {
9838 _bfd_error_handler (_("error: %B: Unknown CPU architecture"), ibfd);
9839 return -1;
9840 }
9841
9842 /* Override old tag if we have a Tag_also_compatible_with on the output. */
9843
9844 if ((oldtag == T(V6_M) && *secondary_compat_out == T(V4T))
9845 || (oldtag == T(V4T) && *secondary_compat_out == T(V6_M)))
9846 oldtag = T(V4T_PLUS_V6_M);
9847
9848 /* And override the new tag if we have a Tag_also_compatible_with on the
9849 input. */
9850
9851 if ((newtag == T(V6_M) && secondary_compat == T(V4T))
9852 || (newtag == T(V4T) && secondary_compat == T(V6_M)))
9853 newtag = T(V4T_PLUS_V6_M);
9854
9855 tagl = (oldtag < newtag) ? oldtag : newtag;
9856 result = tagh = (oldtag > newtag) ? oldtag : newtag;
9857
9858 /* Architectures before V6KZ add features monotonically. */
9859 if (tagh <= TAG_CPU_ARCH_V6KZ)
9860 return result;
9861
9862 result = comb[tagh - T(V6T2)][tagl];
9863
9864 /* Use Tag_CPU_arch == V4T and Tag_also_compatible_with (Tag_CPU_arch V6_M)
9865 as the canonical version. */
9866 if (result == T(V4T_PLUS_V6_M))
9867 {
9868 result = T(V4T);
9869 *secondary_compat_out = T(V6_M);
9870 }
9871 else
9872 *secondary_compat_out = -1;
9873
9874 if (result == -1)
9875 {
9876 _bfd_error_handler (_("error: %B: Conflicting CPU architectures %d/%d"),
9877 ibfd, oldtag, newtag);
9878 return -1;
9879 }
9880
9881 return result;
9882 #undef T
9883 }
9884
9885 /* Merge EABI object attributes from IBFD into OBFD. Raise an error if there
9886 are conflicting attributes. */
9887
9888 static bfd_boolean
9889 elf32_arm_merge_eabi_attributes (bfd *ibfd, bfd *obfd)
9890 {
9891 obj_attribute *in_attr;
9892 obj_attribute *out_attr;
9893 obj_attribute_list *in_list;
9894 obj_attribute_list *out_list;
9895 obj_attribute_list **out_listp;
9896 /* Some tags have 0 = don't care, 1 = strong requirement,
9897 2 = weak requirement. */
9898 static const int order_021[3] = {0, 2, 1};
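/* Under this ordering a strong requirement (1) beats a weak one (2), and
   either beats don't-care (0); e.g. merging an input value of 2 with an
   output value of 1 leaves the output at 1. */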
9899 int i;
9900 bfd_boolean result = TRUE;
9901
9902 /* Skip the linker stubs file. This preserves previous behavior
9903 of accepting unknown attributes in the first input file - but
9904 is that a bug? */
9905 if (ibfd->flags & BFD_LINKER_CREATED)
9906 return TRUE;
9907
9908 if (!elf_known_obj_attributes_proc (obfd)[0].i)
9909 {
9910 /* This is the first object. Copy the attributes. */
9911 _bfd_elf_copy_obj_attributes (ibfd, obfd);
9912
9913 out_attr = elf_known_obj_attributes_proc (obfd);
9914
9915 /* Use the Tag_null value to indicate the attributes have been
9916 initialized. */
9917 out_attr[0].i = 1;
9918
9919 /* We do not output objects with Tag_MPextension_use_legacy - we move
9920 the attribute's value to Tag_MPextension_use. */
9921 if (out_attr[Tag_MPextension_use_legacy].i != 0)
9922 {
9923 if (out_attr[Tag_MPextension_use].i != 0
9924 && out_attr[Tag_MPextension_use_legacy].i
9925 != out_attr[Tag_MPextension_use].i)
9926 {
9927 _bfd_error_handler
9928 (_("Error: %B has both the current and legacy "
9929 "Tag_MPextension_use attributes"), ibfd);
9930 result = FALSE;
9931 }
9932
9933 out_attr[Tag_MPextension_use] =
9934 out_attr[Tag_MPextension_use_legacy];
9935 out_attr[Tag_MPextension_use_legacy].type = 0;
9936 out_attr[Tag_MPextension_use_legacy].i = 0;
9937 }
9938
9939 return result;
9940 }
9941
9942 in_attr = elf_known_obj_attributes_proc (ibfd);
9943 out_attr = elf_known_obj_attributes_proc (obfd);
9944 /* This needs to happen before Tag_ABI_FP_number_model is merged. */
9945 if (in_attr[Tag_ABI_VFP_args].i != out_attr[Tag_ABI_VFP_args].i)
9946 {
9947 /* Ignore mismatches if the object doesn't use floating point. */
9948 if (out_attr[Tag_ABI_FP_number_model].i == 0)
9949 out_attr[Tag_ABI_VFP_args].i = in_attr[Tag_ABI_VFP_args].i;
9950 else if (in_attr[Tag_ABI_FP_number_model].i != 0)
9951 {
9952 _bfd_error_handler
9953 (_("error: %B uses VFP register arguments, %B does not"),
9954 in_attr[Tag_ABI_VFP_args].i ? ibfd : obfd,
9955 in_attr[Tag_ABI_VFP_args].i ? obfd : ibfd);
9956 result = FALSE;
9957 }
9958 }
9959
9960 for (i = LEAST_KNOWN_OBJ_ATTRIBUTE; i < NUM_KNOWN_OBJ_ATTRIBUTES; i++)
9961 {
9962 /* Merge this attribute with existing attributes. */
9963 switch (i)
9964 {
9965 case Tag_CPU_raw_name:
9966 case Tag_CPU_name:
9967 /* These are merged after Tag_CPU_arch. */
9968 break;
9969
9970 case Tag_ABI_optimization_goals:
9971 case Tag_ABI_FP_optimization_goals:
9972 /* Use the first value seen. */
9973 break;
9974
9975 case Tag_CPU_arch:
9976 {
9977 int secondary_compat = -1, secondary_compat_out = -1;
9978 unsigned int saved_out_attr = out_attr[i].i;
9979 static const char *name_table[] = {
9980 /* These aren't real CPU names, but we can't guess
9981 that from the architecture version alone. */
9982 "Pre v4",
9983 "ARM v4",
9984 "ARM v4T",
9985 "ARM v5T",
9986 "ARM v5TE",
9987 "ARM v5TEJ",
9988 "ARM v6",
9989 "ARM v6KZ",
9990 "ARM v6T2",
9991 "ARM v6K",
9992 "ARM v7",
9993 "ARM v6-M",
9994 "ARM v6S-M"
9995 };
9996
9997 /* Merge Tag_CPU_arch and Tag_also_compatible_with. */
9998 secondary_compat = get_secondary_compatible_arch (ibfd);
9999 secondary_compat_out = get_secondary_compatible_arch (obfd);
10000 out_attr[i].i = tag_cpu_arch_combine (ibfd, out_attr[i].i,
10001 &secondary_compat_out,
10002 in_attr[i].i,
10003 secondary_compat);
10004 set_secondary_compatible_arch (obfd, secondary_compat_out);
10005
10006 /* Merge Tag_CPU_name and Tag_CPU_raw_name. */
10007 if (out_attr[i].i == saved_out_attr)
10008 ; /* Leave the names alone. */
10009 else if (out_attr[i].i == in_attr[i].i)
10010 {
10011 /* The output architecture has been changed to match the
10012 input architecture. Use the input names. */
10013 out_attr[Tag_CPU_name].s = in_attr[Tag_CPU_name].s
10014 ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_name].s)
10015 : NULL;
10016 out_attr[Tag_CPU_raw_name].s = in_attr[Tag_CPU_raw_name].s
10017 ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_raw_name].s)
10018 : NULL;
10019 }
10020 else
10021 {
10022 out_attr[Tag_CPU_name].s = NULL;
10023 out_attr[Tag_CPU_raw_name].s = NULL;
10024 }
10025
10026 /* If we still don't have a value for Tag_CPU_name,
10027 make one up now. Tag_CPU_raw_name remains blank. */
10028 if (out_attr[Tag_CPU_name].s == NULL
10029 && out_attr[i].i < ARRAY_SIZE (name_table))
10030 out_attr[Tag_CPU_name].s =
10031 _bfd_elf_attr_strdup (obfd, name_table[out_attr[i].i]);
10032 }
10033 break;
10034
10035 case Tag_ARM_ISA_use:
10036 case Tag_THUMB_ISA_use:
10037 case Tag_WMMX_arch:
10038 case Tag_Advanced_SIMD_arch:
10039 /* ??? Do Advanced_SIMD (NEON) and WMMX conflict? */
10040 case Tag_ABI_FP_rounding:
10041 case Tag_ABI_FP_exceptions:
10042 case Tag_ABI_FP_user_exceptions:
10043 case Tag_ABI_FP_number_model:
10044 case Tag_FP_HP_extension:
10045 case Tag_CPU_unaligned_access:
10046 case Tag_T2EE_use:
10047 case Tag_MPextension_use:
10048 /* Use the largest value specified. */
10049 if (in_attr[i].i > out_attr[i].i)
10050 out_attr[i].i = in_attr[i].i;
10051 break;
10052
10053 case Tag_ABI_align_preserved:
10054 case Tag_ABI_PCS_RO_data:
10055 /* Use the smallest value specified. */
10056 if (in_attr[i].i < out_attr[i].i)
10057 out_attr[i].i = in_attr[i].i;
10058 break;
10059
10060 case Tag_ABI_align_needed:
10061 if ((in_attr[i].i > 0 || out_attr[i].i > 0)
10062 && (in_attr[Tag_ABI_align_preserved].i == 0
10063 || out_attr[Tag_ABI_align_preserved].i == 0))
10064 {
10065 /* This error message should be enabled once all non-conformant
10066 binaries in the toolchain have had the attributes set
10067 properly.
10068 _bfd_error_handler
10069 (_("error: %B: 8-byte data alignment conflicts with %B"),
10070 obfd, ibfd);
10071 result = FALSE; */
10072 }
10073 /* Fall through. */
10074 case Tag_ABI_FP_denormal:
10075 case Tag_ABI_PCS_GOT_use:
10076 /* Use the "greatest" from the sequence 0, 2, 1, or the largest
10077 value if greater than 2 (for future-proofing). */
10078 if ((in_attr[i].i > 2 && in_attr[i].i > out_attr[i].i)
10079 || (in_attr[i].i <= 2 && out_attr[i].i <= 2
10080 && order_021[in_attr[i].i] > order_021[out_attr[i].i]))
10081 out_attr[i].i = in_attr[i].i;
10082 break;
10083
10084 case Tag_Virtualization_use:
10085 /* The virtualization tag effectively stores two bits of
10086 information: the intended use of TrustZone (in bit 0), and the
10087 intended use of Virtualization (in bit 1). */
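/* For example, merging an input that only uses TrustZone (1) with an output
   that only uses Virtualization (2) yields 3, i.e. both extensions are
   used. */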
10088 if (out_attr[i].i == 0)
10089 out_attr[i].i = in_attr[i].i;
10090 else if (in_attr[i].i != 0
10091 && in_attr[i].i != out_attr[i].i)
10092 {
10093 if (in_attr[i].i <= 3 && out_attr[i].i <= 3)
10094 out_attr[i].i = 3;
10095 else
10096 {
10097 _bfd_error_handler
10098 (_("error: %B: unable to merge virtualization attributes "
10099 "with %B"),
10100 obfd, ibfd);
10101 result = FALSE;
10102 }
10103 }
10104 break;
10105
10106 case Tag_CPU_arch_profile:
10107 if (out_attr[i].i != in_attr[i].i)
10108 {
10109 /* 0 will merge with anything.
10110 'A' and 'S' merge to 'A'.
10111 'R' and 'S' merge to 'R'.
10112 'M' and 'A|R|S' is an error. */
10113 if (out_attr[i].i == 0
10114 || (out_attr[i].i == 'S'
10115 && (in_attr[i].i == 'A' || in_attr[i].i == 'R')))
10116 out_attr[i].i = in_attr[i].i;
10117 else if (in_attr[i].i == 0
10118 || (in_attr[i].i == 'S'
10119 && (out_attr[i].i == 'A' || out_attr[i].i == 'R')))
10120 ; /* Do nothing. */
10121 else
10122 {
10123 _bfd_error_handler
10124 (_("error: %B: Conflicting architecture profiles %c/%c"),
10125 ibfd,
10126 in_attr[i].i ? in_attr[i].i : '0',
10127 out_attr[i].i ? out_attr[i].i : '0');
10128 result = FALSE;
10129 }
10130 }
10131 break;
10132 case Tag_FP_arch:
10133 {
10134 /* Tag_ABI_HardFP_use is handled along with Tag_FP_arch since
10135 the meaning of Tag_ABI_HardFP_use depends on Tag_FP_arch
10136 when it's 0. It might mean absence of FP hardware if
10137 Tag_FP_arch is zero, otherwise it is effectively SP + DP. */
10138
10139 static const struct
10140 {
10141 int ver;
10142 int regs;
10143 } vfp_versions[7] =
10144 {
10145 {0, 0},
10146 {1, 16},
10147 {2, 16},
10148 {3, 32},
10149 {3, 16},
10150 {4, 32},
10151 {4, 16}
10152 };
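/* Each entry gives the VFP ISA version and register count implied by the
   corresponding Tag_FP_arch value.  For example, merging an input of {3, 32}
   with an output of {4, 16} requires version 4 with 32 registers, which the
   search below maps back onto the {4, 32} entry. */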
10153 int ver;
10154 int regs;
10155 int newval;
10156
10157 /* If the output has no requirement about FP hardware,
10158 follow the requirement of the input. */
10159 if (out_attr[i].i == 0)
10160 {
10161 BFD_ASSERT (out_attr[Tag_ABI_HardFP_use].i == 0);
10162 out_attr[i].i = in_attr[i].i;
10163 out_attr[Tag_ABI_HardFP_use].i
10164 = in_attr[Tag_ABI_HardFP_use].i;
10165 break;
10166 }
10167 /* If the input has no requirement about FP hardware, do
10168 nothing. */
10169 else if (in_attr[i].i == 0)
10170 {
10171 BFD_ASSERT (in_attr[Tag_ABI_HardFP_use].i == 0);
10172 break;
10173 }
10174
10175 /* Both the input and the output have nonzero Tag_FP_arch.
10176 So Tag_ABI_HardFP_use is (SP & DP) when it's zero. */
10177
10178 /* If both the input and the output have zero Tag_ABI_HardFP_use,
10179 do nothing. */
10180 if (in_attr[Tag_ABI_HardFP_use].i == 0
10181 && out_attr[Tag_ABI_HardFP_use].i == 0)
10182 ;
10183 /* If the input and the output have different Tag_ABI_HardFP_use,
10184 the combination of them is 3 (SP & DP). */
10185 else if (in_attr[Tag_ABI_HardFP_use].i
10186 != out_attr[Tag_ABI_HardFP_use].i)
10187 out_attr[Tag_ABI_HardFP_use].i = 3;
10188
10189 /* Now we can handle Tag_FP_arch. */
10190
10191 /* Values greater than 6 aren't defined, so just pick the
10192 biggest. */
10193 if (in_attr[i].i > 6 && in_attr[i].i > out_attr[i].i)
10194 {
10195 out_attr[i] = in_attr[i];
10196 break;
10197 }
10198 /* The output uses the superset of input features
10199 (ISA version) and registers. */
10200 ver = vfp_versions[in_attr[i].i].ver;
10201 if (ver < vfp_versions[out_attr[i].i].ver)
10202 ver = vfp_versions[out_attr[i].i].ver;
10203 regs = vfp_versions[in_attr[i].i].regs;
10204 if (regs < vfp_versions[out_attr[i].i].regs)
10205 regs = vfp_versions[out_attr[i].i].regs;
10206 /* This assumes that all possible supersets are also valid
10207 options. */
10208 for (newval = 6; newval > 0; newval--)
10209 {
10210 if (regs == vfp_versions[newval].regs
10211 && ver == vfp_versions[newval].ver)
10212 break;
10213 }
10214 out_attr[i].i = newval;
10215 }
10216 break;
10217 case Tag_PCS_config:
10218 if (out_attr[i].i == 0)
10219 out_attr[i].i = in_attr[i].i;
10220 else if (in_attr[i].i != 0 && out_attr[i].i != 0)
10221 {
10222 /* It's sometimes ok to mix different configs, so this is only
10223 a warning. */
10224 _bfd_error_handler
10225 (_("Warning: %B: Conflicting platform configuration"), ibfd);
10226 }
10227 break;
10228 case Tag_ABI_PCS_R9_use:
10229 if (in_attr[i].i != out_attr[i].i
10230 && out_attr[i].i != AEABI_R9_unused
10231 && in_attr[i].i != AEABI_R9_unused)
10232 {
10233 _bfd_error_handler
10234 (_("error: %B: Conflicting use of R9"), ibfd);
10235 result = FALSE;
10236 }
10237 if (out_attr[i].i == AEABI_R9_unused)
10238 out_attr[i].i = in_attr[i].i;
10239 break;
10240 case Tag_ABI_PCS_RW_data:
10241 if (in_attr[i].i == AEABI_PCS_RW_data_SBrel
10242 && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_SB
10243 && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_unused)
10244 {
10245 _bfd_error_handler
10246 (_("error: %B: SB relative addressing conflicts with use of R9"),
10247 ibfd);
10248 result = FALSE;
10249 }
10250 /* Use the smallest value specified. */
10251 if (in_attr[i].i < out_attr[i].i)
10252 out_attr[i].i = in_attr[i].i;
10253 break;
10254 case Tag_ABI_PCS_wchar_t:
10255 if (out_attr[i].i && in_attr[i].i && out_attr[i].i != in_attr[i].i
10256 && !elf_arm_tdata (obfd)->no_wchar_size_warning)
10257 {
10258 _bfd_error_handler
10259 (_("warning: %B uses %u-byte wchar_t yet the output is to use %u-byte wchar_t; use of wchar_t values across objects may fail"),
10260 ibfd, in_attr[i].i, out_attr[i].i);
10261 }
10262 else if (in_attr[i].i && !out_attr[i].i)
10263 out_attr[i].i = in_attr[i].i;
10264 break;
10265 case Tag_ABI_enum_size:
10266 if (in_attr[i].i != AEABI_enum_unused)
10267 {
10268 if (out_attr[i].i == AEABI_enum_unused
10269 || out_attr[i].i == AEABI_enum_forced_wide)
10270 {
10271 /* The existing object is compatible with anything.
10272 Use whatever requirements the new object has. */
10273 out_attr[i].i = in_attr[i].i;
10274 }
10275 else if (in_attr[i].i != AEABI_enum_forced_wide
10276 && out_attr[i].i != in_attr[i].i
10277 && !elf_arm_tdata (obfd)->no_enum_size_warning)
10278 {
10279 static const char *aeabi_enum_names[] =
10280 { "", "variable-size", "32-bit", "" };
10281 const char *in_name =
10282 in_attr[i].i < ARRAY_SIZE(aeabi_enum_names)
10283 ? aeabi_enum_names[in_attr[i].i]
10284 : "<unknown>";
10285 const char *out_name =
10286 out_attr[i].i < ARRAY_SIZE(aeabi_enum_names)
10287 ? aeabi_enum_names[out_attr[i].i]
10288 : "<unknown>";
10289 _bfd_error_handler
10290 (_("warning: %B uses %s enums yet the output is to use %s enums; use of enum values across objects may fail"),
10291 ibfd, in_name, out_name);
10292 }
10293 }
10294 break;
10295 case Tag_ABI_VFP_args:
10296 /* Already done. */
10297 break;
10298 case Tag_ABI_WMMX_args:
10299 if (in_attr[i].i != out_attr[i].i)
10300 {
10301 _bfd_error_handler
10302 (_("error: %B uses iWMMXt register arguments, %B does not"),
10303 ibfd, obfd);
10304 result = FALSE;
10305 }
10306 break;
10307 case Tag_compatibility:
10308 /* Merged in target-independent code. */
10309 break;
10310 case Tag_ABI_HardFP_use:
10311 /* This is handled along with Tag_FP_arch. */
10312 break;
10313 case Tag_ABI_FP_16bit_format:
10314 if (in_attr[i].i != 0 && out_attr[i].i != 0)
10315 {
10316 if (in_attr[i].i != out_attr[i].i)
10317 {
10318 _bfd_error_handler
10319 (_("error: fp16 format mismatch between %B and %B"),
10320 ibfd, obfd);
10321 result = FALSE;
10322 }
10323 }
10324 if (in_attr[i].i != 0)
10325 out_attr[i].i = in_attr[i].i;
10326 break;
10327
10328 case Tag_DIV_use:
10329 /* This tag is set to zero if we can use UDIV and SDIV in Thumb
10330 mode on a v7-M or v7-R CPU; to one if we can not use UDIV or
10331 SDIV at all; and to two if we can use UDIV or SDIV on a v7-A
10332 CPU. We will merge as follows: If the input attribute's value
10333 is one then the output attribute's value remains unchanged. If
10334 the input attribute's value is zero or two then if the output
10335 attribute's value is one the output value is set to the input
10336 value, otherwise the output value must be the same as the
10337 inputs. */
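/* For example, an input of 2 (UDIV/SDIV permitted on v7-A) merged with an
   output of 1 (no DIV) yields 2, whereas merging 0 with 2 is diagnosed as a
   mismatch. */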
10338 if (in_attr[i].i != 1 && out_attr[i].i != 1)
10339 {
10340 if (in_attr[i].i != out_attr[i].i)
10341 {
10342 _bfd_error_handler
10343 (_("DIV usage mismatch between %B and %B"),
10344 ibfd, obfd);
10345 result = FALSE;
10346 }
10347 }
10348
10349 if (in_attr[i].i != 1)
10350 out_attr[i].i = in_attr[i].i;
10351
10352 break;
10353
10354 case Tag_MPextension_use_legacy:
10355 /* We don't output objects with Tag_MPextension_use_legacy - we
10356 move the value to Tag_MPextension_use. */
10357 if (in_attr[i].i != 0 && in_attr[Tag_MPextension_use].i != 0)
10358 {
10359 if (in_attr[Tag_MPextension_use].i != in_attr[i].i)
10360 {
10361 _bfd_error_handler
10362 (_("%B has both the current and legacy "
10363 "Tag_MPextension_use attributes"),
10364 ibfd);
10365 result = FALSE;
10366 }
10367 }
10368
10369 if (in_attr[i].i > out_attr[Tag_MPextension_use].i)
10370 out_attr[Tag_MPextension_use] = in_attr[i];
10371
10372 break;
10373
10374 case Tag_nodefaults:
10375 /* This tag is set if it exists, but the value is unused (and is
10376 typically zero). We don't actually need to do anything here -
10377 the merge happens automatically when the type flags are merged
10378 below. */
10379 break;
10380 case Tag_also_compatible_with:
10381 /* Already done in Tag_CPU_arch. */
10382 break;
10383 case Tag_conformance:
10384 /* Keep the attribute if it matches. Throw it away otherwise.
10385 No attribute means no claim to conform. */
10386 if (!in_attr[i].s || !out_attr[i].s
10387 || strcmp (in_attr[i].s, out_attr[i].s) != 0)
10388 out_attr[i].s = NULL;
10389 break;
10390
10391 default:
10392 {
10393 bfd *err_bfd = NULL;
10394
10395 /* The "known_obj_attributes" table does contain some undefined
10396 attributes.  Ensure that they are unused. */
10397 if (out_attr[i].i != 0 || out_attr[i].s != NULL)
10398 err_bfd = obfd;
10399 else if (in_attr[i].i != 0 || in_attr[i].s != NULL)
10400 err_bfd = ibfd;
10401
10402 if (err_bfd != NULL)
10403 {
10404 /* Attribute numbers >=64 (mod 128) can be safely ignored. */
10405 if ((i & 127) < 64)
10406 {
10407 _bfd_error_handler
10408 (_("%B: Unknown mandatory EABI object attribute %d"),
10409 err_bfd, i);
10410 bfd_set_error (bfd_error_bad_value);
10411 result = FALSE;
10412 }
10413 else
10414 {
10415 _bfd_error_handler
10416 (_("Warning: %B: Unknown EABI object attribute %d"),
10417 err_bfd, i);
10418 }
10419 }
10420
10421 /* Only pass on attributes that match in both inputs. */
10422 if (in_attr[i].i != out_attr[i].i
10423 || in_attr[i].s != out_attr[i].s
10424 || (in_attr[i].s != NULL && out_attr[i].s != NULL
10425 && strcmp (in_attr[i].s, out_attr[i].s) != 0))
10426 {
10427 out_attr[i].i = 0;
10428 out_attr[i].s = NULL;
10429 }
10430 }
10431 }
10432
10433 /* If out_attr was copied from in_attr then it won't have a type yet. */
10434 if (in_attr[i].type && !out_attr[i].type)
10435 out_attr[i].type = in_attr[i].type;
10436 }
10437
10438 /* Merge Tag_compatibility attributes and any common GNU ones. */
10439 if (!_bfd_elf_merge_object_attributes (ibfd, obfd))
10440 return FALSE;
10441
10442 /* Check for any attributes not known on ARM. */
10443 in_list = elf_other_obj_attributes_proc (ibfd);
10444 out_listp = &elf_other_obj_attributes_proc (obfd);
10445 out_list = *out_listp;
10446
10447 for (; in_list || out_list; )
10448 {
10449 bfd *err_bfd = NULL;
10450 int err_tag = 0;
10451
10452 /* The tags for each list are in numerical order. */
10453 /* If the tags are equal, then merge. */
10454 if (out_list && (!in_list || in_list->tag > out_list->tag))
10455 {
10456 /* This attribute only exists in obfd. We can't merge, and we don't
10457 know what the tag means, so delete it. */
10458 err_bfd = obfd;
10459 err_tag = out_list->tag;
10460 *out_listp = out_list->next;
10461 out_list = *out_listp;
10462 }
10463 else if (in_list && (!out_list || in_list->tag < out_list->tag))
10464 {
10465 /* This attribute only exists in ibfd. We can't merge, and we don't
10466 know what the tag means, so ignore it. */
10467 err_bfd = ibfd;
10468 err_tag = in_list->tag;
10469 in_list = in_list->next;
10470 }
10471 else /* The tags are equal. */
10472 {
10473 /* At present, all attributes in the list are unknown, and
10474 therefore can't be merged meaningfully. */
10475 err_bfd = obfd;
10476 err_tag = out_list->tag;
10477
10478 /* Only pass on attributes that match in both inputs. */
10479 if (in_list->attr.i != out_list->attr.i
10480 || in_list->attr.s != out_list->attr.s
10481 || (in_list->attr.s && out_list->attr.s
10482 && strcmp (in_list->attr.s, out_list->attr.s) != 0))
10483 {
10484 /* No match. Delete the attribute. */
10485 *out_listp = out_list->next;
10486 out_list = *out_listp;
10487 }
10488 else
10489 {
10490 /* Matched. Keep the attribute and move to the next. */
10491 out_list = out_list->next;
10492 in_list = in_list->next;
10493 }
10494 }
10495
10496 if (err_bfd)
10497 {
10498 /* Attribute numbers >=64 (mod 128) can be safely ignored. */
10499 if ((err_tag & 127) < 64)
10500 {
10501 _bfd_error_handler
10502 (_("%B: Unknown mandatory EABI object attribute %d"),
10503 err_bfd, err_tag);
10504 bfd_set_error (bfd_error_bad_value);
10505 result = FALSE;
10506 }
10507 else
10508 {
10509 _bfd_error_handler
10510 (_("Warning: %B: Unknown EABI object attribute %d"),
10511 err_bfd, err_tag);
10512 }
10513 }
10514 }
10515 return result;
10516 }
10517
10518
10519 /* Return TRUE if the two EABI versions are incompatible. */
10520
10521 static bfd_boolean
10522 elf32_arm_versions_compatible (unsigned iver, unsigned over)
10523 {
10524 /* v4 and v5 are the same spec before and after it was released,
10525 so allow mixing them. */
10526 if ((iver == EF_ARM_EABI_VER4 && over == EF_ARM_EABI_VER5)
10527 || (iver == EF_ARM_EABI_VER5 && over == EF_ARM_EABI_VER4))
10528 return TRUE;
10529
10530 return (iver == over);
10531 }
10532
10533 /* Merge backend specific data from an object file to the output
10534 object file when linking. */
10535
10536 static bfd_boolean
10537 elf32_arm_merge_private_bfd_data (bfd * ibfd, bfd * obfd);
10538
10539 /* Display the flags field. */
10540
10541 static bfd_boolean
10542 elf32_arm_print_private_bfd_data (bfd *abfd, void * ptr)
10543 {
10544 FILE * file = (FILE *) ptr;
10545 unsigned long flags;
10546
10547 BFD_ASSERT (abfd != NULL && ptr != NULL);
10548
10549 /* Print normal ELF private data. */
10550 _bfd_elf_print_private_bfd_data (abfd, ptr);
10551
10552 flags = elf_elfheader (abfd)->e_flags;
10553 /* Ignore init flag - it may not be set, despite the flags field
10554 containing valid data. */
10555
10556 /* xgettext:c-format */
10557 fprintf (file, _("private flags = %lx:"), elf_elfheader (abfd)->e_flags);
10558
10559 switch (EF_ARM_EABI_VERSION (flags))
10560 {
10561 case EF_ARM_EABI_UNKNOWN:
10562 /* The following flag bits are GNU extensions and not part of the
10563 official ARM ELF extended ABI. Hence they are only decoded if
10564 the EABI version is not set. */
10565 if (flags & EF_ARM_INTERWORK)
10566 fprintf (file, _(" [interworking enabled]"));
10567
10568 if (flags & EF_ARM_APCS_26)
10569 fprintf (file, " [APCS-26]");
10570 else
10571 fprintf (file, " [APCS-32]");
10572
10573 if (flags & EF_ARM_VFP_FLOAT)
10574 fprintf (file, _(" [VFP float format]"));
10575 else if (flags & EF_ARM_MAVERICK_FLOAT)
10576 fprintf (file, _(" [Maverick float format]"));
10577 else
10578 fprintf (file, _(" [FPA float format]"));
10579
10580 if (flags & EF_ARM_APCS_FLOAT)
10581 fprintf (file, _(" [floats passed in float registers]"));
10582
10583 if (flags & EF_ARM_PIC)
10584 fprintf (file, _(" [position independent]"));
10585
10586 if (flags & EF_ARM_NEW_ABI)
10587 fprintf (file, _(" [new ABI]"));
10588
10589 if (flags & EF_ARM_OLD_ABI)
10590 fprintf (file, _(" [old ABI]"));
10591
10592 if (flags & EF_ARM_SOFT_FLOAT)
10593 fprintf (file, _(" [software FP]"));
10594
10595 flags &= ~(EF_ARM_INTERWORK | EF_ARM_APCS_26 | EF_ARM_APCS_FLOAT
10596 | EF_ARM_PIC | EF_ARM_NEW_ABI | EF_ARM_OLD_ABI
10597 | EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT
10598 | EF_ARM_MAVERICK_FLOAT);
10599 break;
10600
10601 case EF_ARM_EABI_VER1:
10602 fprintf (file, _(" [Version1 EABI]"));
10603
10604 if (flags & EF_ARM_SYMSARESORTED)
10605 fprintf (file, _(" [sorted symbol table]"));
10606 else
10607 fprintf (file, _(" [unsorted symbol table]"));
10608
10609 flags &= ~ EF_ARM_SYMSARESORTED;
10610 break;
10611
10612 case EF_ARM_EABI_VER2:
10613 fprintf (file, _(" [Version2 EABI]"));
10614
10615 if (flags & EF_ARM_SYMSARESORTED)
10616 fprintf (file, _(" [sorted symbol table]"));
10617 else
10618 fprintf (file, _(" [unsorted symbol table]"));
10619
10620 if (flags & EF_ARM_DYNSYMSUSESEGIDX)
10621 fprintf (file, _(" [dynamic symbols use segment index]"));
10622
10623 if (flags & EF_ARM_MAPSYMSFIRST)
10624 fprintf (file, _(" [mapping symbols precede others]"));
10625
10626 flags &= ~(EF_ARM_SYMSARESORTED | EF_ARM_DYNSYMSUSESEGIDX
10627 | EF_ARM_MAPSYMSFIRST);
10628 break;
10629
10630 case EF_ARM_EABI_VER3:
10631 fprintf (file, _(" [Version3 EABI]"));
10632 break;
10633
10634 case EF_ARM_EABI_VER4:
10635 fprintf (file, _(" [Version4 EABI]"));
10636 goto eabi;
10637
10638 case EF_ARM_EABI_VER5:
10639 fprintf (file, _(" [Version5 EABI]"));
10640 eabi:
10641 if (flags & EF_ARM_BE8)
10642 fprintf (file, _(" [BE8]"));
10643
10644 if (flags & EF_ARM_LE8)
10645 fprintf (file, _(" [LE8]"));
10646
10647 flags &= ~(EF_ARM_LE8 | EF_ARM_BE8);
10648 break;
10649
10650 default:
10651 fprintf (file, _(" <EABI version unrecognised>"));
10652 break;
10653 }
10654
10655 flags &= ~ EF_ARM_EABIMASK;
10656
10657 if (flags & EF_ARM_RELEXEC)
10658 fprintf (file, _(" [relocatable executable]"));
10659
10660 if (flags & EF_ARM_HASENTRY)
10661 fprintf (file, _(" [has entry point]"));
10662
10663 flags &= ~ (EF_ARM_RELEXEC | EF_ARM_HASENTRY);
10664
10665 if (flags)
10666 fprintf (file, _("<Unrecognised flag bits set>"));
10667
10668 fputc ('\n', file);
10669
10670 return TRUE;
10671 }
10672
10673 static int
10674 elf32_arm_get_symbol_type (Elf_Internal_Sym * elf_sym, int type)
10675 {
10676 switch (ELF_ST_TYPE (elf_sym->st_info))
10677 {
10678 case STT_ARM_TFUNC:
10679 return ELF_ST_TYPE (elf_sym->st_info);
10680
10681 case STT_ARM_16BIT:
10682 /* If the symbol is not an object, return the STT_ARM_16BIT flag.
10683 This allows us to distinguish between data used by Thumb instructions
10684 and non-data (which is probably code) inside Thumb regions of an
10685 executable. */
10686 if (type != STT_OBJECT && type != STT_TLS)
10687 return ELF_ST_TYPE (elf_sym->st_info);
10688 break;
10689
10690 default:
10691 break;
10692 }
10693
10694 return type;
10695 }
10696
10697 static asection *
10698 elf32_arm_gc_mark_hook (asection *sec,
10699 struct bfd_link_info *info,
10700 Elf_Internal_Rela *rel,
10701 struct elf_link_hash_entry *h,
10702 Elf_Internal_Sym *sym)
10703 {
10704 if (h != NULL)
10705 switch (ELF32_R_TYPE (rel->r_info))
10706 {
10707 case R_ARM_GNU_VTINHERIT:
10708 case R_ARM_GNU_VTENTRY:
10709 return NULL;
10710 }
10711
10712 return _bfd_elf_gc_mark_hook (sec, info, rel, h, sym);
10713 }
10714
10715 /* Update the got entry reference counts for the section being removed. */
10716
10717 static bfd_boolean
10718 elf32_arm_gc_sweep_hook (bfd * abfd,
10719 struct bfd_link_info * info,
10720 asection * sec,
10721 const Elf_Internal_Rela * relocs)
10722 {
10723 Elf_Internal_Shdr *symtab_hdr;
10724 struct elf_link_hash_entry **sym_hashes;
10725 bfd_signed_vma *local_got_refcounts;
10726 const Elf_Internal_Rela *rel, *relend;
10727 struct elf32_arm_link_hash_table * globals;
10728
10729 if (info->relocatable)
10730 return TRUE;
10731
10732 globals = elf32_arm_hash_table (info);
10733 if (globals == NULL)
10734 return FALSE;
10735
10736 elf_section_data (sec)->local_dynrel = NULL;
10737
10738 symtab_hdr = & elf_symtab_hdr (abfd);
10739 sym_hashes = elf_sym_hashes (abfd);
10740 local_got_refcounts = elf_local_got_refcounts (abfd);
10741
10742 check_use_blx (globals);
10743
10744 relend = relocs + sec->reloc_count;
10745 for (rel = relocs; rel < relend; rel++)
10746 {
10747 unsigned long r_symndx;
10748 struct elf_link_hash_entry *h = NULL;
10749 int r_type;
10750
10751 r_symndx = ELF32_R_SYM (rel->r_info);
10752 if (r_symndx >= symtab_hdr->sh_info)
10753 {
10754 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
10755 while (h->root.type == bfd_link_hash_indirect
10756 || h->root.type == bfd_link_hash_warning)
10757 h = (struct elf_link_hash_entry *) h->root.u.i.link;
10758 }
10759
10760 r_type = ELF32_R_TYPE (rel->r_info);
10761 r_type = arm_real_reloc_type (globals, r_type);
10762 switch (r_type)
10763 {
10764 case R_ARM_GOT32:
10765 case R_ARM_GOT_PREL:
10766 case R_ARM_TLS_GD32:
10767 case R_ARM_TLS_IE32:
10768 if (h != NULL)
10769 {
10770 if (h->got.refcount > 0)
10771 h->got.refcount -= 1;
10772 }
10773 else if (local_got_refcounts != NULL)
10774 {
10775 if (local_got_refcounts[r_symndx] > 0)
10776 local_got_refcounts[r_symndx] -= 1;
10777 }
10778 break;
10779
10780 case R_ARM_TLS_LDM32:
10781 globals->tls_ldm_got.refcount -= 1;
10782 break;
10783
10784 case R_ARM_ABS32:
10785 case R_ARM_ABS32_NOI:
10786 case R_ARM_REL32:
10787 case R_ARM_REL32_NOI:
10788 case R_ARM_PC24:
10789 case R_ARM_PLT32:
10790 case R_ARM_CALL:
10791 case R_ARM_JUMP24:
10792 case R_ARM_PREL31:
10793 case R_ARM_THM_CALL:
10794 case R_ARM_THM_JUMP24:
10795 case R_ARM_THM_JUMP19:
10796 case R_ARM_MOVW_ABS_NC:
10797 case R_ARM_MOVT_ABS:
10798 case R_ARM_MOVW_PREL_NC:
10799 case R_ARM_MOVT_PREL:
10800 case R_ARM_THM_MOVW_ABS_NC:
10801 case R_ARM_THM_MOVT_ABS:
10802 case R_ARM_THM_MOVW_PREL_NC:
10803 case R_ARM_THM_MOVT_PREL:
10804 /* Should the interworking branches be here also? */
10805
10806 if (h != NULL)
10807 {
10808 struct elf32_arm_link_hash_entry *eh;
10809 struct elf32_arm_relocs_copied **pp;
10810 struct elf32_arm_relocs_copied *p;
10811
10812 eh = (struct elf32_arm_link_hash_entry *) h;
10813
10814 if (h->plt.refcount > 0)
10815 {
10816 h->plt.refcount -= 1;
10817 if (r_type == R_ARM_THM_CALL)
10818 eh->plt_maybe_thumb_refcount--;
10819
10820 if (r_type == R_ARM_THM_JUMP24
10821 || r_type == R_ARM_THM_JUMP19)
10822 eh->plt_thumb_refcount--;
10823 }
10824
10825 if (r_type == R_ARM_ABS32
10826 || r_type == R_ARM_REL32
10827 || r_type == R_ARM_ABS32_NOI
10828 || r_type == R_ARM_REL32_NOI)
10829 {
10830 for (pp = &eh->relocs_copied; (p = *pp) != NULL;
10831 pp = &p->next)
10832 if (p->section == sec)
10833 {
10834 p->count -= 1;
10835 if (ELF32_R_TYPE (rel->r_info) == R_ARM_REL32
10836 || ELF32_R_TYPE (rel->r_info) == R_ARM_REL32_NOI)
10837 p->pc_count -= 1;
10838 if (p->count == 0)
10839 *pp = p->next;
10840 break;
10841 }
10842 }
10843 }
10844 break;
10845
10846 default:
10847 break;
10848 }
10849 }
10850
10851 return TRUE;
10852 }
10853
10854 /* Look through the relocs for a section during the first phase. */
10855
10856 static bfd_boolean
10857 elf32_arm_check_relocs (bfd *abfd, struct bfd_link_info *info,
10858 asection *sec, const Elf_Internal_Rela *relocs)
10859 {
10860 Elf_Internal_Shdr *symtab_hdr;
10861 struct elf_link_hash_entry **sym_hashes;
10862 const Elf_Internal_Rela *rel;
10863 const Elf_Internal_Rela *rel_end;
10864 bfd *dynobj;
10865 asection *sreloc;
10866 struct elf32_arm_link_hash_table *htab;
10867 bfd_boolean needs_plt;
10868 unsigned long nsyms;
10869
10870 if (info->relocatable)
10871 return TRUE;
10872
10873 BFD_ASSERT (is_arm_elf (abfd));
10874
10875 htab = elf32_arm_hash_table (info);
10876 if (htab == NULL)
10877 return FALSE;
10878
10879 sreloc = NULL;
10880
10881 /* Create dynamic sections for relocatable executables so that we can
10882 copy relocations. */
10883 if (htab->root.is_relocatable_executable
10884 && ! htab->root.dynamic_sections_created)
10885 {
10886 if (! _bfd_elf_link_create_dynamic_sections (abfd, info))
10887 return FALSE;
10888 }
10889
10890 dynobj = elf_hash_table (info)->dynobj;
10891 symtab_hdr = & elf_symtab_hdr (abfd);
10892 sym_hashes = elf_sym_hashes (abfd);
10893 nsyms = NUM_SHDR_ENTRIES (symtab_hdr);
10894
10895 rel_end = relocs + sec->reloc_count;
10896 for (rel = relocs; rel < rel_end; rel++)
10897 {
10898 struct elf_link_hash_entry *h;
10899 struct elf32_arm_link_hash_entry *eh;
10900 unsigned long r_symndx;
10901 int r_type;
10902
10903 r_symndx = ELF32_R_SYM (rel->r_info);
10904 r_type = ELF32_R_TYPE (rel->r_info);
10905 r_type = arm_real_reloc_type (htab, r_type);
10906
10907 if (r_symndx >= nsyms
10908 /* PR 9934: It is possible to have relocations that do not
10909 refer to symbols, thus it is also possible to have an
10910 object file containing relocations but no symbol table. */
10911 && (r_symndx > STN_UNDEF || nsyms > 0))
10912 {
10913 (*_bfd_error_handler) (_("%B: bad symbol index: %d"), abfd,
10914 r_symndx);
10915 return FALSE;
10916 }
10917
10918 if (nsyms == 0 || r_symndx < symtab_hdr->sh_info)
10919 h = NULL;
10920 else
10921 {
10922 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
10923 while (h->root.type == bfd_link_hash_indirect
10924 || h->root.type == bfd_link_hash_warning)
10925 h = (struct elf_link_hash_entry *) h->root.u.i.link;
10926 }
10927
10928 eh = (struct elf32_arm_link_hash_entry *) h;
10929
10930 switch (r_type)
10931 {
10932 case R_ARM_GOT32:
10933 case R_ARM_GOT_PREL:
10934 case R_ARM_TLS_GD32:
10935 case R_ARM_TLS_IE32:
10936 /* This symbol requires a global offset table entry. */
10937 {
10938 int tls_type, old_tls_type;
10939
10940 switch (r_type)
10941 {
10942 case R_ARM_TLS_GD32: tls_type = GOT_TLS_GD; break;
10943 case R_ARM_TLS_IE32: tls_type = GOT_TLS_IE; break;
10944 default: tls_type = GOT_NORMAL; break;
10945 }
10946
10947 if (h != NULL)
10948 {
10949 h->got.refcount++;
10950 old_tls_type = elf32_arm_hash_entry (h)->tls_type;
10951 }
10952 else
10953 {
10954 bfd_signed_vma *local_got_refcounts;
10955
10956 /* This is a global offset table entry for a local symbol. */
10957 local_got_refcounts = elf_local_got_refcounts (abfd);
10958 if (local_got_refcounts == NULL)
10959 {
10960 bfd_size_type size;
10961
10962 size = symtab_hdr->sh_info;
10963 size *= (sizeof (bfd_signed_vma) + sizeof (char));
10964 local_got_refcounts = (bfd_signed_vma *)
10965 bfd_zalloc (abfd, size);
10966 if (local_got_refcounts == NULL)
10967 return FALSE;
10968 elf_local_got_refcounts (abfd) = local_got_refcounts;
10969 elf32_arm_local_got_tls_type (abfd)
10970 = (char *) (local_got_refcounts + symtab_hdr->sh_info);
10971 }
10972 local_got_refcounts[r_symndx] += 1;
10973 old_tls_type = elf32_arm_local_got_tls_type (abfd) [r_symndx];
10974 }
10975
10976 /* We will already have issued an error message if there is a
10977 TLS / non-TLS mismatch, based on the symbol type. We don't
10978 support any linker relaxations. So just combine any TLS
10979 types needed. */
10980 if (old_tls_type != GOT_UNKNOWN && old_tls_type != GOT_NORMAL
10981 && tls_type != GOT_NORMAL)
10982 tls_type |= old_tls_type;
10983
10984 if (old_tls_type != tls_type)
10985 {
10986 if (h != NULL)
10987 elf32_arm_hash_entry (h)->tls_type = tls_type;
10988 else
10989 elf32_arm_local_got_tls_type (abfd) [r_symndx] = tls_type;
10990 }
10991 }
10992 /* Fall through. */
10993
10994 case R_ARM_TLS_LDM32:
10995 if (r_type == R_ARM_TLS_LDM32)
10996 htab->tls_ldm_got.refcount++;
10997 /* Fall through. */
10998
10999 case R_ARM_GOTOFF32:
11000 case R_ARM_GOTPC:
11001 if (htab->sgot == NULL)
11002 {
11003 if (htab->root.dynobj == NULL)
11004 htab->root.dynobj = abfd;
11005 if (!create_got_section (htab->root.dynobj, info))
11006 return FALSE;
11007 }
11008 break;
11009
11010 case R_ARM_ABS12:
11011 /* VxWorks uses dynamic R_ARM_ABS12 relocations for
11012 ldr __GOTT_INDEX__ offsets. */
11013 if (!htab->vxworks_p)
11014 break;
11015 /* Fall through. */
11016
11017 case R_ARM_PC24:
11018 case R_ARM_PLT32:
11019 case R_ARM_CALL:
11020 case R_ARM_JUMP24:
11021 case R_ARM_PREL31:
11022 case R_ARM_THM_CALL:
11023 case R_ARM_THM_JUMP24:
11024 case R_ARM_THM_JUMP19:
11025 needs_plt = 1;
11026 goto normal_reloc;
11027
11028 case R_ARM_MOVW_ABS_NC:
11029 case R_ARM_MOVT_ABS:
11030 case R_ARM_THM_MOVW_ABS_NC:
11031 case R_ARM_THM_MOVT_ABS:
11032 if (info->shared)
11033 {
11034 (*_bfd_error_handler)
11035 (_("%B: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"),
11036 abfd, elf32_arm_howto_table_1[r_type].name,
11037 (h) ? h->root.root.string : "a local symbol");
11038 bfd_set_error (bfd_error_bad_value);
11039 return FALSE;
11040 }
11041
11042 /* Fall through. */
11043 case R_ARM_ABS32:
11044 case R_ARM_ABS32_NOI:
11045 case R_ARM_REL32:
11046 case R_ARM_REL32_NOI:
11047 case R_ARM_MOVW_PREL_NC:
11048 case R_ARM_MOVT_PREL:
11049 case R_ARM_THM_MOVW_PREL_NC:
11050 case R_ARM_THM_MOVT_PREL:
11051 needs_plt = 0;
11052 normal_reloc:
11053
11054 /* Should the interworking branches be listed here? */
11055 if (h != NULL)
11056 {
11057 /* If this reloc is in a read-only section, we might
11058 need a copy reloc. We can't check reliably at this
11059 stage whether the section is read-only, as input
11060 sections have not yet been mapped to output sections.
11061 Tentatively set the flag for now, and correct in
11062 adjust_dynamic_symbol. */
11063 if (!info->shared)
11064 h->non_got_ref = 1;
11065
11066 /* We may need a .plt entry if the function this reloc
11067 refers to is in a different object. We can't tell for
11068 sure yet, because something later might force the
11069 symbol local. */
11070 if (needs_plt)
11071 h->needs_plt = 1;
11072
11073 /* If we create a PLT entry, this relocation will reference
11074 it, even if it's an ABS32 relocation. */
11075 h->plt.refcount += 1;
11076
11077 /* It's too early to use htab->use_blx here, so we have to
11078 record possible blx references separately from
11079 relocs that definitely need a thumb stub. */
11080
11081 if (r_type == R_ARM_THM_CALL)
11082 eh->plt_maybe_thumb_refcount += 1;
11083
11084 if (r_type == R_ARM_THM_JUMP24
11085 || r_type == R_ARM_THM_JUMP19)
11086 eh->plt_thumb_refcount += 1;
11087 }
11088
11089 /* If we are creating a shared library or relocatable executable,
11090 and this is a reloc against a global symbol, or a non PC
11091 relative reloc against a local symbol, then we need to copy
11092 the reloc into the shared library. However, if we are linking
11093 with -Bsymbolic, we do not need to copy a reloc against a
11094 global symbol which is defined in an object we are
11095 including in the link (i.e., DEF_REGULAR is set). At
11096 this point we have not seen all the input files, so it is
11097 possible that DEF_REGULAR is not set now but will be set
11098 later (it is never cleared). We account for that
11099 possibility below by storing information in the
11100 relocs_copied field of the hash table entry. */
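/* The pc_count field below records how many of the copied relocations are
   PC-relative (R_ARM_REL32 / R_ARM_REL32_NOI); those can be discarded later
   if the symbol ends up resolved locally, whereas the absolute ones are still
   needed in position-independent output. */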
11101 if ((info->shared || htab->root.is_relocatable_executable)
11102 && (sec->flags & SEC_ALLOC) != 0
11103 && ((r_type == R_ARM_ABS32 || r_type == R_ARM_ABS32_NOI)
11104 || (h != NULL && ! h->needs_plt
11105 && (! info->symbolic || ! h->def_regular))))
11106 {
11107 struct elf32_arm_relocs_copied *p, **head;
11108
11109 /* When creating a shared object, we must copy these
11110 reloc types into the output file. We create a reloc
11111 section in dynobj and make room for this reloc. */
11112 if (sreloc == NULL)
11113 {
11114 sreloc = _bfd_elf_make_dynamic_reloc_section
11115 (sec, dynobj, 2, abfd, ! htab->use_rel);
11116
11117 if (sreloc == NULL)
11118 return FALSE;
11119
11120 /* BPABI objects never have dynamic relocations mapped. */
11121 if (htab->symbian_p)
11122 {
11123 flagword flags;
11124
11125 flags = bfd_get_section_flags (dynobj, sreloc);
11126 flags &= ~(SEC_LOAD | SEC_ALLOC);
11127 bfd_set_section_flags (dynobj, sreloc, flags);
11128 }
11129 }
11130
11131 /* If this is a global symbol, we count the number of
11132 relocations we need for this symbol. */
11133 if (h != NULL)
11134 {
11135 head = &((struct elf32_arm_link_hash_entry *) h)->relocs_copied;
11136 }
11137 else
11138 {
11139 /* Track dynamic relocs needed for local syms too.
11140 We really need local syms available to do this
11141 easily. Oh well. */
11142 asection *s;
11143 void *vpp;
11144 Elf_Internal_Sym *isym;
11145
11146 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
11147 abfd, r_symndx);
11148 if (isym == NULL)
11149 return FALSE;
11150
11151 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
11152 if (s == NULL)
11153 s = sec;
11154
11155 vpp = &elf_section_data (s)->local_dynrel;
11156 head = (struct elf32_arm_relocs_copied **) vpp;
11157 }
11158
11159 p = *head;
11160 if (p == NULL || p->section != sec)
11161 {
11162 bfd_size_type amt = sizeof *p;
11163
11164 p = (struct elf32_arm_relocs_copied *)
11165 bfd_alloc (htab->root.dynobj, amt);
11166 if (p == NULL)
11167 return FALSE;
11168 p->next = *head;
11169 *head = p;
11170 p->section = sec;
11171 p->count = 0;
11172 p->pc_count = 0;
11173 }
11174
11175 if (r_type == R_ARM_REL32 || r_type == R_ARM_REL32_NOI)
11176 p->pc_count += 1;
11177 p->count += 1;
11178 }
11179 break;
11180
11181 /* This relocation describes the C++ object vtable hierarchy.
11182 Reconstruct it for later use during GC. */
11183 case R_ARM_GNU_VTINHERIT:
11184 if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
11185 return FALSE;
11186 break;
11187
11188 /* This relocation describes which C++ vtable entries are actually
11189 used. Record for later use during GC. */
11190 case R_ARM_GNU_VTENTRY:
11191 BFD_ASSERT (h != NULL);
11192 if (h != NULL
11193 && !bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_offset))
11194 return FALSE;
11195 break;
11196 }
11197 }
11198
11199 return TRUE;
11200 }
11201
11202 /* Unwinding tables are not referenced directly. This pass marks them as
11203 required if the corresponding code section is marked. */
11204
11205 static bfd_boolean
11206 elf32_arm_gc_mark_extra_sections (struct bfd_link_info *info,
11207 elf_gc_mark_hook_fn gc_mark_hook)
11208 {
11209 bfd *sub;
11210 Elf_Internal_Shdr **elf_shdrp;
11211 bfd_boolean again;
11212
11213 /* Marking EH data may cause additional code sections to be marked,
11214 requiring multiple passes. */
11215 again = TRUE;
11216 while (again)
11217 {
11218 again = FALSE;
11219 for (sub = info->input_bfds; sub != NULL; sub = sub->link_next)
11220 {
11221 asection *o;
11222
11223 if (! is_arm_elf (sub))
11224 continue;
11225
11226 elf_shdrp = elf_elfsections (sub);
11227 for (o = sub->sections; o != NULL; o = o->next)
11228 {
11229 Elf_Internal_Shdr *hdr;
11230
11231 hdr = &elf_section_data (o)->this_hdr;
11232 if (hdr->sh_type == SHT_ARM_EXIDX
11233 && hdr->sh_link
11234 && hdr->sh_link < elf_numsections (sub)
11235 && !o->gc_mark
11236 && elf_shdrp[hdr->sh_link]->bfd_section->gc_mark)
11237 {
11238 again = TRUE;
11239 if (!_bfd_elf_gc_mark (info, o, gc_mark_hook))
11240 return FALSE;
11241 }
11242 }
11243 }
11244 }
11245
11246 return TRUE;
11247 }
11248
11249 /* Treat mapping symbols as special target symbols. */
11250
11251 static bfd_boolean
11252 elf32_arm_is_target_special_symbol (bfd * abfd ATTRIBUTE_UNUSED, asymbol * sym)
11253 {
11254 return bfd_is_arm_special_symbol_name (sym->name,
11255 BFD_ARM_SPECIAL_SYM_TYPE_ANY);
11256 }
11257
11258 /* This is a copy of elf_find_function() from elf.c except that
11259 ARM mapping symbols are ignored when looking for function names
11260 and STT_ARM_TFUNC is considered to be a function type. */

11261
11262 static bfd_boolean
11263 arm_elf_find_function (bfd * abfd ATTRIBUTE_UNUSED,
11264 asection * section,
11265 asymbol ** symbols,
11266 bfd_vma offset,
11267 const char ** filename_ptr,
11268 const char ** functionname_ptr)
11269 {
11270 const char * filename = NULL;
11271 asymbol * func = NULL;
11272 bfd_vma low_func = 0;
11273 asymbol ** p;
11274
11275 for (p = symbols; *p != NULL; p++)
11276 {
11277 elf_symbol_type *q;
11278
11279 q = (elf_symbol_type *) *p;
11280
11281 switch (ELF_ST_TYPE (q->internal_elf_sym.st_info))
11282 {
11283 default:
11284 break;
11285 case STT_FILE:
11286 filename = bfd_asymbol_name (&q->symbol);
11287 break;
11288 case STT_FUNC:
11289 case STT_ARM_TFUNC:
11290 case STT_NOTYPE:
11291 /* Skip mapping symbols. */
11292 if ((q->symbol.flags & BSF_LOCAL)
11293 && bfd_is_arm_special_symbol_name (q->symbol.name,
11294 BFD_ARM_SPECIAL_SYM_TYPE_ANY))
11295 continue;
11296 /* Fall through. */
11297 if (bfd_get_section (&q->symbol) == section
11298 && q->symbol.value >= low_func
11299 && q->symbol.value <= offset)
11300 {
11301 func = (asymbol *) q;
11302 low_func = q->symbol.value;
11303 }
11304 break;
11305 }
11306 }
11307
11308 if (func == NULL)
11309 return FALSE;
11310
11311 if (filename_ptr)
11312 *filename_ptr = filename;
11313 if (functionname_ptr)
11314 *functionname_ptr = bfd_asymbol_name (func);
11315
11316 return TRUE;
11317 }
11318
11319
11320 /* Find the nearest line to a particular section and offset, for error
11321 reporting. This code is a duplicate of the code in elf.c, except
11322 that it uses arm_elf_find_function. */
11323
11324 static bfd_boolean
11325 elf32_arm_find_nearest_line (bfd * abfd,
11326 asection * section,
11327 asymbol ** symbols,
11328 bfd_vma offset,
11329 const char ** filename_ptr,
11330 const char ** functionname_ptr,
11331 unsigned int * line_ptr)
11332 {
11333 bfd_boolean found = FALSE;
11334
11335 /* We skip _bfd_dwarf1_find_nearest_line since no known ARM toolchain uses it. */
11336
11337 if (_bfd_dwarf2_find_nearest_line (abfd, section, symbols, offset,
11338 filename_ptr, functionname_ptr,
11339 line_ptr, 0,
11340 & elf_tdata (abfd)->dwarf2_find_line_info))
11341 {
11342 if (!*functionname_ptr)
11343 arm_elf_find_function (abfd, section, symbols, offset,
11344 *filename_ptr ? NULL : filename_ptr,
11345 functionname_ptr);
11346
11347 return TRUE;
11348 }
11349
11350 if (! _bfd_stab_section_find_nearest_line (abfd, symbols, section, offset,
11351 & found, filename_ptr,
11352 functionname_ptr, line_ptr,
11353 & elf_tdata (abfd)->line_info))
11354 return FALSE;
11355
11356 if (found && (*functionname_ptr || *line_ptr))
11357 return TRUE;
11358
11359 if (symbols == NULL)
11360 return FALSE;
11361
11362 if (! arm_elf_find_function (abfd, section, symbols, offset,
11363 filename_ptr, functionname_ptr))
11364 return FALSE;
11365
11366 *line_ptr = 0;
11367 return TRUE;
11368 }
11369
11370 static bfd_boolean
11371 elf32_arm_find_inliner_info (bfd * abfd,
11372 const char ** filename_ptr,
11373 const char ** functionname_ptr,
11374 unsigned int * line_ptr)
11375 {
11376 bfd_boolean found;
11377 found = _bfd_dwarf2_find_inliner_info (abfd, filename_ptr,
11378 functionname_ptr, line_ptr,
11379 & elf_tdata (abfd)->dwarf2_find_line_info);
11380 return found;
11381 }
11382
11383 /* Adjust a symbol defined by a dynamic object and referenced by a
11384 regular object. The current definition is in some section of the
11385 dynamic object, but we're not including those sections. We have to
11386 change the definition to something the rest of the link can
11387 understand. */
11388
11389 static bfd_boolean
11390 elf32_arm_adjust_dynamic_symbol (struct bfd_link_info * info,
11391 struct elf_link_hash_entry * h)
11392 {
11393 bfd * dynobj;
11394 asection * s;
11395 struct elf32_arm_link_hash_entry * eh;
11396 struct elf32_arm_link_hash_table *globals;
11397
11398 globals = elf32_arm_hash_table (info);
11399 if (globals == NULL)
11400 return FALSE;
11401
11402 dynobj = elf_hash_table (info)->dynobj;
11403
11404 /* Make sure we know what is going on here. */
11405 BFD_ASSERT (dynobj != NULL
11406 && (h->needs_plt
11407 || h->u.weakdef != NULL
11408 || (h->def_dynamic
11409 && h->ref_regular
11410 && !h->def_regular)));
11411
11412 eh = (struct elf32_arm_link_hash_entry *) h;
11413
11414 /* If this is a function, put it in the procedure linkage table. We
11415 will fill in the contents of the procedure linkage table later,
11416 when we know the address of the .got section. */
11417 if (h->type == STT_FUNC || h->type == STT_ARM_TFUNC
11418 || h->needs_plt)
11419 {
11420 if (h->plt.refcount <= 0
11421 || SYMBOL_CALLS_LOCAL (info, h)
11422 || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
11423 && h->root.type == bfd_link_hash_undefweak))
11424 {
11425 /* This case can occur if we saw a PLT32 reloc in an input
11426 file, but the symbol was never referred to by a dynamic
11427 object, or if all references were garbage collected. In
11428 such a case, we don't actually need to build a procedure
11429 linkage table, and we can just do a PC24 reloc instead. */
11430 h->plt.offset = (bfd_vma) -1;
11431 eh->plt_thumb_refcount = 0;
11432 eh->plt_maybe_thumb_refcount = 0;
11433 h->needs_plt = 0;
11434 }
11435
11436 return TRUE;
11437 }
11438 else
11439 {
11440 /* It's possible that we incorrectly decided a .plt reloc was
11441 needed for an R_ARM_PC24 or similar reloc to a non-function sym
11442 in check_relocs. We can't decide accurately between function
11443 and non-function syms in check_relocs; objects loaded later in
11444 the link may change h->type. So fix it now. */
11445 h->plt.offset = (bfd_vma) -1;
11446 eh->plt_thumb_refcount = 0;
11447 eh->plt_maybe_thumb_refcount = 0;
11448 }
11449
11450 /* If this is a weak symbol, and there is a real definition, the
11451 processor independent code will have arranged for us to see the
11452 real definition first, and we can just use the same value. */
11453 if (h->u.weakdef != NULL)
11454 {
11455 BFD_ASSERT (h->u.weakdef->root.type == bfd_link_hash_defined
11456 || h->u.weakdef->root.type == bfd_link_hash_defweak);
11457 h->root.u.def.section = h->u.weakdef->root.u.def.section;
11458 h->root.u.def.value = h->u.weakdef->root.u.def.value;
11459 return TRUE;
11460 }
11461
11462 /* If there are no non-GOT references, we do not need a copy
11463 relocation. */
11464 if (!h->non_got_ref)
11465 return TRUE;
11466
11467 /* This is a reference to a symbol defined by a dynamic object which
11468 is not a function. */
11469
11470 /* If we are creating a shared library, we must presume that the
11471 only references to the symbol are via the global offset table.
11472 For such cases we need not do anything here; the relocations will
11473 be handled correctly by relocate_section. Relocatable executables
11474 can reference data in shared objects directly, so we don't need to
11475 do anything here. */
11476 if (info->shared || globals->root.is_relocatable_executable)
11477 return TRUE;
11478
11479 if (h->size == 0)
11480 {
11481 (*_bfd_error_handler) (_("dynamic variable `%s' is zero size"),
11482 h->root.root.string);
11483 return TRUE;
11484 }
11485
11486 /* We must allocate the symbol in our .dynbss section, which will
11487 become part of the .bss section of the executable. There will be
11488 an entry for this symbol in the .dynsym section. The dynamic
11489 object will contain position independent code, so all references
11490 from the dynamic object to this symbol will go through the global
11491 offset table. The dynamic linker will use the .dynsym entry to
11492 determine the address it must put in the global offset table, so
11493 both the dynamic object and the regular object will refer to the
11494 same memory location for the variable. */
11495 s = bfd_get_section_by_name (dynobj, ".dynbss");
11496 BFD_ASSERT (s != NULL);
11497
11498 /* We must generate a R_ARM_COPY reloc to tell the dynamic linker to
11499 copy the initial value out of the dynamic object and into the
11500 runtime process image. We need to remember the offset into the
11501 .rel(a).bss section we are going to use. */
11502 if ((h->root.u.def.section->flags & SEC_ALLOC) != 0)
11503 {
11504 asection *srel;
11505
11506 srel = bfd_get_section_by_name (dynobj, RELOC_SECTION (globals, ".bss"));
11507 BFD_ASSERT (srel != NULL);
11508 srel->size += RELOC_SIZE (globals);
11509 h->needs_copy = 1;
11510 }
11511
11512 return _bfd_elf_adjust_dynamic_copy (h, s);
11513 }
11514
11515 /* Allocate space in .plt, .got and associated reloc sections for
11516 dynamic relocs. */
11517
11518 static bfd_boolean
11519 allocate_dynrelocs (struct elf_link_hash_entry *h, void * inf)
11520 {
11521 struct bfd_link_info *info;
11522 struct elf32_arm_link_hash_table *htab;
11523 struct elf32_arm_link_hash_entry *eh;
11524 struct elf32_arm_relocs_copied *p;
11525 bfd_signed_vma thumb_refs;
11526
11527 eh = (struct elf32_arm_link_hash_entry *) h;
11528
11529 if (h->root.type == bfd_link_hash_indirect)
11530 return TRUE;
11531
11532 if (h->root.type == bfd_link_hash_warning)
11533 /* When warning symbols are created, they **replace** the "real"
11534 entry in the hash table, thus we never get to see the real
11535 symbol in a hash traversal. So look at it now. */
11536 h = (struct elf_link_hash_entry *) h->root.u.i.link;
11537
11538 info = (struct bfd_link_info *) inf;
11539 htab = elf32_arm_hash_table (info);
11540 if (htab == NULL)
11541 return FALSE;
11542
11543 if (htab->root.dynamic_sections_created
11544 && h->plt.refcount > 0)
11545 {
11546 /* Make sure this symbol is output as a dynamic symbol.
11547 Undefined weak syms won't yet be marked as dynamic. */
11548 if (h->dynindx == -1
11549 && !h->forced_local)
11550 {
11551 if (! bfd_elf_link_record_dynamic_symbol (info, h))
11552 return FALSE;
11553 }
11554
11555 if (info->shared
11556 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
11557 {
11558 asection *s = htab->splt;
11559
11560 /* If this is the first .plt entry, make room for the special
11561 first entry. */
11562 if (s->size == 0)
11563 s->size += htab->plt_header_size;
11564
11565 h->plt.offset = s->size;
11566
11567 /* If we will insert a Thumb trampoline before this PLT, leave room
11568 for it. */
11569 thumb_refs = eh->plt_thumb_refcount;
11570 if (!htab->use_blx)
11571 thumb_refs += eh->plt_maybe_thumb_refcount;
11572
11573 if (thumb_refs > 0)
11574 {
11575 h->plt.offset += PLT_THUMB_STUB_SIZE;
11576 s->size += PLT_THUMB_STUB_SIZE;
11577 }
11578
11579 /* If this symbol is not defined in a regular file, and we are
11580 not generating a shared library, then set the symbol to this
11581 location in the .plt. This is required to make function
11582 pointers compare as equal between the normal executable and
11583 the shared library. */
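/* For example, if a non-PIC executable takes the address of a function that
   lives only in a shared library, both the executable and the library must
   agree on this PLT slot as the function's address for pointer comparisons
   to work.  */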
11584 if (! info->shared
11585 && !h->def_regular)
11586 {
11587 h->root.u.def.section = s;
11588 h->root.u.def.value = h->plt.offset;
11589
11590 /* Make sure the function is not marked as Thumb, in case
11591 it is the target of an ABS32 relocation, which will
11592 point to the PLT entry. */
11593 if (ELF_ST_TYPE (h->type) == STT_ARM_TFUNC)
11594 h->type = ELF_ST_INFO (ELF_ST_BIND (h->type), STT_FUNC);
11595 }
11596
11597 /* Make room for this entry. */
11598 s->size += htab->plt_entry_size;
11599
11600 if (!htab->symbian_p)
11601 {
11602 /* We also need to make an entry in the .got.plt section, which
11603 will be placed in the .got section by the linker script. */
11604 eh->plt_got_offset = htab->sgotplt->size;
11605 htab->sgotplt->size += 4;
11606 }
11607
11608 /* We also need to make an entry in the .rel(a).plt section. */
11609 htab->srelplt->size += RELOC_SIZE (htab);
11610
11611 /* VxWorks executables have a second set of relocations for
11612 each PLT entry. They go in a separate relocation section,
11613 which is processed by the kernel loader. */
11614 if (htab->vxworks_p && !info->shared)
11615 {
11616 /* There is a relocation for the initial PLT entry:
11617 an R_ARM_32 relocation for _GLOBAL_OFFSET_TABLE_. */
11618 if (h->plt.offset == htab->plt_header_size)
11619 htab->srelplt2->size += RELOC_SIZE (htab);
11620
11621 /* There are two extra relocations for each subsequent
11622 PLT entry: an R_ARM_32 relocation for the GOT entry,
11623 and an R_ARM_32 relocation for the PLT entry. */
11624 htab->srelplt2->size += RELOC_SIZE (htab) * 2;
11625 }
11626 }
11627 else
11628 {
11629 h->plt.offset = (bfd_vma) -1;
11630 h->needs_plt = 0;
11631 }
11632 }
11633 else
11634 {
11635 h->plt.offset = (bfd_vma) -1;
11636 h->needs_plt = 0;
11637 }
11638
11639 if (h->got.refcount > 0)
11640 {
11641 asection *s;
11642 bfd_boolean dyn;
11643 int tls_type = elf32_arm_hash_entry (h)->tls_type;
11644 int indx;
11645
11646 /* Make sure this symbol is output as a dynamic symbol.
11647 Undefined weak syms won't yet be marked as dynamic. */
11648 if (h->dynindx == -1
11649 && !h->forced_local)
11650 {
11651 if (! bfd_elf_link_record_dynamic_symbol (info, h))
11652 return FALSE;
11653 }
11654
11655 if (!htab->symbian_p)
11656 {
11657 s = htab->sgot;
11658 h->got.offset = s->size;
11659
11660 if (tls_type == GOT_UNKNOWN)
11661 abort ();
11662
11663 if (tls_type == GOT_NORMAL)
11664 /* Non-TLS symbols need one GOT slot. */
11665 s->size += 4;
11666 else
11667 {
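/* Roughly: a GD pair ends up holding the module ID and the symbol's offset
   within that module's TLS block (via R_ARM_TLS_DTPMOD32/DTPOFF32 dynamic
   relocs when needed), while an IE slot holds a thread-pointer relative
   offset (R_ARM_TLS_TPOFF32).  */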
11668 if (tls_type & GOT_TLS_GD)
11669 /* R_ARM_TLS_GD32 needs 2 consecutive GOT slots. */
11670 s->size += 8;
11671 if (tls_type & GOT_TLS_IE)
11672 /* R_ARM_TLS_IE32 needs one GOT slot. */
11673 s->size += 4;
11674 }
11675
11676 dyn = htab->root.dynamic_sections_created;
11677
11678 indx = 0;
11679 if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
11680 && (!info->shared
11681 || !SYMBOL_REFERENCES_LOCAL (info, h)))
11682 indx = h->dynindx;
11683
11684 if (tls_type != GOT_NORMAL
11685 && (info->shared || indx != 0)
11686 && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
11687 || h->root.type != bfd_link_hash_undefweak))
11688 {
11689 if (tls_type & GOT_TLS_IE)
11690 htab->srelgot->size += RELOC_SIZE (htab);
11691
11692 if (tls_type & GOT_TLS_GD)
11693 htab->srelgot->size += RELOC_SIZE (htab);
11694
11695 if ((tls_type & GOT_TLS_GD) && indx != 0)
11696 htab->srelgot->size += RELOC_SIZE (htab);
11697 }
11698 else if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
11699 || h->root.type != bfd_link_hash_undefweak)
11700 && (info->shared
11701 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
11702 htab->srelgot->size += RELOC_SIZE (htab);
11703 }
11704 }
11705 else
11706 h->got.offset = (bfd_vma) -1;
11707
11708 /* Allocate stubs for exported Thumb functions on v4t. */
11709 if (!htab->use_blx && h->dynindx != -1
11710 && h->def_regular
11711 && ELF_ST_TYPE (h->type) == STT_ARM_TFUNC
11712 && ELF_ST_VISIBILITY (h->other) == STV_DEFAULT)
11713 {
11714 struct elf_link_hash_entry * th;
11715 struct bfd_link_hash_entry * bh;
11716 struct elf_link_hash_entry * myh;
11717 char name[1024];
11718 asection *s;
11719 bh = NULL;
11720 /* Create a new symbol to register the real location of the function. */
11721 s = h->root.u.def.section;
11722 sprintf (name, "__real_%s", h->root.root.string);
11723 _bfd_generic_link_add_one_symbol (info, s->owner,
11724 name, BSF_GLOBAL, s,
11725 h->root.u.def.value,
11726 NULL, TRUE, FALSE, &bh);
11727
11728 myh = (struct elf_link_hash_entry *) bh;
11729 myh->type = ELF_ST_INFO (STB_LOCAL, STT_ARM_TFUNC);
11730 myh->forced_local = 1;
11731 eh->export_glue = myh;
11732 th = record_arm_to_thumb_glue (info, h);
11733 /* Point the symbol at the stub. */
11734 h->type = ELF_ST_INFO (ELF_ST_BIND (h->type), STT_FUNC);
11735 h->root.u.def.section = th->root.u.def.section;
11736 h->root.u.def.value = th->root.u.def.value & ~1;
11737 }
11738
11739 if (eh->relocs_copied == NULL)
11740 return TRUE;
11741
11742 /* In the shared -Bsymbolic case, discard space allocated for
11743 dynamic pc-relative relocs against symbols which turn out to be
11744 defined in regular objects. For the normal shared case, discard
11745 space for pc-relative relocs that have become local due to symbol
11746 visibility changes. */
11747
11748 if (info->shared || htab->root.is_relocatable_executable)
11749 {
11750 /* The only relocs that use pc_count are R_ARM_REL32 and
11751 R_ARM_REL32_NOI, which will appear on something like
11752 ".long foo - .". We want calls to protected symbols to resolve
11753 directly to the function rather than going via the plt. If people
11754 want function pointer comparisons to work as expected then they
11755 should avoid writing assembly like ".long foo - .". */
11756 if (SYMBOL_CALLS_LOCAL (info, h))
11757 {
11758 struct elf32_arm_relocs_copied **pp;
11759
11760 for (pp = &eh->relocs_copied; (p = *pp) != NULL; )
11761 {
11762 p->count -= p->pc_count;
11763 p->pc_count = 0;
11764 if (p->count == 0)
11765 *pp = p->next;
11766 else
11767 pp = &p->next;
11768 }
11769 }
11770
11771 if (htab->vxworks_p)
11772 {
11773 struct elf32_arm_relocs_copied **pp;
11774
11775 for (pp = &eh->relocs_copied; (p = *pp) != NULL; )
11776 {
11777 if (strcmp (p->section->output_section->name, ".tls_vars") == 0)
11778 *pp = p->next;
11779 else
11780 pp = &p->next;
11781 }
11782 }
11783
11784 /* Also discard relocs on undefined weak syms with non-default
11785 visibility. */
11786 if (eh->relocs_copied != NULL
11787 && h->root.type == bfd_link_hash_undefweak)
11788 {
11789 if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
11790 eh->relocs_copied = NULL;
11791
11792 /* Make sure undefined weak symbols are output as dynamic
11793 symbols in PIEs. */
11794 else if (h->dynindx == -1
11795 && !h->forced_local)
11796 {
11797 if (! bfd_elf_link_record_dynamic_symbol (info, h))
11798 return FALSE;
11799 }
11800 }
11801
11802 else if (htab->root.is_relocatable_executable && h->dynindx == -1
11803 && h->root.type == bfd_link_hash_new)
11804 {
11805 /* Output absolute symbols so that we can create relocations
11806 against them. For normal symbols we output a relocation
11807 against the section that contains them. */
11808 if (! bfd_elf_link_record_dynamic_symbol (info, h))
11809 return FALSE;
11810 }
11811
11812 }
11813 else
11814 {
11815 /* For the non-shared case, discard space for relocs against
11816 symbols which turn out to need copy relocs or are not
11817 dynamic. */
11818
11819 if (!h->non_got_ref
11820 && ((h->def_dynamic
11821 && !h->def_regular)
11822 || (htab->root.dynamic_sections_created
11823 && (h->root.type == bfd_link_hash_undefweak
11824 || h->root.type == bfd_link_hash_undefined))))
11825 {
11826 /* Make sure this symbol is output as a dynamic symbol.
11827 Undefined weak syms won't yet be marked as dynamic. */
11828 if (h->dynindx == -1
11829 && !h->forced_local)
11830 {
11831 if (! bfd_elf_link_record_dynamic_symbol (info, h))
11832 return FALSE;
11833 }
11834
11835 /* If that succeeded, we know we'll be keeping all the
11836 relocs. */
11837 if (h->dynindx != -1)
11838 goto keep;
11839 }
11840
11841 eh->relocs_copied = NULL;
11842
11843 keep: ;
11844 }
11845
11846 /* Finally, allocate space. */
11847 for (p = eh->relocs_copied; p != NULL; p = p->next)
11848 {
11849 asection *sreloc = elf_section_data (p->section)->sreloc;
11850 sreloc->size += p->count * RELOC_SIZE (htab);
11851 }
11852
11853 return TRUE;
11854 }
11855
11856 /* Find any dynamic relocs that apply to read-only sections. */
11857
11858 static bfd_boolean
11859 elf32_arm_readonly_dynrelocs (struct elf_link_hash_entry * h, void * inf)
11860 {
11861 struct elf32_arm_link_hash_entry * eh;
11862 struct elf32_arm_relocs_copied * p;
11863
11864 if (h->root.type == bfd_link_hash_warning)
11865 h = (struct elf_link_hash_entry *) h->root.u.i.link;
11866
11867 eh = (struct elf32_arm_link_hash_entry *) h;
11868 for (p = eh->relocs_copied; p != NULL; p = p->next)
11869 {
11870 asection *s = p->section;
11871
11872 if (s != NULL && (s->flags & SEC_READONLY) != 0)
11873 {
11874 struct bfd_link_info *info = (struct bfd_link_info *) inf;
11875
11876 info->flags |= DF_TEXTREL;
11877
11878 /* Not an error, just cut short the traversal. */
11879 return FALSE;
11880 }
11881 }
11882 return TRUE;
11883 }
11884
11885 void
11886 bfd_elf32_arm_set_byteswap_code (struct bfd_link_info *info,
11887 int byteswap_code)
11888 {
11889 struct elf32_arm_link_hash_table *globals;
11890
11891 globals = elf32_arm_hash_table (info);
11892 if (globals == NULL)
11893 return;
11894
11895 globals->byteswap_code = byteswap_code;
11896 }
11897
11898 /* Set the sizes of the dynamic sections. */
11899
11900 static bfd_boolean
11901 elf32_arm_size_dynamic_sections (bfd * output_bfd ATTRIBUTE_UNUSED,
11902 struct bfd_link_info * info)
11903 {
11904 bfd * dynobj;
11905 asection * s;
11906 bfd_boolean plt;
11907 bfd_boolean relocs;
11908 bfd *ibfd;
11909 struct elf32_arm_link_hash_table *htab;
11910
11911 htab = elf32_arm_hash_table (info);
11912 if (htab == NULL)
11913 return FALSE;
11914
11915 dynobj = elf_hash_table (info)->dynobj;
11916 BFD_ASSERT (dynobj != NULL);
11917 check_use_blx (htab);
11918
11919 if (elf_hash_table (info)->dynamic_sections_created)
11920 {
11921 /* Set the contents of the .interp section to the interpreter. */
11922 if (info->executable)
11923 {
11924 s = bfd_get_section_by_name (dynobj, ".interp");
11925 BFD_ASSERT (s != NULL);
11926 s->size = sizeof ELF_DYNAMIC_INTERPRETER;
11927 s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER;
11928 }
11929 }
11930
11931 /* Set up .got offsets for local syms, and space for local dynamic
11932 relocs. */
11933 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
11934 {
11935 bfd_signed_vma *local_got;
11936 bfd_signed_vma *end_local_got;
11937 char *local_tls_type;
11938 bfd_size_type locsymcount;
11939 Elf_Internal_Shdr *symtab_hdr;
11940 asection *srel;
11941 bfd_boolean is_vxworks = htab->vxworks_p;
11942
11943 if (! is_arm_elf (ibfd))
11944 continue;
11945
11946 for (s = ibfd->sections; s != NULL; s = s->next)
11947 {
11948 struct elf32_arm_relocs_copied *p;
11949
11950 for (p = (struct elf32_arm_relocs_copied *)
11951 elf_section_data (s)->local_dynrel; p != NULL; p = p->next)
11952 {
11953 if (!bfd_is_abs_section (p->section)
11954 && bfd_is_abs_section (p->section->output_section))
11955 {
11956 /* Input section has been discarded, either because
11957 it is a copy of a linkonce section or due to
11958 linker script /DISCARD/, so we'll be discarding
11959 the relocs too. */
11960 }
11961 else if (is_vxworks
11962 && strcmp (p->section->output_section->name,
11963 ".tls_vars") == 0)
11964 {
11965 /* Relocations in vxworks .tls_vars sections are
11966 handled specially by the loader. */
11967 }
11968 else if (p->count != 0)
11969 {
11970 srel = elf_section_data (p->section)->sreloc;
11971 srel->size += p->count * RELOC_SIZE (htab);
11972 if ((p->section->output_section->flags & SEC_READONLY) != 0)
11973 info->flags |= DF_TEXTREL;
11974 }
11975 }
11976 }
11977
11978 local_got = elf_local_got_refcounts (ibfd);
11979 if (!local_got)
11980 continue;
11981
11982 symtab_hdr = & elf_symtab_hdr (ibfd);
11983 locsymcount = symtab_hdr->sh_info;
11984 end_local_got = local_got + locsymcount;
11985 local_tls_type = elf32_arm_local_got_tls_type (ibfd);
11986 s = htab->sgot;
11987 srel = htab->srelgot;
11988 for (; local_got < end_local_got; ++local_got, ++local_tls_type)
11989 {
11990 if (*local_got > 0)
11991 {
11992 *local_got = s->size;
11993 if (*local_tls_type & GOT_TLS_GD)
11994 /* TLS_GD relocs need an 8-byte structure in the GOT. */
11995 s->size += 8;
11996 if (*local_tls_type & GOT_TLS_IE)
11997 s->size += 4;
11998 if (*local_tls_type == GOT_NORMAL)
11999 s->size += 4;
12000
12001 if (info->shared || *local_tls_type == GOT_TLS_GD)
12002 srel->size += RELOC_SIZE (htab);
12003 }
12004 else
12005 *local_got = (bfd_vma) -1;
12006 }
12007 }
12008
12009 if (htab->tls_ldm_got.refcount > 0)
12010 {
12011 /* Allocate two GOT entries and one dynamic relocation (if necessary)
12012 for R_ARM_TLS_LDM32 relocations. */
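/* The pair holds the module ID and a zero offset; for a shared object the
   module ID is only known at run time, hence the extra relocation
   (an R_ARM_TLS_DTPMOD32) reserved below.  */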
12013 htab->tls_ldm_got.offset = htab->sgot->size;
12014 htab->sgot->size += 8;
12015 if (info->shared)
12016 htab->srelgot->size += RELOC_SIZE (htab);
12017 }
12018 else
12019 htab->tls_ldm_got.offset = -1;
12020
12021 /* Allocate global sym .plt and .got entries, and space for global
12022 sym dynamic relocs. */
12023 elf_link_hash_traverse (& htab->root, allocate_dynrelocs, info);
12024
12025 /* Here we rummage through the found bfds to collect glue information. */
12026 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
12027 {
12028 if (! is_arm_elf (ibfd))
12029 continue;
12030
12031 /* Initialise mapping tables for code/data. */
12032 bfd_elf32_arm_init_maps (ibfd);
12033
12034 if (!bfd_elf32_arm_process_before_allocation (ibfd, info)
12035 || !bfd_elf32_arm_vfp11_erratum_scan (ibfd, info))
12036 /* xgettext:c-format */
12037 _bfd_error_handler (_("Errors encountered processing file %s"),
12038 ibfd->filename);
12039 }
12040
12041 /* Allocate space for the glue sections now that we've sized them. */
12042 bfd_elf32_arm_allocate_interworking_sections (info);
12043
12044 /* The check_relocs and adjust_dynamic_symbol entry points have
12045 determined the sizes of the various dynamic sections. Allocate
12046 memory for them. */
12047 plt = FALSE;
12048 relocs = FALSE;
12049 for (s = dynobj->sections; s != NULL; s = s->next)
12050 {
12051 const char * name;
12052
12053 if ((s->flags & SEC_LINKER_CREATED) == 0)
12054 continue;
12055
12056 /* It's OK to base decisions on the section name, because none
12057 of the dynobj section names depend upon the input files. */
12058 name = bfd_get_section_name (dynobj, s);
12059
12060 if (strcmp (name, ".plt") == 0)
12061 {
12062 /* Remember whether there is a PLT. */
12063 plt = s->size != 0;
12064 }
12065 else if (CONST_STRNEQ (name, ".rel"))
12066 {
12067 if (s->size != 0)
12068 {
12069 /* Remember whether there are any reloc sections other
12070 than .rel(a).plt and .rela.plt.unloaded. */
12071 if (s != htab->srelplt && s != htab->srelplt2)
12072 relocs = TRUE;
12073
12074 /* We use the reloc_count field as a counter if we need
12075 to copy relocs into the output file. */
12076 s->reloc_count = 0;
12077 }
12078 }
12079 else if (! CONST_STRNEQ (name, ".got")
12080 && strcmp (name, ".dynbss") != 0)
12081 {
12082 /* It's not one of our sections, so don't allocate space. */
12083 continue;
12084 }
12085
12086 if (s->size == 0)
12087 {
12088 /* If we don't need this section, strip it from the
12089 output file. This is mostly to handle .rel(a).bss and
12090 .rel(a).plt. We must create both sections in
12091 create_dynamic_sections, because they must be created
12092 before the linker maps input sections to output
12093 sections. The linker does that before
12094 adjust_dynamic_symbol is called, and it is that
12095 function which decides whether anything needs to go
12096 into these sections. */
12097 s->flags |= SEC_EXCLUDE;
12098 continue;
12099 }
12100
12101 if ((s->flags & SEC_HAS_CONTENTS) == 0)
12102 continue;
12103
12104 /* Allocate memory for the section contents. */
12105 s->contents = (unsigned char *) bfd_zalloc (dynobj, s->size);
12106 if (s->contents == NULL)
12107 return FALSE;
12108 }
12109
12110 if (elf_hash_table (info)->dynamic_sections_created)
12111 {
12112 /* Add some entries to the .dynamic section. We fill in the
12113 values later, in elf32_arm_finish_dynamic_sections, but we
12114 must add the entries now so that we get the correct size for
12115 the .dynamic section. The DT_DEBUG entry is filled in by the
12116 dynamic linker and used by the debugger. */
12117 #define add_dynamic_entry(TAG, VAL) \
12118 _bfd_elf_add_dynamic_entry (info, TAG, VAL)
12119
12120 if (info->executable)
12121 {
12122 if (!add_dynamic_entry (DT_DEBUG, 0))
12123 return FALSE;
12124 }
12125
12126 if (plt)
12127 {
12128 if ( !add_dynamic_entry (DT_PLTGOT, 0)
12129 || !add_dynamic_entry (DT_PLTRELSZ, 0)
12130 || !add_dynamic_entry (DT_PLTREL,
12131 htab->use_rel ? DT_REL : DT_RELA)
12132 || !add_dynamic_entry (DT_JMPREL, 0))
12133 return FALSE;
12134 }
12135
12136 if (relocs)
12137 {
12138 if (htab->use_rel)
12139 {
12140 if (!add_dynamic_entry (DT_REL, 0)
12141 || !add_dynamic_entry (DT_RELSZ, 0)
12142 || !add_dynamic_entry (DT_RELENT, RELOC_SIZE (htab)))
12143 return FALSE;
12144 }
12145 else
12146 {
12147 if (!add_dynamic_entry (DT_RELA, 0)
12148 || !add_dynamic_entry (DT_RELASZ, 0)
12149 || !add_dynamic_entry (DT_RELAENT, RELOC_SIZE (htab)))
12150 return FALSE;
12151 }
12152 }
12153
12154 /* If any dynamic relocs apply to a read-only section,
12155 then we need a DT_TEXTREL entry. */
12156 if ((info->flags & DF_TEXTREL) == 0)
12157 elf_link_hash_traverse (& htab->root, elf32_arm_readonly_dynrelocs,
12158 info);
12159
12160 if ((info->flags & DF_TEXTREL) != 0)
12161 {
12162 if (!add_dynamic_entry (DT_TEXTREL, 0))
12163 return FALSE;
12164 }
12165 if (htab->vxworks_p
12166 && !elf_vxworks_add_dynamic_entries (output_bfd, info))
12167 return FALSE;
12168 }
12169 #undef add_dynamic_entry
12170
12171 return TRUE;
12172 }
12173
12174 /* Finish up dynamic symbol handling. We set the contents of various
12175 dynamic sections here. */
12176
12177 static bfd_boolean
12178 elf32_arm_finish_dynamic_symbol (bfd * output_bfd,
12179 struct bfd_link_info * info,
12180 struct elf_link_hash_entry * h,
12181 Elf_Internal_Sym * sym)
12182 {
12183 bfd * dynobj;
12184 struct elf32_arm_link_hash_table *htab;
12185 struct elf32_arm_link_hash_entry *eh;
12186
12187 dynobj = elf_hash_table (info)->dynobj;
12188 htab = elf32_arm_hash_table (info);
12189 if (htab == NULL)
12190 return FALSE;
12191
12192 eh = (struct elf32_arm_link_hash_entry *) h;
12193
12194 if (h->plt.offset != (bfd_vma) -1)
12195 {
12196 asection * splt;
12197 asection * srel;
12198 bfd_byte *loc;
12199 bfd_vma plt_index;
12200 Elf_Internal_Rela rel;
12201
12202 /* This symbol has an entry in the procedure linkage table. Set
12203 it up. */
12204
12205 BFD_ASSERT (h->dynindx != -1);
12206
12207 splt = bfd_get_section_by_name (dynobj, ".plt");
12208 srel = bfd_get_section_by_name (dynobj, RELOC_SECTION (htab, ".plt"));
12209 BFD_ASSERT (splt != NULL && srel != NULL);
12210
12211 /* Fill in the entry in the procedure linkage table. */
12212 if (htab->symbian_p)
12213 {
12214 put_arm_insn (htab, output_bfd,
12215 elf32_arm_symbian_plt_entry[0],
12216 splt->contents + h->plt.offset);
12217 bfd_put_32 (output_bfd,
12218 elf32_arm_symbian_plt_entry[1],
12219 splt->contents + h->plt.offset + 4);
12220
12221 /* Fill in the entry in the .rel.plt section. */
12222 rel.r_offset = (splt->output_section->vma
12223 + splt->output_offset
12224 + h->plt.offset + 4);
12225 rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_GLOB_DAT);
12226
12227 /* Get the index in the procedure linkage table which
12228 corresponds to this symbol. This is the index of this symbol
12229 in all the symbols for which we are making plt entries. The
12230 first entry in the procedure linkage table is reserved. */
12231 plt_index = ((h->plt.offset - htab->plt_header_size)
12232 / htab->plt_entry_size);
12233 }
12234 else
12235 {
12236 bfd_vma got_offset, got_address, plt_address;
12237 bfd_vma got_displacement;
12238 asection * sgot;
12239 bfd_byte * ptr;
12240
12241 sgot = bfd_get_section_by_name (dynobj, ".got.plt");
12242 BFD_ASSERT (sgot != NULL);
12243
12244 /* Get the offset into the .got.plt table of the entry that
12245 corresponds to this function. */
12246 got_offset = eh->plt_got_offset;
12247
12248 /* Get the index in the procedure linkage table which
12249 corresponds to this symbol. This is the index of this symbol
12250 in all the symbols for which we are making plt entries. The
12251 first three entries in .got.plt are reserved; after that
12252 symbols appear in the same order as in .plt. */
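/* The 12 below skips those three reserved words (3 * 4 bytes).  */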
12253 plt_index = (got_offset - 12) / 4;
12254
12255 /* Calculate the address of the GOT entry. */
12256 got_address = (sgot->output_section->vma
12257 + sgot->output_offset
12258 + got_offset);
12259
12260 /* ...and the address of the PLT entry. */
12261 plt_address = (splt->output_section->vma
12262 + splt->output_offset
12263 + h->plt.offset);
12264
12265 ptr = htab->splt->contents + h->plt.offset;
12266 if (htab->vxworks_p && info->shared)
12267 {
12268 unsigned int i;
12269 bfd_vma val;
12270
12271 for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
12272 {
12273 val = elf32_arm_vxworks_shared_plt_entry[i];
12274 if (i == 2)
12275 val |= got_address - sgot->output_section->vma;
12276 if (i == 5)
12277 val |= plt_index * RELOC_SIZE (htab);
12278 if (i == 2 || i == 5)
12279 bfd_put_32 (output_bfd, val, ptr);
12280 else
12281 put_arm_insn (htab, output_bfd, val, ptr);
12282 }
12283 }
12284 else if (htab->vxworks_p)
12285 {
12286 unsigned int i;
12287 bfd_vma val;
12288
12289 for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
12290 {
12291 val = elf32_arm_vxworks_exec_plt_entry[i];
12292 if (i == 2)
12293 val |= got_address;
12294 if (i == 4)
12295 val |= 0xffffff & -((h->plt.offset + i * 4 + 8) >> 2);
12296 if (i == 5)
12297 val |= plt_index * RELOC_SIZE (htab);
12298 if (i == 2 || i == 5)
12299 bfd_put_32 (output_bfd, val, ptr);
12300 else
12301 put_arm_insn (htab, output_bfd, val, ptr);
12302 }
12303
12304 loc = (htab->srelplt2->contents
12305 + (plt_index * 2 + 1) * RELOC_SIZE (htab));
12306
12307 /* Create the .rela.plt.unloaded R_ARM_ABS32 relocation
12308 referencing the GOT for this PLT entry. */
12309 rel.r_offset = plt_address + 8;
12310 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
12311 rel.r_addend = got_offset;
12312 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
12313 loc += RELOC_SIZE (htab);
12314
12315 /* Create the R_ARM_ABS32 relocation referencing the
12316 beginning of the PLT for this GOT entry. */
12317 rel.r_offset = got_address;
12318 rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
12319 rel.r_addend = 0;
12320 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
12321 }
12322 else
12323 {
12324 bfd_signed_vma thumb_refs;
12325 /* Calculate the displacement between the PLT slot and the
12326 entry in the GOT. The eight-byte offset accounts for the
12327 value produced by adding to pc in the first instruction
12328 of the PLT stub. */
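/* Sketch of the default three-word PLT entry (see elf32_arm_plt_entry;
   the FOUR_WORD_PLT variant differs slightly):
       add ip, pc, #0xNN00000   @ pc reads as entry address + 8
       add ip, ip, #0xNN000
       ldr pc, [ip, #0xNNN]!
   The three immediates patched in below carve got_displacement into
   those fields.  */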
12329 got_displacement = got_address - (plt_address + 8);
12330
12331 BFD_ASSERT ((got_displacement & 0xf0000000) == 0);
12332
12333 thumb_refs = eh->plt_thumb_refcount;
12334 if (!htab->use_blx)
12335 thumb_refs += eh->plt_maybe_thumb_refcount;
12336
12337 if (thumb_refs > 0)
12338 {
12339 put_thumb_insn (htab, output_bfd,
12340 elf32_arm_plt_thumb_stub[0], ptr - 4);
12341 put_thumb_insn (htab, output_bfd,
12342 elf32_arm_plt_thumb_stub[1], ptr - 2);
12343 }
12344
12345 put_arm_insn (htab, output_bfd,
12346 elf32_arm_plt_entry[0]
12347 | ((got_displacement & 0x0ff00000) >> 20),
12348 ptr + 0);
12349 put_arm_insn (htab, output_bfd,
12350 elf32_arm_plt_entry[1]
12351 | ((got_displacement & 0x000ff000) >> 12),
12352 ptr + 4);
12353 put_arm_insn (htab, output_bfd,
12354 elf32_arm_plt_entry[2]
12355 | (got_displacement & 0x00000fff),
12356 ptr + 8);
12357 #ifdef FOUR_WORD_PLT
12358 bfd_put_32 (output_bfd, elf32_arm_plt_entry[3], ptr + 12);
12359 #endif
12360 }
12361
12362 /* Fill in the entry in the global offset table. */
12363 bfd_put_32 (output_bfd,
12364 (splt->output_section->vma
12365 + splt->output_offset),
12366 sgot->contents + got_offset);
12367
12368 /* Fill in the entry in the .rel(a).plt section. */
12369 rel.r_addend = 0;
12370 rel.r_offset = got_address;
12371 rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_JUMP_SLOT);
12372 }
12373
12374 loc = srel->contents + plt_index * RELOC_SIZE (htab);
12375 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
12376
12377 if (!h->def_regular)
12378 {
12379 /* Mark the symbol as undefined, rather than as defined in
12380 the .plt section. Leave the value alone. */
12381 sym->st_shndx = SHN_UNDEF;
12382 /* If the symbol is weak, we do need to clear the value.
12383 Otherwise, the PLT entry would provide a definition for
12384 the symbol even if the symbol wasn't defined anywhere,
12385 and so the symbol's address would never be NULL. */
12386 if (!h->ref_regular_nonweak)
12387 sym->st_value = 0;
12388 }
12389 }
12390
12391 if (h->got.offset != (bfd_vma) -1
12392 && (elf32_arm_hash_entry (h)->tls_type & GOT_TLS_GD) == 0
12393 && (elf32_arm_hash_entry (h)->tls_type & GOT_TLS_IE) == 0)
12394 {
12395 asection * sgot;
12396 asection * srel;
12397 Elf_Internal_Rela rel;
12398 bfd_byte *loc;
12399 bfd_vma offset;
12400
12401 /* This symbol has an entry in the global offset table. Set it
12402 up. */
12403 sgot = bfd_get_section_by_name (dynobj, ".got");
12404 srel = bfd_get_section_by_name (dynobj, RELOC_SECTION (htab, ".got"));
12405 BFD_ASSERT (sgot != NULL && srel != NULL);
12406
12407 offset = (h->got.offset & ~(bfd_vma) 1);
12408 rel.r_addend = 0;
12409 rel.r_offset = (sgot->output_section->vma
12410 + sgot->output_offset
12411 + offset);
12412
12413 /* If this is a static link, or it is a -Bsymbolic link and the
12414 symbol is defined locally or was forced to be local because
12415 of a version file, we just want to emit a RELATIVE reloc.
12416 The entry in the global offset table will already have been
12417 initialized in the relocate_section function. */
12418 if (info->shared
12419 && SYMBOL_REFERENCES_LOCAL (info, h))
12420 {
12421 BFD_ASSERT ((h->got.offset & 1) != 0);
12422 rel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
12423 if (!htab->use_rel)
12424 {
12425 rel.r_addend = bfd_get_32 (output_bfd, sgot->contents + offset);
12426 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + offset);
12427 }
12428 }
12429 else
12430 {
12431 BFD_ASSERT ((h->got.offset & 1) == 0);
12432 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + offset);
12433 rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_GLOB_DAT);
12434 }
12435
12436 loc = srel->contents + srel->reloc_count++ * RELOC_SIZE (htab);
12437 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
12438 }
12439
12440 if (h->needs_copy)
12441 {
12442 asection * s;
12443 Elf_Internal_Rela rel;
12444 bfd_byte *loc;
12445
12446 /* This symbol needs a copy reloc. Set it up. */
12447 BFD_ASSERT (h->dynindx != -1
12448 && (h->root.type == bfd_link_hash_defined
12449 || h->root.type == bfd_link_hash_defweak));
12450
12451 s = bfd_get_section_by_name (h->root.u.def.section->owner,
12452 RELOC_SECTION (htab, ".bss"));
12453 BFD_ASSERT (s != NULL);
12454
12455 rel.r_addend = 0;
12456 rel.r_offset = (h->root.u.def.value
12457 + h->root.u.def.section->output_section->vma
12458 + h->root.u.def.section->output_offset);
12459 rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_COPY);
12460 loc = s->contents + s->reloc_count++ * RELOC_SIZE (htab);
12461 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
12462 }
12463
12464 /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute. On VxWorks,
12465 the _GLOBAL_OFFSET_TABLE_ symbol is not absolute: it is relative
12466 to the ".got" section. */
12467 if (strcmp (h->root.root.string, "_DYNAMIC") == 0
12468 || (!htab->vxworks_p && h == htab->root.hgot))
12469 sym->st_shndx = SHN_ABS;
12470
12471 return TRUE;
12472 }
12473
12474 /* Finish up the dynamic sections. */
12475
12476 static bfd_boolean
12477 elf32_arm_finish_dynamic_sections (bfd * output_bfd, struct bfd_link_info * info)
12478 {
12479 bfd * dynobj;
12480 asection * sgot;
12481 asection * sdyn;
12482 struct elf32_arm_link_hash_table *htab;
12483
12484 htab = elf32_arm_hash_table (info);
12485 if (htab == NULL)
12486 return FALSE;
12487
12488 dynobj = elf_hash_table (info)->dynobj;
12489
12490 sgot = bfd_get_section_by_name (dynobj, ".got.plt");
12491 BFD_ASSERT (htab->symbian_p || sgot != NULL);
12492 sdyn = bfd_get_section_by_name (dynobj, ".dynamic");
12493
12494 if (elf_hash_table (info)->dynamic_sections_created)
12495 {
12496 asection *splt;
12497 Elf32_External_Dyn *dyncon, *dynconend;
12498
12499 splt = bfd_get_section_by_name (dynobj, ".plt");
12500 BFD_ASSERT (splt != NULL && sdyn != NULL);
12501
12502 dyncon = (Elf32_External_Dyn *) sdyn->contents;
12503 dynconend = (Elf32_External_Dyn *) (sdyn->contents + sdyn->size);
12504
12505 for (; dyncon < dynconend; dyncon++)
12506 {
12507 Elf_Internal_Dyn dyn;
12508 const char * name;
12509 asection * s;
12510
12511 bfd_elf32_swap_dyn_in (dynobj, dyncon, &dyn);
12512
12513 switch (dyn.d_tag)
12514 {
12515 unsigned int type;
12516
12517 default:
12518 if (htab->vxworks_p
12519 && elf_vxworks_finish_dynamic_entry (output_bfd, &dyn))
12520 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12521 break;
12522
12523 case DT_HASH:
12524 name = ".hash";
12525 goto get_vma_if_bpabi;
12526 case DT_STRTAB:
12527 name = ".dynstr";
12528 goto get_vma_if_bpabi;
12529 case DT_SYMTAB:
12530 name = ".dynsym";
12531 goto get_vma_if_bpabi;
12532 case DT_VERSYM:
12533 name = ".gnu.version";
12534 goto get_vma_if_bpabi;
12535 case DT_VERDEF:
12536 name = ".gnu.version_d";
12537 goto get_vma_if_bpabi;
12538 case DT_VERNEED:
12539 name = ".gnu.version_r";
12540 goto get_vma_if_bpabi;
12541
12542 case DT_PLTGOT:
12543 name = ".got";
12544 goto get_vma;
12545 case DT_JMPREL:
12546 name = RELOC_SECTION (htab, ".plt");
12547 get_vma:
12548 s = bfd_get_section_by_name (output_bfd, name);
12549 BFD_ASSERT (s != NULL);
12550 if (!htab->symbian_p)
12551 dyn.d_un.d_ptr = s->vma;
12552 else
12553 /* In the BPABI, tags in the PT_DYNAMIC section point
12554 at the file offset, not the memory address, for the
12555 convenience of the post linker. */
12556 dyn.d_un.d_ptr = s->filepos;
12557 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12558 break;
12559
12560 get_vma_if_bpabi:
12561 if (htab->symbian_p)
12562 goto get_vma;
12563 break;
12564
12565 case DT_PLTRELSZ:
12566 s = bfd_get_section_by_name (output_bfd,
12567 RELOC_SECTION (htab, ".plt"));
12568 BFD_ASSERT (s != NULL);
12569 dyn.d_un.d_val = s->size;
12570 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12571 break;
12572
12573 case DT_RELSZ:
12574 case DT_RELASZ:
12575 if (!htab->symbian_p)
12576 {
12577 /* My reading of the SVR4 ABI indicates that the
12578 procedure linkage table relocs (DT_JMPREL) should be
12579 included in the overall relocs (DT_REL). This is
12580 what Solaris does. However, UnixWare can not handle
12581 that case. Therefore, we override the DT_RELSZ entry
12582 here to make it not include the JMPREL relocs. Since
12583 the linker script arranges for .rel(a).plt to follow all
12584 other relocation sections, we don't have to worry
12585 about changing the DT_REL entry. */
12586 s = bfd_get_section_by_name (output_bfd,
12587 RELOC_SECTION (htab, ".plt"));
12588 if (s != NULL)
12589 dyn.d_un.d_val -= s->size;
12590 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12591 break;
12592 }
12593 /* Fall through. */
12594
12595 case DT_REL:
12596 case DT_RELA:
12597 /* In the BPABI, the DT_REL tag must point at the file
12598 offset, not the VMA, of the first relocation
12599 section. So, we use code similar to that in
12600 elflink.c, but do not check for SHF_ALLOC on the
12601 relocation section, since relocation sections are
12602 never allocated under the BPABI. The comments above
12603 about UnixWare notwithstanding, we include all of the
12604 relocations here. */
12605 if (htab->symbian_p)
12606 {
12607 unsigned int i;
12608 type = ((dyn.d_tag == DT_REL || dyn.d_tag == DT_RELSZ)
12609 ? SHT_REL : SHT_RELA);
12610 dyn.d_un.d_val = 0;
12611 for (i = 1; i < elf_numsections (output_bfd); i++)
12612 {
12613 Elf_Internal_Shdr *hdr
12614 = elf_elfsections (output_bfd)[i];
12615 if (hdr->sh_type == type)
12616 {
12617 if (dyn.d_tag == DT_RELSZ
12618 || dyn.d_tag == DT_RELASZ)
12619 dyn.d_un.d_val += hdr->sh_size;
12620 else if ((ufile_ptr) hdr->sh_offset
12621 <= dyn.d_un.d_val - 1)
12622 dyn.d_un.d_val = hdr->sh_offset;
12623 }
12624 }
12625 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12626 }
12627 break;
12628
12629 /* Set the bottom bit of DT_INIT/FINI if the
12630 corresponding function is Thumb. */
12631 case DT_INIT:
12632 name = info->init_function;
12633 goto get_sym;
12634 case DT_FINI:
12635 name = info->fini_function;
12636 get_sym:
12637 /* If it wasn't set by elf_bfd_final_link
12638 then there is nothing to adjust. */
12639 if (dyn.d_un.d_val != 0)
12640 {
12641 struct elf_link_hash_entry * eh;
12642
12643 eh = elf_link_hash_lookup (elf_hash_table (info), name,
12644 FALSE, FALSE, TRUE);
12645 if (eh != NULL
12646 && ELF_ST_TYPE (eh->type) == STT_ARM_TFUNC)
12647 {
12648 dyn.d_un.d_val |= 1;
12649 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12650 }
12651 }
12652 break;
12653 }
12654 }
12655
12656 /* Fill in the first entry in the procedure linkage table. */
12657 if (splt->size > 0 && htab->plt_header_size)
12658 {
12659 const bfd_vma *plt0_entry;
12660 bfd_vma got_address, plt_address, got_displacement;
12661
12662 /* Calculate the addresses of the GOT and PLT. */
12663 got_address = sgot->output_section->vma + sgot->output_offset;
12664 plt_address = splt->output_section->vma + splt->output_offset;
12665
12666 if (htab->vxworks_p)
12667 {
12668 /* The VxWorks GOT is relocated by the dynamic linker.
12669 Therefore, we must emit relocations rather than simply
12670 computing the values now. */
12671 Elf_Internal_Rela rel;
12672
12673 plt0_entry = elf32_arm_vxworks_exec_plt0_entry;
12674 put_arm_insn (htab, output_bfd, plt0_entry[0],
12675 splt->contents + 0);
12676 put_arm_insn (htab, output_bfd, plt0_entry[1],
12677 splt->contents + 4);
12678 put_arm_insn (htab, output_bfd, plt0_entry[2],
12679 splt->contents + 8);
12680 bfd_put_32 (output_bfd, got_address, splt->contents + 12);
12681
12682 /* Generate a relocation for _GLOBAL_OFFSET_TABLE_. */
12683 rel.r_offset = plt_address + 12;
12684 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
12685 rel.r_addend = 0;
12686 SWAP_RELOC_OUT (htab) (output_bfd, &rel,
12687 htab->srelplt2->contents);
12688 }
12689 else
12690 {
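/* In the standard PLT header (elf32_arm_plt0_entry) the `add lr, pc, lr'
   at offset 8 reads pc as plt_address + 16, so adding the displacement
   word written below leaves the GOT address in lr.  */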
12691 got_displacement = got_address - (plt_address + 16);
12692
12693 plt0_entry = elf32_arm_plt0_entry;
12694 put_arm_insn (htab, output_bfd, plt0_entry[0],
12695 splt->contents + 0);
12696 put_arm_insn (htab, output_bfd, plt0_entry[1],
12697 splt->contents + 4);
12698 put_arm_insn (htab, output_bfd, plt0_entry[2],
12699 splt->contents + 8);
12700 put_arm_insn (htab, output_bfd, plt0_entry[3],
12701 splt->contents + 12);
12702
12703 #ifdef FOUR_WORD_PLT
12704 /* The displacement value goes in the otherwise-unused
12705 last word of the second entry. */
12706 bfd_put_32 (output_bfd, got_displacement, splt->contents + 28);
12707 #else
12708 bfd_put_32 (output_bfd, got_displacement, splt->contents + 16);
12709 #endif
12710 }
12711 }
12712
12713 /* UnixWare sets the entsize of .plt to 4, although that doesn't
12714 really seem like the right value. */
12715 if (splt->output_section->owner == output_bfd)
12716 elf_section_data (splt->output_section)->this_hdr.sh_entsize = 4;
12717
12718 if (htab->vxworks_p && !info->shared && htab->splt->size > 0)
12719 {
12720 /* Correct the .rel(a).plt.unloaded relocations. They will have
12721 incorrect symbol indexes. */
12722 int num_plts;
12723 unsigned char *p;
12724
12725 num_plts = ((htab->splt->size - htab->plt_header_size)
12726 / htab->plt_entry_size);
12727 p = htab->srelplt2->contents + RELOC_SIZE (htab);
12728
12729 for (; num_plts; num_plts--)
12730 {
12731 Elf_Internal_Rela rel;
12732
12733 SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
12734 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
12735 SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
12736 p += RELOC_SIZE (htab);
12737
12738 SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
12739 rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
12740 SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
12741 p += RELOC_SIZE (htab);
12742 }
12743 }
12744 }
12745
12746 /* Fill in the first three entries in the global offset table. */
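/* Conventionally GOT[0] holds the address of _DYNAMIC, while GOT[1] and
   GOT[2] are left as zero here for the dynamic linker to fill in at run
   time (typically with its link map and lazy-resolver entry point).  */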
12747 if (sgot)
12748 {
12749 if (sgot->size > 0)
12750 {
12751 if (sdyn == NULL)
12752 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents);
12753 else
12754 bfd_put_32 (output_bfd,
12755 sdyn->output_section->vma + sdyn->output_offset,
12756 sgot->contents);
12757 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 4);
12758 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 8);
12759 }
12760
12761 elf_section_data (sgot->output_section)->this_hdr.sh_entsize = 4;
12762 }
12763
12764 return TRUE;
12765 }
12766
12767 static void
12768 elf32_arm_post_process_headers (bfd * abfd, struct bfd_link_info * link_info)
12769 {
12770 Elf_Internal_Ehdr * i_ehdrp; /* ELF file header, internal form. */
12771 struct elf32_arm_link_hash_table *globals;
12772
12773 i_ehdrp = elf_elfheader (abfd);
12774
12775 if (EF_ARM_EABI_VERSION (i_ehdrp->e_flags) == EF_ARM_EABI_UNKNOWN)
12776 i_ehdrp->e_ident[EI_OSABI] = ELFOSABI_ARM;
12777 else
12778 i_ehdrp->e_ident[EI_OSABI] = 0;
12779 i_ehdrp->e_ident[EI_ABIVERSION] = ARM_ELF_ABI_VERSION;
12780
12781 if (link_info)
12782 {
12783 globals = elf32_arm_hash_table (link_info);
12784 if (globals != NULL && globals->byteswap_code)
12785 i_ehdrp->e_flags |= EF_ARM_BE8;
12786 }
12787 }
12788
12789 static enum elf_reloc_type_class
12790 elf32_arm_reloc_type_class (const Elf_Internal_Rela *rela)
12791 {
12792 switch ((int) ELF32_R_TYPE (rela->r_info))
12793 {
12794 case R_ARM_RELATIVE:
12795 return reloc_class_relative;
12796 case R_ARM_JUMP_SLOT:
12797 return reloc_class_plt;
12798 case R_ARM_COPY:
12799 return reloc_class_copy;
12800 default:
12801 return reloc_class_normal;
12802 }
12803 }
12804
12805 /* Set extra BFD flags for an ARM ELF section: note sections are marked
12806 link-once so that duplicate copies with identical contents are merged. */
12806
12807 static bfd_boolean
12808 elf32_arm_section_flags (flagword *flags, const Elf_Internal_Shdr *hdr)
12809 {
12810 if (hdr->sh_type == SHT_NOTE)
12811 *flags |= SEC_LINK_ONCE | SEC_LINK_DUPLICATES_SAME_CONTENTS;
12812
12813 return TRUE;
12814 }
12815
12816 static void
12817 elf32_arm_final_write_processing (bfd *abfd, bfd_boolean linker ATTRIBUTE_UNUSED)
12818 {
12819 bfd_arm_update_notes (abfd, ARM_NOTE_SECTION);
12820 }
12821
12822 /* Return TRUE if this is an unwinding table entry. */
12823
12824 static bfd_boolean
12825 is_arm_elf_unwind_section_name (bfd * abfd ATTRIBUTE_UNUSED, const char * name)
12826 {
12827 return (CONST_STRNEQ (name, ELF_STRING_ARM_unwind)
12828 || CONST_STRNEQ (name, ELF_STRING_ARM_unwind_once));
12829 }
12830
12831
12832 /* Set the type and flags for an ARM section. We do this by
12833 the section name, which is a hack, but ought to work. */
12834
12835 static bfd_boolean
12836 elf32_arm_fake_sections (bfd * abfd, Elf_Internal_Shdr * hdr, asection * sec)
12837 {
12838 const char * name;
12839
12840 name = bfd_get_section_name (abfd, sec);
12841
12842 if (is_arm_elf_unwind_section_name (abfd, name))
12843 {
12844 hdr->sh_type = SHT_ARM_EXIDX;
12845 hdr->sh_flags |= SHF_LINK_ORDER;
12846 }
12847 return TRUE;
12848 }
12849
12850 /* Handle an ARM specific section when reading an object file. This is
12851 called when bfd_section_from_shdr finds a section with an unknown
12852 type. */
12853
12854 static bfd_boolean
12855 elf32_arm_section_from_shdr (bfd *abfd,
12856 Elf_Internal_Shdr * hdr,
12857 const char *name,
12858 int shindex)
12859 {
12860 /* There ought to be a place to keep ELF backend specific flags, but
12861 at the moment there isn't one. We just keep track of the
12862 sections by their name, instead. Fortunately, the ABI gives
12863 names for all the ARM specific sections, so we will probably get
12864 away with this. */
12865 switch (hdr->sh_type)
12866 {
12867 case SHT_ARM_EXIDX:
12868 case SHT_ARM_PREEMPTMAP:
12869 case SHT_ARM_ATTRIBUTES:
12870 break;
12871
12872 default:
12873 return FALSE;
12874 }
12875
12876 if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
12877 return FALSE;
12878
12879 return TRUE;
12880 }
12881
12882 static _arm_elf_section_data *
12883 get_arm_elf_section_data (asection * sec)
12884 {
12885 if (sec && sec->owner && is_arm_elf (sec->owner))
12886 return elf32_arm_section_data (sec);
12887 else
12888 return NULL;
12889 }
12890
12891 typedef struct
12892 {
12893 void *finfo;
12894 struct bfd_link_info *info;
12895 asection *sec;
12896 int sec_shndx;
12897 int (*func) (void *, const char *, Elf_Internal_Sym *,
12898 asection *, struct elf_link_hash_entry *);
12899 } output_arch_syminfo;
12900
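/* Mapping symbols, defined by the ARM ELF specification, mark where a
   section switches between ARM code ($a), Thumb code ($t) and literal
   data ($d); disassemblers and the linker itself rely on them.  */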
12901 enum map_symbol_type
12902 {
12903 ARM_MAP_ARM,
12904 ARM_MAP_THUMB,
12905 ARM_MAP_DATA
12906 };
12907
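/* The values above correspond to the ARM ELF mapping symbols $a (start
   of a region of ARM code), $t (start of Thumb code) and $d (start of
   literal data).  Disassemblers, and the BE8 byteswapping code in
   elf32_arm_write_section below, use them to tell code from data.
   Each mapping symbol emitted below is a local, zero-sized STT_NOTYPE
   symbol whose value is the start of its region.  */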
12908
12909 /* Output a single mapping symbol. */
12910
12911 static bfd_boolean
12912 elf32_arm_output_map_sym (output_arch_syminfo *osi,
12913 enum map_symbol_type type,
12914 bfd_vma offset)
12915 {
12916 static const char *names[3] = {"$a", "$t", "$d"};
12917 Elf_Internal_Sym sym;
12918
12919 sym.st_value = osi->sec->output_section->vma
12920 + osi->sec->output_offset
12921 + offset;
12922 sym.st_size = 0;
12923 sym.st_other = 0;
12924 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
12925 sym.st_shndx = osi->sec_shndx;
12926 elf32_arm_section_map_add (osi->sec, names[type][1], offset);
12927 return osi->func (osi->finfo, names[type], &sym, osi->sec, NULL) == 1;
12928 }
12929
12930
12931 /* Output mapping symbols for PLT entries associated with H. */
12932
12933 static bfd_boolean
12934 elf32_arm_output_plt_map (struct elf_link_hash_entry *h, void *inf)
12935 {
12936 output_arch_syminfo *osi = (output_arch_syminfo *) inf;
12937 struct elf32_arm_link_hash_table *htab;
12938 struct elf32_arm_link_hash_entry *eh;
12939 bfd_vma addr;
12940
12941 if (h->root.type == bfd_link_hash_indirect)
12942 return TRUE;
12943
12944 if (h->root.type == bfd_link_hash_warning)
12945 /* When warning symbols are created, they **replace** the "real"
12946 entry in the hash table, thus we never get to see the real
12947 symbol in a hash traversal. So look at it now. */
12948 h = (struct elf_link_hash_entry *) h->root.u.i.link;
12949
12950 if (h->plt.offset == (bfd_vma) -1)
12951 return TRUE;
12952
12953 htab = elf32_arm_hash_table (osi->info);
12954 if (htab == NULL)
12955 return FALSE;
12956
12957 eh = (struct elf32_arm_link_hash_entry *) h;
12958 addr = h->plt.offset;
12959 if (htab->symbian_p)
12960 {
12961 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
12962 return FALSE;
12963 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 4))
12964 return FALSE;
12965 }
12966 else if (htab->vxworks_p)
12967 {
12968 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
12969 return FALSE;
12970 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 8))
12971 return FALSE;
12972 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr + 12))
12973 return FALSE;
12974 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 20))
12975 return FALSE;
12976 }
12977 else
12978 {
12979 bfd_signed_vma thumb_refs;
12980
12981 thumb_refs = eh->plt_thumb_refcount;
12982 if (!htab->use_blx)
12983 thumb_refs += eh->plt_maybe_thumb_refcount;
12984
12985 if (thumb_refs > 0)
12986 {
12987 if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr - 4))
12988 return FALSE;
12989 }
12990 #ifdef FOUR_WORD_PLT
12991 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
12992 return FALSE;
12993 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 12))
12994 return FALSE;
12995 #else
12996 /* A three-word PLT with no Thumb thunk contains only ARM code, so
12997 we only need to output a mapping symbol for the first PLT entry
12998 and for entries with Thumb thunks. */
12999 if (thumb_refs > 0 || addr == 20)
13000 {
13001 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
13002 return FALSE;
13003 }
13004 #endif
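/* In outline (see the PLT templates defined earlier in this file), the
   default ARM PLT entry is three ARM instructions, roughly
   add ip, pc, #...; add ip, ip, #...; ldr pc, [ip, #...]!, and it may
   be preceded by a four-byte Thumb thunk (bx pc; nop) when BLX is
   unavailable -- hence the $t symbol at ADDR - 4 and the $a symbol at
   ADDR above.  */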
13005 }
13006
13007 return TRUE;
13008 }
13009
13010 /* Output a single local symbol for a generated stub. */
13011
13012 static bfd_boolean
13013 elf32_arm_output_stub_sym (output_arch_syminfo *osi, const char *name,
13014 bfd_vma offset, bfd_vma size)
13015 {
13016 Elf_Internal_Sym sym;
13017
13018 sym.st_value = osi->sec->output_section->vma
13019 + osi->sec->output_offset
13020 + offset;
13021 sym.st_size = size;
13022 sym.st_other = 0;
13023 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
13024 sym.st_shndx = osi->sec_shndx;
13025 return osi->func (osi->finfo, name, &sym, osi->sec, NULL) == 1;
13026 }
13027
13028 static bfd_boolean
13029 arm_map_one_stub (struct bfd_hash_entry * gen_entry,
13030 void * in_arg)
13031 {
13032 struct elf32_arm_stub_hash_entry *stub_entry;
13033 asection *stub_sec;
13034 bfd_vma addr;
13035 char *stub_name;
13036 output_arch_syminfo *osi;
13037 const insn_sequence *template_sequence;
13038 enum stub_insn_type prev_type;
13039 int size;
13040 int i;
13041 enum map_symbol_type sym_type;
13042
13043 /* Massage our args to the form they really have. */
13044 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
13045 osi = (output_arch_syminfo *) in_arg;
13046
13047 stub_sec = stub_entry->stub_sec;
13048
13049 /* Ensure this stub is attached to the current section being
13050 processed. */
13051 if (stub_sec != osi->sec)
13052 return TRUE;
13053
13054 addr = (bfd_vma) stub_entry->stub_offset;
13055 stub_name = stub_entry->output_name;
13056
13057 template_sequence = stub_entry->stub_template;
13058 switch (template_sequence[0].type)
13059 {
13060 case ARM_TYPE:
13061 if (!elf32_arm_output_stub_sym (osi, stub_name, addr, stub_entry->stub_size))
13062 return FALSE;
13063 break;
13064 case THUMB16_TYPE:
13065 case THUMB32_TYPE:
13066 if (!elf32_arm_output_stub_sym (osi, stub_name, addr | 1,
13067 stub_entry->stub_size))
13068 return FALSE;
13069 break;
13070 default:
13071 BFD_FAIL ();
13072 return FALSE;
13073 }
13074
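/* Walk the stub template, emitting a new mapping symbol each time the
   stream switches between ARM code, Thumb code and literal data.  SIZE
   accumulates the byte offset of each template element: 4 bytes for
   ARM, Thumb32 and data words, 2 bytes for Thumb16 instructions.  */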
13075 prev_type = DATA_TYPE;
13076 size = 0;
13077 for (i = 0; i < stub_entry->stub_template_size; i++)
13078 {
13079 switch (template_sequence[i].type)
13080 {
13081 case ARM_TYPE:
13082 sym_type = ARM_MAP_ARM;
13083 break;
13084
13085 case THUMB16_TYPE:
13086 case THUMB32_TYPE:
13087 sym_type = ARM_MAP_THUMB;
13088 break;
13089
13090 case DATA_TYPE:
13091 sym_type = ARM_MAP_DATA;
13092 break;
13093
13094 default:
13095 BFD_FAIL ();
13096 return FALSE;
13097 }
13098
13099 if (template_sequence[i].type != prev_type)
13100 {
13101 prev_type = template_sequence[i].type;
13102 if (!elf32_arm_output_map_sym (osi, sym_type, addr + size))
13103 return FALSE;
13104 }
13105
13106 switch (template_sequence[i].type)
13107 {
13108 case ARM_TYPE:
13109 case THUMB32_TYPE:
13110 size += 4;
13111 break;
13112
13113 case THUMB16_TYPE:
13114 size += 2;
13115 break;
13116
13117 case DATA_TYPE:
13118 size += 4;
13119 break;
13120
13121 default:
13122 BFD_FAIL ();
13123 return FALSE;
13124 }
13125 }
13126
13127 return TRUE;
13128 }
13129
13130 /* Output mapping symbols for linker-generated sections and for
13131 those data-only sections that do not have a $d mapping symbol. */
13133
13134 static bfd_boolean
13135 elf32_arm_output_arch_local_syms (bfd *output_bfd,
13136 struct bfd_link_info *info,
13137 void *finfo,
13138 int (*func) (void *, const char *,
13139 Elf_Internal_Sym *,
13140 asection *,
13141 struct elf_link_hash_entry *))
13142 {
13143 output_arch_syminfo osi;
13144 struct elf32_arm_link_hash_table *htab;
13145 bfd_vma offset;
13146 bfd_size_type size;
13147 bfd *input_bfd;
13148
13149 htab = elf32_arm_hash_table (info);
13150 if (htab == NULL)
13151 return FALSE;
13152
13153 check_use_blx (htab);
13154
13155 osi.finfo = finfo;
13156 osi.info = info;
13157 osi.func = func;
13158
13159 /* Add a $d mapping symbol to data-only sections that
13160 don't have any mapping symbol. This may result in (harmless) redundant
13161 mapping symbols. */
13162 for (input_bfd = info->input_bfds;
13163 input_bfd != NULL;
13164 input_bfd = input_bfd->link_next)
13165 {
13166 if ((input_bfd->flags & (BFD_LINKER_CREATED | HAS_SYMS)) == HAS_SYMS)
13167 for (osi.sec = input_bfd->sections;
13168 osi.sec != NULL;
13169 osi.sec = osi.sec->next)
13170 {
13171 if (osi.sec->output_section != NULL
13172 && ((osi.sec->output_section->flags & (SEC_ALLOC | SEC_CODE))
13173 != 0)
13174 && (osi.sec->flags & (SEC_HAS_CONTENTS | SEC_LINKER_CREATED))
13175 == SEC_HAS_CONTENTS
13176 && get_arm_elf_section_data (osi.sec) != NULL
13177 && get_arm_elf_section_data (osi.sec)->mapcount == 0
13178 && osi.sec->size > 0)
13179 {
13180 osi.sec_shndx = _bfd_elf_section_from_bfd_section
13181 (output_bfd, osi.sec->output_section);
13182 if (osi.sec_shndx != (int)SHN_BAD)
13183 elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 0);
13184 }
13185 }
13186 }
13187
13188 /* ARM->Thumb glue. */
13189 if (htab->arm_glue_size > 0)
13190 {
13191 osi.sec = bfd_get_section_by_name (htab->bfd_of_glue_owner,
13192 ARM2THUMB_GLUE_SECTION_NAME);
13193
13194 osi.sec_shndx = _bfd_elf_section_from_bfd_section
13195 (output_bfd, osi.sec->output_section);
13196 if (info->shared || htab->root.is_relocatable_executable
13197 || htab->pic_veneer)
13198 size = ARM2THUMB_PIC_GLUE_SIZE;
13199 else if (htab->use_blx)
13200 size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
13201 else
13202 size = ARM2THUMB_STATIC_GLUE_SIZE;
13203
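/* Each ARM->Thumb veneer defined earlier in this file is ARM code that
   ends with a literal word holding the destination address, so mark
   the start of each veneer with $a and its final word with $d.  */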
13204 for (offset = 0; offset < htab->arm_glue_size; offset += size)
13205 {
13206 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset);
13207 elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, offset + size - 4);
13208 }
13209 }
13210
13211 /* Thumb->ARM glue. */
13212 if (htab->thumb_glue_size > 0)
13213 {
13214 osi.sec = bfd_get_section_by_name (htab->bfd_of_glue_owner,
13215 THUMB2ARM_GLUE_SECTION_NAME);
13216
13217 osi.sec_shndx = _bfd_elf_section_from_bfd_section
13218 (output_bfd, osi.sec->output_section);
13219 size = THUMB2ARM_GLUE_SIZE;
13220
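/* Each Thumb->ARM veneer starts with a short Thumb sequence (roughly
   bx pc; nop) followed at offset 4 by an ARM branch to the real
   destination, hence the $t at offset 0 and the $a at offset 4.  */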
13221 for (offset = 0; offset < htab->thumb_glue_size; offset += size)
13222 {
13223 elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, offset);
13224 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset + 4);
13225 }
13226 }
13227
13228 /* ARMv4 BX veneers. */
13229 if (htab->bx_glue_size > 0)
13230 {
13231 osi.sec = bfd_get_section_by_name (htab->bfd_of_glue_owner,
13232 ARM_BX_GLUE_SECTION_NAME);
13233
13234 osi.sec_shndx = _bfd_elf_section_from_bfd_section
13235 (output_bfd, osi.sec->output_section);
13236
13237 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0);
13238 }
13239
13240 /* Long call stubs. */
13241 if (htab->stub_bfd && htab->stub_bfd->sections)
13242 {
13243 asection* stub_sec;
13244
13245 for (stub_sec = htab->stub_bfd->sections;
13246 stub_sec != NULL;
13247 stub_sec = stub_sec->next)
13248 {
13249 /* Ignore non-stub sections. */
13250 if (!strstr (stub_sec->name, STUB_SUFFIX))
13251 continue;
13252
13253 osi.sec = stub_sec;
13254
13255 osi.sec_shndx = _bfd_elf_section_from_bfd_section
13256 (output_bfd, osi.sec->output_section);
13257
13258 bfd_hash_traverse (&htab->stub_hash_table, arm_map_one_stub, &osi);
13259 }
13260 }
13261
13262 /* Finally, output mapping symbols for the PLT. */
13263 if (!htab->splt || htab->splt->size == 0)
13264 return TRUE;
13265
13266 osi.sec_shndx = _bfd_elf_section_from_bfd_section (output_bfd,
13267 htab->splt->output_section);
13268 osi.sec = htab->splt;
13269 /* Output mapping symbols for the PLT header. SymbianOS does not have
13270 a PLT header. */
13271 if (htab->vxworks_p)
13272 {
13273 /* VxWorks shared libraries have no PLT header. */
13274 if (!info->shared)
13275 {
13276 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
13277 return FALSE;
13278 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 12))
13279 return FALSE;
13280 }
13281 }
13282 else if (!htab->symbian_p)
13283 {
13284 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
13285 return FALSE;
13286 #ifndef FOUR_WORD_PLT
13287 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 16))
13288 return FALSE;
13289 #endif
13290 }
13291
13292 elf_link_hash_traverse (&htab->root, elf32_arm_output_plt_map, (void *) &osi);
13293 return TRUE;
13294 }
13295
13296 /* Allocate target specific section data. */
13297
13298 static bfd_boolean
13299 elf32_arm_new_section_hook (bfd *abfd, asection *sec)
13300 {
13301 if (!sec->used_by_bfd)
13302 {
13303 _arm_elf_section_data *sdata;
13304 bfd_size_type amt = sizeof (*sdata);
13305
13306 sdata = (_arm_elf_section_data *) bfd_zalloc (abfd, amt);
13307 if (sdata == NULL)
13308 return FALSE;
13309 sec->used_by_bfd = sdata;
13310 }
13311
13312 return _bfd_elf_new_section_hook (abfd, sec);
13313 }
13314
13315
13316 /* Used to order a list of mapping symbols by address. */
13317
13318 static int
13319 elf32_arm_compare_mapping (const void * a, const void * b)
13320 {
13321 const elf32_arm_section_map *amap = (const elf32_arm_section_map *) a;
13322 const elf32_arm_section_map *bmap = (const elf32_arm_section_map *) b;
13323
13324 if (amap->vma > bmap->vma)
13325 return 1;
13326 else if (amap->vma < bmap->vma)
13327 return -1;
13328 else if (amap->type > bmap->type)
13329 /* Ensure results do not depend on the host qsort for objects with
13330 multiple mapping symbols at the same address by sorting on type
13331 after vma. */
13332 return 1;
13333 else if (amap->type < bmap->type)
13334 return -1;
13335 else
13336 return 0;
13337 }
13338
13339 /* Add OFFSET to lower 31 bits of ADDR, leaving other bits unmodified. */
13340
13341 static unsigned long
13342 offset_prel31 (unsigned long addr, bfd_vma offset)
13343 {
13344 return (addr & ~0x7ffffffful) | ((addr + offset) & 0x7ffffffful);
13345 }
13346
13347 /* Copy an .ARM.exidx table entry, adding OFFSET to (applied) PREL31
13348 relocations. */
13349
13350 static void
13351 copy_exidx_entry (bfd *output_bfd, bfd_byte *to, bfd_byte *from, bfd_vma offset)
13352 {
13353 unsigned long first_word = bfd_get_32 (output_bfd, from);
13354 unsigned long second_word = bfd_get_32 (output_bfd, from + 4);
13355
13356 /* High bit of first word is supposed to be zero. */
13357 if ((first_word & 0x80000000ul) == 0)
13358 first_word = offset_prel31 (first_word, offset);
13359
13360 /* If the high bit of the second word is clear, and the bit pattern is not
13361 0x1 (EXIDX_CANTUNWIND), it is an offset to an .ARM.extab entry. */
13362 if ((second_word != 0x1) && ((second_word & 0x80000000ul) == 0))
13363 second_word = offset_prel31 (second_word, offset);
13364
13365 bfd_put_32 (output_bfd, first_word, to);
13366 bfd_put_32 (output_bfd, second_word, to + 4);
13367 }
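/* For reference, each .ARM.exidx entry is two words: the first is a
   prel31 offset to the function it covers; the second is either
   EXIDX_CANTUNWIND (1), an inline unwind descriptor (bit 31 set), or a
   prel31 offset to an .ARM.extab entry (bit 31 clear).  Only the
   prel31 offsets need adjusting when entries move.  */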
13368
13369 /* Data for make_branch_to_a8_stub(). */
13370
13371 struct a8_branch_to_stub_data {
13372 asection *writing_section;
13373 bfd_byte *contents;
13374 };
13375
13376
13377 /* Helper to insert branches to Cortex-A8 erratum stubs in the right
13378 places for a particular section. */
13379
13380 static bfd_boolean
13381 make_branch_to_a8_stub (struct bfd_hash_entry *gen_entry,
13382 void *in_arg)
13383 {
13384 struct elf32_arm_stub_hash_entry *stub_entry;
13385 struct a8_branch_to_stub_data *data;
13386 bfd_byte *contents;
13387 unsigned long branch_insn;
13388 bfd_vma veneered_insn_loc, veneer_entry_loc;
13389 bfd_signed_vma branch_offset;
13390 bfd *abfd;
13391 unsigned int target;
13392
13393 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
13394 data = (struct a8_branch_to_stub_data *) in_arg;
13395
13396 if (stub_entry->target_section != data->writing_section
13397 || stub_entry->stub_type < arm_stub_a8_veneer_lwm)
13398 return TRUE;
13399
13400 contents = data->contents;
13401
13402 veneered_insn_loc = stub_entry->target_section->output_section->vma
13403 + stub_entry->target_section->output_offset
13404 + stub_entry->target_value;
13405
13406 veneer_entry_loc = stub_entry->stub_sec->output_section->vma
13407 + stub_entry->stub_sec->output_offset
13408 + stub_entry->stub_offset;
13409
13410 if (stub_entry->stub_type == arm_stub_a8_veneer_blx)
13411 veneered_insn_loc &= ~3u;
13412
13413 branch_offset = veneer_entry_loc - veneered_insn_loc - 4;
13414
13415 abfd = stub_entry->target_section->owner;
13416 target = stub_entry->target_value;
13417
13418 /* We attempt to avoid this condition by setting stubs_always_after_branch
13419 in elf32_arm_size_stubs if we've enabled the Cortex-A8 erratum workaround.
13420 This check is just to be on the safe side... */
13421 if ((veneered_insn_loc & ~0xfff) == (veneer_entry_loc & ~0xfff))
13422 {
13423 (*_bfd_error_handler) (_("%B: error: Cortex-A8 erratum stub is "
13424 "allocated in unsafe location"), abfd);
13425 return FALSE;
13426 }
13427
13428 switch (stub_entry->stub_type)
13429 {
13430 case arm_stub_a8_veneer_b:
13431 case arm_stub_a8_veneer_b_cond:
13432 branch_insn = 0xf0009000;
13433 goto jump24;
13434
13435 case arm_stub_a8_veneer_blx:
13436 branch_insn = 0xf000e800;
13437 goto jump24;
13438
13439 case arm_stub_a8_veneer_bl:
13440 {
13441 unsigned int i1, j1, i2, j2, s;
13442
13443 branch_insn = 0xf000d000;
13444
13445 jump24:
13446 if (branch_offset < -16777216 || branch_offset > 16777214)
13447 {
13448 /* There's not much we can do apart from complain if this
13449 happens. */
13450 (*_bfd_error_handler) (_("%B: error: Cortex-A8 erratum stub out "
13451 "of range (input file too large)"), abfd);
13452 return FALSE;
13453 }
13454
13455 /* i1 = not(j1 eor s), so:
13456 not i1 = j1 eor s
13457 j1 = (not i1) eor s. */
13458
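/* Assemble the Thumb-2 24-bit branch immediate.  The halfword-aligned
   offset is split as S:I1:I2:imm10:imm11 (bit 0 dropped); in the
   32-bit BRANCH_INSN value below, imm11 occupies bits [10:0], J2 bit
   11, J1 bit 13, imm10 bits [25:16] and S bit 26.  */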
13459 branch_insn |= (branch_offset >> 1) & 0x7ff;
13460 branch_insn |= ((branch_offset >> 12) & 0x3ff) << 16;
13461 i2 = (branch_offset >> 22) & 1;
13462 i1 = (branch_offset >> 23) & 1;
13463 s = (branch_offset >> 24) & 1;
13464 j1 = (!i1) ^ s;
13465 j2 = (!i2) ^ s;
13466 branch_insn |= j2 << 11;
13467 branch_insn |= j1 << 13;
13468 branch_insn |= s << 26;
13469 }
13470 break;
13471
13472 default:
13473 BFD_FAIL ();
13474 return FALSE;
13475 }
13476
13477 bfd_put_16 (abfd, (branch_insn >> 16) & 0xffff, &contents[target]);
13478 bfd_put_16 (abfd, branch_insn & 0xffff, &contents[target + 2]);
13479
13480 return TRUE;
13481 }
13482
13483 /* Do code byteswapping. Return FALSE afterwards so that the section is
13484 written out as normal. */
13485
13486 static bfd_boolean
13487 elf32_arm_write_section (bfd *output_bfd,
13488 struct bfd_link_info *link_info,
13489 asection *sec,
13490 bfd_byte *contents)
13491 {
13492 unsigned int mapcount, errcount;
13493 _arm_elf_section_data *arm_data;
13494 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
13495 elf32_arm_section_map *map;
13496 elf32_vfp11_erratum_list *errnode;
13497 bfd_vma ptr;
13498 bfd_vma end;
13499 bfd_vma offset = sec->output_section->vma + sec->output_offset;
13500 bfd_byte tmp;
13501 unsigned int i;
13502
13503 if (globals == NULL)
13504 return FALSE;
13505
13506 /* If this section has not been allocated an _arm_elf_section_data
13507 structure then we cannot record anything. */
13508 arm_data = get_arm_elf_section_data (sec);
13509 if (arm_data == NULL)
13510 return FALSE;
13511
13512 mapcount = arm_data->mapcount;
13513 map = arm_data->map;
13514 errcount = arm_data->erratumcount;
13515
13516 if (errcount != 0)
13517 {
13518 unsigned int endianflip = bfd_big_endian (output_bfd) ? 3 : 0;
13519
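/* The section contents are held in target byte order, so when patching
   32-bit ARM instructions below, byte indices are XORed with ENDIANFLIP
   to store each instruction in the output's endianness: least
   significant byte first on little-endian output, most significant
   byte first on big-endian output.  */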
13520 for (errnode = arm_data->erratumlist; errnode != 0;
13521 errnode = errnode->next)
13522 {
13523 bfd_vma target = errnode->vma - offset;
13524
13525 switch (errnode->type)
13526 {
13527 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
13528 {
13529 bfd_vma branch_to_veneer;
13530 /* Original condition code of instruction, plus bit mask for
13531 ARM B instruction. */
13532 unsigned int insn = (errnode->u.b.vfp_insn & 0xf0000000)
13533 | 0x0a000000;
13534
13535 /* The instruction is before the label. */
13536 target -= 4;
13537
13538 /* The branch sits 4 bytes before ERRNODE->VMA and the ARM PC reads 8 bytes ahead; hence the net -4 below. */
13539 branch_to_veneer = errnode->u.b.veneer->vma
13540 - errnode->vma - 4;
13541
13542 if ((signed) branch_to_veneer < -(1 << 25)
13543 || (signed) branch_to_veneer >= (1 << 25))
13544 (*_bfd_error_handler) (_("%B: error: VFP11 veneer out of "
13545 "range"), output_bfd);
13546
13547 insn |= (branch_to_veneer >> 2) & 0xffffff;
13548 contents[endianflip ^ target] = insn & 0xff;
13549 contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
13550 contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
13551 contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;
13552 }
13553 break;
13554
13555 case VFP11_ERRATUM_ARM_VENEER:
13556 {
13557 bfd_vma branch_from_veneer;
13558 unsigned int insn;
13559
13560 /* Take size of veneer into account. */
13561 branch_from_veneer = errnode->u.v.branch->vma
13562 - errnode->vma - 12;
13563
13564 if ((signed) branch_from_veneer < -(1 << 25)
13565 || (signed) branch_from_veneer >= (1 << 25))
13566 (*_bfd_error_handler) (_("%B: error: VFP11 veneer out of "
13567 "range"), output_bfd);
13568
13569 /* Original instruction. */
13570 insn = errnode->u.v.branch->u.b.vfp_insn;
13571 contents[endianflip ^ target] = insn & 0xff;
13572 contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
13573 contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
13574 contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;
13575
13576 /* Branch back to insn after original insn. */
13577 insn = 0xea000000 | ((branch_from_veneer >> 2) & 0xffffff);
13578 contents[endianflip ^ (target + 4)] = insn & 0xff;
13579 contents[endianflip ^ (target + 5)] = (insn >> 8) & 0xff;
13580 contents[endianflip ^ (target + 6)] = (insn >> 16) & 0xff;
13581 contents[endianflip ^ (target + 7)] = (insn >> 24) & 0xff;
13582 }
13583 break;
13584
13585 default:
13586 abort ();
13587 }
13588 }
13589 }
13590
13591 if (arm_data->elf.this_hdr.sh_type == SHT_ARM_EXIDX)
13592 {
13593 arm_unwind_table_edit *edit_node
13594 = arm_data->u.exidx.unwind_edit_list;
13595 /* Now, sec->size is the size of the section we will write. The original
13596 size (before we merged duplicate entries and inserted EXIDX_CANTUNWIND
13597 markers) was sec->rawsize. (If we performed no edits, rawsize will be
13598 zero and we should use size instead.) */
13599 bfd_byte *edited_contents = (bfd_byte *) bfd_malloc (sec->size);
13600 unsigned int input_size = sec->rawsize ? sec->rawsize : sec->size;
13601 unsigned int in_index, out_index;
13602 bfd_vma add_to_offsets = 0;
13603
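/* Walk the input entries and the edit list in parallel.  A
   DELETE_EXIDX_ENTRY edit skips one input entry, shifting the remaining
   output entries back by 8 bytes; an INSERT_EXIDX_CANTUNWIND_AT_END
   edit emits a synthetic terminator, shifting them forward by 8 bytes.
   ADD_TO_OFFSETS records the cumulative shift so that copy_exidx_entry
   can fix up the copied prel31 offsets.  */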
13604 for (in_index = 0, out_index = 0; in_index * 8 < input_size || edit_node;)
13605 {
13606 if (edit_node)
13607 {
13608 unsigned int edit_index = edit_node->index;
13609
13610 if (in_index < edit_index && in_index * 8 < input_size)
13611 {
13612 copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
13613 contents + in_index * 8, add_to_offsets);
13614 out_index++;
13615 in_index++;
13616 }
13617 else if (in_index == edit_index
13618 || (in_index * 8 >= input_size
13619 && edit_index == UINT_MAX))
13620 {
13621 switch (edit_node->type)
13622 {
13623 case DELETE_EXIDX_ENTRY:
13624 in_index++;
13625 add_to_offsets += 8;
13626 break;
13627
13628 case INSERT_EXIDX_CANTUNWIND_AT_END:
13629 {
13630 asection *text_sec = edit_node->linked_section;
13631 bfd_vma text_offset = text_sec->output_section->vma
13632 + text_sec->output_offset
13633 + text_sec->size;
13634 bfd_vma exidx_offset = offset + out_index * 8;
13635 unsigned long prel31_offset;
13636
13637 /* Note: this is meant to be equivalent to an
13638 R_ARM_PREL31 relocation. These synthetic
13639 EXIDX_CANTUNWIND markers are not relocated by the
13640 usual BFD method. */
13641 prel31_offset = (text_offset - exidx_offset)
13642 & 0x7ffffffful;
13643
13644 /* First address we can't unwind. */
13645 bfd_put_32 (output_bfd, prel31_offset,
13646 &edited_contents[out_index * 8]);
13647
13648 /* Code for EXIDX_CANTUNWIND. */
13649 bfd_put_32 (output_bfd, 0x1,
13650 &edited_contents[out_index * 8 + 4]);
13651
13652 out_index++;
13653 add_to_offsets -= 8;
13654 }
13655 break;
13656 }
13657
13658 edit_node = edit_node->next;
13659 }
13660 }
13661 else
13662 {
13663 /* No more edits; copy the remaining entries verbatim. */
13664 copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
13665 contents + in_index * 8, add_to_offsets);
13666 out_index++;
13667 in_index++;
13668 }
13669 }
13670
13671 if (!(sec->flags & SEC_EXCLUDE) && !(sec->flags & SEC_NEVER_LOAD))
13672 bfd_set_section_contents (output_bfd, sec->output_section,
13673 edited_contents,
13674 (file_ptr) sec->output_offset, sec->size);
13675
13676 return TRUE;
13677 }
13678
13679 /* Fix code to point to Cortex-A8 erratum stubs. */
13680 if (globals->fix_cortex_a8)
13681 {
13682 struct a8_branch_to_stub_data data;
13683
13684 data.writing_section = sec;
13685 data.contents = contents;
13686
13687 bfd_hash_traverse (&globals->stub_hash_table, make_branch_to_a8_stub,
13688 &data);
13689 }
13690
13691 if (mapcount == 0)
13692 return FALSE;
13693
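/* When producing a BE8 image, code must be stored little-endian while
   data stays big-endian, so use the mapping symbols gathered earlier to
   byte-swap only the code regions: 32-bit words in ARM ($a) regions,
   halfwords in Thumb ($t) regions, and nothing in data ($d) regions.  */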
13694 if (globals->byteswap_code)
13695 {
13696 qsort (map, mapcount, sizeof (* map), elf32_arm_compare_mapping);
13697
13698 ptr = map[0].vma;
13699 for (i = 0; i < mapcount; i++)
13700 {
13701 if (i == mapcount - 1)
13702 end = sec->size;
13703 else
13704 end = map[i + 1].vma;
13705
13706 switch (map[i].type)
13707 {
13708 case 'a':
13709 /* Byte swap code words. */
13710 while (ptr + 3 < end)
13711 {
13712 tmp = contents[ptr];
13713 contents[ptr] = contents[ptr + 3];
13714 contents[ptr + 3] = tmp;
13715 tmp = contents[ptr + 1];
13716 contents[ptr + 1] = contents[ptr + 2];
13717 contents[ptr + 2] = tmp;
13718 ptr += 4;
13719 }
13720 break;
13721
13722 case 't':
13723 /* Byte swap code halfwords. */
13724 while (ptr + 1 < end)
13725 {
13726 tmp = contents[ptr];
13727 contents[ptr] = contents[ptr + 1];
13728 contents[ptr + 1] = tmp;
13729 ptr += 2;
13730 }
13731 break;
13732
13733 case 'd':
13734 /* Leave data alone. */
13735 break;
13736 }
13737 ptr = end;
13738 }
13739 }
13740
13741 free (map);
13742 arm_data->mapcount = -1;
13743 arm_data->mapsize = 0;
13744 arm_data->map = NULL;
13745
13746 return FALSE;
13747 }
13748
13749 /* Display STT_ARM_TFUNC symbols as functions. */
13750
13751 static void
13752 elf32_arm_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED,
13753 asymbol *asym)
13754 {
13755 elf_symbol_type *elfsym = (elf_symbol_type *) asym;
13756
13757 if (ELF_ST_TYPE (elfsym->internal_elf_sym.st_info) == STT_ARM_TFUNC)
13758 elfsym->symbol.flags |= BSF_FUNCTION;
13759 }
13760
13761
13762 /* Mangle thumb function symbols as we read them in. */
13763
13764 static bfd_boolean
13765 elf32_arm_swap_symbol_in (bfd * abfd,
13766 const void *psrc,
13767 const void *pshn,
13768 Elf_Internal_Sym *dst)
13769 {
13770 if (!bfd_elf32_swap_symbol_in (abfd, psrc, pshn, dst))
13771 return FALSE;
13772
13773 /* New EABI objects mark thumb function symbols by setting the low bit of
13774 the address. Turn these into STT_ARM_TFUNC. */
13775 if ((ELF_ST_TYPE (dst->st_info) == STT_FUNC)
13776 && (dst->st_value & 1))
13777 {
13778 dst->st_info = ELF_ST_INFO (ELF_ST_BIND (dst->st_info), STT_ARM_TFUNC);
13779 dst->st_value &= ~(bfd_vma) 1;
13780 }
13781 return TRUE;
13782 }
13783
13784
13785 /* Mangle thumb function symbols as we write them out. */
13786
13787 static void
13788 elf32_arm_swap_symbol_out (bfd *abfd,
13789 const Elf_Internal_Sym *src,
13790 void *cdst,
13791 void *shndx)
13792 {
13793 Elf_Internal_Sym newsym;
13794
13795 /* We convert STT_ARM_TFUNC symbols into STT_FUNC with the low bit
13796 of the address set, as per the new EABI. We do this unconditionally
13797 because objcopy does not set the elf header flags until after
13798 it writes out the symbol table. */
13799 if (ELF_ST_TYPE (src->st_info) == STT_ARM_TFUNC)
13800 {
13801 newsym = *src;
13802 newsym.st_info = ELF_ST_INFO (ELF_ST_BIND (src->st_info), STT_FUNC);
13803 if (newsym.st_shndx != SHN_UNDEF)
13804 {
13805 /* Do this only for defined symbols. At link time, the static
13806 linker simulates the dynamic linker's job of resolving symbols
13807 and carries the Thumb-ness of the symbols it finds over to the
13808 output symbol table. Exactly how is unclear, but the Thumb-ness
13809 of an undefined symbol may well be different at runtime, and
13810 writing '1' for such symbols would be confusing for users and
13811 possibly for the dynamic linker itself. */
13813 newsym.st_value |= 1;
13814 }
13815
13816 src = &newsym;
13817 }
13818 bfd_elf32_swap_symbol_out (abfd, src, cdst, shndx);
13819 }
13820
13821 /* Add the PT_ARM_EXIDX program header. */
13822
13823 static bfd_boolean
13824 elf32_arm_modify_segment_map (bfd *abfd,
13825 struct bfd_link_info *info ATTRIBUTE_UNUSED)
13826 {
13827 struct elf_segment_map *m;
13828 asection *sec;
13829
13830 sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
13831 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
13832 {
13833 /* If there is already a PT_ARM_EXIDX header, then we do not
13834 want to add another one. This situation arises when running
13835 "strip"; the input binary already has the header. */
13836 m = elf_tdata (abfd)->segment_map;
13837 while (m && m->p_type != PT_ARM_EXIDX)
13838 m = m->next;
13839 if (!m)
13840 {
13841 m = (struct elf_segment_map *)
13842 bfd_zalloc (abfd, sizeof (struct elf_segment_map));
13843 if (m == NULL)
13844 return FALSE;
13845 m->p_type = PT_ARM_EXIDX;
13846 m->count = 1;
13847 m->sections[0] = sec;
13848
13849 m->next = elf_tdata (abfd)->segment_map;
13850 elf_tdata (abfd)->segment_map = m;
13851 }
13852 }
13853
13854 return TRUE;
13855 }
13856
13857 /* We may add a PT_ARM_EXIDX program header. */
13858
13859 static int
13860 elf32_arm_additional_program_headers (bfd *abfd,
13861 struct bfd_link_info *info ATTRIBUTE_UNUSED)
13862 {
13863 asection *sec;
13864
13865 sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
13866 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
13867 return 1;
13868 else
13869 return 0;
13870 }
13871
13872 /* We have two function types: STT_FUNC and STT_ARM_TFUNC. */
13873
13874 static bfd_boolean
13875 elf32_arm_is_function_type (unsigned int type)
13876 {
13877 return (type == STT_FUNC) || (type == STT_ARM_TFUNC);
13878 }
13879
13880 /* We use this to override swap_symbol_in and swap_symbol_out. */
13881 const struct elf_size_info elf32_arm_size_info =
13882 {
13883 sizeof (Elf32_External_Ehdr),
13884 sizeof (Elf32_External_Phdr),
13885 sizeof (Elf32_External_Shdr),
13886 sizeof (Elf32_External_Rel),
13887 sizeof (Elf32_External_Rela),
13888 sizeof (Elf32_External_Sym),
13889 sizeof (Elf32_External_Dyn),
13890 sizeof (Elf_External_Note),
13891 4,
13892 1,
13893 32, 2,
13894 ELFCLASS32, EV_CURRENT,
13895 bfd_elf32_write_out_phdrs,
13896 bfd_elf32_write_shdrs_and_ehdr,
13897 bfd_elf32_checksum_contents,
13898 bfd_elf32_write_relocs,
13899 elf32_arm_swap_symbol_in,
13900 elf32_arm_swap_symbol_out,
13901 bfd_elf32_slurp_reloc_table,
13902 bfd_elf32_slurp_symbol_table,
13903 bfd_elf32_swap_dyn_in,
13904 bfd_elf32_swap_dyn_out,
13905 bfd_elf32_swap_reloc_in,
13906 bfd_elf32_swap_reloc_out,
13907 bfd_elf32_swap_reloca_in,
13908 bfd_elf32_swap_reloca_out
13909 };
13910
13911 #define ELF_ARCH bfd_arch_arm
13912 #define ELF_TARGET_ID ARM_ELF_DATA
13913 #define ELF_MACHINE_CODE EM_ARM
13914 #ifdef __QNXTARGET__
13915 #define ELF_MAXPAGESIZE 0x1000
13916 #else
13917 #define ELF_MAXPAGESIZE 0x8000
13918 #endif
13919 #define ELF_MINPAGESIZE 0x1000
13920 #define ELF_COMMONPAGESIZE 0x1000
13921
13922 #define bfd_elf32_mkobject elf32_arm_mkobject
13923
13924 #define bfd_elf32_bfd_copy_private_bfd_data elf32_arm_copy_private_bfd_data
13925 #define bfd_elf32_bfd_merge_private_bfd_data elf32_arm_merge_private_bfd_data
13926 #define bfd_elf32_bfd_set_private_flags elf32_arm_set_private_flags
13927 #define bfd_elf32_bfd_print_private_bfd_data elf32_arm_print_private_bfd_data
13928 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_link_hash_table_create
13929 #define bfd_elf32_bfd_link_hash_table_free elf32_arm_hash_table_free
13930 #define bfd_elf32_bfd_reloc_type_lookup elf32_arm_reloc_type_lookup
13931 #define bfd_elf32_bfd_reloc_name_lookup elf32_arm_reloc_name_lookup
13932 #define bfd_elf32_find_nearest_line elf32_arm_find_nearest_line
13933 #define bfd_elf32_find_inliner_info elf32_arm_find_inliner_info
13934 #define bfd_elf32_new_section_hook elf32_arm_new_section_hook
13935 #define bfd_elf32_bfd_is_target_special_symbol elf32_arm_is_target_special_symbol
13936 #define bfd_elf32_bfd_final_link elf32_arm_final_link
13937
13938 #define elf_backend_get_symbol_type elf32_arm_get_symbol_type
13939 #define elf_backend_gc_mark_hook elf32_arm_gc_mark_hook
13940 #define elf_backend_gc_mark_extra_sections elf32_arm_gc_mark_extra_sections
13941 #define elf_backend_gc_sweep_hook elf32_arm_gc_sweep_hook
13942 #define elf_backend_check_relocs elf32_arm_check_relocs
13943 #define elf_backend_relocate_section elf32_arm_relocate_section
13944 #define elf_backend_write_section elf32_arm_write_section
13945 #define elf_backend_adjust_dynamic_symbol elf32_arm_adjust_dynamic_symbol
13946 #define elf_backend_create_dynamic_sections elf32_arm_create_dynamic_sections
13947 #define elf_backend_finish_dynamic_symbol elf32_arm_finish_dynamic_symbol
13948 #define elf_backend_finish_dynamic_sections elf32_arm_finish_dynamic_sections
13949 #define elf_backend_size_dynamic_sections elf32_arm_size_dynamic_sections
13950 #define elf_backend_init_index_section _bfd_elf_init_2_index_sections
13951 #define elf_backend_post_process_headers elf32_arm_post_process_headers
13952 #define elf_backend_reloc_type_class elf32_arm_reloc_type_class
13953 #define elf_backend_object_p elf32_arm_object_p
13954 #define elf_backend_section_flags elf32_arm_section_flags
13955 #define elf_backend_fake_sections elf32_arm_fake_sections
13956 #define elf_backend_section_from_shdr elf32_arm_section_from_shdr
13957 #define elf_backend_final_write_processing elf32_arm_final_write_processing
13958 #define elf_backend_copy_indirect_symbol elf32_arm_copy_indirect_symbol
13959 #define elf_backend_symbol_processing elf32_arm_symbol_processing
13960 #define elf_backend_size_info elf32_arm_size_info
13961 #define elf_backend_modify_segment_map elf32_arm_modify_segment_map
13962 #define elf_backend_additional_program_headers elf32_arm_additional_program_headers
13963 #define elf_backend_output_arch_local_syms elf32_arm_output_arch_local_syms
13964 #define elf_backend_begin_write_processing elf32_arm_begin_write_processing
13965 #define elf_backend_is_function_type elf32_arm_is_function_type
13966
13967 #define elf_backend_can_refcount 1
13968 #define elf_backend_can_gc_sections 1
13969 #define elf_backend_plt_readonly 1
13970 #define elf_backend_want_got_plt 1
13971 #define elf_backend_want_plt_sym 0
13972 #define elf_backend_may_use_rel_p 1
13973 #define elf_backend_may_use_rela_p 0
13974 #define elf_backend_default_use_rela_p 0
13975
13976 #define elf_backend_got_header_size 12
13977
13978 #undef elf_backend_obj_attrs_vendor
13979 #define elf_backend_obj_attrs_vendor "aeabi"
13980 #undef elf_backend_obj_attrs_section
13981 #define elf_backend_obj_attrs_section ".ARM.attributes"
13982 #undef elf_backend_obj_attrs_arg_type
13983 #define elf_backend_obj_attrs_arg_type elf32_arm_obj_attrs_arg_type
13984 #undef elf_backend_obj_attrs_section_type
13985 #define elf_backend_obj_attrs_section_type SHT_ARM_ATTRIBUTES
13986 #define elf_backend_obj_attrs_order elf32_arm_obj_attrs_order
13987
13988 #include "elf32-target.h"
13989
13990 /* VxWorks Targets. */
13991
13992 #undef TARGET_LITTLE_SYM
13993 #define TARGET_LITTLE_SYM bfd_elf32_littlearm_vxworks_vec
13994 #undef TARGET_LITTLE_NAME
13995 #define TARGET_LITTLE_NAME "elf32-littlearm-vxworks"
13996 #undef TARGET_BIG_SYM
13997 #define TARGET_BIG_SYM bfd_elf32_bigarm_vxworks_vec
13998 #undef TARGET_BIG_NAME
13999 #define TARGET_BIG_NAME "elf32-bigarm-vxworks"
14000
14001 /* Like elf32_arm_link_hash_table_create -- but overrides
14002 appropriately for VxWorks. */
14003
14004 static struct bfd_link_hash_table *
14005 elf32_arm_vxworks_link_hash_table_create (bfd *abfd)
14006 {
14007 struct bfd_link_hash_table *ret;
14008
14009 ret = elf32_arm_link_hash_table_create (abfd);
14010 if (ret)
14011 {
14012 struct elf32_arm_link_hash_table *htab
14013 = (struct elf32_arm_link_hash_table *) ret;
14014 htab->use_rel = 0;
14015 htab->vxworks_p = 1;
14016 }
14017 return ret;
14018 }
14019
14020 static void
14021 elf32_arm_vxworks_final_write_processing (bfd *abfd, bfd_boolean linker)
14022 {
14023 elf32_arm_final_write_processing (abfd, linker);
14024 elf_vxworks_final_write_processing (abfd, linker);
14025 }
14026
14027 #undef elf32_bed
14028 #define elf32_bed elf32_arm_vxworks_bed
14029
14030 #undef bfd_elf32_bfd_link_hash_table_create
14031 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_vxworks_link_hash_table_create
14032 #undef elf_backend_add_symbol_hook
14033 #define elf_backend_add_symbol_hook elf_vxworks_add_symbol_hook
14034 #undef elf_backend_final_write_processing
14035 #define elf_backend_final_write_processing elf32_arm_vxworks_final_write_processing
14036 #undef elf_backend_emit_relocs
14037 #define elf_backend_emit_relocs elf_vxworks_emit_relocs
14038
14039 #undef elf_backend_may_use_rel_p
14040 #define elf_backend_may_use_rel_p 0
14041 #undef elf_backend_may_use_rela_p
14042 #define elf_backend_may_use_rela_p 1
14043 #undef elf_backend_default_use_rela_p
14044 #define elf_backend_default_use_rela_p 1
14045 #undef elf_backend_want_plt_sym
14046 #define elf_backend_want_plt_sym 1
14047 #undef ELF_MAXPAGESIZE
14048 #define ELF_MAXPAGESIZE 0x1000
14049
14050 #include "elf32-target.h"
14051
14052
14053 /* Merge backend specific data from an object file to the output
14054 object file when linking. */
14055
14056 static bfd_boolean
14057 elf32_arm_merge_private_bfd_data (bfd * ibfd, bfd * obfd)
14058 {
14059 flagword out_flags;
14060 flagword in_flags;
14061 bfd_boolean flags_compatible = TRUE;
14062 asection *sec;
14063
14064 /* Check if we have the same endianness. */
14065 if (! _bfd_generic_verify_endian_match (ibfd, obfd))
14066 return FALSE;
14067
14068 if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
14069 return TRUE;
14070
14071 if (!elf32_arm_merge_eabi_attributes (ibfd, obfd))
14072 return FALSE;
14073
14074 /* The input BFD must have had its flags initialised. */
14075 /* The following seems bogus to me -- The flags are initialized in
14076 the assembler but I don't think an elf_flags_init field is
14077 written into the object. */
14078 /* BFD_ASSERT (elf_flags_init (ibfd)); */
14079
14080 in_flags = elf_elfheader (ibfd)->e_flags;
14081 out_flags = elf_elfheader (obfd)->e_flags;
14082
14083 /* In theory there is no reason why we couldn't handle this. However
14084 in practice it isn't even close to working and there is no real
14085 reason to want it. */
14086 if (EF_ARM_EABI_VERSION (in_flags) >= EF_ARM_EABI_VER4
14087 && !(ibfd->flags & DYNAMIC)
14088 && (in_flags & EF_ARM_BE8))
14089 {
14090 _bfd_error_handler (_("error: %B is already in final BE8 format"),
14091 ibfd);
14092 return FALSE;
14093 }
14094
14095 if (!elf_flags_init (obfd))
14096 {
14097 /* If the input is the default architecture and had the default
14098 flags then do not bother setting the flags for the output
14099 architecture; instead, allow future merges to do this. If no
14100 future merges ever set these flags then they will retain their
14101 uninitialised values, which, surprise surprise, correspond
14102 to the default values. */
14103 if (bfd_get_arch_info (ibfd)->the_default
14104 && elf_elfheader (ibfd)->e_flags == 0)
14105 return TRUE;
14106
14107 elf_flags_init (obfd) = TRUE;
14108 elf_elfheader (obfd)->e_flags = in_flags;
14109
14110 if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
14111 && bfd_get_arch_info (obfd)->the_default)
14112 return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd), bfd_get_mach (ibfd));
14113
14114 return TRUE;
14115 }
14116
14117 /* Determine what should happen if the input ARM architecture
14118 does not match the output ARM architecture. */
14119 if (! bfd_arm_merge_machines (ibfd, obfd))
14120 return FALSE;
14121
14122 /* Identical flags must be compatible. */
14123 if (in_flags == out_flags)
14124 return TRUE;
14125
14126 /* Check to see if the input BFD actually contains any sections. If
14127 not, its flags may not have been initialised either, but it
14128 cannot actually cause any incompatibility. Do not short-circuit
14129 dynamic objects; their section list may be emptied by
14130 elf_link_add_object_symbols.
14131
14132 Also check to see if there are no code sections in the input.
14133 In this case there is no need to check for code specific flags.
14134 XXX - do we need to worry about floating-point format compatibility
14135 in data sections? */
14136 if (!(ibfd->flags & DYNAMIC))
14137 {
14138 bfd_boolean null_input_bfd = TRUE;
14139 bfd_boolean only_data_sections = TRUE;
14140
14141 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
14142 {
14143 /* Ignore synthetic glue sections. */
14144 if (strcmp (sec->name, ".glue_7")
14145 && strcmp (sec->name, ".glue_7t"))
14146 {
14147 if ((bfd_get_section_flags (ibfd, sec)
14148 & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
14149 == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
14150 only_data_sections = FALSE;
14151
14152 null_input_bfd = FALSE;
14153 break;
14154 }
14155 }
14156
14157 if (null_input_bfd || only_data_sections)
14158 return TRUE;
14159 }
14160
14161 /* Complain about various flag mismatches. */
14162 if (!elf32_arm_versions_compatible (EF_ARM_EABI_VERSION (in_flags),
14163 EF_ARM_EABI_VERSION (out_flags)))
14164 {
14165 _bfd_error_handler
14166 (_("error: Source object %B has EABI version %d, but target %B has EABI version %d"),
14167 ibfd, obfd,
14168 (in_flags & EF_ARM_EABIMASK) >> 24,
14169 (out_flags & EF_ARM_EABIMASK) >> 24);
14170 return FALSE;
14171 }
14172
14173 /* Not sure what needs to be checked for EABI versions >= 1. */
14174 /* VxWorks libraries do not use these flags. */
14175 if (get_elf_backend_data (obfd) != &elf32_arm_vxworks_bed
14176 && get_elf_backend_data (ibfd) != &elf32_arm_vxworks_bed
14177 && EF_ARM_EABI_VERSION (in_flags) == EF_ARM_EABI_UNKNOWN)
14178 {
14179 if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
14180 {
14181 _bfd_error_handler
14182 (_("error: %B is compiled for APCS-%d, whereas target %B uses APCS-%d"),
14183 ibfd, obfd,
14184 in_flags & EF_ARM_APCS_26 ? 26 : 32,
14185 out_flags & EF_ARM_APCS_26 ? 26 : 32);
14186 flags_compatible = FALSE;
14187 }
14188
14189 if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
14190 {
14191 if (in_flags & EF_ARM_APCS_FLOAT)
14192 _bfd_error_handler
14193 (_("error: %B passes floats in float registers, whereas %B passes them in integer registers"),
14194 ibfd, obfd);
14195 else
14196 _bfd_error_handler
14197 (_("error: %B passes floats in integer registers, whereas %B passes them in float registers"),
14198 ibfd, obfd);
14199
14200 flags_compatible = FALSE;
14201 }
14202
14203 if ((in_flags & EF_ARM_VFP_FLOAT) != (out_flags & EF_ARM_VFP_FLOAT))
14204 {
14205 if (in_flags & EF_ARM_VFP_FLOAT)
14206 _bfd_error_handler
14207 (_("error: %B uses VFP instructions, whereas %B does not"),
14208 ibfd, obfd);
14209 else
14210 _bfd_error_handler
14211 (_("error: %B uses FPA instructions, whereas %B does not"),
14212 ibfd, obfd);
14213
14214 flags_compatible = FALSE;
14215 }
14216
14217 if ((in_flags & EF_ARM_MAVERICK_FLOAT) != (out_flags & EF_ARM_MAVERICK_FLOAT))
14218 {
14219 if (in_flags & EF_ARM_MAVERICK_FLOAT)
14220 _bfd_error_handler
14221 (_("error: %B uses Maverick instructions, whereas %B does not"),
14222 ibfd, obfd);
14223 else
14224 _bfd_error_handler
14225 (_("error: %B does not use Maverick instructions, whereas %B does"),
14226 ibfd, obfd);
14227
14228 flags_compatible = FALSE;
14229 }
14230
14231 #ifdef EF_ARM_SOFT_FLOAT
14232 if ((in_flags & EF_ARM_SOFT_FLOAT) != (out_flags & EF_ARM_SOFT_FLOAT))
14233 {
14234 /* We can allow interworking between code that uses the VFP format
14235 layout and either soft float or integer regs for passing
14236 floating point arguments and results. We already know that the
14237 APCS_FLOAT flags match; similarly for the VFP flags. */
14239 if ((in_flags & EF_ARM_APCS_FLOAT) != 0
14240 || (in_flags & EF_ARM_VFP_FLOAT) == 0)
14241 {
14242 if (in_flags & EF_ARM_SOFT_FLOAT)
14243 _bfd_error_handler
14244 (_("error: %B uses software FP, whereas %B uses hardware FP"),
14245 ibfd, obfd);
14246 else
14247 _bfd_error_handler
14248 (_("error: %B uses hardware FP, whereas %B uses software FP"),
14249 ibfd, obfd);
14250
14251 flags_compatible = FALSE;
14252 }
14253 }
14254 #endif
14255
14256 /* Interworking mismatch is only a warning. */
14257 if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
14258 {
14259 if (in_flags & EF_ARM_INTERWORK)
14260 {
14261 _bfd_error_handler
14262 (_("Warning: %B supports interworking, whereas %B does not"),
14263 ibfd, obfd);
14264 }
14265 else
14266 {
14267 _bfd_error_handler
14268 (_("Warning: %B does not support interworking, whereas %B does"),
14269 ibfd, obfd);
14270 }
14271 }
14272 }
14273
14274 return flags_compatible;
14275 }
14276
14277
14278 /* Symbian OS Targets. */
14279
14280 #undef TARGET_LITTLE_SYM
14281 #define TARGET_LITTLE_SYM bfd_elf32_littlearm_symbian_vec
14282 #undef TARGET_LITTLE_NAME
14283 #define TARGET_LITTLE_NAME "elf32-littlearm-symbian"
14284 #undef TARGET_BIG_SYM
14285 #define TARGET_BIG_SYM bfd_elf32_bigarm_symbian_vec
14286 #undef TARGET_BIG_NAME
14287 #define TARGET_BIG_NAME "elf32-bigarm-symbian"
14288
14289 /* Like elf32_arm_link_hash_table_create -- but overrides
14290 appropriately for Symbian OS. */
14291
14292 static struct bfd_link_hash_table *
14293 elf32_arm_symbian_link_hash_table_create (bfd *abfd)
14294 {
14295 struct bfd_link_hash_table *ret;
14296
14297 ret = elf32_arm_link_hash_table_create (abfd);
14298 if (ret)
14299 {
14300 struct elf32_arm_link_hash_table *htab
14301 = (struct elf32_arm_link_hash_table *)ret;
14302 /* There is no PLT header for Symbian OS. */
14303 htab->plt_header_size = 0;
14304 /* The PLT entries are each one instruction and one word. */
14305 htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry);
14306 htab->symbian_p = 1;
14307 /* Symbian uses armv5t or above, so use_blx is always true. */
14308 htab->use_blx = 1;
14309 htab->root.is_relocatable_executable = 1;
14310 }
14311 return ret;
14312 }
14313
14314 static const struct bfd_elf_special_section
14315 elf32_arm_symbian_special_sections[] =
14316 {
14317 /* In a BPABI executable, the dynamic linking sections do not go in
14318 the loadable read-only segment. The post-linker may wish to
14319 refer to these sections, but they are not part of the final
14320 program image. */
14321 { STRING_COMMA_LEN (".dynamic"), 0, SHT_DYNAMIC, 0 },
14322 { STRING_COMMA_LEN (".dynstr"), 0, SHT_STRTAB, 0 },
14323 { STRING_COMMA_LEN (".dynsym"), 0, SHT_DYNSYM, 0 },
14324 { STRING_COMMA_LEN (".got"), 0, SHT_PROGBITS, 0 },
14325 { STRING_COMMA_LEN (".hash"), 0, SHT_HASH, 0 },
14326 /* These sections do not need to be writable as the SymbianOS
14327 postlinker will arrange things so that no dynamic relocation is
14328 required. */
14329 { STRING_COMMA_LEN (".init_array"), 0, SHT_INIT_ARRAY, SHF_ALLOC },
14330 { STRING_COMMA_LEN (".fini_array"), 0, SHT_FINI_ARRAY, SHF_ALLOC },
14331 { STRING_COMMA_LEN (".preinit_array"), 0, SHT_PREINIT_ARRAY, SHF_ALLOC },
14332 { NULL, 0, 0, 0, 0 }
14333 };
14334
14335 static void
14336 elf32_arm_symbian_begin_write_processing (bfd *abfd,
14337 struct bfd_link_info *link_info)
14338 {
14339 /* BPABI objects are never loaded directly by an OS kernel; they are
14340 processed by a postlinker first, into an OS-specific format. If
14341 the D_PAGED bit is set on the file, BFD will align segments on
14342 page boundaries, so that an OS can directly map the file. With
14343 BPABI objects, that just results in wasted space. In addition,
14344 because we clear the D_PAGED bit, map_sections_to_segments will
14345 recognize that the program headers should not be mapped into any
14346 loadable segment. */
14347 abfd->flags &= ~D_PAGED;
14348 elf32_arm_begin_write_processing (abfd, link_info);
14349 }
14350
14351 static bfd_boolean
14352 elf32_arm_symbian_modify_segment_map (bfd *abfd,
14353 struct bfd_link_info *info)
14354 {
14355 struct elf_segment_map *m;
14356 asection *dynsec;
14357
14358 /* BPABI shared libraries and executables should have a PT_DYNAMIC
14359 segment. However, because the .dynamic section is not marked
14360 with SEC_LOAD, the generic ELF code will not create such a
14361 segment. */
14362 dynsec = bfd_get_section_by_name (abfd, ".dynamic");
14363 if (dynsec)
14364 {
14365 for (m = elf_tdata (abfd)->segment_map; m != NULL; m = m->next)
14366 if (m->p_type == PT_DYNAMIC)
14367 break;
14368
14369 if (m == NULL)
14370 {
14371 m = _bfd_elf_make_dynamic_segment (abfd, dynsec);
14372 m->next = elf_tdata (abfd)->segment_map;
14373 elf_tdata (abfd)->segment_map = m;
14374 }
14375 }
14376
14377 /* Also call the generic arm routine. */
14378 return elf32_arm_modify_segment_map (abfd, info);
14379 }
14380
14381 /* Return address for Ith PLT stub in section PLT, for relocation REL
14382 or (bfd_vma) -1 if it should not be included. */
14383
14384 static bfd_vma
14385 elf32_arm_symbian_plt_sym_val (bfd_vma i, const asection *plt,
14386 const arelent *rel ATTRIBUTE_UNUSED)
14387 {
14388 return plt->vma + 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry) * i;
14389 }
14390
14391
14392 #undef elf32_bed
14393 #define elf32_bed elf32_arm_symbian_bed
14394
14395 /* The dynamic sections are not allocated on SymbianOS; the postlinker
14396 will process them and then discard them. */
14397 #undef ELF_DYNAMIC_SEC_FLAGS
14398 #define ELF_DYNAMIC_SEC_FLAGS \
14399 (SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_LINKER_CREATED)
14400
14401 #undef elf_backend_add_symbol_hook
14402 #undef elf_backend_emit_relocs
14403
14404 #undef bfd_elf32_bfd_link_hash_table_create
14405 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_symbian_link_hash_table_create
14406 #undef elf_backend_special_sections
14407 #define elf_backend_special_sections elf32_arm_symbian_special_sections
14408 #undef elf_backend_begin_write_processing
14409 #define elf_backend_begin_write_processing elf32_arm_symbian_begin_write_processing
14410 #undef elf_backend_final_write_processing
14411 #define elf_backend_final_write_processing elf32_arm_final_write_processing
14412
14413 #undef elf_backend_modify_segment_map
14414 #define elf_backend_modify_segment_map elf32_arm_symbian_modify_segment_map
14415
14416 /* There is no .got section for BPABI objects, and hence no header. */
14417 #undef elf_backend_got_header_size
14418 #define elf_backend_got_header_size 0
14419
14420 /* Similarly, there is no .got.plt section. */
14421 #undef elf_backend_want_got_plt
14422 #define elf_backend_want_got_plt 0
14423
14424 #undef elf_backend_plt_sym_val
14425 #define elf_backend_plt_sym_val elf32_arm_symbian_plt_sym_val
14426
14427 #undef elf_backend_may_use_rel_p
14428 #define elf_backend_may_use_rel_p 1
14429 #undef elf_backend_may_use_rela_p
14430 #define elf_backend_may_use_rela_p 0
14431 #undef elf_backend_default_use_rela_p
14432 #define elf_backend_default_use_rela_p 0
14433 #undef elf_backend_want_plt_sym
14434 #define elf_backend_want_plt_sym 0
14435 #undef ELF_MAXPAGESIZE
14436 #define ELF_MAXPAGESIZE 0x8000
14437
14438 #include "elf32-target.h"