1 /* 32-bit ELF support for ARM
2 Copyright 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007,
3 2008, 2009 Free Software Foundation, Inc.
4
5 This file is part of BFD, the Binary File Descriptor library.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
20 MA 02110-1301, USA. */
21
22 #include "sysdep.h"
23 #include <limits.h>
24
25 #include "bfd.h"
26 #include "libiberty.h"
27 #include "libbfd.h"
28 #include "elf-bfd.h"
29 #include "elf-vxworks.h"
30 #include "elf/arm.h"
31
32 /* Return the relocation section associated with NAME. HTAB is the
 33    bfd's elf32_arm_link_hash_table.  */
34 #define RELOC_SECTION(HTAB, NAME) \
35 ((HTAB)->use_rel ? ".rel" NAME : ".rela" NAME)
36
37 /* Return size of a relocation entry. HTAB is the bfd's
 38    elf32_arm_link_hash_table.  */
39 #define RELOC_SIZE(HTAB) \
40 ((HTAB)->use_rel \
41 ? sizeof (Elf32_External_Rel) \
42 : sizeof (Elf32_External_Rela))
43
44 /* Return function to swap relocations in. HTAB is the bfd's
 45    elf32_arm_link_hash_table.  */
46 #define SWAP_RELOC_IN(HTAB) \
47 ((HTAB)->use_rel \
48 ? bfd_elf32_swap_reloc_in \
49 : bfd_elf32_swap_reloca_in)
50
51 /* Return function to swap relocations out. HTAB is the bfd's
 52    elf32_arm_link_hash_table.  */
53 #define SWAP_RELOC_OUT(HTAB) \
54 ((HTAB)->use_rel \
55 ? bfd_elf32_swap_reloc_out \
56 : bfd_elf32_swap_reloca_out)
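
/* A minimal usage sketch (assumptions for illustration only: htab is an
   elf32_arm_link_hash_table *, output_bfd an open output bfd, and ".text"
   just an example section name).  On a REL target the macros above would
   select ".rel.text" and sizeof (Elf32_External_Rel); on a RELA target,
   ".rela.text" and sizeof (Elf32_External_Rela):

     asection *srel
       = bfd_get_section_by_name (output_bfd, RELOC_SECTION (htab, ".text"));
     bfd_size_type nrelocs = srel ? srel->size / RELOC_SIZE (htab) : 0;  */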
57
58 #define elf_info_to_howto 0
59 #define elf_info_to_howto_rel elf32_arm_info_to_howto
60
61 #define ARM_ELF_ABI_VERSION 0
62 #define ARM_ELF_OS_ABI_VERSION ELFOSABI_ARM
63
64 static struct elf_backend_data elf32_arm_vxworks_bed;
65
66 static bfd_boolean elf32_arm_write_section (bfd *output_bfd,
67 struct bfd_link_info *link_info,
68 asection *sec,
69 bfd_byte *contents);
70
 71 /* Note: code such as elf32_arm_reloc_type_lookup expects to use e.g.
72 R_ARM_PC24 as an index into this, and find the R_ARM_PC24 HOWTO
73 in that slot. */
74
75 static reloc_howto_type elf32_arm_howto_table_1[] =
76 {
77 /* No relocation. */
78 HOWTO (R_ARM_NONE, /* type */
79 0, /* rightshift */
80 0, /* size (0 = byte, 1 = short, 2 = long) */
81 0, /* bitsize */
82 FALSE, /* pc_relative */
83 0, /* bitpos */
84 complain_overflow_dont,/* complain_on_overflow */
85 bfd_elf_generic_reloc, /* special_function */
86 "R_ARM_NONE", /* name */
87 FALSE, /* partial_inplace */
88 0, /* src_mask */
89 0, /* dst_mask */
90 FALSE), /* pcrel_offset */
91
92 HOWTO (R_ARM_PC24, /* type */
93 2, /* rightshift */
94 2, /* size (0 = byte, 1 = short, 2 = long) */
95 24, /* bitsize */
96 TRUE, /* pc_relative */
97 0, /* bitpos */
98 complain_overflow_signed,/* complain_on_overflow */
99 bfd_elf_generic_reloc, /* special_function */
100 "R_ARM_PC24", /* name */
101 FALSE, /* partial_inplace */
102 0x00ffffff, /* src_mask */
103 0x00ffffff, /* dst_mask */
104 TRUE), /* pcrel_offset */
105
106 /* 32 bit absolute */
107 HOWTO (R_ARM_ABS32, /* type */
108 0, /* rightshift */
109 2, /* size (0 = byte, 1 = short, 2 = long) */
110 32, /* bitsize */
111 FALSE, /* pc_relative */
112 0, /* bitpos */
113 complain_overflow_bitfield,/* complain_on_overflow */
114 bfd_elf_generic_reloc, /* special_function */
115 "R_ARM_ABS32", /* name */
116 FALSE, /* partial_inplace */
117 0xffffffff, /* src_mask */
118 0xffffffff, /* dst_mask */
119 FALSE), /* pcrel_offset */
120
121 /* standard 32bit pc-relative reloc */
122 HOWTO (R_ARM_REL32, /* type */
123 0, /* rightshift */
124 2, /* size (0 = byte, 1 = short, 2 = long) */
125 32, /* bitsize */
126 TRUE, /* pc_relative */
127 0, /* bitpos */
128 complain_overflow_bitfield,/* complain_on_overflow */
129 bfd_elf_generic_reloc, /* special_function */
130 "R_ARM_REL32", /* name */
131 FALSE, /* partial_inplace */
132 0xffffffff, /* src_mask */
133 0xffffffff, /* dst_mask */
134 TRUE), /* pcrel_offset */
135
136 /* 8 bit absolute - R_ARM_LDR_PC_G0 in AAELF */
137 HOWTO (R_ARM_LDR_PC_G0, /* type */
138 0, /* rightshift */
139 0, /* size (0 = byte, 1 = short, 2 = long) */
140 32, /* bitsize */
141 TRUE, /* pc_relative */
142 0, /* bitpos */
143 complain_overflow_dont,/* complain_on_overflow */
144 bfd_elf_generic_reloc, /* special_function */
145 "R_ARM_LDR_PC_G0", /* name */
146 FALSE, /* partial_inplace */
147 0xffffffff, /* src_mask */
148 0xffffffff, /* dst_mask */
149 TRUE), /* pcrel_offset */
150
151 /* 16 bit absolute */
152 HOWTO (R_ARM_ABS16, /* type */
153 0, /* rightshift */
154 1, /* size (0 = byte, 1 = short, 2 = long) */
155 16, /* bitsize */
156 FALSE, /* pc_relative */
157 0, /* bitpos */
158 complain_overflow_bitfield,/* complain_on_overflow */
159 bfd_elf_generic_reloc, /* special_function */
160 "R_ARM_ABS16", /* name */
161 FALSE, /* partial_inplace */
162 0x0000ffff, /* src_mask */
163 0x0000ffff, /* dst_mask */
164 FALSE), /* pcrel_offset */
165
166 /* 12 bit absolute */
167 HOWTO (R_ARM_ABS12, /* type */
168 0, /* rightshift */
169 2, /* size (0 = byte, 1 = short, 2 = long) */
170 12, /* bitsize */
171 FALSE, /* pc_relative */
172 0, /* bitpos */
173 complain_overflow_bitfield,/* complain_on_overflow */
174 bfd_elf_generic_reloc, /* special_function */
175 "R_ARM_ABS12", /* name */
176 FALSE, /* partial_inplace */
177 0x00000fff, /* src_mask */
178 0x00000fff, /* dst_mask */
179 FALSE), /* pcrel_offset */
180
181 HOWTO (R_ARM_THM_ABS5, /* type */
182 6, /* rightshift */
183 1, /* size (0 = byte, 1 = short, 2 = long) */
184 5, /* bitsize */
185 FALSE, /* pc_relative */
186 0, /* bitpos */
187 complain_overflow_bitfield,/* complain_on_overflow */
188 bfd_elf_generic_reloc, /* special_function */
189 "R_ARM_THM_ABS5", /* name */
190 FALSE, /* partial_inplace */
191 0x000007e0, /* src_mask */
192 0x000007e0, /* dst_mask */
193 FALSE), /* pcrel_offset */
194
195 /* 8 bit absolute */
196 HOWTO (R_ARM_ABS8, /* type */
197 0, /* rightshift */
198 0, /* size (0 = byte, 1 = short, 2 = long) */
199 8, /* bitsize */
200 FALSE, /* pc_relative */
201 0, /* bitpos */
202 complain_overflow_bitfield,/* complain_on_overflow */
203 bfd_elf_generic_reloc, /* special_function */
204 "R_ARM_ABS8", /* name */
205 FALSE, /* partial_inplace */
206 0x000000ff, /* src_mask */
207 0x000000ff, /* dst_mask */
208 FALSE), /* pcrel_offset */
209
210 HOWTO (R_ARM_SBREL32, /* type */
211 0, /* rightshift */
212 2, /* size (0 = byte, 1 = short, 2 = long) */
213 32, /* bitsize */
214 FALSE, /* pc_relative */
215 0, /* bitpos */
216 complain_overflow_dont,/* complain_on_overflow */
217 bfd_elf_generic_reloc, /* special_function */
218 "R_ARM_SBREL32", /* name */
219 FALSE, /* partial_inplace */
220 0xffffffff, /* src_mask */
221 0xffffffff, /* dst_mask */
222 FALSE), /* pcrel_offset */
223
224 HOWTO (R_ARM_THM_CALL, /* type */
225 1, /* rightshift */
226 2, /* size (0 = byte, 1 = short, 2 = long) */
227 25, /* bitsize */
228 TRUE, /* pc_relative */
229 0, /* bitpos */
230 complain_overflow_signed,/* complain_on_overflow */
231 bfd_elf_generic_reloc, /* special_function */
232 "R_ARM_THM_CALL", /* name */
233 FALSE, /* partial_inplace */
234 0x07ff07ff, /* src_mask */
235 0x07ff07ff, /* dst_mask */
236 TRUE), /* pcrel_offset */
237
238 HOWTO (R_ARM_THM_PC8, /* type */
239 1, /* rightshift */
240 1, /* size (0 = byte, 1 = short, 2 = long) */
241 8, /* bitsize */
242 TRUE, /* pc_relative */
243 0, /* bitpos */
244 complain_overflow_signed,/* complain_on_overflow */
245 bfd_elf_generic_reloc, /* special_function */
246 "R_ARM_THM_PC8", /* name */
247 FALSE, /* partial_inplace */
248 0x000000ff, /* src_mask */
249 0x000000ff, /* dst_mask */
250 TRUE), /* pcrel_offset */
251
252 HOWTO (R_ARM_BREL_ADJ, /* type */
253 1, /* rightshift */
254 1, /* size (0 = byte, 1 = short, 2 = long) */
255 32, /* bitsize */
256 FALSE, /* pc_relative */
257 0, /* bitpos */
258 complain_overflow_signed,/* complain_on_overflow */
259 bfd_elf_generic_reloc, /* special_function */
260 "R_ARM_BREL_ADJ", /* name */
261 FALSE, /* partial_inplace */
262 0xffffffff, /* src_mask */
263 0xffffffff, /* dst_mask */
264 FALSE), /* pcrel_offset */
265
266 HOWTO (R_ARM_SWI24, /* type */
267 0, /* rightshift */
268 0, /* size (0 = byte, 1 = short, 2 = long) */
269 0, /* bitsize */
270 FALSE, /* pc_relative */
271 0, /* bitpos */
272 complain_overflow_signed,/* complain_on_overflow */
273 bfd_elf_generic_reloc, /* special_function */
274 "R_ARM_SWI24", /* name */
275 FALSE, /* partial_inplace */
276 0x00000000, /* src_mask */
277 0x00000000, /* dst_mask */
278 FALSE), /* pcrel_offset */
279
280 HOWTO (R_ARM_THM_SWI8, /* type */
281 0, /* rightshift */
282 0, /* size (0 = byte, 1 = short, 2 = long) */
283 0, /* bitsize */
284 FALSE, /* pc_relative */
285 0, /* bitpos */
286 complain_overflow_signed,/* complain_on_overflow */
287 bfd_elf_generic_reloc, /* special_function */
288 "R_ARM_SWI8", /* name */
289 FALSE, /* partial_inplace */
290 0x00000000, /* src_mask */
291 0x00000000, /* dst_mask */
292 FALSE), /* pcrel_offset */
293
294 /* BLX instruction for the ARM. */
295 HOWTO (R_ARM_XPC25, /* type */
296 2, /* rightshift */
297 2, /* size (0 = byte, 1 = short, 2 = long) */
298 25, /* bitsize */
299 TRUE, /* pc_relative */
300 0, /* bitpos */
301 complain_overflow_signed,/* complain_on_overflow */
302 bfd_elf_generic_reloc, /* special_function */
303 "R_ARM_XPC25", /* name */
304 FALSE, /* partial_inplace */
305 0x00ffffff, /* src_mask */
306 0x00ffffff, /* dst_mask */
307 TRUE), /* pcrel_offset */
308
309 /* BLX instruction for the Thumb. */
310 HOWTO (R_ARM_THM_XPC22, /* type */
311 2, /* rightshift */
312 2, /* size (0 = byte, 1 = short, 2 = long) */
313 22, /* bitsize */
314 TRUE, /* pc_relative */
315 0, /* bitpos */
316 complain_overflow_signed,/* complain_on_overflow */
317 bfd_elf_generic_reloc, /* special_function */
318 "R_ARM_THM_XPC22", /* name */
319 FALSE, /* partial_inplace */
320 0x07ff07ff, /* src_mask */
321 0x07ff07ff, /* dst_mask */
322 TRUE), /* pcrel_offset */
323
324 /* Dynamic TLS relocations. */
325
326 HOWTO (R_ARM_TLS_DTPMOD32, /* type */
327 0, /* rightshift */
328 2, /* size (0 = byte, 1 = short, 2 = long) */
329 32, /* bitsize */
330 FALSE, /* pc_relative */
331 0, /* bitpos */
332 complain_overflow_bitfield,/* complain_on_overflow */
333 bfd_elf_generic_reloc, /* special_function */
334 "R_ARM_TLS_DTPMOD32", /* name */
335 TRUE, /* partial_inplace */
336 0xffffffff, /* src_mask */
337 0xffffffff, /* dst_mask */
338 FALSE), /* pcrel_offset */
339
340 HOWTO (R_ARM_TLS_DTPOFF32, /* type */
341 0, /* rightshift */
342 2, /* size (0 = byte, 1 = short, 2 = long) */
343 32, /* bitsize */
344 FALSE, /* pc_relative */
345 0, /* bitpos */
346 complain_overflow_bitfield,/* complain_on_overflow */
347 bfd_elf_generic_reloc, /* special_function */
348 "R_ARM_TLS_DTPOFF32", /* name */
349 TRUE, /* partial_inplace */
350 0xffffffff, /* src_mask */
351 0xffffffff, /* dst_mask */
352 FALSE), /* pcrel_offset */
353
354 HOWTO (R_ARM_TLS_TPOFF32, /* type */
355 0, /* rightshift */
356 2, /* size (0 = byte, 1 = short, 2 = long) */
357 32, /* bitsize */
358 FALSE, /* pc_relative */
359 0, /* bitpos */
360 complain_overflow_bitfield,/* complain_on_overflow */
361 bfd_elf_generic_reloc, /* special_function */
362 "R_ARM_TLS_TPOFF32", /* name */
363 TRUE, /* partial_inplace */
364 0xffffffff, /* src_mask */
365 0xffffffff, /* dst_mask */
366 FALSE), /* pcrel_offset */
367
368 /* Relocs used in ARM Linux */
369
370 HOWTO (R_ARM_COPY, /* type */
371 0, /* rightshift */
372 2, /* size (0 = byte, 1 = short, 2 = long) */
373 32, /* bitsize */
374 FALSE, /* pc_relative */
375 0, /* bitpos */
376 complain_overflow_bitfield,/* complain_on_overflow */
377 bfd_elf_generic_reloc, /* special_function */
378 "R_ARM_COPY", /* name */
379 TRUE, /* partial_inplace */
380 0xffffffff, /* src_mask */
381 0xffffffff, /* dst_mask */
382 FALSE), /* pcrel_offset */
383
384 HOWTO (R_ARM_GLOB_DAT, /* type */
385 0, /* rightshift */
386 2, /* size (0 = byte, 1 = short, 2 = long) */
387 32, /* bitsize */
388 FALSE, /* pc_relative */
389 0, /* bitpos */
390 complain_overflow_bitfield,/* complain_on_overflow */
391 bfd_elf_generic_reloc, /* special_function */
392 "R_ARM_GLOB_DAT", /* name */
393 TRUE, /* partial_inplace */
394 0xffffffff, /* src_mask */
395 0xffffffff, /* dst_mask */
396 FALSE), /* pcrel_offset */
397
398 HOWTO (R_ARM_JUMP_SLOT, /* type */
399 0, /* rightshift */
400 2, /* size (0 = byte, 1 = short, 2 = long) */
401 32, /* bitsize */
402 FALSE, /* pc_relative */
403 0, /* bitpos */
404 complain_overflow_bitfield,/* complain_on_overflow */
405 bfd_elf_generic_reloc, /* special_function */
406 "R_ARM_JUMP_SLOT", /* name */
407 TRUE, /* partial_inplace */
408 0xffffffff, /* src_mask */
409 0xffffffff, /* dst_mask */
410 FALSE), /* pcrel_offset */
411
412 HOWTO (R_ARM_RELATIVE, /* type */
413 0, /* rightshift */
414 2, /* size (0 = byte, 1 = short, 2 = long) */
415 32, /* bitsize */
416 FALSE, /* pc_relative */
417 0, /* bitpos */
418 complain_overflow_bitfield,/* complain_on_overflow */
419 bfd_elf_generic_reloc, /* special_function */
420 "R_ARM_RELATIVE", /* name */
421 TRUE, /* partial_inplace */
422 0xffffffff, /* src_mask */
423 0xffffffff, /* dst_mask */
424 FALSE), /* pcrel_offset */
425
426 HOWTO (R_ARM_GOTOFF32, /* type */
427 0, /* rightshift */
428 2, /* size (0 = byte, 1 = short, 2 = long) */
429 32, /* bitsize */
430 FALSE, /* pc_relative */
431 0, /* bitpos */
432 complain_overflow_bitfield,/* complain_on_overflow */
433 bfd_elf_generic_reloc, /* special_function */
434 "R_ARM_GOTOFF32", /* name */
435 TRUE, /* partial_inplace */
436 0xffffffff, /* src_mask */
437 0xffffffff, /* dst_mask */
438 FALSE), /* pcrel_offset */
439
440 HOWTO (R_ARM_GOTPC, /* type */
441 0, /* rightshift */
442 2, /* size (0 = byte, 1 = short, 2 = long) */
443 32, /* bitsize */
444 TRUE, /* pc_relative */
445 0, /* bitpos */
446 complain_overflow_bitfield,/* complain_on_overflow */
447 bfd_elf_generic_reloc, /* special_function */
448 "R_ARM_GOTPC", /* name */
449 TRUE, /* partial_inplace */
450 0xffffffff, /* src_mask */
451 0xffffffff, /* dst_mask */
452 TRUE), /* pcrel_offset */
453
454 HOWTO (R_ARM_GOT32, /* type */
455 0, /* rightshift */
456 2, /* size (0 = byte, 1 = short, 2 = long) */
457 32, /* bitsize */
458 FALSE, /* pc_relative */
459 0, /* bitpos */
460 complain_overflow_bitfield,/* complain_on_overflow */
461 bfd_elf_generic_reloc, /* special_function */
462 "R_ARM_GOT32", /* name */
463 TRUE, /* partial_inplace */
464 0xffffffff, /* src_mask */
465 0xffffffff, /* dst_mask */
466 FALSE), /* pcrel_offset */
467
468 HOWTO (R_ARM_PLT32, /* type */
469 2, /* rightshift */
470 2, /* size (0 = byte, 1 = short, 2 = long) */
471 24, /* bitsize */
472 TRUE, /* pc_relative */
473 0, /* bitpos */
474 complain_overflow_bitfield,/* complain_on_overflow */
475 bfd_elf_generic_reloc, /* special_function */
476 "R_ARM_PLT32", /* name */
477 FALSE, /* partial_inplace */
478 0x00ffffff, /* src_mask */
479 0x00ffffff, /* dst_mask */
480 TRUE), /* pcrel_offset */
481
482 HOWTO (R_ARM_CALL, /* type */
483 2, /* rightshift */
484 2, /* size (0 = byte, 1 = short, 2 = long) */
485 24, /* bitsize */
486 TRUE, /* pc_relative */
487 0, /* bitpos */
488 complain_overflow_signed,/* complain_on_overflow */
489 bfd_elf_generic_reloc, /* special_function */
490 "R_ARM_CALL", /* name */
491 FALSE, /* partial_inplace */
492 0x00ffffff, /* src_mask */
493 0x00ffffff, /* dst_mask */
494 TRUE), /* pcrel_offset */
495
496 HOWTO (R_ARM_JUMP24, /* type */
497 2, /* rightshift */
498 2, /* size (0 = byte, 1 = short, 2 = long) */
499 24, /* bitsize */
500 TRUE, /* pc_relative */
501 0, /* bitpos */
502 complain_overflow_signed,/* complain_on_overflow */
503 bfd_elf_generic_reloc, /* special_function */
504 "R_ARM_JUMP24", /* name */
505 FALSE, /* partial_inplace */
506 0x00ffffff, /* src_mask */
507 0x00ffffff, /* dst_mask */
508 TRUE), /* pcrel_offset */
509
510 HOWTO (R_ARM_THM_JUMP24, /* type */
511 1, /* rightshift */
512 2, /* size (0 = byte, 1 = short, 2 = long) */
513 24, /* bitsize */
514 TRUE, /* pc_relative */
515 0, /* bitpos */
516 complain_overflow_signed,/* complain_on_overflow */
517 bfd_elf_generic_reloc, /* special_function */
518 "R_ARM_THM_JUMP24", /* name */
519 FALSE, /* partial_inplace */
520 0x07ff2fff, /* src_mask */
521 0x07ff2fff, /* dst_mask */
522 TRUE), /* pcrel_offset */
523
524 HOWTO (R_ARM_BASE_ABS, /* type */
525 0, /* rightshift */
526 2, /* size (0 = byte, 1 = short, 2 = long) */
527 32, /* bitsize */
528 FALSE, /* pc_relative */
529 0, /* bitpos */
530 complain_overflow_dont,/* complain_on_overflow */
531 bfd_elf_generic_reloc, /* special_function */
532 "R_ARM_BASE_ABS", /* name */
533 FALSE, /* partial_inplace */
534 0xffffffff, /* src_mask */
535 0xffffffff, /* dst_mask */
536 FALSE), /* pcrel_offset */
537
538 HOWTO (R_ARM_ALU_PCREL7_0, /* type */
539 0, /* rightshift */
540 2, /* size (0 = byte, 1 = short, 2 = long) */
541 12, /* bitsize */
542 TRUE, /* pc_relative */
543 0, /* bitpos */
544 complain_overflow_dont,/* complain_on_overflow */
545 bfd_elf_generic_reloc, /* special_function */
546 "R_ARM_ALU_PCREL_7_0", /* name */
547 FALSE, /* partial_inplace */
548 0x00000fff, /* src_mask */
549 0x00000fff, /* dst_mask */
550 TRUE), /* pcrel_offset */
551
552 HOWTO (R_ARM_ALU_PCREL15_8, /* type */
553 0, /* rightshift */
554 2, /* size (0 = byte, 1 = short, 2 = long) */
555 12, /* bitsize */
556 TRUE, /* pc_relative */
557 8, /* bitpos */
558 complain_overflow_dont,/* complain_on_overflow */
559 bfd_elf_generic_reloc, /* special_function */
560 "R_ARM_ALU_PCREL_15_8",/* name */
561 FALSE, /* partial_inplace */
562 0x00000fff, /* src_mask */
563 0x00000fff, /* dst_mask */
564 TRUE), /* pcrel_offset */
565
566 HOWTO (R_ARM_ALU_PCREL23_15, /* type */
567 0, /* rightshift */
568 2, /* size (0 = byte, 1 = short, 2 = long) */
569 12, /* bitsize */
570 TRUE, /* pc_relative */
571 16, /* bitpos */
572 complain_overflow_dont,/* complain_on_overflow */
573 bfd_elf_generic_reloc, /* special_function */
574 "R_ARM_ALU_PCREL_23_15",/* name */
575 FALSE, /* partial_inplace */
576 0x00000fff, /* src_mask */
577 0x00000fff, /* dst_mask */
578 TRUE), /* pcrel_offset */
579
580 HOWTO (R_ARM_LDR_SBREL_11_0, /* type */
581 0, /* rightshift */
582 2, /* size (0 = byte, 1 = short, 2 = long) */
583 12, /* bitsize */
584 FALSE, /* pc_relative */
585 0, /* bitpos */
586 complain_overflow_dont,/* complain_on_overflow */
587 bfd_elf_generic_reloc, /* special_function */
588 "R_ARM_LDR_SBREL_11_0",/* name */
589 FALSE, /* partial_inplace */
590 0x00000fff, /* src_mask */
591 0x00000fff, /* dst_mask */
592 FALSE), /* pcrel_offset */
593
594 HOWTO (R_ARM_ALU_SBREL_19_12, /* type */
595 0, /* rightshift */
596 2, /* size (0 = byte, 1 = short, 2 = long) */
597 8, /* bitsize */
598 FALSE, /* pc_relative */
599 12, /* bitpos */
600 complain_overflow_dont,/* complain_on_overflow */
601 bfd_elf_generic_reloc, /* special_function */
602 "R_ARM_ALU_SBREL_19_12",/* name */
603 FALSE, /* partial_inplace */
604 0x000ff000, /* src_mask */
605 0x000ff000, /* dst_mask */
606 FALSE), /* pcrel_offset */
607
608 HOWTO (R_ARM_ALU_SBREL_27_20, /* type */
609 0, /* rightshift */
610 2, /* size (0 = byte, 1 = short, 2 = long) */
611 8, /* bitsize */
612 FALSE, /* pc_relative */
613 20, /* bitpos */
614 complain_overflow_dont,/* complain_on_overflow */
615 bfd_elf_generic_reloc, /* special_function */
616 "R_ARM_ALU_SBREL_27_20",/* name */
617 FALSE, /* partial_inplace */
618 0x0ff00000, /* src_mask */
619 0x0ff00000, /* dst_mask */
620 FALSE), /* pcrel_offset */
621
622 HOWTO (R_ARM_TARGET1, /* type */
623 0, /* rightshift */
624 2, /* size (0 = byte, 1 = short, 2 = long) */
625 32, /* bitsize */
626 FALSE, /* pc_relative */
627 0, /* bitpos */
628 complain_overflow_dont,/* complain_on_overflow */
629 bfd_elf_generic_reloc, /* special_function */
630 "R_ARM_TARGET1", /* name */
631 FALSE, /* partial_inplace */
632 0xffffffff, /* src_mask */
633 0xffffffff, /* dst_mask */
634 FALSE), /* pcrel_offset */
635
636 HOWTO (R_ARM_ROSEGREL32, /* type */
637 0, /* rightshift */
638 2, /* size (0 = byte, 1 = short, 2 = long) */
639 32, /* bitsize */
640 FALSE, /* pc_relative */
641 0, /* bitpos */
642 complain_overflow_dont,/* complain_on_overflow */
643 bfd_elf_generic_reloc, /* special_function */
644 "R_ARM_ROSEGREL32", /* name */
645 FALSE, /* partial_inplace */
646 0xffffffff, /* src_mask */
647 0xffffffff, /* dst_mask */
648 FALSE), /* pcrel_offset */
649
650 HOWTO (R_ARM_V4BX, /* type */
651 0, /* rightshift */
652 2, /* size (0 = byte, 1 = short, 2 = long) */
653 32, /* bitsize */
654 FALSE, /* pc_relative */
655 0, /* bitpos */
656 complain_overflow_dont,/* complain_on_overflow */
657 bfd_elf_generic_reloc, /* special_function */
658 "R_ARM_V4BX", /* name */
659 FALSE, /* partial_inplace */
660 0xffffffff, /* src_mask */
661 0xffffffff, /* dst_mask */
662 FALSE), /* pcrel_offset */
663
664 HOWTO (R_ARM_TARGET2, /* type */
665 0, /* rightshift */
666 2, /* size (0 = byte, 1 = short, 2 = long) */
667 32, /* bitsize */
668 FALSE, /* pc_relative */
669 0, /* bitpos */
670 complain_overflow_signed,/* complain_on_overflow */
671 bfd_elf_generic_reloc, /* special_function */
672 "R_ARM_TARGET2", /* name */
673 FALSE, /* partial_inplace */
674 0xffffffff, /* src_mask */
675 0xffffffff, /* dst_mask */
676 TRUE), /* pcrel_offset */
677
678 HOWTO (R_ARM_PREL31, /* type */
679 0, /* rightshift */
680 2, /* size (0 = byte, 1 = short, 2 = long) */
681 31, /* bitsize */
682 TRUE, /* pc_relative */
683 0, /* bitpos */
684 complain_overflow_signed,/* complain_on_overflow */
685 bfd_elf_generic_reloc, /* special_function */
686 "R_ARM_PREL31", /* name */
687 FALSE, /* partial_inplace */
688 0x7fffffff, /* src_mask */
689 0x7fffffff, /* dst_mask */
690 TRUE), /* pcrel_offset */
691
692 HOWTO (R_ARM_MOVW_ABS_NC, /* type */
693 0, /* rightshift */
694 2, /* size (0 = byte, 1 = short, 2 = long) */
695 16, /* bitsize */
696 FALSE, /* pc_relative */
697 0, /* bitpos */
698 complain_overflow_dont,/* complain_on_overflow */
699 bfd_elf_generic_reloc, /* special_function */
700 "R_ARM_MOVW_ABS_NC", /* name */
701 FALSE, /* partial_inplace */
702 0x000f0fff, /* src_mask */
703 0x000f0fff, /* dst_mask */
704 FALSE), /* pcrel_offset */
705
706 HOWTO (R_ARM_MOVT_ABS, /* type */
707 0, /* rightshift */
708 2, /* size (0 = byte, 1 = short, 2 = long) */
709 16, /* bitsize */
710 FALSE, /* pc_relative */
711 0, /* bitpos */
712 complain_overflow_bitfield,/* complain_on_overflow */
713 bfd_elf_generic_reloc, /* special_function */
714 "R_ARM_MOVT_ABS", /* name */
715 FALSE, /* partial_inplace */
716 0x000f0fff, /* src_mask */
717 0x000f0fff, /* dst_mask */
718 FALSE), /* pcrel_offset */
719
720 HOWTO (R_ARM_MOVW_PREL_NC, /* type */
721 0, /* rightshift */
722 2, /* size (0 = byte, 1 = short, 2 = long) */
723 16, /* bitsize */
724 TRUE, /* pc_relative */
725 0, /* bitpos */
726 complain_overflow_dont,/* complain_on_overflow */
727 bfd_elf_generic_reloc, /* special_function */
728 "R_ARM_MOVW_PREL_NC", /* name */
729 FALSE, /* partial_inplace */
730 0x000f0fff, /* src_mask */
731 0x000f0fff, /* dst_mask */
732 TRUE), /* pcrel_offset */
733
734 HOWTO (R_ARM_MOVT_PREL, /* type */
735 0, /* rightshift */
736 2, /* size (0 = byte, 1 = short, 2 = long) */
737 16, /* bitsize */
738 TRUE, /* pc_relative */
739 0, /* bitpos */
740 complain_overflow_bitfield,/* complain_on_overflow */
741 bfd_elf_generic_reloc, /* special_function */
742 "R_ARM_MOVT_PREL", /* name */
743 FALSE, /* partial_inplace */
744 0x000f0fff, /* src_mask */
745 0x000f0fff, /* dst_mask */
746 TRUE), /* pcrel_offset */
747
748 HOWTO (R_ARM_THM_MOVW_ABS_NC, /* type */
749 0, /* rightshift */
750 2, /* size (0 = byte, 1 = short, 2 = long) */
751 16, /* bitsize */
752 FALSE, /* pc_relative */
753 0, /* bitpos */
754 complain_overflow_dont,/* complain_on_overflow */
755 bfd_elf_generic_reloc, /* special_function */
756 "R_ARM_THM_MOVW_ABS_NC",/* name */
757 FALSE, /* partial_inplace */
758 0x040f70ff, /* src_mask */
759 0x040f70ff, /* dst_mask */
760 FALSE), /* pcrel_offset */
761
762 HOWTO (R_ARM_THM_MOVT_ABS, /* type */
763 0, /* rightshift */
764 2, /* size (0 = byte, 1 = short, 2 = long) */
765 16, /* bitsize */
766 FALSE, /* pc_relative */
767 0, /* bitpos */
768 complain_overflow_bitfield,/* complain_on_overflow */
769 bfd_elf_generic_reloc, /* special_function */
770 "R_ARM_THM_MOVT_ABS", /* name */
771 FALSE, /* partial_inplace */
772 0x040f70ff, /* src_mask */
773 0x040f70ff, /* dst_mask */
774 FALSE), /* pcrel_offset */
775
776 HOWTO (R_ARM_THM_MOVW_PREL_NC,/* type */
777 0, /* rightshift */
778 2, /* size (0 = byte, 1 = short, 2 = long) */
779 16, /* bitsize */
780 TRUE, /* pc_relative */
781 0, /* bitpos */
782 complain_overflow_dont,/* complain_on_overflow */
783 bfd_elf_generic_reloc, /* special_function */
784 "R_ARM_THM_MOVW_PREL_NC",/* name */
785 FALSE, /* partial_inplace */
786 0x040f70ff, /* src_mask */
787 0x040f70ff, /* dst_mask */
788 TRUE), /* pcrel_offset */
789
790 HOWTO (R_ARM_THM_MOVT_PREL, /* type */
791 0, /* rightshift */
792 2, /* size (0 = byte, 1 = short, 2 = long) */
793 16, /* bitsize */
794 TRUE, /* pc_relative */
795 0, /* bitpos */
796 complain_overflow_bitfield,/* complain_on_overflow */
797 bfd_elf_generic_reloc, /* special_function */
798 "R_ARM_THM_MOVT_PREL", /* name */
799 FALSE, /* partial_inplace */
800 0x040f70ff, /* src_mask */
801 0x040f70ff, /* dst_mask */
802 TRUE), /* pcrel_offset */
803
804 HOWTO (R_ARM_THM_JUMP19, /* type */
805 1, /* rightshift */
806 2, /* size (0 = byte, 1 = short, 2 = long) */
807 19, /* bitsize */
808 TRUE, /* pc_relative */
809 0, /* bitpos */
810 complain_overflow_signed,/* complain_on_overflow */
811 bfd_elf_generic_reloc, /* special_function */
812 "R_ARM_THM_JUMP19", /* name */
813 FALSE, /* partial_inplace */
814 0x043f2fff, /* src_mask */
815 0x043f2fff, /* dst_mask */
816 TRUE), /* pcrel_offset */
817
818 HOWTO (R_ARM_THM_JUMP6, /* type */
819 1, /* rightshift */
820 1, /* size (0 = byte, 1 = short, 2 = long) */
821 6, /* bitsize */
822 TRUE, /* pc_relative */
823 0, /* bitpos */
824 complain_overflow_unsigned,/* complain_on_overflow */
825 bfd_elf_generic_reloc, /* special_function */
826 "R_ARM_THM_JUMP6", /* name */
827 FALSE, /* partial_inplace */
828 0x02f8, /* src_mask */
829 0x02f8, /* dst_mask */
830 TRUE), /* pcrel_offset */
831
832 /* These are declared as 13-bit signed relocations because we can
833 address -4095 .. 4095(base) by altering ADDW to SUBW or vice
834 versa. */
835 HOWTO (R_ARM_THM_ALU_PREL_11_0,/* type */
836 0, /* rightshift */
837 2, /* size (0 = byte, 1 = short, 2 = long) */
838 13, /* bitsize */
839 TRUE, /* pc_relative */
840 0, /* bitpos */
841 complain_overflow_dont,/* complain_on_overflow */
842 bfd_elf_generic_reloc, /* special_function */
843 "R_ARM_THM_ALU_PREL_11_0",/* name */
844 FALSE, /* partial_inplace */
845 0xffffffff, /* src_mask */
846 0xffffffff, /* dst_mask */
847 TRUE), /* pcrel_offset */
848
849 HOWTO (R_ARM_THM_PC12, /* type */
850 0, /* rightshift */
851 2, /* size (0 = byte, 1 = short, 2 = long) */
852 13, /* bitsize */
853 TRUE, /* pc_relative */
854 0, /* bitpos */
855 complain_overflow_dont,/* complain_on_overflow */
856 bfd_elf_generic_reloc, /* special_function */
857 "R_ARM_THM_PC12", /* name */
858 FALSE, /* partial_inplace */
859 0xffffffff, /* src_mask */
860 0xffffffff, /* dst_mask */
861 TRUE), /* pcrel_offset */
862
863 HOWTO (R_ARM_ABS32_NOI, /* type */
864 0, /* rightshift */
865 2, /* size (0 = byte, 1 = short, 2 = long) */
866 32, /* bitsize */
867 FALSE, /* pc_relative */
868 0, /* bitpos */
869 complain_overflow_dont,/* complain_on_overflow */
870 bfd_elf_generic_reloc, /* special_function */
871 "R_ARM_ABS32_NOI", /* name */
872 FALSE, /* partial_inplace */
873 0xffffffff, /* src_mask */
874 0xffffffff, /* dst_mask */
875 FALSE), /* pcrel_offset */
876
877 HOWTO (R_ARM_REL32_NOI, /* type */
878 0, /* rightshift */
879 2, /* size (0 = byte, 1 = short, 2 = long) */
880 32, /* bitsize */
881 TRUE, /* pc_relative */
882 0, /* bitpos */
883 complain_overflow_dont,/* complain_on_overflow */
884 bfd_elf_generic_reloc, /* special_function */
885 "R_ARM_REL32_NOI", /* name */
886 FALSE, /* partial_inplace */
887 0xffffffff, /* src_mask */
888 0xffffffff, /* dst_mask */
889 FALSE), /* pcrel_offset */
890
891 /* Group relocations. */
892
893 HOWTO (R_ARM_ALU_PC_G0_NC, /* type */
894 0, /* rightshift */
895 2, /* size (0 = byte, 1 = short, 2 = long) */
896 32, /* bitsize */
897 TRUE, /* pc_relative */
898 0, /* bitpos */
899 complain_overflow_dont,/* complain_on_overflow */
900 bfd_elf_generic_reloc, /* special_function */
901 "R_ARM_ALU_PC_G0_NC", /* name */
902 FALSE, /* partial_inplace */
903 0xffffffff, /* src_mask */
904 0xffffffff, /* dst_mask */
905 TRUE), /* pcrel_offset */
906
907 HOWTO (R_ARM_ALU_PC_G0, /* type */
908 0, /* rightshift */
909 2, /* size (0 = byte, 1 = short, 2 = long) */
910 32, /* bitsize */
911 TRUE, /* pc_relative */
912 0, /* bitpos */
913 complain_overflow_dont,/* complain_on_overflow */
914 bfd_elf_generic_reloc, /* special_function */
915 "R_ARM_ALU_PC_G0", /* name */
916 FALSE, /* partial_inplace */
917 0xffffffff, /* src_mask */
918 0xffffffff, /* dst_mask */
919 TRUE), /* pcrel_offset */
920
921 HOWTO (R_ARM_ALU_PC_G1_NC, /* type */
922 0, /* rightshift */
923 2, /* size (0 = byte, 1 = short, 2 = long) */
924 32, /* bitsize */
925 TRUE, /* pc_relative */
926 0, /* bitpos */
927 complain_overflow_dont,/* complain_on_overflow */
928 bfd_elf_generic_reloc, /* special_function */
929 "R_ARM_ALU_PC_G1_NC", /* name */
930 FALSE, /* partial_inplace */
931 0xffffffff, /* src_mask */
932 0xffffffff, /* dst_mask */
933 TRUE), /* pcrel_offset */
934
935 HOWTO (R_ARM_ALU_PC_G1, /* type */
936 0, /* rightshift */
937 2, /* size (0 = byte, 1 = short, 2 = long) */
938 32, /* bitsize */
939 TRUE, /* pc_relative */
940 0, /* bitpos */
941 complain_overflow_dont,/* complain_on_overflow */
942 bfd_elf_generic_reloc, /* special_function */
943 "R_ARM_ALU_PC_G1", /* name */
944 FALSE, /* partial_inplace */
945 0xffffffff, /* src_mask */
946 0xffffffff, /* dst_mask */
947 TRUE), /* pcrel_offset */
948
949 HOWTO (R_ARM_ALU_PC_G2, /* type */
950 0, /* rightshift */
951 2, /* size (0 = byte, 1 = short, 2 = long) */
952 32, /* bitsize */
953 TRUE, /* pc_relative */
954 0, /* bitpos */
955 complain_overflow_dont,/* complain_on_overflow */
956 bfd_elf_generic_reloc, /* special_function */
957 "R_ARM_ALU_PC_G2", /* name */
958 FALSE, /* partial_inplace */
959 0xffffffff, /* src_mask */
960 0xffffffff, /* dst_mask */
961 TRUE), /* pcrel_offset */
962
963 HOWTO (R_ARM_LDR_PC_G1, /* type */
964 0, /* rightshift */
965 2, /* size (0 = byte, 1 = short, 2 = long) */
966 32, /* bitsize */
967 TRUE, /* pc_relative */
968 0, /* bitpos */
969 complain_overflow_dont,/* complain_on_overflow */
970 bfd_elf_generic_reloc, /* special_function */
971 "R_ARM_LDR_PC_G1", /* name */
972 FALSE, /* partial_inplace */
973 0xffffffff, /* src_mask */
974 0xffffffff, /* dst_mask */
975 TRUE), /* pcrel_offset */
976
977 HOWTO (R_ARM_LDR_PC_G2, /* type */
978 0, /* rightshift */
979 2, /* size (0 = byte, 1 = short, 2 = long) */
980 32, /* bitsize */
981 TRUE, /* pc_relative */
982 0, /* bitpos */
983 complain_overflow_dont,/* complain_on_overflow */
984 bfd_elf_generic_reloc, /* special_function */
985 "R_ARM_LDR_PC_G2", /* name */
986 FALSE, /* partial_inplace */
987 0xffffffff, /* src_mask */
988 0xffffffff, /* dst_mask */
989 TRUE), /* pcrel_offset */
990
991 HOWTO (R_ARM_LDRS_PC_G0, /* type */
992 0, /* rightshift */
993 2, /* size (0 = byte, 1 = short, 2 = long) */
994 32, /* bitsize */
995 TRUE, /* pc_relative */
996 0, /* bitpos */
997 complain_overflow_dont,/* complain_on_overflow */
998 bfd_elf_generic_reloc, /* special_function */
999 "R_ARM_LDRS_PC_G0", /* name */
1000 FALSE, /* partial_inplace */
1001 0xffffffff, /* src_mask */
1002 0xffffffff, /* dst_mask */
1003 TRUE), /* pcrel_offset */
1004
1005 HOWTO (R_ARM_LDRS_PC_G1, /* type */
1006 0, /* rightshift */
1007 2, /* size (0 = byte, 1 = short, 2 = long) */
1008 32, /* bitsize */
1009 TRUE, /* pc_relative */
1010 0, /* bitpos */
1011 complain_overflow_dont,/* complain_on_overflow */
1012 bfd_elf_generic_reloc, /* special_function */
1013 "R_ARM_LDRS_PC_G1", /* name */
1014 FALSE, /* partial_inplace */
1015 0xffffffff, /* src_mask */
1016 0xffffffff, /* dst_mask */
1017 TRUE), /* pcrel_offset */
1018
1019 HOWTO (R_ARM_LDRS_PC_G2, /* type */
1020 0, /* rightshift */
1021 2, /* size (0 = byte, 1 = short, 2 = long) */
1022 32, /* bitsize */
1023 TRUE, /* pc_relative */
1024 0, /* bitpos */
1025 complain_overflow_dont,/* complain_on_overflow */
1026 bfd_elf_generic_reloc, /* special_function */
1027 "R_ARM_LDRS_PC_G2", /* name */
1028 FALSE, /* partial_inplace */
1029 0xffffffff, /* src_mask */
1030 0xffffffff, /* dst_mask */
1031 TRUE), /* pcrel_offset */
1032
1033 HOWTO (R_ARM_LDC_PC_G0, /* type */
1034 0, /* rightshift */
1035 2, /* size (0 = byte, 1 = short, 2 = long) */
1036 32, /* bitsize */
1037 TRUE, /* pc_relative */
1038 0, /* bitpos */
1039 complain_overflow_dont,/* complain_on_overflow */
1040 bfd_elf_generic_reloc, /* special_function */
1041 "R_ARM_LDC_PC_G0", /* name */
1042 FALSE, /* partial_inplace */
1043 0xffffffff, /* src_mask */
1044 0xffffffff, /* dst_mask */
1045 TRUE), /* pcrel_offset */
1046
1047 HOWTO (R_ARM_LDC_PC_G1, /* type */
1048 0, /* rightshift */
1049 2, /* size (0 = byte, 1 = short, 2 = long) */
1050 32, /* bitsize */
1051 TRUE, /* pc_relative */
1052 0, /* bitpos */
1053 complain_overflow_dont,/* complain_on_overflow */
1054 bfd_elf_generic_reloc, /* special_function */
1055 "R_ARM_LDC_PC_G1", /* name */
1056 FALSE, /* partial_inplace */
1057 0xffffffff, /* src_mask */
1058 0xffffffff, /* dst_mask */
1059 TRUE), /* pcrel_offset */
1060
1061 HOWTO (R_ARM_LDC_PC_G2, /* type */
1062 0, /* rightshift */
1063 2, /* size (0 = byte, 1 = short, 2 = long) */
1064 32, /* bitsize */
1065 TRUE, /* pc_relative */
1066 0, /* bitpos */
1067 complain_overflow_dont,/* complain_on_overflow */
1068 bfd_elf_generic_reloc, /* special_function */
1069 "R_ARM_LDC_PC_G2", /* name */
1070 FALSE, /* partial_inplace */
1071 0xffffffff, /* src_mask */
1072 0xffffffff, /* dst_mask */
1073 TRUE), /* pcrel_offset */
1074
1075 HOWTO (R_ARM_ALU_SB_G0_NC, /* type */
1076 0, /* rightshift */
1077 2, /* size (0 = byte, 1 = short, 2 = long) */
1078 32, /* bitsize */
1079 TRUE, /* pc_relative */
1080 0, /* bitpos */
1081 complain_overflow_dont,/* complain_on_overflow */
1082 bfd_elf_generic_reloc, /* special_function */
1083 "R_ARM_ALU_SB_G0_NC", /* name */
1084 FALSE, /* partial_inplace */
1085 0xffffffff, /* src_mask */
1086 0xffffffff, /* dst_mask */
1087 TRUE), /* pcrel_offset */
1088
1089 HOWTO (R_ARM_ALU_SB_G0, /* type */
1090 0, /* rightshift */
1091 2, /* size (0 = byte, 1 = short, 2 = long) */
1092 32, /* bitsize */
1093 TRUE, /* pc_relative */
1094 0, /* bitpos */
1095 complain_overflow_dont,/* complain_on_overflow */
1096 bfd_elf_generic_reloc, /* special_function */
1097 "R_ARM_ALU_SB_G0", /* name */
1098 FALSE, /* partial_inplace */
1099 0xffffffff, /* src_mask */
1100 0xffffffff, /* dst_mask */
1101 TRUE), /* pcrel_offset */
1102
1103 HOWTO (R_ARM_ALU_SB_G1_NC, /* type */
1104 0, /* rightshift */
1105 2, /* size (0 = byte, 1 = short, 2 = long) */
1106 32, /* bitsize */
1107 TRUE, /* pc_relative */
1108 0, /* bitpos */
1109 complain_overflow_dont,/* complain_on_overflow */
1110 bfd_elf_generic_reloc, /* special_function */
1111 "R_ARM_ALU_SB_G1_NC", /* name */
1112 FALSE, /* partial_inplace */
1113 0xffffffff, /* src_mask */
1114 0xffffffff, /* dst_mask */
1115 TRUE), /* pcrel_offset */
1116
1117 HOWTO (R_ARM_ALU_SB_G1, /* type */
1118 0, /* rightshift */
1119 2, /* size (0 = byte, 1 = short, 2 = long) */
1120 32, /* bitsize */
1121 TRUE, /* pc_relative */
1122 0, /* bitpos */
1123 complain_overflow_dont,/* complain_on_overflow */
1124 bfd_elf_generic_reloc, /* special_function */
1125 "R_ARM_ALU_SB_G1", /* name */
1126 FALSE, /* partial_inplace */
1127 0xffffffff, /* src_mask */
1128 0xffffffff, /* dst_mask */
1129 TRUE), /* pcrel_offset */
1130
1131 HOWTO (R_ARM_ALU_SB_G2, /* type */
1132 0, /* rightshift */
1133 2, /* size (0 = byte, 1 = short, 2 = long) */
1134 32, /* bitsize */
1135 TRUE, /* pc_relative */
1136 0, /* bitpos */
1137 complain_overflow_dont,/* complain_on_overflow */
1138 bfd_elf_generic_reloc, /* special_function */
1139 "R_ARM_ALU_SB_G2", /* name */
1140 FALSE, /* partial_inplace */
1141 0xffffffff, /* src_mask */
1142 0xffffffff, /* dst_mask */
1143 TRUE), /* pcrel_offset */
1144
1145 HOWTO (R_ARM_LDR_SB_G0, /* type */
1146 0, /* rightshift */
1147 2, /* size (0 = byte, 1 = short, 2 = long) */
1148 32, /* bitsize */
1149 TRUE, /* pc_relative */
1150 0, /* bitpos */
1151 complain_overflow_dont,/* complain_on_overflow */
1152 bfd_elf_generic_reloc, /* special_function */
1153 "R_ARM_LDR_SB_G0", /* name */
1154 FALSE, /* partial_inplace */
1155 0xffffffff, /* src_mask */
1156 0xffffffff, /* dst_mask */
1157 TRUE), /* pcrel_offset */
1158
1159 HOWTO (R_ARM_LDR_SB_G1, /* type */
1160 0, /* rightshift */
1161 2, /* size (0 = byte, 1 = short, 2 = long) */
1162 32, /* bitsize */
1163 TRUE, /* pc_relative */
1164 0, /* bitpos */
1165 complain_overflow_dont,/* complain_on_overflow */
1166 bfd_elf_generic_reloc, /* special_function */
1167 "R_ARM_LDR_SB_G1", /* name */
1168 FALSE, /* partial_inplace */
1169 0xffffffff, /* src_mask */
1170 0xffffffff, /* dst_mask */
1171 TRUE), /* pcrel_offset */
1172
1173 HOWTO (R_ARM_LDR_SB_G2, /* type */
1174 0, /* rightshift */
1175 2, /* size (0 = byte, 1 = short, 2 = long) */
1176 32, /* bitsize */
1177 TRUE, /* pc_relative */
1178 0, /* bitpos */
1179 complain_overflow_dont,/* complain_on_overflow */
1180 bfd_elf_generic_reloc, /* special_function */
1181 "R_ARM_LDR_SB_G2", /* name */
1182 FALSE, /* partial_inplace */
1183 0xffffffff, /* src_mask */
1184 0xffffffff, /* dst_mask */
1185 TRUE), /* pcrel_offset */
1186
1187 HOWTO (R_ARM_LDRS_SB_G0, /* type */
1188 0, /* rightshift */
1189 2, /* size (0 = byte, 1 = short, 2 = long) */
1190 32, /* bitsize */
1191 TRUE, /* pc_relative */
1192 0, /* bitpos */
1193 complain_overflow_dont,/* complain_on_overflow */
1194 bfd_elf_generic_reloc, /* special_function */
1195 "R_ARM_LDRS_SB_G0", /* name */
1196 FALSE, /* partial_inplace */
1197 0xffffffff, /* src_mask */
1198 0xffffffff, /* dst_mask */
1199 TRUE), /* pcrel_offset */
1200
1201 HOWTO (R_ARM_LDRS_SB_G1, /* type */
1202 0, /* rightshift */
1203 2, /* size (0 = byte, 1 = short, 2 = long) */
1204 32, /* bitsize */
1205 TRUE, /* pc_relative */
1206 0, /* bitpos */
1207 complain_overflow_dont,/* complain_on_overflow */
1208 bfd_elf_generic_reloc, /* special_function */
1209 "R_ARM_LDRS_SB_G1", /* name */
1210 FALSE, /* partial_inplace */
1211 0xffffffff, /* src_mask */
1212 0xffffffff, /* dst_mask */
1213 TRUE), /* pcrel_offset */
1214
1215 HOWTO (R_ARM_LDRS_SB_G2, /* type */
1216 0, /* rightshift */
1217 2, /* size (0 = byte, 1 = short, 2 = long) */
1218 32, /* bitsize */
1219 TRUE, /* pc_relative */
1220 0, /* bitpos */
1221 complain_overflow_dont,/* complain_on_overflow */
1222 bfd_elf_generic_reloc, /* special_function */
1223 "R_ARM_LDRS_SB_G2", /* name */
1224 FALSE, /* partial_inplace */
1225 0xffffffff, /* src_mask */
1226 0xffffffff, /* dst_mask */
1227 TRUE), /* pcrel_offset */
1228
1229 HOWTO (R_ARM_LDC_SB_G0, /* type */
1230 0, /* rightshift */
1231 2, /* size (0 = byte, 1 = short, 2 = long) */
1232 32, /* bitsize */
1233 TRUE, /* pc_relative */
1234 0, /* bitpos */
1235 complain_overflow_dont,/* complain_on_overflow */
1236 bfd_elf_generic_reloc, /* special_function */
1237 "R_ARM_LDC_SB_G0", /* name */
1238 FALSE, /* partial_inplace */
1239 0xffffffff, /* src_mask */
1240 0xffffffff, /* dst_mask */
1241 TRUE), /* pcrel_offset */
1242
1243 HOWTO (R_ARM_LDC_SB_G1, /* type */
1244 0, /* rightshift */
1245 2, /* size (0 = byte, 1 = short, 2 = long) */
1246 32, /* bitsize */
1247 TRUE, /* pc_relative */
1248 0, /* bitpos */
1249 complain_overflow_dont,/* complain_on_overflow */
1250 bfd_elf_generic_reloc, /* special_function */
1251 "R_ARM_LDC_SB_G1", /* name */
1252 FALSE, /* partial_inplace */
1253 0xffffffff, /* src_mask */
1254 0xffffffff, /* dst_mask */
1255 TRUE), /* pcrel_offset */
1256
1257 HOWTO (R_ARM_LDC_SB_G2, /* type */
1258 0, /* rightshift */
1259 2, /* size (0 = byte, 1 = short, 2 = long) */
1260 32, /* bitsize */
1261 TRUE, /* pc_relative */
1262 0, /* bitpos */
1263 complain_overflow_dont,/* complain_on_overflow */
1264 bfd_elf_generic_reloc, /* special_function */
1265 "R_ARM_LDC_SB_G2", /* name */
1266 FALSE, /* partial_inplace */
1267 0xffffffff, /* src_mask */
1268 0xffffffff, /* dst_mask */
1269 TRUE), /* pcrel_offset */
1270
1271 /* End of group relocations. */
1272
1273 HOWTO (R_ARM_MOVW_BREL_NC, /* type */
1274 0, /* rightshift */
1275 2, /* size (0 = byte, 1 = short, 2 = long) */
1276 16, /* bitsize */
1277 FALSE, /* pc_relative */
1278 0, /* bitpos */
1279 complain_overflow_dont,/* complain_on_overflow */
1280 bfd_elf_generic_reloc, /* special_function */
1281 "R_ARM_MOVW_BREL_NC", /* name */
1282 FALSE, /* partial_inplace */
1283 0x0000ffff, /* src_mask */
1284 0x0000ffff, /* dst_mask */
1285 FALSE), /* pcrel_offset */
1286
1287 HOWTO (R_ARM_MOVT_BREL, /* type */
1288 0, /* rightshift */
1289 2, /* size (0 = byte, 1 = short, 2 = long) */
1290 16, /* bitsize */
1291 FALSE, /* pc_relative */
1292 0, /* bitpos */
1293 complain_overflow_bitfield,/* complain_on_overflow */
1294 bfd_elf_generic_reloc, /* special_function */
1295 "R_ARM_MOVT_BREL", /* name */
1296 FALSE, /* partial_inplace */
1297 0x0000ffff, /* src_mask */
1298 0x0000ffff, /* dst_mask */
1299 FALSE), /* pcrel_offset */
1300
1301 HOWTO (R_ARM_MOVW_BREL, /* type */
1302 0, /* rightshift */
1303 2, /* size (0 = byte, 1 = short, 2 = long) */
1304 16, /* bitsize */
1305 FALSE, /* pc_relative */
1306 0, /* bitpos */
1307 complain_overflow_dont,/* complain_on_overflow */
1308 bfd_elf_generic_reloc, /* special_function */
1309 "R_ARM_MOVW_BREL", /* name */
1310 FALSE, /* partial_inplace */
1311 0x0000ffff, /* src_mask */
1312 0x0000ffff, /* dst_mask */
1313 FALSE), /* pcrel_offset */
1314
1315 HOWTO (R_ARM_THM_MOVW_BREL_NC,/* type */
1316 0, /* rightshift */
1317 2, /* size (0 = byte, 1 = short, 2 = long) */
1318 16, /* bitsize */
1319 FALSE, /* pc_relative */
1320 0, /* bitpos */
1321 complain_overflow_dont,/* complain_on_overflow */
1322 bfd_elf_generic_reloc, /* special_function */
1323 "R_ARM_THM_MOVW_BREL_NC",/* name */
1324 FALSE, /* partial_inplace */
1325 0x040f70ff, /* src_mask */
1326 0x040f70ff, /* dst_mask */
1327 FALSE), /* pcrel_offset */
1328
1329 HOWTO (R_ARM_THM_MOVT_BREL, /* type */
1330 0, /* rightshift */
1331 2, /* size (0 = byte, 1 = short, 2 = long) */
1332 16, /* bitsize */
1333 FALSE, /* pc_relative */
1334 0, /* bitpos */
1335 complain_overflow_bitfield,/* complain_on_overflow */
1336 bfd_elf_generic_reloc, /* special_function */
1337 "R_ARM_THM_MOVT_BREL", /* name */
1338 FALSE, /* partial_inplace */
1339 0x040f70ff, /* src_mask */
1340 0x040f70ff, /* dst_mask */
1341 FALSE), /* pcrel_offset */
1342
1343 HOWTO (R_ARM_THM_MOVW_BREL, /* type */
1344 0, /* rightshift */
1345 2, /* size (0 = byte, 1 = short, 2 = long) */
1346 16, /* bitsize */
1347 FALSE, /* pc_relative */
1348 0, /* bitpos */
1349 complain_overflow_dont,/* complain_on_overflow */
1350 bfd_elf_generic_reloc, /* special_function */
1351 "R_ARM_THM_MOVW_BREL", /* name */
1352 FALSE, /* partial_inplace */
1353 0x040f70ff, /* src_mask */
1354 0x040f70ff, /* dst_mask */
1355 FALSE), /* pcrel_offset */
1356
1357 EMPTY_HOWTO (90), /* Unallocated. */
1358 EMPTY_HOWTO (91),
1359 EMPTY_HOWTO (92),
1360 EMPTY_HOWTO (93),
1361
1362 HOWTO (R_ARM_PLT32_ABS, /* type */
1363 0, /* rightshift */
1364 2, /* size (0 = byte, 1 = short, 2 = long) */
1365 32, /* bitsize */
1366 FALSE, /* pc_relative */
1367 0, /* bitpos */
1368 complain_overflow_dont,/* complain_on_overflow */
1369 bfd_elf_generic_reloc, /* special_function */
1370 "R_ARM_PLT32_ABS", /* name */
1371 FALSE, /* partial_inplace */
1372 0xffffffff, /* src_mask */
1373 0xffffffff, /* dst_mask */
1374 FALSE), /* pcrel_offset */
1375
1376 HOWTO (R_ARM_GOT_ABS, /* type */
1377 0, /* rightshift */
1378 2, /* size (0 = byte, 1 = short, 2 = long) */
1379 32, /* bitsize */
1380 FALSE, /* pc_relative */
1381 0, /* bitpos */
1382 complain_overflow_dont,/* complain_on_overflow */
1383 bfd_elf_generic_reloc, /* special_function */
1384 "R_ARM_GOT_ABS", /* name */
1385 FALSE, /* partial_inplace */
1386 0xffffffff, /* src_mask */
1387 0xffffffff, /* dst_mask */
1388 FALSE), /* pcrel_offset */
1389
1390 HOWTO (R_ARM_GOT_PREL, /* type */
1391 0, /* rightshift */
1392 2, /* size (0 = byte, 1 = short, 2 = long) */
1393 32, /* bitsize */
1394 TRUE, /* pc_relative */
1395 0, /* bitpos */
1396 complain_overflow_dont, /* complain_on_overflow */
1397 bfd_elf_generic_reloc, /* special_function */
1398 "R_ARM_GOT_PREL", /* name */
1399 FALSE, /* partial_inplace */
1400 0xffffffff, /* src_mask */
1401 0xffffffff, /* dst_mask */
1402 TRUE), /* pcrel_offset */
1403
1404 HOWTO (R_ARM_GOT_BREL12, /* type */
1405 0, /* rightshift */
1406 2, /* size (0 = byte, 1 = short, 2 = long) */
1407 12, /* bitsize */
1408 FALSE, /* pc_relative */
1409 0, /* bitpos */
1410 complain_overflow_bitfield,/* complain_on_overflow */
1411 bfd_elf_generic_reloc, /* special_function */
1412 "R_ARM_GOT_BREL12", /* name */
1413 FALSE, /* partial_inplace */
1414 0x00000fff, /* src_mask */
1415 0x00000fff, /* dst_mask */
1416 FALSE), /* pcrel_offset */
1417
1418 HOWTO (R_ARM_GOTOFF12, /* type */
1419 0, /* rightshift */
1420 2, /* size (0 = byte, 1 = short, 2 = long) */
1421 12, /* bitsize */
1422 FALSE, /* pc_relative */
1423 0, /* bitpos */
1424 complain_overflow_bitfield,/* complain_on_overflow */
1425 bfd_elf_generic_reloc, /* special_function */
1426 "R_ARM_GOTOFF12", /* name */
1427 FALSE, /* partial_inplace */
1428 0x00000fff, /* src_mask */
1429 0x00000fff, /* dst_mask */
1430 FALSE), /* pcrel_offset */
1431
1432 EMPTY_HOWTO (R_ARM_GOTRELAX), /* reserved for future GOT-load optimizations */
1433
1434 /* GNU extension to record C++ vtable member usage */
1435 HOWTO (R_ARM_GNU_VTENTRY, /* type */
1436 0, /* rightshift */
1437 2, /* size (0 = byte, 1 = short, 2 = long) */
1438 0, /* bitsize */
1439 FALSE, /* pc_relative */
1440 0, /* bitpos */
1441 complain_overflow_dont, /* complain_on_overflow */
1442 _bfd_elf_rel_vtable_reloc_fn, /* special_function */
1443 "R_ARM_GNU_VTENTRY", /* name */
1444 FALSE, /* partial_inplace */
1445 0, /* src_mask */
1446 0, /* dst_mask */
1447 FALSE), /* pcrel_offset */
1448
1449 /* GNU extension to record C++ vtable hierarchy */
1450 HOWTO (R_ARM_GNU_VTINHERIT, /* type */
1451 0, /* rightshift */
1452 2, /* size (0 = byte, 1 = short, 2 = long) */
1453 0, /* bitsize */
1454 FALSE, /* pc_relative */
1455 0, /* bitpos */
1456 complain_overflow_dont, /* complain_on_overflow */
1457 NULL, /* special_function */
1458 "R_ARM_GNU_VTINHERIT", /* name */
1459 FALSE, /* partial_inplace */
1460 0, /* src_mask */
1461 0, /* dst_mask */
1462 FALSE), /* pcrel_offset */
1463
1464 HOWTO (R_ARM_THM_JUMP11, /* type */
1465 1, /* rightshift */
1466 1, /* size (0 = byte, 1 = short, 2 = long) */
1467 11, /* bitsize */
1468 TRUE, /* pc_relative */
1469 0, /* bitpos */
1470 complain_overflow_signed, /* complain_on_overflow */
1471 bfd_elf_generic_reloc, /* special_function */
1472 "R_ARM_THM_JUMP11", /* name */
1473 FALSE, /* partial_inplace */
1474 0x000007ff, /* src_mask */
1475 0x000007ff, /* dst_mask */
1476 TRUE), /* pcrel_offset */
1477
1478 HOWTO (R_ARM_THM_JUMP8, /* type */
1479 1, /* rightshift */
1480 1, /* size (0 = byte, 1 = short, 2 = long) */
1481 8, /* bitsize */
1482 TRUE, /* pc_relative */
1483 0, /* bitpos */
1484 complain_overflow_signed, /* complain_on_overflow */
1485 bfd_elf_generic_reloc, /* special_function */
1486 "R_ARM_THM_JUMP8", /* name */
1487 FALSE, /* partial_inplace */
1488 0x000000ff, /* src_mask */
1489 0x000000ff, /* dst_mask */
1490 TRUE), /* pcrel_offset */
1491
1492 /* TLS relocations */
1493 HOWTO (R_ARM_TLS_GD32, /* type */
1494 0, /* rightshift */
1495 2, /* size (0 = byte, 1 = short, 2 = long) */
1496 32, /* bitsize */
1497 FALSE, /* pc_relative */
1498 0, /* bitpos */
1499 complain_overflow_bitfield,/* complain_on_overflow */
1500 NULL, /* special_function */
1501 "R_ARM_TLS_GD32", /* name */
1502 TRUE, /* partial_inplace */
1503 0xffffffff, /* src_mask */
1504 0xffffffff, /* dst_mask */
1505 FALSE), /* pcrel_offset */
1506
1507 HOWTO (R_ARM_TLS_LDM32, /* type */
1508 0, /* rightshift */
1509 2, /* size (0 = byte, 1 = short, 2 = long) */
1510 32, /* bitsize */
1511 FALSE, /* pc_relative */
1512 0, /* bitpos */
1513 complain_overflow_bitfield,/* complain_on_overflow */
1514 bfd_elf_generic_reloc, /* special_function */
1515 "R_ARM_TLS_LDM32", /* name */
1516 TRUE, /* partial_inplace */
1517 0xffffffff, /* src_mask */
1518 0xffffffff, /* dst_mask */
1519 FALSE), /* pcrel_offset */
1520
1521 HOWTO (R_ARM_TLS_LDO32, /* type */
1522 0, /* rightshift */
1523 2, /* size (0 = byte, 1 = short, 2 = long) */
1524 32, /* bitsize */
1525 FALSE, /* pc_relative */
1526 0, /* bitpos */
1527 complain_overflow_bitfield,/* complain_on_overflow */
1528 bfd_elf_generic_reloc, /* special_function */
1529 "R_ARM_TLS_LDO32", /* name */
1530 TRUE, /* partial_inplace */
1531 0xffffffff, /* src_mask */
1532 0xffffffff, /* dst_mask */
1533 FALSE), /* pcrel_offset */
1534
1535 HOWTO (R_ARM_TLS_IE32, /* type */
1536 0, /* rightshift */
1537 2, /* size (0 = byte, 1 = short, 2 = long) */
1538 32, /* bitsize */
1539 FALSE, /* pc_relative */
1540 0, /* bitpos */
1541 complain_overflow_bitfield,/* complain_on_overflow */
1542 NULL, /* special_function */
1543 "R_ARM_TLS_IE32", /* name */
1544 TRUE, /* partial_inplace */
1545 0xffffffff, /* src_mask */
1546 0xffffffff, /* dst_mask */
1547 FALSE), /* pcrel_offset */
1548
1549 HOWTO (R_ARM_TLS_LE32, /* type */
1550 0, /* rightshift */
1551 2, /* size (0 = byte, 1 = short, 2 = long) */
1552 32, /* bitsize */
1553 FALSE, /* pc_relative */
1554 0, /* bitpos */
1555 complain_overflow_bitfield,/* complain_on_overflow */
1556 bfd_elf_generic_reloc, /* special_function */
1557 "R_ARM_TLS_LE32", /* name */
1558 TRUE, /* partial_inplace */
1559 0xffffffff, /* src_mask */
1560 0xffffffff, /* dst_mask */
1561 FALSE), /* pcrel_offset */
1562
1563 HOWTO (R_ARM_TLS_LDO12, /* type */
1564 0, /* rightshift */
1565 2, /* size (0 = byte, 1 = short, 2 = long) */
1566 12, /* bitsize */
1567 FALSE, /* pc_relative */
1568 0, /* bitpos */
1569 complain_overflow_bitfield,/* complain_on_overflow */
1570 bfd_elf_generic_reloc, /* special_function */
1571 "R_ARM_TLS_LDO12", /* name */
1572 FALSE, /* partial_inplace */
1573 0x00000fff, /* src_mask */
1574 0x00000fff, /* dst_mask */
1575 FALSE), /* pcrel_offset */
1576
1577 HOWTO (R_ARM_TLS_LE12, /* type */
1578 0, /* rightshift */
1579 2, /* size (0 = byte, 1 = short, 2 = long) */
1580 12, /* bitsize */
1581 FALSE, /* pc_relative */
1582 0, /* bitpos */
1583 complain_overflow_bitfield,/* complain_on_overflow */
1584 bfd_elf_generic_reloc, /* special_function */
1585 "R_ARM_TLS_LE12", /* name */
1586 FALSE, /* partial_inplace */
1587 0x00000fff, /* src_mask */
1588 0x00000fff, /* dst_mask */
1589 FALSE), /* pcrel_offset */
1590
1591 HOWTO (R_ARM_TLS_IE12GP, /* type */
1592 0, /* rightshift */
1593 2, /* size (0 = byte, 1 = short, 2 = long) */
1594 12, /* bitsize */
1595 FALSE, /* pc_relative */
1596 0, /* bitpos */
1597 complain_overflow_bitfield,/* complain_on_overflow */
1598 bfd_elf_generic_reloc, /* special_function */
1599 "R_ARM_TLS_IE12GP", /* name */
1600 FALSE, /* partial_inplace */
1601 0x00000fff, /* src_mask */
1602 0x00000fff, /* dst_mask */
1603 FALSE), /* pcrel_offset */
1604 };
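
/* The EMPTY_HOWTO placeholders above (slots 90-93 and R_ARM_GOTRELAX) keep
   each entry's index equal to its AAELF relocation number, so the table can
   be indexed directly by the ELF r_type, as noted before the table.  A
   sketch of a self-consistency check (illustrative only, using the
   BFD_ASSERT macro from libbfd.h):

     BFD_ASSERT (elf32_arm_howto_table_1[R_ARM_ABS32].type == R_ARM_ABS32);  */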
1605
1606 /* 112-127 private relocations
1607 128 R_ARM_ME_TOO, obsolete
1608 129-255 unallocated in AAELF.
1609
1610 249-255 extended, currently unused, relocations: */
1611
1612 static reloc_howto_type elf32_arm_howto_table_2[4] =
1613 {
1614 HOWTO (R_ARM_RREL32, /* type */
1615 0, /* rightshift */
1616 0, /* size (0 = byte, 1 = short, 2 = long) */
1617 0, /* bitsize */
1618 FALSE, /* pc_relative */
1619 0, /* bitpos */
1620 complain_overflow_dont,/* complain_on_overflow */
1621 bfd_elf_generic_reloc, /* special_function */
1622 "R_ARM_RREL32", /* name */
1623 FALSE, /* partial_inplace */
1624 0, /* src_mask */
1625 0, /* dst_mask */
1626 FALSE), /* pcrel_offset */
1627
1628 HOWTO (R_ARM_RABS32, /* type */
1629 0, /* rightshift */
1630 0, /* size (0 = byte, 1 = short, 2 = long) */
1631 0, /* bitsize */
1632 FALSE, /* pc_relative */
1633 0, /* bitpos */
1634 complain_overflow_dont,/* complain_on_overflow */
1635 bfd_elf_generic_reloc, /* special_function */
1636 "R_ARM_RABS32", /* name */
1637 FALSE, /* partial_inplace */
1638 0, /* src_mask */
1639 0, /* dst_mask */
1640 FALSE), /* pcrel_offset */
1641
1642 HOWTO (R_ARM_RPC24, /* type */
1643 0, /* rightshift */
1644 0, /* size (0 = byte, 1 = short, 2 = long) */
1645 0, /* bitsize */
1646 FALSE, /* pc_relative */
1647 0, /* bitpos */
1648 complain_overflow_dont,/* complain_on_overflow */
1649 bfd_elf_generic_reloc, /* special_function */
1650 "R_ARM_RPC24", /* name */
1651 FALSE, /* partial_inplace */
1652 0, /* src_mask */
1653 0, /* dst_mask */
1654 FALSE), /* pcrel_offset */
1655
1656 HOWTO (R_ARM_RBASE, /* type */
1657 0, /* rightshift */
1658 0, /* size (0 = byte, 1 = short, 2 = long) */
1659 0, /* bitsize */
1660 FALSE, /* pc_relative */
1661 0, /* bitpos */
1662 complain_overflow_dont,/* complain_on_overflow */
1663 bfd_elf_generic_reloc, /* special_function */
1664 "R_ARM_RBASE", /* name */
1665 FALSE, /* partial_inplace */
1666 0, /* src_mask */
1667 0, /* dst_mask */
1668 FALSE) /* pcrel_offset */
1669 };
1670
1671 static reloc_howto_type *
1672 elf32_arm_howto_from_type (unsigned int r_type)
1673 {
1674 if (r_type < ARRAY_SIZE (elf32_arm_howto_table_1))
1675 return &elf32_arm_howto_table_1[r_type];
1676
1677 if (r_type >= R_ARM_RREL32
1678 && r_type < R_ARM_RREL32 + ARRAY_SIZE (elf32_arm_howto_table_2))
1679 return &elf32_arm_howto_table_2[r_type - R_ARM_RREL32];
1680
1681 return NULL;
1682 }
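
/* For example, R_ARM_PC24 (type 1) resolves to elf32_arm_howto_table_1[1],
   and the extended relocations R_ARM_RREL32..R_ARM_RBASE are fetched from
   elf32_arm_howto_table_2 after rebasing by R_ARM_RREL32; any other type
   yields NULL, meaning the relocation is not recognised.  A hypothetical
   caller would therefore check the result, along these lines:

     reloc_howto_type *howto = elf32_arm_howto_from_type (r_type);
     if (howto == NULL)
       return FALSE;  */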
1683
1684 static void
1685 elf32_arm_info_to_howto (bfd * abfd ATTRIBUTE_UNUSED, arelent * bfd_reloc,
1686 Elf_Internal_Rela * elf_reloc)
1687 {
1688 unsigned int r_type;
1689
1690 r_type = ELF32_R_TYPE (elf_reloc->r_info);
1691 bfd_reloc->howto = elf32_arm_howto_from_type (r_type);
1692 }
1693
1694 struct elf32_arm_reloc_map
1695 {
1696 bfd_reloc_code_real_type bfd_reloc_val;
1697 unsigned char elf_reloc_val;
1698 };
1699
1700 /* All entries in this list must also be present in elf32_arm_howto_table. */
1701 static const struct elf32_arm_reloc_map elf32_arm_reloc_map[] =
1702 {
1703 {BFD_RELOC_NONE, R_ARM_NONE},
1704 {BFD_RELOC_ARM_PCREL_BRANCH, R_ARM_PC24},
1705 {BFD_RELOC_ARM_PCREL_CALL, R_ARM_CALL},
1706 {BFD_RELOC_ARM_PCREL_JUMP, R_ARM_JUMP24},
1707 {BFD_RELOC_ARM_PCREL_BLX, R_ARM_XPC25},
1708 {BFD_RELOC_THUMB_PCREL_BLX, R_ARM_THM_XPC22},
1709 {BFD_RELOC_32, R_ARM_ABS32},
1710 {BFD_RELOC_32_PCREL, R_ARM_REL32},
1711 {BFD_RELOC_8, R_ARM_ABS8},
1712 {BFD_RELOC_16, R_ARM_ABS16},
1713 {BFD_RELOC_ARM_OFFSET_IMM, R_ARM_ABS12},
1714 {BFD_RELOC_ARM_THUMB_OFFSET, R_ARM_THM_ABS5},
1715 {BFD_RELOC_THUMB_PCREL_BRANCH25, R_ARM_THM_JUMP24},
1716 {BFD_RELOC_THUMB_PCREL_BRANCH23, R_ARM_THM_CALL},
1717 {BFD_RELOC_THUMB_PCREL_BRANCH12, R_ARM_THM_JUMP11},
1718 {BFD_RELOC_THUMB_PCREL_BRANCH20, R_ARM_THM_JUMP19},
1719 {BFD_RELOC_THUMB_PCREL_BRANCH9, R_ARM_THM_JUMP8},
1720 {BFD_RELOC_THUMB_PCREL_BRANCH7, R_ARM_THM_JUMP6},
1721 {BFD_RELOC_ARM_GLOB_DAT, R_ARM_GLOB_DAT},
1722 {BFD_RELOC_ARM_JUMP_SLOT, R_ARM_JUMP_SLOT},
1723 {BFD_RELOC_ARM_RELATIVE, R_ARM_RELATIVE},
1724 {BFD_RELOC_ARM_GOTOFF, R_ARM_GOTOFF32},
1725 {BFD_RELOC_ARM_GOTPC, R_ARM_GOTPC},
1726 {BFD_RELOC_ARM_GOT32, R_ARM_GOT32},
1727 {BFD_RELOC_ARM_PLT32, R_ARM_PLT32},
1728 {BFD_RELOC_ARM_TARGET1, R_ARM_TARGET1},
1729 {BFD_RELOC_ARM_ROSEGREL32, R_ARM_ROSEGREL32},
1730 {BFD_RELOC_ARM_SBREL32, R_ARM_SBREL32},
1731 {BFD_RELOC_ARM_PREL31, R_ARM_PREL31},
1732 {BFD_RELOC_ARM_TARGET2, R_ARM_TARGET2},
1734 {BFD_RELOC_ARM_TLS_GD32, R_ARM_TLS_GD32},
1735 {BFD_RELOC_ARM_TLS_LDO32, R_ARM_TLS_LDO32},
1736 {BFD_RELOC_ARM_TLS_LDM32, R_ARM_TLS_LDM32},
1737 {BFD_RELOC_ARM_TLS_DTPMOD32, R_ARM_TLS_DTPMOD32},
1738 {BFD_RELOC_ARM_TLS_DTPOFF32, R_ARM_TLS_DTPOFF32},
1739 {BFD_RELOC_ARM_TLS_TPOFF32, R_ARM_TLS_TPOFF32},
1740 {BFD_RELOC_ARM_TLS_IE32, R_ARM_TLS_IE32},
1741 {BFD_RELOC_ARM_TLS_LE32, R_ARM_TLS_LE32},
1742 {BFD_RELOC_VTABLE_INHERIT, R_ARM_GNU_VTINHERIT},
1743 {BFD_RELOC_VTABLE_ENTRY, R_ARM_GNU_VTENTRY},
1744 {BFD_RELOC_ARM_MOVW, R_ARM_MOVW_ABS_NC},
1745 {BFD_RELOC_ARM_MOVT, R_ARM_MOVT_ABS},
1746 {BFD_RELOC_ARM_MOVW_PCREL, R_ARM_MOVW_PREL_NC},
1747 {BFD_RELOC_ARM_MOVT_PCREL, R_ARM_MOVT_PREL},
1748 {BFD_RELOC_ARM_THUMB_MOVW, R_ARM_THM_MOVW_ABS_NC},
1749 {BFD_RELOC_ARM_THUMB_MOVT, R_ARM_THM_MOVT_ABS},
1750 {BFD_RELOC_ARM_THUMB_MOVW_PCREL, R_ARM_THM_MOVW_PREL_NC},
1751 {BFD_RELOC_ARM_THUMB_MOVT_PCREL, R_ARM_THM_MOVT_PREL},
1752 {BFD_RELOC_ARM_ALU_PC_G0_NC, R_ARM_ALU_PC_G0_NC},
1753 {BFD_RELOC_ARM_ALU_PC_G0, R_ARM_ALU_PC_G0},
1754 {BFD_RELOC_ARM_ALU_PC_G1_NC, R_ARM_ALU_PC_G1_NC},
1755 {BFD_RELOC_ARM_ALU_PC_G1, R_ARM_ALU_PC_G1},
1756 {BFD_RELOC_ARM_ALU_PC_G2, R_ARM_ALU_PC_G2},
1757 {BFD_RELOC_ARM_LDR_PC_G0, R_ARM_LDR_PC_G0},
1758 {BFD_RELOC_ARM_LDR_PC_G1, R_ARM_LDR_PC_G1},
1759 {BFD_RELOC_ARM_LDR_PC_G2, R_ARM_LDR_PC_G2},
1760 {BFD_RELOC_ARM_LDRS_PC_G0, R_ARM_LDRS_PC_G0},
1761 {BFD_RELOC_ARM_LDRS_PC_G1, R_ARM_LDRS_PC_G1},
1762 {BFD_RELOC_ARM_LDRS_PC_G2, R_ARM_LDRS_PC_G2},
1763 {BFD_RELOC_ARM_LDC_PC_G0, R_ARM_LDC_PC_G0},
1764 {BFD_RELOC_ARM_LDC_PC_G1, R_ARM_LDC_PC_G1},
1765 {BFD_RELOC_ARM_LDC_PC_G2, R_ARM_LDC_PC_G2},
1766 {BFD_RELOC_ARM_ALU_SB_G0_NC, R_ARM_ALU_SB_G0_NC},
1767 {BFD_RELOC_ARM_ALU_SB_G0, R_ARM_ALU_SB_G0},
1768 {BFD_RELOC_ARM_ALU_SB_G1_NC, R_ARM_ALU_SB_G1_NC},
1769 {BFD_RELOC_ARM_ALU_SB_G1, R_ARM_ALU_SB_G1},
1770 {BFD_RELOC_ARM_ALU_SB_G2, R_ARM_ALU_SB_G2},
1771 {BFD_RELOC_ARM_LDR_SB_G0, R_ARM_LDR_SB_G0},
1772 {BFD_RELOC_ARM_LDR_SB_G1, R_ARM_LDR_SB_G1},
1773 {BFD_RELOC_ARM_LDR_SB_G2, R_ARM_LDR_SB_G2},
1774 {BFD_RELOC_ARM_LDRS_SB_G0, R_ARM_LDRS_SB_G0},
1775 {BFD_RELOC_ARM_LDRS_SB_G1, R_ARM_LDRS_SB_G1},
1776 {BFD_RELOC_ARM_LDRS_SB_G2, R_ARM_LDRS_SB_G2},
1777 {BFD_RELOC_ARM_LDC_SB_G0, R_ARM_LDC_SB_G0},
1778 {BFD_RELOC_ARM_LDC_SB_G1, R_ARM_LDC_SB_G1},
1779 {BFD_RELOC_ARM_LDC_SB_G2, R_ARM_LDC_SB_G2},
1780 {BFD_RELOC_ARM_V4BX, R_ARM_V4BX}
1781 };
1782
1783 static reloc_howto_type *
1784 elf32_arm_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1785 bfd_reloc_code_real_type code)
1786 {
1787 unsigned int i;
1788
1789 for (i = 0; i < ARRAY_SIZE (elf32_arm_reloc_map); i ++)
1790 if (elf32_arm_reloc_map[i].bfd_reloc_val == code)
1791 return elf32_arm_howto_from_type (elf32_arm_reloc_map[i].elf_reloc_val);
1792
1793 return NULL;
1794 }
1795
1796 static reloc_howto_type *
1797 elf32_arm_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1798 const char *r_name)
1799 {
1800 unsigned int i;
1801
1802 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_1); i++)
1803 if (elf32_arm_howto_table_1[i].name != NULL
1804 && strcasecmp (elf32_arm_howto_table_1[i].name, r_name) == 0)
1805 return &elf32_arm_howto_table_1[i];
1806
1807 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_2); i++)
1808 if (elf32_arm_howto_table_2[i].name != NULL
1809 && strcasecmp (elf32_arm_howto_table_2[i].name, r_name) == 0)
1810 return &elf32_arm_howto_table_2[i];
1811
1812 return NULL;
1813 }
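/* Usage sketch (illustrative only): generic BFD code resolves relocations
   through the two lookup routines above, along the lines of:

     reloc_howto_type *h1 = elf32_arm_reloc_type_lookup (abfd, BFD_RELOC_32);
     reloc_howto_type *h2 = elf32_arm_reloc_name_lookup (abfd, "r_arm_abs32");

   Both calls should yield the R_ARM_ABS32 howto; the name lookup uses
   strcasecmp, so the case of the name does not matter.  */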
1814
1815 /* Support for core dump NOTE sections. */
1816
1817 static bfd_boolean
1818 elf32_arm_nabi_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
1819 {
1820 int offset;
1821 size_t size;
1822
1823 switch (note->descsz)
1824 {
1825 default:
1826 return FALSE;
1827
1828 case 148: /* Linux/ARM 32-bit. */
1829 /* pr_cursig */
1830 elf_tdata (abfd)->core_signal = bfd_get_16 (abfd, note->descdata + 12);
1831
1832 /* pr_pid */
1833 elf_tdata (abfd)->core_pid = bfd_get_32 (abfd, note->descdata + 24);
1834
1835 /* pr_reg */
1836 offset = 72;
1837 size = 72;
1838
1839 break;
1840 }
1841
1842 /* Make a ".reg/999" section. */
1843 return _bfd_elfcore_make_pseudosection (abfd, ".reg",
1844 size, note->descpos + offset);
1845 }
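/* The magic numbers above follow the 32-bit Linux/ARM elf_prstatus layout:
   a 148-byte descriptor with pr_cursig at offset 12, pr_pid at offset 24,
   and the 18 general registers (r0-r15, cpsr and orig_r0, 4 bytes each,
   hence size 72) starting at offset 72, leaving 4 bytes for pr_fpvalid.  */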
1846
1847 static bfd_boolean
1848 elf32_arm_nabi_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
1849 {
1850 switch (note->descsz)
1851 {
1852 default:
1853 return FALSE;
1854
1855 case 124: /* Linux/ARM elf_prpsinfo. */
1856 elf_tdata (abfd)->core_program
1857 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
1858 elf_tdata (abfd)->core_command
1859 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
1860 }
1861
1862 /* Note that for some reason a spurious space is tacked
1863 onto the end of the args in some implementations (at least
1864 one, anyway), so strip it off if it exists. */
1865 {
1866 char *command = elf_tdata (abfd)->core_command;
1867 int n = strlen (command);
1868
1869 if (0 < n && command[n - 1] == ' ')
1870 command[n - 1] = '\0';
1871 }
1872
1873 return TRUE;
1874 }
1875
1876 #define TARGET_LITTLE_SYM bfd_elf32_littlearm_vec
1877 #define TARGET_LITTLE_NAME "elf32-littlearm"
1878 #define TARGET_BIG_SYM bfd_elf32_bigarm_vec
1879 #define TARGET_BIG_NAME "elf32-bigarm"
1880
1881 #define elf_backend_grok_prstatus elf32_arm_nabi_grok_prstatus
1882 #define elf_backend_grok_psinfo elf32_arm_nabi_grok_psinfo
1883
1884 typedef unsigned long int insn32;
1885 typedef unsigned short int insn16;
1886
1887 /* In lieu of proper flags, assume all EABIv4 or later objects are
1888 interworkable. */
1889 #define INTERWORK_FLAG(abfd) \
1890 (EF_ARM_EABI_VERSION (elf_elfheader (abfd)->e_flags) >= EF_ARM_EABI_VER4 \
1891 || (elf_elfheader (abfd)->e_flags & EF_ARM_INTERWORK) \
1892 || ((abfd)->flags & BFD_LINKER_CREATED))
1893
1894 /* The linker script knows the section names for placement.
1895 The entry_names are used to do simple name mangling on the stubs.
1896 Given a function name and its type, the stub can be found. The
1897 name can be changed; the only requirement is that the %s be present. */
1898 #define THUMB2ARM_GLUE_SECTION_NAME ".glue_7t"
1899 #define THUMB2ARM_GLUE_ENTRY_NAME "__%s_from_thumb"
1900
1901 #define ARM2THUMB_GLUE_SECTION_NAME ".glue_7"
1902 #define ARM2THUMB_GLUE_ENTRY_NAME "__%s_from_arm"
1903
1904 #define VFP11_ERRATUM_VENEER_SECTION_NAME ".vfp11_veneer"
1905 #define VFP11_ERRATUM_VENEER_ENTRY_NAME "__vfp11_veneer_%x"
1906
1907 #define ARM_BX_GLUE_SECTION_NAME ".v4_bx"
1908 #define ARM_BX_GLUE_ENTRY_NAME "__bx_r%d"
1909
1910 #define STUB_ENTRY_NAME "__%s_veneer"
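/* For example, with the formats above a Thumb function "foo" called from
   ARM code gets a glue entry named "__foo_from_arm", the reverse direction
   uses "__foo_from_thumb", a generic long-branch veneer is "__foo_veneer",
   a BX veneer for register 3 is "__bx_r3", and VFP11 erratum veneers are
   numbered "__vfp11_veneer_0", "__vfp11_veneer_1", and so on.  */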
1911
1912 /* The name of the dynamic interpreter. This is put in the .interp
1913 section. */
1914 #define ELF_DYNAMIC_INTERPRETER "/usr/lib/ld.so.1"
1915
1916 #ifdef FOUR_WORD_PLT
1917
1918 /* The first entry in a procedure linkage table looks like
1919 this. It is set up so that any shared library function that is
1920 called before the relocation has been set up calls the dynamic
1921 linker first. */
1922 static const bfd_vma elf32_arm_plt0_entry [] =
1923 {
1924 0xe52de004, /* str lr, [sp, #-4]! */
1925 0xe59fe010, /* ldr lr, [pc, #16] */
1926 0xe08fe00e, /* add lr, pc, lr */
1927 0xe5bef008, /* ldr pc, [lr, #8]! */
1928 };
1929
1930 /* Subsequent entries in a procedure linkage table look like
1931 this. */
1932 static const bfd_vma elf32_arm_plt_entry [] =
1933 {
1934 0xe28fc600, /* add ip, pc, #NN */
1935 0xe28cca00, /* add ip, ip, #NN */
1936 0xe5bcf000, /* ldr pc, [ip, #NN]! */
1937 0x00000000, /* unused */
1938 };
1939
1940 #else
1941
1942 /* The first entry in a procedure linkage table looks like
1943 this. It is set up so that any shared library function that is
1944 called before the relocation has been set up calls the dynamic
1945 linker first. */
1946 static const bfd_vma elf32_arm_plt0_entry [] =
1947 {
1948 0xe52de004, /* str lr, [sp, #-4]! */
1949 0xe59fe004, /* ldr lr, [pc, #4] */
1950 0xe08fe00e, /* add lr, pc, lr */
1951 0xe5bef008, /* ldr pc, [lr, #8]! */
1952 0x00000000, /* &GOT[0] - . */
1953 };
1954
1955 /* Subsequent entries in a procedure linkage table look like
1956 this. */
1957 static const bfd_vma elf32_arm_plt_entry [] =
1958 {
1959 0xe28fc600, /* add ip, pc, #0xNN00000 */
1960 0xe28cca00, /* add ip, ip, #0xNN000 */
1961 0xe5bcf000, /* ldr pc, [ip, #0xNNN]! */
1962 };
1963
1964 #endif
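/* Illustration only (variable names here are schematic): the zero
   immediates in elf32_arm_plt_entry above are expected to be patched with
   the PLT entry's displacement to its GOT slot when the entry is written
   out (see elf32_arm_finish_dynamic_symbol later in this file), roughly:

     put_arm_insn (htab, obfd, plt_entry[0]
                   | ((got_displacement & 0x0ff00000) >> 20), ptr + 0);
     put_arm_insn (htab, obfd, plt_entry[1]
                   | ((got_displacement & 0x000ff000) >> 12), ptr + 4);
     put_arm_insn (htab, obfd, plt_entry[2]
                   |  (got_displacement & 0x00000fff),        ptr + 8);

   splitting the displacement into a top byte, a middle byte and a low
   12-bit offset to match the add/add/ldr sequence.  */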
1965
1966 /* The format of the first entry in the procedure linkage table
1967 for a VxWorks executable. */
1968 static const bfd_vma elf32_arm_vxworks_exec_plt0_entry[] =
1969 {
1970 0xe52dc008, /* str ip,[sp,#-8]! */
1971 0xe59fc000, /* ldr ip,[pc] */
1972 0xe59cf008, /* ldr pc,[ip,#8] */
1973 0x00000000, /* .long _GLOBAL_OFFSET_TABLE_ */
1974 };
1975
1976 /* The format of subsequent entries in a VxWorks executable. */
1977 static const bfd_vma elf32_arm_vxworks_exec_plt_entry[] =
1978 {
1979 0xe59fc000, /* ldr ip,[pc] */
1980 0xe59cf000, /* ldr pc,[ip] */
1981 0x00000000, /* .long @got */
1982 0xe59fc000, /* ldr ip,[pc] */
1983 0xea000000, /* b _PLT */
1984 0x00000000, /* .long @pltindex*sizeof(Elf32_Rela) */
1985 };
1986
1987 /* The format of entries in a VxWorks shared library. */
1988 static const bfd_vma elf32_arm_vxworks_shared_plt_entry[] =
1989 {
1990 0xe59fc000, /* ldr ip,[pc] */
1991 0xe79cf009, /* ldr pc,[ip,r9] */
1992 0x00000000, /* .long @got */
1993 0xe59fc000, /* ldr ip,[pc] */
1994 0xe599f008, /* ldr pc,[r9,#8] */
1995 0x00000000, /* .long @pltindex*sizeof(Elf32_Rela) */
1996 };
1997
1998 /* An initial stub used if the PLT entry is referenced from Thumb code. */
1999 #define PLT_THUMB_STUB_SIZE 4
2000 static const bfd_vma elf32_arm_plt_thumb_stub [] =
2001 {
2002 0x4778, /* bx pc */
2003 0x46c0 /* nop */
2004 };
2005
2006 /* The entries in a PLT when using a DLL-based target with multiple
2007 address spaces. */
2008 static const bfd_vma elf32_arm_symbian_plt_entry [] =
2009 {
2010 0xe51ff004, /* ldr pc, [pc, #-4] */
2011 0x00000000, /* dcd R_ARM_GLOB_DAT(X) */
2012 };
2013
2014 #define ARM_MAX_FWD_BRANCH_OFFSET ((((1 << 23) - 1) << 2) + 8)
2015 #define ARM_MAX_BWD_BRANCH_OFFSET ((-((1 << 23) << 2)) + 8)
2016 #define THM_MAX_FWD_BRANCH_OFFSET ((1 << 22) - 2 + 4)
2017 #define THM_MAX_BWD_BRANCH_OFFSET (-(1 << 22) + 4)
2018 #define THM2_MAX_FWD_BRANCH_OFFSET (((1 << 24) - 2) + 4)
2019 #define THM2_MAX_BWD_BRANCH_OFFSET (-(1 << 24) + 4)
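/* These limits follow from the instruction encodings.  An ARM B/BL has a
   signed 24-bit word offset evaluated relative to the instruction address
   plus 8, giving (((1 << 23) - 1) << 2) + 8 forwards and
   (-((1 << 23) << 2)) + 8 backwards.  The Thumb-1 BL pair carries a signed
   22-bit halfword offset and Thumb-2 B.W/BL a signed 24-bit halfword
   offset, both relative to the instruction address plus 4.  */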
2020
2021 enum stub_insn_type
2022 {
2023 THUMB16_TYPE = 1,
2024 THUMB32_TYPE,
2025 ARM_TYPE,
2026 DATA_TYPE
2027 };
2028
2029 #define THUMB16_INSN(X) {(X), THUMB16_TYPE, R_ARM_NONE, 0}
2030 /* A bit of a hack: a Thumb conditional branch in which the proper condition
2031 is inserted by arm_build_one_stub(). */
2032 #define THUMB16_BCOND_INSN(X) {(X), THUMB16_TYPE, R_ARM_NONE, 1}
2033 #define THUMB32_INSN(X) {(X), THUMB32_TYPE, R_ARM_NONE, 0}
2034 #define THUMB32_B_INSN(X, Z) {(X), THUMB32_TYPE, R_ARM_THM_JUMP24, (Z)}
2035 #define ARM_INSN(X) {(X), ARM_TYPE, R_ARM_NONE, 0}
2036 #define ARM_REL_INSN(X, Z) {(X), ARM_TYPE, R_ARM_JUMP24, (Z)}
2037 #define DATA_WORD(X,Y,Z) {(X), DATA_TYPE, (Y), (Z)}
2038
2039 typedef struct
2040 {
2041 bfd_vma data;
2042 enum stub_insn_type type;
2043 unsigned int r_type;
2044 int reloc_addend;
2045 } insn_sequence;
2046
2047 /* Arm/Thumb -> Arm/Thumb long branch stub. On V5T and above, use blx
2048 to reach the stub if necessary. */
2049 static const insn_sequence elf32_arm_stub_long_branch_any_any[] =
2050 {
2051 ARM_INSN(0xe51ff004), /* ldr pc, [pc, #-4] */
2052 DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2053 };
2054
2055 /* V4T Arm -> Thumb long branch stub. Used on V4T where blx is not
2056 available. */
2057 static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb[] =
2058 {
2059 ARM_INSN(0xe59fc000), /* ldr ip, [pc, #0] */
2060 ARM_INSN(0xe12fff1c), /* bx ip */
2061 DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2062 };
2063
2064 /* Thumb -> Thumb long branch stub. Used on M-profile architectures. */
2065 static const insn_sequence elf32_arm_stub_long_branch_thumb_only[] =
2066 {
2067 THUMB16_INSN(0xb401), /* push {r0} */
2068 THUMB16_INSN(0x4802), /* ldr r0, [pc, #8] */
2069 THUMB16_INSN(0x4684), /* mov ip, r0 */
2070 THUMB16_INSN(0xbc01), /* pop {r0} */
2071 THUMB16_INSN(0x4760), /* bx ip */
2072 THUMB16_INSN(0xbf00), /* nop */
2073 DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2074 };
2075
2076 /* V4T Thumb -> Thumb long branch stub. Using the stack is not
2077 allowed. */
2078 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb[] =
2079 {
2080 THUMB16_INSN(0x4778), /* bx pc */
2081 THUMB16_INSN(0x46c0), /* nop */
2082 ARM_INSN(0xe59fc000), /* ldr ip, [pc, #0] */
2083 ARM_INSN(0xe12fff1c), /* bx ip */
2084 DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2085 };
2086
2087 /* V4T Thumb -> ARM long branch stub. Used on V4T where blx is not
2088 available. */
2089 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm[] =
2090 {
2091 THUMB16_INSN(0x4778), /* bx pc */
2092 THUMB16_INSN(0x46c0), /* nop */
2093 ARM_INSN(0xe51ff004), /* ldr pc, [pc, #-4] */
2094 DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2095 };
2096
2097 /* V4T Thumb -> ARM short branch stub. Shorter variant of the above
2098 one, when the destination is close enough. */
2099 static const insn_sequence elf32_arm_stub_short_branch_v4t_thumb_arm[] =
2100 {
2101 THUMB16_INSN(0x4778), /* bx pc */
2102 THUMB16_INSN(0x46c0), /* nop */
2103 ARM_REL_INSN(0xea000000, -8), /* b (X-8) */
2104 };
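/* About the -8 addend on the branch above: an ARM B instruction encodes
   its target relative to the instruction address plus 8 (the value the PC
   reads as), so a branch that must land on X is given an R_ARM_JUMP24
   addend of -8, matching the usual assembler convention; hence the
   "b (X-8)" annotation.  */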
2105
2106 /* ARM/Thumb -> ARM long branch stub, PIC. On V5T and above, use
2107 blx to reach the stub if necessary. */
2108 static const insn_sequence elf32_arm_stub_long_branch_any_arm_pic[] =
2109 {
2110 ARM_INSN(0xe59fc000), /* ldr r12, [pc] */
2111 ARM_INSN(0xe08ff00c), /* add pc, pc, ip */
2112 DATA_WORD(0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */
2113 };
2114
2115 /* ARM/Thumb -> Thumb long branch stub, PIC. On V5T and above, use
2116 blx to reach the stub if necessary. We cannot add into the PC;
2117 it is not guaranteed to switch mode (the behaviour differs between
2118 ARMv6 and ARMv7). */
2119 static const insn_sequence elf32_arm_stub_long_branch_any_thumb_pic[] =
2120 {
2121 ARM_INSN(0xe59fc004), /* ldr r12, [pc, #4] */
2122 ARM_INSN(0xe08fc00c), /* add ip, pc, ip */
2123 ARM_INSN(0xe12fff1c), /* bx ip */
2124 DATA_WORD(0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2125 };
2126
2127 /* V4T ARM -> Thumb long branch stub, PIC. */
2128 static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb_pic[] =
2129 {
2130 ARM_INSN(0xe59fc004), /* ldr ip, [pc, #4] */
2131 ARM_INSN(0xe08fc00c), /* add ip, pc, ip */
2132 ARM_INSN(0xe12fff1c), /* bx ip */
2133 DATA_WORD(0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2134 };
2135
2136 /* V4T Thumb -> ARM long branch stub, PIC. */
2137 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm_pic[] =
2138 {
2139 THUMB16_INSN(0x4778), /* bx pc */
2140 THUMB16_INSN(0x46c0), /* nop */
2141 ARM_INSN(0xe59fc000), /* ldr ip, [pc, #0] */
2142 ARM_INSN(0xe08cf00f), /* add pc, ip, pc */
2143 DATA_WORD(0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */
2144 };
2145
2146 /* Thumb -> Thumb long branch stub, PIC. Used on M-profile
2147 architectures. */
2148 static const insn_sequence elf32_arm_stub_long_branch_thumb_only_pic[] =
2149 {
2150 THUMB16_INSN(0xb401), /* push {r0} */
2151 THUMB16_INSN(0x4802), /* ldr r0, [pc, #8] */
2152 THUMB16_INSN(0x46fc), /* mov ip, pc */
2153 THUMB16_INSN(0x4484), /* add ip, r0 */
2154 THUMB16_INSN(0xbc01), /* pop {r0} */
2155 THUMB16_INSN(0x4760), /* bx ip */
2156 DATA_WORD(0, R_ARM_REL32, 4), /* dcd R_ARM_REL32(X+4) */
2157 };
2158
2159 /* V4T Thumb -> Thumb long branch stub, PIC. Using the stack is not
2160 allowed. */
2161 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb_pic[] =
2162 {
2163 THUMB16_INSN(0x4778), /* bx pc */
2164 THUMB16_INSN(0x46c0), /* nop */
2165 ARM_INSN(0xe59fc004), /* ldr ip, [pc, #4] */
2166 ARM_INSN(0xe08fc00c), /* add ip, pc, ip */
2167 ARM_INSN(0xe12fff1c), /* bx ip */
2168 DATA_WORD(0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2169 };
2170
2171 /* Cortex-A8 erratum-workaround stubs. */
2172
2173 /* Stub used for conditional branches (which may be beyond +/-1MB away, so we
2174 can't use a conditional branch to reach this stub). */
2175
2176 static const insn_sequence elf32_arm_stub_a8_veneer_b_cond[] =
2177 {
2178 THUMB16_BCOND_INSN(0xd001), /* b<cond>.n true. */
2179 THUMB32_B_INSN(0xf000b800, -4), /* b.w insn_after_original_branch. */
2180 THUMB32_B_INSN(0xf000b800, -4) /* true: b.w original_branch_dest. */
2181 };
2182
2183 /* Stub used for b.w and bl.w instructions. */
2184
2185 static const insn_sequence elf32_arm_stub_a8_veneer_b[] =
2186 {
2187 THUMB32_B_INSN(0xf000b800, -4) /* b.w original_branch_dest. */
2188 };
2189
2190 static const insn_sequence elf32_arm_stub_a8_veneer_bl[] =
2191 {
2192 THUMB32_B_INSN(0xf000b800, -4) /* b.w original_branch_dest. */
2193 };
2194
2195 /* Stub used for Thumb-2 blx.w instructions. We modified the original blx.w
2196 instruction (which switches to ARM mode) to point to this stub. Jump to the
2197 real destination using an ARM-mode branch. */
2198
2199 static const insn_sequence elf32_arm_stub_a8_veneer_blx[] =
2200 {
2201 ARM_REL_INSN(0xea000000, -8) /* b original_branch_dest. */
2202 };
2203
2204 /* Section name for stubs is the associated section name plus this
2205 string. */
2206 #define STUB_SUFFIX ".stub"
2207
2208 /* One entry per long/short branch stub defined above. */
2209 #define DEF_STUBS \
2210 DEF_STUB(long_branch_any_any) \
2211 DEF_STUB(long_branch_v4t_arm_thumb) \
2212 DEF_STUB(long_branch_thumb_only) \
2213 DEF_STUB(long_branch_v4t_thumb_thumb) \
2214 DEF_STUB(long_branch_v4t_thumb_arm) \
2215 DEF_STUB(short_branch_v4t_thumb_arm) \
2216 DEF_STUB(long_branch_any_arm_pic) \
2217 DEF_STUB(long_branch_any_thumb_pic) \
2218 DEF_STUB(long_branch_v4t_thumb_thumb_pic) \
2219 DEF_STUB(long_branch_v4t_arm_thumb_pic) \
2220 DEF_STUB(long_branch_v4t_thumb_arm_pic) \
2221 DEF_STUB(long_branch_thumb_only_pic) \
2222 DEF_STUB(a8_veneer_b_cond) \
2223 DEF_STUB(a8_veneer_b) \
2224 DEF_STUB(a8_veneer_bl) \
2225 DEF_STUB(a8_veneer_blx)
2226
2227 #define DEF_STUB(x) arm_stub_##x,
2228 enum elf32_arm_stub_type {
2229 arm_stub_none,
2230 DEF_STUBS
2231 };
2232 #undef DEF_STUB
2233
2234 typedef struct
2235 {
2236 const insn_sequence *template;
2237 int template_size;
2238 } stub_def;
2239
2240 #define DEF_STUB(x) {elf32_arm_stub_##x, ARRAY_SIZE(elf32_arm_stub_##x)},
2241 static const stub_def stub_definitions[] = {
2242 {NULL, 0},
2243 DEF_STUBS
2244 };
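/* How the X-macro above plays out, for example for DEF_STUB(long_branch_any_any):
   the first expansion adds the enumerator arm_stub_long_branch_any_any, and the
   second adds the table row {elf32_arm_stub_long_branch_any_any,
   ARRAY_SIZE (elf32_arm_stub_long_branch_any_any)}, so stub_definitions[] is
   indexed by enum elf32_arm_stub_type with the leading {NULL, 0} row matching
   arm_stub_none.  */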
2245
2246 struct elf32_arm_stub_hash_entry
2247 {
2248 /* Base hash table entry structure. */
2249 struct bfd_hash_entry root;
2250
2251 /* The stub section. */
2252 asection *stub_sec;
2253
2254 /* Offset within stub_sec of the beginning of this stub. */
2255 bfd_vma stub_offset;
2256
2257 /* Given the symbol's value and its section we can determine its final
2258 value when building the stubs (so the stub knows where to jump). */
2259 bfd_vma target_value;
2260 asection *target_section;
2261
2262 /* Offset to apply to relocation referencing target_value. */
2263 bfd_vma target_addend;
2264
2265 /* The instruction which caused this stub to be generated (only valid for
2266 Cortex-A8 erratum workaround stubs at present). */
2267 unsigned long orig_insn;
2268
2269 /* The stub type. */
2270 enum elf32_arm_stub_type stub_type;
2271 /* Its encoding size in bytes. */
2272 int stub_size;
2273 /* Its template. */
2274 const insn_sequence *stub_template;
2275 /* The size of the template (number of entries). */
2276 int stub_template_size;
2277
2278 /* The symbol table entry, if any, that this was derived from. */
2279 struct elf32_arm_link_hash_entry *h;
2280
2281 /* Destination symbol type (STT_ARM_TFUNC, ...) */
2282 unsigned char st_type;
2283
2284 /* Where this stub is being called from, or, in the case of combined
2285 stub sections, the first input section in the group. */
2286 asection *id_sec;
2287
2288 /* The name for the local symbol at the start of this stub. The
2289 stub name in the hash table has to be unique; this does not, so
2290 it can be friendlier. */
2291 char *output_name;
2292 };
2293
2294 /* Used to build a map of a section. This is required for mixed-endian
2295 code/data. */
2296
2297 typedef struct elf32_elf_section_map
2298 {
2299 bfd_vma vma;
2300 char type;
2301 }
2302 elf32_arm_section_map;
2303
2304 /* Information about a VFP11 erratum veneer, or a branch to such a veneer. */
2305
2306 typedef enum
2307 {
2308 VFP11_ERRATUM_BRANCH_TO_ARM_VENEER,
2309 VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER,
2310 VFP11_ERRATUM_ARM_VENEER,
2311 VFP11_ERRATUM_THUMB_VENEER
2312 }
2313 elf32_vfp11_erratum_type;
2314
2315 typedef struct elf32_vfp11_erratum_list
2316 {
2317 struct elf32_vfp11_erratum_list *next;
2318 bfd_vma vma;
2319 union
2320 {
2321 struct
2322 {
2323 struct elf32_vfp11_erratum_list *veneer;
2324 unsigned int vfp_insn;
2325 } b;
2326 struct
2327 {
2328 struct elf32_vfp11_erratum_list *branch;
2329 unsigned int id;
2330 } v;
2331 } u;
2332 elf32_vfp11_erratum_type type;
2333 }
2334 elf32_vfp11_erratum_list;
2335
2336 typedef enum
2337 {
2338 DELETE_EXIDX_ENTRY,
2339 INSERT_EXIDX_CANTUNWIND_AT_END
2340 }
2341 arm_unwind_edit_type;
2342
2343 /* A (sorted) list of edits to apply to an unwind table. */
2344 typedef struct arm_unwind_table_edit
2345 {
2346 arm_unwind_edit_type type;
2347 /* Note: we sometimes want to insert an unwind entry corresponding to a
2348 section different from the one we're currently writing out, so record the
2349 (text) section this edit relates to here. */
2350 asection *linked_section;
2351 unsigned int index;
2352 struct arm_unwind_table_edit *next;
2353 }
2354 arm_unwind_table_edit;
2355
2356 typedef struct _arm_elf_section_data
2357 {
2358 /* Information about mapping symbols. */
2359 struct bfd_elf_section_data elf;
2360 unsigned int mapcount;
2361 unsigned int mapsize;
2362 elf32_arm_section_map *map;
2363 /* Information about CPU errata. */
2364 unsigned int erratumcount;
2365 elf32_vfp11_erratum_list *erratumlist;
2366 /* Information about unwind tables. */
2367 union
2368 {
2369 /* Unwind info attached to a text section. */
2370 struct
2371 {
2372 asection *arm_exidx_sec;
2373 } text;
2374
2375 /* Unwind info attached to an .ARM.exidx section. */
2376 struct
2377 {
2378 arm_unwind_table_edit *unwind_edit_list;
2379 arm_unwind_table_edit *unwind_edit_tail;
2380 } exidx;
2381 } u;
2382 }
2383 _arm_elf_section_data;
2384
2385 #define elf32_arm_section_data(sec) \
2386 ((_arm_elf_section_data *) elf_section_data (sec))
2387
2388 /* A fix which might be required for the Cortex-A8 Thumb-2 branch/TLB erratum.
2389 These fixes are subject to a relaxation procedure (in elf32_arm_size_stubs),
2390 so may be created multiple times: we use an array of these entries whilst
2391 relaxing, which we can refresh easily, then create stubs for each potentially
2392 erratum-triggering instruction once we've settled on a solution. */
2393
2394 struct a8_erratum_fix {
2395 bfd *input_bfd;
2396 asection *section;
2397 bfd_vma offset;
2398 bfd_vma addend;
2399 unsigned long orig_insn;
2400 char *stub_name;
2401 enum elf32_arm_stub_type stub_type;
2402 };
2403
2404 /* A table of relocs applied to branches which might trigger the
2405 Cortex-A8 erratum. */
2406
2407 struct a8_erratum_reloc {
2408 bfd_vma from;
2409 bfd_vma destination;
2410 unsigned int r_type;
2411 unsigned char st_type;
2412 const char *sym_name;
2413 bfd_boolean non_a8_stub;
2414 };
2415
2416 /* The size of the thread control block. */
2417 #define TCB_SIZE 8
2418
2419 struct elf_arm_obj_tdata
2420 {
2421 struct elf_obj_tdata root;
2422
2423 /* tls_type for each local got entry. */
2424 char *local_got_tls_type;
2425
2426 /* Zero to warn when linking objects with incompatible enum sizes. */
2427 int no_enum_size_warning;
2428
2429 /* Zero to warn when linking objects with incompatible wchar_t sizes. */
2430 int no_wchar_size_warning;
2431 };
2432
2433 #define elf_arm_tdata(bfd) \
2434 ((struct elf_arm_obj_tdata *) (bfd)->tdata.any)
2435
2436 #define elf32_arm_local_got_tls_type(bfd) \
2437 (elf_arm_tdata (bfd)->local_got_tls_type)
2438
2439 #define is_arm_elf(bfd) \
2440 (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
2441 && elf_tdata (bfd) != NULL \
2442 && elf_object_id (bfd) == ARM_ELF_TDATA)
2443
2444 static bfd_boolean
2445 elf32_arm_mkobject (bfd *abfd)
2446 {
2447 return bfd_elf_allocate_object (abfd, sizeof (struct elf_arm_obj_tdata),
2448 ARM_ELF_TDATA);
2449 }
2450
2451 /* The ARM linker needs to keep track of the number of relocs that it
2452 decides to copy in check_relocs for each symbol. This is so that
2453 it can discard PC relative relocs if it doesn't need them when
2454 linking with -Bsymbolic. We store the information in a field
2455 extending the regular ELF linker hash table. */
2456
2457 /* This structure keeps track of the number of relocs we have copied
2458 for a given symbol. */
2459 struct elf32_arm_relocs_copied
2460 {
2461 /* Next section. */
2462 struct elf32_arm_relocs_copied * next;
2463 /* A section in dynobj. */
2464 asection * section;
2465 /* Number of relocs copied in this section. */
2466 bfd_size_type count;
2467 /* Number of PC-relative relocs copied in this section. */
2468 bfd_size_type pc_count;
2469 };
2470
2471 #define elf32_arm_hash_entry(ent) ((struct elf32_arm_link_hash_entry *)(ent))
2472
2473 /* Arm ELF linker hash entry. */
2474 struct elf32_arm_link_hash_entry
2475 {
2476 struct elf_link_hash_entry root;
2477
2478 /* Number of PC relative relocs copied for this symbol. */
2479 struct elf32_arm_relocs_copied * relocs_copied;
2480
2481 /* We reference count Thumb references to a PLT entry separately,
2482 so that we can emit the Thumb trampoline only if needed. */
2483 bfd_signed_vma plt_thumb_refcount;
2484
2485 /* Some references from Thumb code may be eliminated by BL->BLX
2486 conversion, so record them separately. */
2487 bfd_signed_vma plt_maybe_thumb_refcount;
2488
2489 /* Since PLT entries have variable size if the Thumb prologue is
2490 used, we need to record the index into .got.plt instead of
2491 recomputing it from the PLT offset. */
2492 bfd_signed_vma plt_got_offset;
2493
2494 #define GOT_UNKNOWN 0
2495 #define GOT_NORMAL 1
2496 #define GOT_TLS_GD 2
2497 #define GOT_TLS_IE 4
2498 unsigned char tls_type;
2499
2500 /* The symbol marking the real symbol location for exported thumb
2501 symbols with Arm stubs. */
2502 struct elf_link_hash_entry *export_glue;
2503
2504 /* A pointer to the most recently used stub hash entry against this
2505 symbol. */
2506 struct elf32_arm_stub_hash_entry *stub_cache;
2507 };
2508
2509 /* Traverse an arm ELF linker hash table. */
2510 #define elf32_arm_link_hash_traverse(table, func, info) \
2511 (elf_link_hash_traverse \
2512 (&(table)->root, \
2513 (bfd_boolean (*) (struct elf_link_hash_entry *, void *)) (func), \
2514 (info)))
2515
2516 /* Get the ARM elf linker hash table from a link_info structure. */
2517 #define elf32_arm_hash_table(info) \
2518 ((struct elf32_arm_link_hash_table *) ((info)->hash))
2519
2520 #define arm_stub_hash_lookup(table, string, create, copy) \
2521 ((struct elf32_arm_stub_hash_entry *) \
2522 bfd_hash_lookup ((table), (string), (create), (copy)))
2523
2524 /* ARM ELF linker hash table. */
2525 struct elf32_arm_link_hash_table
2526 {
2527 /* The main hash table. */
2528 struct elf_link_hash_table root;
2529
2530 /* The size in bytes of the section containing the Thumb-to-ARM glue. */
2531 bfd_size_type thumb_glue_size;
2532
2533 /* The size in bytes of the section containing the ARM-to-Thumb glue. */
2534 bfd_size_type arm_glue_size;
2535
2536 /* The size in bytes of section containing the ARMv4 BX veneers. */
2537 bfd_size_type bx_glue_size;
2538
2539 /* Offsets of ARMv4 BX veneers. Bit 1 is set if the veneer is present,
2540 and bit 0 is set once the veneer has been populated. */
2541 bfd_vma bx_glue_offset[15];
2542
2543 /* The size in bytes of the section containing glue for VFP11 erratum
2544 veneers. */
2545 bfd_size_type vfp11_erratum_glue_size;
2546
2547 /* A table of fix locations for Cortex-A8 Thumb-2 branch/TLB erratum. This
2548 holds Cortex-A8 erratum fix locations between elf32_arm_size_stubs() and
2549 elf32_arm_write_section(). */
2550 struct a8_erratum_fix *a8_erratum_fixes;
2551 unsigned int num_a8_erratum_fixes;
2552
2553 /* An arbitrary input BFD chosen to hold the glue sections. */
2554 bfd * bfd_of_glue_owner;
2555
2556 /* Nonzero to output a BE8 image. */
2557 int byteswap_code;
2558
2559 /* Zero if R_ARM_TARGET1 means R_ARM_ABS32.
2560 Nonzero if R_ARM_TARGET1 means R_ARM_REL32. */
2561 int target1_is_rel;
2562
2563 /* The relocation to use for R_ARM_TARGET2 relocations. */
2564 int target2_reloc;
2565
2566 /* 0 = Ignore R_ARM_V4BX.
2567 1 = Convert BX to MOV PC.
2568 2 = Generate v4 interworking stubs. */
2569 int fix_v4bx;
2570
2571 /* Whether we should fix the Cortex-A8 Thumb-2 branch/TLB erratum. */
2572 int fix_cortex_a8;
2573
2574 /* Nonzero if the ARM/Thumb BLX instructions are available for use. */
2575 int use_blx;
2576
2577 /* What sort of code sequences we should look for which may trigger the
2578 VFP11 denorm erratum. */
2579 bfd_arm_vfp11_fix vfp11_fix;
2580
2581 /* Global counter for the number of fixes we have emitted. */
2582 int num_vfp11_fixes;
2583
2584 /* Nonzero to force PIC branch veneers. */
2585 int pic_veneer;
2586
2587 /* The number of bytes in the initial entry in the PLT. */
2588 bfd_size_type plt_header_size;
2589
2590 /* The number of bytes in the subsequent PLT entries. */
2591 bfd_size_type plt_entry_size;
2592
2593 /* True if the target system is VxWorks. */
2594 int vxworks_p;
2595
2596 /* True if the target system is Symbian OS. */
2597 int symbian_p;
2598
2599 /* True if the target uses REL relocations. */
2600 int use_rel;
2601
2602 /* Short-cuts to get to dynamic linker sections. */
2603 asection *sgot;
2604 asection *sgotplt;
2605 asection *srelgot;
2606 asection *splt;
2607 asection *srelplt;
2608 asection *sdynbss;
2609 asection *srelbss;
2610
2611 /* The (unloaded but important) VxWorks .rela.plt.unloaded section. */
2612 asection *srelplt2;
2613
2614 /* Data for R_ARM_TLS_LDM32 relocations. */
2615 union
2616 {
2617 bfd_signed_vma refcount;
2618 bfd_vma offset;
2619 } tls_ldm_got;
2620
2621 /* Small local sym cache. */
2622 struct sym_cache sym_cache;
2623
2624 /* For convenience in allocate_dynrelocs. */
2625 bfd * obfd;
2626
2627 /* The stub hash table. */
2628 struct bfd_hash_table stub_hash_table;
2629
2630 /* Linker stub bfd. */
2631 bfd *stub_bfd;
2632
2633 /* Linker call-backs. */
2634 asection * (*add_stub_section) (const char *, asection *);
2635 void (*layout_sections_again) (void);
2636
2637 /* Array to keep track of which stub sections have been created, and
2638 information on stub grouping. */
2639 struct map_stub
2640 {
2641 /* This is the section to which stubs in the group will be
2642 attached. */
2643 asection *link_sec;
2644 /* The stub section. */
2645 asection *stub_sec;
2646 } *stub_group;
2647
2648 /* Assorted information used by elf32_arm_size_stubs. */
2649 unsigned int bfd_count;
2650 int top_index;
2651 asection **input_list;
2652 };
2653
2654 /* Create an entry in an ARM ELF linker hash table. */
2655
2656 static struct bfd_hash_entry *
2657 elf32_arm_link_hash_newfunc (struct bfd_hash_entry * entry,
2658 struct bfd_hash_table * table,
2659 const char * string)
2660 {
2661 struct elf32_arm_link_hash_entry * ret =
2662 (struct elf32_arm_link_hash_entry *) entry;
2663
2664 /* Allocate the structure if it has not already been allocated by a
2665 subclass. */
2666 if (ret == NULL)
2667 ret = bfd_hash_allocate (table, sizeof (struct elf32_arm_link_hash_entry));
2668 if (ret == NULL)
2669 return (struct bfd_hash_entry *) ret;
2670
2671 /* Call the allocation method of the superclass. */
2672 ret = ((struct elf32_arm_link_hash_entry *)
2673 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret,
2674 table, string));
2675 if (ret != NULL)
2676 {
2677 ret->relocs_copied = NULL;
2678 ret->tls_type = GOT_UNKNOWN;
2679 ret->plt_thumb_refcount = 0;
2680 ret->plt_maybe_thumb_refcount = 0;
2681 ret->plt_got_offset = -1;
2682 ret->export_glue = NULL;
2683
2684 ret->stub_cache = NULL;
2685 }
2686
2687 return (struct bfd_hash_entry *) ret;
2688 }
2689
2690 /* Initialize an entry in the stub hash table. */
2691
2692 static struct bfd_hash_entry *
2693 stub_hash_newfunc (struct bfd_hash_entry *entry,
2694 struct bfd_hash_table *table,
2695 const char *string)
2696 {
2697 /* Allocate the structure if it has not already been allocated by a
2698 subclass. */
2699 if (entry == NULL)
2700 {
2701 entry = bfd_hash_allocate (table,
2702 sizeof (struct elf32_arm_stub_hash_entry));
2703 if (entry == NULL)
2704 return entry;
2705 }
2706
2707 /* Call the allocation method of the superclass. */
2708 entry = bfd_hash_newfunc (entry, table, string);
2709 if (entry != NULL)
2710 {
2711 struct elf32_arm_stub_hash_entry *eh;
2712
2713 /* Initialize the local fields. */
2714 eh = (struct elf32_arm_stub_hash_entry *) entry;
2715 eh->stub_sec = NULL;
2716 eh->stub_offset = 0;
2717 eh->target_value = 0;
2718 eh->target_section = NULL;
2719 eh->stub_type = arm_stub_none;
2720 eh->stub_size = 0;
2721 eh->stub_template = NULL;
2722 eh->stub_template_size = 0;
2723 eh->h = NULL;
2724 eh->id_sec = NULL;
2725 }
2726
2727 return entry;
2728 }
2729
2730 /* Create .got, .gotplt, and .rel(a).got sections in DYNOBJ, and set up
2731 shortcuts to them in our hash table. */
2732
2733 static bfd_boolean
2734 create_got_section (bfd *dynobj, struct bfd_link_info *info)
2735 {
2736 struct elf32_arm_link_hash_table *htab;
2737
2738 htab = elf32_arm_hash_table (info);
2739 /* BPABI objects never have a GOT, or associated sections. */
2740 if (htab->symbian_p)
2741 return TRUE;
2742
2743 if (! _bfd_elf_create_got_section (dynobj, info))
2744 return FALSE;
2745
2746 htab->sgot = bfd_get_section_by_name (dynobj, ".got");
2747 htab->sgotplt = bfd_get_section_by_name (dynobj, ".got.plt");
2748 if (!htab->sgot || !htab->sgotplt)
2749 abort ();
2750
2751 htab->srelgot = bfd_get_section_by_name (dynobj,
2752 RELOC_SECTION (htab, ".got"));
2753 if (htab->srelgot == NULL)
2754 return FALSE;
2755 return TRUE;
2756 }
2757
2758 /* Create .plt, .rel(a).plt, .got, .got.plt, .rel(a).got, .dynbss, and
2759 .rel(a).bss sections in DYNOBJ, and set up shortcuts to them in our
2760 hash table. */
2761
2762 static bfd_boolean
2763 elf32_arm_create_dynamic_sections (bfd *dynobj, struct bfd_link_info *info)
2764 {
2765 struct elf32_arm_link_hash_table *htab;
2766
2767 htab = elf32_arm_hash_table (info);
2768 if (!htab->sgot && !create_got_section (dynobj, info))
2769 return FALSE;
2770
2771 if (!_bfd_elf_create_dynamic_sections (dynobj, info))
2772 return FALSE;
2773
2774 htab->splt = bfd_get_section_by_name (dynobj, ".plt");
2775 htab->srelplt = bfd_get_section_by_name (dynobj,
2776 RELOC_SECTION (htab, ".plt"));
2777 htab->sdynbss = bfd_get_section_by_name (dynobj, ".dynbss");
2778 if (!info->shared)
2779 htab->srelbss = bfd_get_section_by_name (dynobj,
2780 RELOC_SECTION (htab, ".bss"));
2781
2782 if (htab->vxworks_p)
2783 {
2784 if (!elf_vxworks_create_dynamic_sections (dynobj, info, &htab->srelplt2))
2785 return FALSE;
2786
2787 if (info->shared)
2788 {
2789 htab->plt_header_size = 0;
2790 htab->plt_entry_size
2791 = 4 * ARRAY_SIZE (elf32_arm_vxworks_shared_plt_entry);
2792 }
2793 else
2794 {
2795 htab->plt_header_size
2796 = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt0_entry);
2797 htab->plt_entry_size
2798 = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt_entry);
2799 }
2800 }
2801
2802 if (!htab->splt
2803 || !htab->srelplt
2804 || !htab->sdynbss
2805 || (!info->shared && !htab->srelbss))
2806 abort ();
2807
2808 return TRUE;
2809 }
2810
2811 /* Copy the extra info we tack onto an elf_link_hash_entry. */
2812
2813 static void
2814 elf32_arm_copy_indirect_symbol (struct bfd_link_info *info,
2815 struct elf_link_hash_entry *dir,
2816 struct elf_link_hash_entry *ind)
2817 {
2818 struct elf32_arm_link_hash_entry *edir, *eind;
2819
2820 edir = (struct elf32_arm_link_hash_entry *) dir;
2821 eind = (struct elf32_arm_link_hash_entry *) ind;
2822
2823 if (eind->relocs_copied != NULL)
2824 {
2825 if (edir->relocs_copied != NULL)
2826 {
2827 struct elf32_arm_relocs_copied **pp;
2828 struct elf32_arm_relocs_copied *p;
2829
2830 /* Add reloc counts against the indirect sym to the direct sym
2831 list. Merge any entries against the same section. */
2832 for (pp = &eind->relocs_copied; (p = *pp) != NULL; )
2833 {
2834 struct elf32_arm_relocs_copied *q;
2835
2836 for (q = edir->relocs_copied; q != NULL; q = q->next)
2837 if (q->section == p->section)
2838 {
2839 q->pc_count += p->pc_count;
2840 q->count += p->count;
2841 *pp = p->next;
2842 break;
2843 }
2844 if (q == NULL)
2845 pp = &p->next;
2846 }
2847 *pp = edir->relocs_copied;
2848 }
2849
2850 edir->relocs_copied = eind->relocs_copied;
2851 eind->relocs_copied = NULL;
2852 }
2853
2854 if (ind->root.type == bfd_link_hash_indirect)
2855 {
2856 /* Copy over PLT info. */
2857 edir->plt_thumb_refcount += eind->plt_thumb_refcount;
2858 eind->plt_thumb_refcount = 0;
2859 edir->plt_maybe_thumb_refcount += eind->plt_maybe_thumb_refcount;
2860 eind->plt_maybe_thumb_refcount = 0;
2861
2862 if (dir->got.refcount <= 0)
2863 {
2864 edir->tls_type = eind->tls_type;
2865 eind->tls_type = GOT_UNKNOWN;
2866 }
2867 }
2868
2869 _bfd_elf_link_hash_copy_indirect (info, dir, ind);
2870 }
2871
2872 /* Create an ARM elf linker hash table. */
2873
2874 static struct bfd_link_hash_table *
2875 elf32_arm_link_hash_table_create (bfd *abfd)
2876 {
2877 struct elf32_arm_link_hash_table *ret;
2878 bfd_size_type amt = sizeof (struct elf32_arm_link_hash_table);
2879
2880 ret = bfd_malloc (amt);
2881 if (ret == NULL)
2882 return NULL;
2883
2884 if (!_bfd_elf_link_hash_table_init (& ret->root, abfd,
2885 elf32_arm_link_hash_newfunc,
2886 sizeof (struct elf32_arm_link_hash_entry)))
2887 {
2888 free (ret);
2889 return NULL;
2890 }
2891
2892 ret->sgot = NULL;
2893 ret->sgotplt = NULL;
2894 ret->srelgot = NULL;
2895 ret->splt = NULL;
2896 ret->srelplt = NULL;
2897 ret->sdynbss = NULL;
2898 ret->srelbss = NULL;
2899 ret->srelplt2 = NULL;
2900 ret->thumb_glue_size = 0;
2901 ret->arm_glue_size = 0;
2902 ret->bx_glue_size = 0;
2903 memset (ret->bx_glue_offset, 0, sizeof (ret->bx_glue_offset));
2904 ret->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
2905 ret->vfp11_erratum_glue_size = 0;
2906 ret->num_vfp11_fixes = 0;
2907 ret->fix_cortex_a8 = 0;
2908 ret->bfd_of_glue_owner = NULL;
2909 ret->byteswap_code = 0;
2910 ret->target1_is_rel = 0;
2911 ret->target2_reloc = R_ARM_NONE;
2912 #ifdef FOUR_WORD_PLT
2913 ret->plt_header_size = 16;
2914 ret->plt_entry_size = 16;
2915 #else
2916 ret->plt_header_size = 20;
2917 ret->plt_entry_size = 12;
2918 #endif
2919 ret->fix_v4bx = 0;
2920 ret->use_blx = 0;
2921 ret->vxworks_p = 0;
2922 ret->symbian_p = 0;
2923 ret->use_rel = 1;
2924 ret->sym_cache.abfd = NULL;
2925 ret->obfd = abfd;
2926 ret->tls_ldm_got.refcount = 0;
2927 ret->stub_bfd = NULL;
2928 ret->add_stub_section = NULL;
2929 ret->layout_sections_again = NULL;
2930 ret->stub_group = NULL;
2931 ret->bfd_count = 0;
2932 ret->top_index = 0;
2933 ret->input_list = NULL;
2934
2935 if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc,
2936 sizeof (struct elf32_arm_stub_hash_entry)))
2937 {
2938 free (ret);
2939 return NULL;
2940 }
2941
2942 return &ret->root.root;
2943 }
2944
2945 /* Free the derived linker hash table. */
2946
2947 static void
2948 elf32_arm_hash_table_free (struct bfd_link_hash_table *hash)
2949 {
2950 struct elf32_arm_link_hash_table *ret
2951 = (struct elf32_arm_link_hash_table *) hash;
2952
2953 bfd_hash_table_free (&ret->stub_hash_table);
2954 _bfd_generic_link_hash_table_free (hash);
2955 }
2956
2957 /* Determine if we're dealing with a Thumb-only architecture. */
2958
2959 static bfd_boolean
2960 using_thumb_only (struct elf32_arm_link_hash_table *globals)
2961 {
2962 int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
2963 Tag_CPU_arch);
2964 int profile;
2965
2966 if (arch != TAG_CPU_ARCH_V7)
2967 return FALSE;
2968
2969 profile = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
2970 Tag_CPU_arch_profile);
2971
2972 return profile == 'M';
2973 }
2974
2975 /* Determine if we're dealing with a Thumb-2 object. */
2976
2977 static bfd_boolean
2978 using_thumb2 (struct elf32_arm_link_hash_table *globals)
2979 {
2980 int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
2981 Tag_CPU_arch);
2982 return arch == TAG_CPU_ARCH_V6T2 || arch >= TAG_CPU_ARCH_V7;
2983 }
2984
2985 static bfd_boolean
2986 arm_stub_is_thumb (enum elf32_arm_stub_type stub_type)
2987 {
2988 switch (stub_type)
2989 {
2990 case arm_stub_long_branch_thumb_only:
2991 case arm_stub_long_branch_v4t_thumb_arm:
2992 case arm_stub_short_branch_v4t_thumb_arm:
2993 case arm_stub_long_branch_v4t_thumb_arm_pic:
2994 case arm_stub_long_branch_thumb_only_pic:
2995 return TRUE;
2996 case arm_stub_none:
2997 BFD_FAIL ();
2998 return FALSE;
3000 default:
3001 return FALSE;
3002 }
3003 }
3004
3005 /* Determine the type of stub needed, if any, for a call. */
3006
3007 static enum elf32_arm_stub_type
3008 arm_type_of_stub (struct bfd_link_info *info,
3009 asection *input_sec,
3010 const Elf_Internal_Rela *rel,
3011 unsigned char st_type,
3012 struct elf32_arm_link_hash_entry *hash,
3013 bfd_vma destination,
3014 asection *sym_sec,
3015 bfd *input_bfd,
3016 const char *name)
3017 {
3018 bfd_vma location;
3019 bfd_signed_vma branch_offset;
3020 unsigned int r_type;
3021 struct elf32_arm_link_hash_table * globals;
3022 int thumb2;
3023 int thumb_only;
3024 enum elf32_arm_stub_type stub_type = arm_stub_none;
3025 int use_plt = 0;
3026
3027 /* We don't know the actual type of destination in case it is of
3028 type STT_SECTION: give up. */
3029 if (st_type == STT_SECTION)
3030 return stub_type;
3031
3032 globals = elf32_arm_hash_table (info);
3033
3034 thumb_only = using_thumb_only (globals);
3035
3036 thumb2 = using_thumb2 (globals);
3037
3038 /* Determine where the call point is. */
3039 location = (input_sec->output_offset
3040 + input_sec->output_section->vma
3041 + rel->r_offset);
3042
3043 branch_offset = (bfd_signed_vma)(destination - location);
3044
3045 r_type = ELF32_R_TYPE (rel->r_info);
3046
3047 /* Keep a simpler condition, for the sake of clarity. */
3048 if (globals->splt != NULL && hash != NULL && hash->root.plt.offset != (bfd_vma) -1)
3049 {
3050 use_plt = 1;
3051 /* Note when dealing with PLT entries: the main PLT stub is in
3052 ARM mode, so if the branch is in Thumb mode, another
3053 Thumb->ARM stub will be inserted later just before the ARM
3054 PLT stub. We don't take this extra distance into account
3055 here, because if a long branch stub is needed, we'll add a
3056 Thumb->ARM one and branch directly to the ARM PLT entry,
3057 which avoids spreading offset corrections across several
3058 places. */
3059 }
3060
3061 if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24)
3062 {
3063 /* Handle cases where:
3064 - this call goes too far (different Thumb/Thumb2 max
3065 distance)
3066 - it's a Thumb->Arm call and blx is not available, or it's a
3067 Thumb->Arm branch (not bl). A stub is needed in this case,
3068 but only if this call is not through a PLT entry. Indeed,
3069 PLT stubs handle mode switching already.
3070 */
3071 if ((!thumb2
3072 && (branch_offset > THM_MAX_FWD_BRANCH_OFFSET
3073 || (branch_offset < THM_MAX_BWD_BRANCH_OFFSET)))
3074 || (thumb2
3075 && (branch_offset > THM2_MAX_FWD_BRANCH_OFFSET
3076 || (branch_offset < THM2_MAX_BWD_BRANCH_OFFSET)))
3077 || ((st_type != STT_ARM_TFUNC)
3078 && (((r_type == R_ARM_THM_CALL) && !globals->use_blx)
3079 || (r_type == R_ARM_THM_JUMP24))
3080 && !use_plt))
3081 {
3082 if (st_type == STT_ARM_TFUNC)
3083 {
3084 /* Thumb to thumb. */
3085 if (!thumb_only)
3086 {
3087 stub_type = (info->shared | globals->pic_veneer)
3088 /* PIC stubs. */
3089 ? ((globals->use_blx
3090 && (r_type == R_ARM_THM_CALL))
3091 /* V5T and above. Stub starts with ARM code, so
3092 we must be able to switch mode before
3093 reaching it, which is only possible for 'bl'
3094 (ie R_ARM_THM_CALL relocation). */
3095 ? arm_stub_long_branch_any_thumb_pic
3096 /* On V4T, use Thumb code only. */
3097 : arm_stub_long_branch_v4t_thumb_thumb_pic)
3098
3099 /* non-PIC stubs. */
3100 : ((globals->use_blx
3101 && (r_type == R_ARM_THM_CALL))
3102 /* V5T and above. */
3103 ? arm_stub_long_branch_any_any
3104 /* V4T. */
3105 : arm_stub_long_branch_v4t_thumb_thumb);
3106 }
3107 else
3108 {
3109 stub_type = (info->shared | globals->pic_veneer)
3110 /* PIC stub. */
3111 ? arm_stub_long_branch_thumb_only_pic
3112 /* non-PIC stub. */
3113 : arm_stub_long_branch_thumb_only;
3114 }
3115 }
3116 else
3117 {
3118 /* Thumb to arm. */
3119 if (sym_sec != NULL
3120 && sym_sec->owner != NULL
3121 && !INTERWORK_FLAG (sym_sec->owner))
3122 {
3123 (*_bfd_error_handler)
3124 (_("%B(%s): warning: interworking not enabled.\n"
3125 " first occurrence: %B: Thumb call to ARM"),
3126 sym_sec->owner, input_bfd, name);
3127 }
3128
3129 stub_type = (info->shared | globals->pic_veneer)
3130 /* PIC stubs. */
3131 ? ((globals->use_blx
3132 && (r_type == R_ARM_THM_CALL))
3133 /* V5T and above. */
3134 ? arm_stub_long_branch_any_arm_pic
3135 /* V4T PIC stub. */
3136 : arm_stub_long_branch_v4t_thumb_arm_pic)
3137
3138 /* non-PIC stubs. */
3139 : ((globals->use_blx
3140 && (r_type == R_ARM_THM_CALL))
3141 /* V5T and above. */
3142 ? arm_stub_long_branch_any_any
3143 /* V4T. */
3144 : arm_stub_long_branch_v4t_thumb_arm);
3145
3146 /* Handle v4t short branches. */
3147 if ((stub_type == arm_stub_long_branch_v4t_thumb_arm)
3148 && (branch_offset <= THM_MAX_FWD_BRANCH_OFFSET)
3149 && (branch_offset >= THM_MAX_BWD_BRANCH_OFFSET))
3150 stub_type = arm_stub_short_branch_v4t_thumb_arm;
3151 }
3152 }
3153 }
3154 else if (r_type == R_ARM_CALL || r_type == R_ARM_JUMP24 || r_type == R_ARM_PLT32)
3155 {
3156 if (st_type == STT_ARM_TFUNC)
3157 {
3158 /* Arm to thumb. */
3159
3160 if (sym_sec != NULL
3161 && sym_sec->owner != NULL
3162 && !INTERWORK_FLAG (sym_sec->owner))
3163 {
3164 (*_bfd_error_handler)
3165 (_("%B(%s): warning: interworking not enabled.\n"
3166 " first occurrence: %B: ARM call to Thumb"),
3167 sym_sec->owner, input_bfd, name);
3168 }
3169
3170 /* We have an extra 2 bytes of reach because of
3171 the mode change (bit 24 (H) of the BLX encoding). */
3172 if (branch_offset > (ARM_MAX_FWD_BRANCH_OFFSET + 2)
3173 || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET)
3174 || ((r_type == R_ARM_CALL) && !globals->use_blx)
3175 || (r_type == R_ARM_JUMP24)
3176 || (r_type == R_ARM_PLT32))
3177 {
3178 stub_type = (info->shared | globals->pic_veneer)
3179 /* PIC stubs. */
3180 ? ((globals->use_blx)
3181 /* V5T and above. */
3182 ? arm_stub_long_branch_any_thumb_pic
3183 /* V4T stub. */
3184 : arm_stub_long_branch_v4t_arm_thumb_pic)
3185
3186 /* non-PIC stubs. */
3187 : ((globals->use_blx)
3188 /* V5T and above. */
3189 ? arm_stub_long_branch_any_any
3190 /* V4T. */
3191 : arm_stub_long_branch_v4t_arm_thumb);
3192 }
3193 }
3194 else
3195 {
3196 /* Arm to arm. */
3197 if (branch_offset > ARM_MAX_FWD_BRANCH_OFFSET
3198 || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET))
3199 {
3200 stub_type = (info->shared | globals->pic_veneer)
3201 /* PIC stubs. */
3202 ? arm_stub_long_branch_any_arm_pic
3203 /* non-PIC stubs. */
3204 : arm_stub_long_branch_any_any;
3205 }
3206 }
3207 }
3208
3209 return stub_type;
3210 }
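/* A worked example of the selection above (hypothetical numbers): with
   info->shared and globals->pic_veneer both clear, a Thumb-2
   R_ARM_THM_CALL to an ARM-mode symbol about 20MB away exceeds
   THM2_MAX_FWD_BRANCH_OFFSET (roughly 16MB), so with use_blx set this
   returns arm_stub_long_branch_any_any; without BLX it would pick
   arm_stub_long_branch_v4t_thumb_arm instead (still too far for the
   short-branch variant), and a shared or PIC-veneer link would get the
   corresponding _pic stub.  */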
3211
3212 /* Build a name for an entry in the stub hash table. */
3213
3214 static char *
3215 elf32_arm_stub_name (const asection *input_section,
3216 const asection *sym_sec,
3217 const struct elf32_arm_link_hash_entry *hash,
3218 const Elf_Internal_Rela *rel)
3219 {
3220 char *stub_name;
3221 bfd_size_type len;
3222
3223 if (hash)
3224 {
3225 len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 8 + 1;
3226 stub_name = bfd_malloc (len);
3227 if (stub_name != NULL)
3228 sprintf (stub_name, "%08x_%s+%x",
3229 input_section->id & 0xffffffff,
3230 hash->root.root.root.string,
3231 (int) rel->r_addend & 0xffffffff);
3232 }
3233 else
3234 {
3235 len = 8 + 1 + 8 + 1 + 8 + 1 + 8 + 1;
3236 stub_name = bfd_malloc (len);
3237 if (stub_name != NULL)
3238 sprintf (stub_name, "%08x_%x:%x+%x",
3239 input_section->id & 0xffffffff,
3240 sym_sec->id & 0xffffffff,
3241 (int) ELF32_R_SYM (rel->r_info) & 0xffffffff,
3242 (int) rel->r_addend & 0xffffffff);
3243 }
3244
3245 return stub_name;
3246 }
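/* Example names produced by the formats above (section and symbol ids are
   hypothetical): a stub for a call to the global symbol "printf" from
   section id 0x2a with a zero addend is named "0000002a_printf+0", while a
   stub for local symbol number 7 in the section with id 0x1f is named
   "0000002a_1f:7+0".  */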
3247
3248 /* Look up an entry in the stub hash. Stub entries are cached because
3249 creating the stub name takes a bit of time. */
3250
3251 static struct elf32_arm_stub_hash_entry *
3252 elf32_arm_get_stub_entry (const asection *input_section,
3253 const asection *sym_sec,
3254 struct elf_link_hash_entry *hash,
3255 const Elf_Internal_Rela *rel,
3256 struct elf32_arm_link_hash_table *htab)
3257 {
3258 struct elf32_arm_stub_hash_entry *stub_entry;
3259 struct elf32_arm_link_hash_entry *h = (struct elf32_arm_link_hash_entry *) hash;
3260 const asection *id_sec;
3261
3262 if ((input_section->flags & SEC_CODE) == 0)
3263 return NULL;
3264
3265 /* If this input section is part of a group of sections sharing one
3266 stub section, then use the id of the first section in the group.
3267 Stub names need to include a section id, as there may well be
3268 more than one stub used to reach say, printf, and we need to
3269 distinguish between them. */
3270 id_sec = htab->stub_group[input_section->id].link_sec;
3271
3272 if (h != NULL && h->stub_cache != NULL
3273 && h->stub_cache->h == h
3274 && h->stub_cache->id_sec == id_sec)
3275 {
3276 stub_entry = h->stub_cache;
3277 }
3278 else
3279 {
3280 char *stub_name;
3281
3282 stub_name = elf32_arm_stub_name (id_sec, sym_sec, h, rel);
3283 if (stub_name == NULL)
3284 return NULL;
3285
3286 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table,
3287 stub_name, FALSE, FALSE);
3288 if (h != NULL)
3289 h->stub_cache = stub_entry;
3290
3291 free (stub_name);
3292 }
3293
3294 return stub_entry;
3295 }
3296
3297 /* Find or create a stub section. Returns a pointer to the stub section, and
3298 the section to which the stub section will be attached (in *LINK_SEC_P).
3299 LINK_SEC_P may be NULL. */
3300
3301 static asection *
3302 elf32_arm_create_or_find_stub_sec (asection **link_sec_p, asection *section,
3303 struct elf32_arm_link_hash_table *htab)
3304 {
3305 asection *link_sec;
3306 asection *stub_sec;
3307
3308 link_sec = htab->stub_group[section->id].link_sec;
3309 stub_sec = htab->stub_group[section->id].stub_sec;
3310 if (stub_sec == NULL)
3311 {
3312 stub_sec = htab->stub_group[link_sec->id].stub_sec;
3313 if (stub_sec == NULL)
3314 {
3315 size_t namelen;
3316 bfd_size_type len;
3317 char *s_name;
3318
3319 namelen = strlen (link_sec->name);
3320 len = namelen + sizeof (STUB_SUFFIX);
3321 s_name = bfd_alloc (htab->stub_bfd, len);
3322 if (s_name == NULL)
3323 return NULL;
3324
3325 memcpy (s_name, link_sec->name, namelen);
3326 memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
3327 stub_sec = (*htab->add_stub_section) (s_name, link_sec);
3328 if (stub_sec == NULL)
3329 return NULL;
3330 htab->stub_group[link_sec->id].stub_sec = stub_sec;
3331 }
3332 htab->stub_group[section->id].stub_sec = stub_sec;
3333 }
3334
3335 if (link_sec_p)
3336 *link_sec_p = link_sec;
3337
3338 return stub_sec;
3339 }
3340
3341 /* Add a new stub entry to the stub hash. Not all fields of the new
3342 stub entry are initialised. */
3343
3344 static struct elf32_arm_stub_hash_entry *
3345 elf32_arm_add_stub (const char *stub_name,
3346 asection *section,
3347 struct elf32_arm_link_hash_table *htab)
3348 {
3349 asection *link_sec;
3350 asection *stub_sec;
3351 struct elf32_arm_stub_hash_entry *stub_entry;
3352
3353 stub_sec = elf32_arm_create_or_find_stub_sec (&link_sec, section, htab);
3354 if (stub_sec == NULL)
3355 return NULL;
3356
3357 /* Enter this entry into the linker stub hash table. */
3358 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
3359 TRUE, FALSE);
3360 if (stub_entry == NULL)
3361 {
3362 (*_bfd_error_handler) (_("%B: cannot create stub entry %s"),
3363 section->owner,
3364 stub_name);
3365 return NULL;
3366 }
3367
3368 stub_entry->stub_sec = stub_sec;
3369 stub_entry->stub_offset = 0;
3370 stub_entry->id_sec = link_sec;
3371
3372 return stub_entry;
3373 }
3374
3375 /* Store an Arm insn into an output section not processed by
3376 elf32_arm_write_section. */
3377
3378 static void
3379 put_arm_insn (struct elf32_arm_link_hash_table * htab,
3380 bfd * output_bfd, bfd_vma val, void * ptr)
3381 {
3382 if (htab->byteswap_code != bfd_little_endian (output_bfd))
3383 bfd_putl32 (val, ptr);
3384 else
3385 bfd_putb32 (val, ptr);
3386 }
3387
3388 /* Store a 16-bit Thumb insn into an output section not processed by
3389 elf32_arm_write_section. */
3390
3391 static void
3392 put_thumb_insn (struct elf32_arm_link_hash_table * htab,
3393 bfd * output_bfd, bfd_vma val, void * ptr)
3394 {
3395 if (htab->byteswap_code != bfd_little_endian (output_bfd))
3396 bfd_putl16 (val, ptr);
3397 else
3398 bfd_putb16 (val, ptr);
3399 }
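/* These two helpers exist to support BE8 images: when byteswap_code is set
   on a big-endian output, instructions must still be stored little-endian
   even though data stays big-endian.  The test above therefore selects
   bfd_putl32 / bfd_putl16 both for ordinary little-endian output and for
   byteswapped code in a big-endian image, and a big-endian store
   otherwise.  */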
3400
3401 static bfd_reloc_status_type elf32_arm_final_link_relocate
3402 (reloc_howto_type *, bfd *, bfd *, asection *, bfd_byte *,
3403 Elf_Internal_Rela *, bfd_vma, struct bfd_link_info *, asection *,
3404 const char *, int, struct elf_link_hash_entry *, bfd_boolean *, char **);
3405
3406 static bfd_boolean
3407 arm_build_one_stub (struct bfd_hash_entry *gen_entry,
3408 void * in_arg)
3409 {
3410 #define MAXRELOCS 2
3411 struct elf32_arm_stub_hash_entry *stub_entry;
3412 struct bfd_link_info *info;
3413 struct elf32_arm_link_hash_table *htab;
3414 asection *stub_sec;
3415 bfd *stub_bfd;
3416 bfd_vma stub_addr;
3417 bfd_byte *loc;
3418 bfd_vma sym_value;
3419 int template_size;
3420 int size;
3421 const insn_sequence *template;
3422 int i;
3423 struct elf32_arm_link_hash_table * globals;
3424 int stub_reloc_idx[MAXRELOCS] = {-1, -1};
3425 int stub_reloc_offset[MAXRELOCS] = {0, 0};
3426 int nrelocs = 0;
3427
3428 /* Massage our args to the form they really have. */
3429 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
3430 info = (struct bfd_link_info *) in_arg;
3431
3432 globals = elf32_arm_hash_table (info);
3433
3434 htab = elf32_arm_hash_table (info);
3435 stub_sec = stub_entry->stub_sec;
3436
3437 /* Make a note of the offset within the stubs for this entry. */
3438 stub_entry->stub_offset = stub_sec->size;
3439 loc = stub_sec->contents + stub_entry->stub_offset;
3440
3441 stub_bfd = stub_sec->owner;
3442
3443 /* This is the address of the start of the stub. */
3444 stub_addr = stub_sec->output_section->vma + stub_sec->output_offset
3445 + stub_entry->stub_offset;
3446
3447 /* This is the address of the stub destination. */
3448 sym_value = (stub_entry->target_value
3449 + stub_entry->target_section->output_offset
3450 + stub_entry->target_section->output_section->vma);
3451
3452 template = stub_entry->stub_template;
3453 template_size = stub_entry->stub_template_size;
3454
3455 size = 0;
3456 for (i = 0; i < template_size; i++)
3457 {
3458 switch (template[i].type)
3459 {
3460 case THUMB16_TYPE:
3461 {
3462 bfd_vma data = template[i].data;
3463 if (template[i].reloc_addend != 0)
3464 {
3465 /* We've borrowed the reloc_addend field to mean we should
3466 insert a condition code into this (Thumb-1 branch)
3467 instruction. See THUMB16_BCOND_INSN. */
3468 BFD_ASSERT ((data & 0xff00) == 0xd000);
3469 data |= ((stub_entry->orig_insn >> 22) & 0xf) << 8;
3470 }
3471 put_thumb_insn (globals, stub_bfd, data, loc + size);
3472 size += 2;
3473 }
3474 break;
3475
3476 case THUMB32_TYPE:
3477 put_thumb_insn (globals, stub_bfd, (template[i].data >> 16) & 0xffff,
3478 loc + size);
3479 put_thumb_insn (globals, stub_bfd, template[i].data & 0xffff,
3480 loc + size + 2);
3481 if (template[i].r_type != R_ARM_NONE)
3482 {
3483 stub_reloc_idx[nrelocs] = i;
3484 stub_reloc_offset[nrelocs++] = size;
3485 }
3486 size += 4;
3487 break;
3488
3489 case ARM_TYPE:
3490 put_arm_insn (globals, stub_bfd, template[i].data, loc + size);
3491 /* Handle cases where the target is encoded within the
3492 instruction. */
3493 if (template[i].r_type == R_ARM_JUMP24)
3494 {
3495 stub_reloc_idx[nrelocs] = i;
3496 stub_reloc_offset[nrelocs++] = size;
3497 }
3498 size += 4;
3499 break;
3500
3501 case DATA_TYPE:
3502 bfd_put_32 (stub_bfd, template[i].data, loc + size);
3503 stub_reloc_idx[nrelocs] = i;
3504 stub_reloc_offset[nrelocs++] = size;
3505 size += 4;
3506 break;
3507
3508 default:
3509 BFD_FAIL ();
3510 return FALSE;
3511 }
3512 }
3513
3514 stub_sec->size += size;
3515
3516 /* Stub size has already been computed in arm_size_one_stub. Check
3517 consistency. */
3518 BFD_ASSERT (size == stub_entry->stub_size);
3519
3520 /* Destination is Thumb. Force bit 0 to 1 to reflect this. */
3521 if (stub_entry->st_type == STT_ARM_TFUNC)
3522 sym_value |= 1;
3523
3524   /* Assume each stub has at least one and at most MAXRELOCS entries
3525      to relocate.  */
3526 BFD_ASSERT (nrelocs != 0 && nrelocs <= MAXRELOCS);
3527
3528 for (i = 0; i < nrelocs; i++)
3529 if (template[stub_reloc_idx[i]].r_type == R_ARM_THM_JUMP24
3530 || template[stub_reloc_idx[i]].r_type == R_ARM_THM_JUMP19
3531 || template[stub_reloc_idx[i]].r_type == R_ARM_THM_CALL
3532 || template[stub_reloc_idx[i]].r_type == R_ARM_THM_XPC22)
3533 {
3534 Elf_Internal_Rela rel;
3535 bfd_boolean unresolved_reloc;
3536 char *error_message;
3537 int sym_flags
3538 = (template[stub_reloc_idx[i]].r_type != R_ARM_THM_XPC22)
3539 ? STT_ARM_TFUNC : 0;
3540 bfd_vma points_to = sym_value + stub_entry->target_addend;
3541
3542 rel.r_offset = stub_entry->stub_offset + stub_reloc_offset[i];
3543 rel.r_info = ELF32_R_INFO (0, template[stub_reloc_idx[i]].r_type);
3544 rel.r_addend = template[stub_reloc_idx[i]].reloc_addend;
3545
3546 if (stub_entry->stub_type == arm_stub_a8_veneer_b_cond && i == 0)
3547 /* The first relocation in the elf32_arm_stub_a8_veneer_b_cond[]
3548 template should refer back to the instruction after the original
3549 branch. */
3550 points_to = sym_value;
3551
3552 /* Note: _bfd_final_link_relocate doesn't handle these relocations
3553 properly. We should probably use this function unconditionally,
3554 rather than only for certain relocations listed in the enclosing
3555 conditional, for the sake of consistency. */
3556 elf32_arm_final_link_relocate (elf32_arm_howto_from_type
3557 (template[stub_reloc_idx[i]].r_type),
3558 stub_bfd, info->output_bfd, stub_sec, stub_sec->contents, &rel,
3559 points_to, info, stub_entry->target_section, "", sym_flags,
3560 (struct elf_link_hash_entry *) stub_entry, &unresolved_reloc,
3561 &error_message);
3562 }
3563 else
3564 {
3565 _bfd_final_link_relocate (elf32_arm_howto_from_type
3566 (template[stub_reloc_idx[i]].r_type), stub_bfd, stub_sec,
3567 stub_sec->contents, stub_entry->stub_offset + stub_reloc_offset[i],
3568 sym_value + stub_entry->target_addend,
3569 template[stub_reloc_idx[i]].reloc_addend);
3570 }
3571
3572 return TRUE;
3573 #undef MAXRELOCS
3574 }
3575
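/* Illustrative sketch only (not used by the code above): how the condition
   field of the original 32-bit Thumb-2 B<c>.W instruction can be folded into
   the Thumb-1 conditional branch slot of a stub template, as arm_build_one_stub
   does for THUMB16_BCOND_INSN entries.  The helper name and the plain C types
   are assumptions made for this example.  */

static unsigned short
example_fold_thumb2_cond (unsigned long orig_insn, unsigned short tmpl)
{
  /* Bits 25:22 of the combined 32-bit Thumb-2 encoding carry the condition.  */
  unsigned long cond = (orig_insn >> 22) & 0xf;

  /* The template entry is expected to be a Thumb-1 B<c> with an empty
     condition field (0xd0xx); place the condition at bits 11:8.  */
  return (unsigned short) (tmpl | (cond << 8));
}
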
3576 /* Calculate the template, template size and instruction size for a stub.
3577 Return value is the instruction size. */
3578
3579 static unsigned int
3580 find_stub_size_and_template (enum elf32_arm_stub_type stub_type,
3581 const insn_sequence **stub_template,
3582 int *stub_template_size)
3583 {
3584 const insn_sequence *template = NULL;
3585 int template_size = 0, i;
3586 unsigned int size;
3587
3588 template = stub_definitions[stub_type].template;
3589 template_size = stub_definitions[stub_type].template_size;
3590
3591 size = 0;
3592 for (i = 0; i < template_size; i++)
3593 {
3594 switch (template[i].type)
3595 {
3596 case THUMB16_TYPE:
3597 size += 2;
3598 break;
3599
3600 case ARM_TYPE:
3601 case THUMB32_TYPE:
3602 case DATA_TYPE:
3603 size += 4;
3604 break;
3605
3606 default:
3607 BFD_FAIL ();
3608 return FALSE;
3609 }
3610 }
3611
3612 if (stub_template)
3613 *stub_template = template;
3614
3615 if (stub_template_size)
3616 *stub_template_size = template_size;
3617
3618 return size;
3619 }
3620
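/* Worked example (informative only): a template consisting of two THUMB16_TYPE
   entries, one THUMB32_TYPE entry and one DATA_TYPE entry adds up to
   2 + 2 + 4 + 4 = 12 bytes, which is the value find_stub_size_and_template
   returns for it.  */
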
3621 /* As above, but don't actually build the stub. Just bump offset so
3622 we know stub section sizes. */
3623
3624 static bfd_boolean
3625 arm_size_one_stub (struct bfd_hash_entry *gen_entry,
3626 void * in_arg)
3627 {
3628 struct elf32_arm_stub_hash_entry *stub_entry;
3629 struct elf32_arm_link_hash_table *htab;
3630 const insn_sequence *template;
3631 int template_size, size;
3632
3633 /* Massage our args to the form they really have. */
3634 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
3635 htab = (struct elf32_arm_link_hash_table *) in_arg;
3636
3637   BFD_ASSERT ((stub_entry->stub_type > arm_stub_none)
3638 	      && stub_entry->stub_type < ARRAY_SIZE (stub_definitions));
3639
3640 size = find_stub_size_and_template (stub_entry->stub_type, &template,
3641 &template_size);
3642
3643 stub_entry->stub_size = size;
3644 stub_entry->stub_template = template;
3645 stub_entry->stub_template_size = template_size;
3646
3647 size = (size + 7) & ~7;
3648 stub_entry->stub_sec->size += size;
3649
3650 return TRUE;
3651 }
3652
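/* Illustrative sketch only: the rounding applied above keeps each stub's
   contribution to its section 8-byte aligned, e.g. a 12-byte stub reserves
   16 bytes.  The helper name is an assumption made for this example.  */

static int
example_round_up_stub_size (int size)
{
  /* 1..8 -> 8, 9..16 -> 16, and so on.  */
  return (size + 7) & ~7;
}
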
3653 /* External entry points for sizing and building linker stubs. */
3654
3655 /* Set up various things so that we can make a list of input sections
3656 for each output section included in the link. Returns -1 on error,
3657 0 when no stubs will be needed, and 1 on success. */
3658
3659 int
3660 elf32_arm_setup_section_lists (bfd *output_bfd,
3661 struct bfd_link_info *info)
3662 {
3663 bfd *input_bfd;
3664 unsigned int bfd_count;
3665 int top_id, top_index;
3666 asection *section;
3667 asection **input_list, **list;
3668 bfd_size_type amt;
3669 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
3670
3671 if (! is_elf_hash_table (htab))
3672 return 0;
3673
3674 /* Count the number of input BFDs and find the top input section id. */
3675 for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0;
3676 input_bfd != NULL;
3677 input_bfd = input_bfd->link_next)
3678 {
3679 bfd_count += 1;
3680 for (section = input_bfd->sections;
3681 section != NULL;
3682 section = section->next)
3683 {
3684 if (top_id < section->id)
3685 top_id = section->id;
3686 }
3687 }
3688 htab->bfd_count = bfd_count;
3689
3690 amt = sizeof (struct map_stub) * (top_id + 1);
3691 htab->stub_group = bfd_zmalloc (amt);
3692 if (htab->stub_group == NULL)
3693 return -1;
3694
3695 /* We can't use output_bfd->section_count here to find the top output
3696 section index as some sections may have been removed, and
3697 _bfd_strip_section_from_output doesn't renumber the indices. */
3698 for (section = output_bfd->sections, top_index = 0;
3699 section != NULL;
3700 section = section->next)
3701 {
3702 if (top_index < section->index)
3703 top_index = section->index;
3704 }
3705
3706 htab->top_index = top_index;
3707 amt = sizeof (asection *) * (top_index + 1);
3708 input_list = bfd_malloc (amt);
3709 htab->input_list = input_list;
3710 if (input_list == NULL)
3711 return -1;
3712
3713 /* For sections we aren't interested in, mark their entries with a
3714 value we can check later. */
3715 list = input_list + top_index;
3716 do
3717 *list = bfd_abs_section_ptr;
3718 while (list-- != input_list);
3719
3720 for (section = output_bfd->sections;
3721 section != NULL;
3722 section = section->next)
3723 {
3724 if ((section->flags & SEC_CODE) != 0)
3725 input_list[section->index] = NULL;
3726 }
3727
3728 return 1;
3729 }
3730
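/* Informative note: after elf32_arm_setup_section_lists an input_list entry
   can be in one of three states.  bfd_abs_section_ptr marks an output section
   we will never place stubs in (it is not SEC_CODE), NULL marks a code output
   section whose list of input sections is still empty, and anything else is
   the head of the (reversed) list built up by elf32_arm_next_input_section
   below.  */
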
3731 /* The linker repeatedly calls this function for each input section,
3732 in the order that input sections are linked into output sections.
3733 Build lists of input sections to determine groupings between which
3734 we may insert linker stubs. */
3735
3736 void
3737 elf32_arm_next_input_section (struct bfd_link_info *info,
3738 asection *isec)
3739 {
3740 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
3741
3742 if (isec->output_section->index <= htab->top_index)
3743 {
3744 asection **list = htab->input_list + isec->output_section->index;
3745
3746 if (*list != bfd_abs_section_ptr)
3747 {
3748 /* Steal the link_sec pointer for our list. */
3749 #define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
3750 /* This happens to make the list in reverse order,
3751 which we reverse later. */
3752 PREV_SEC (isec) = *list;
3753 *list = isec;
3754 }
3755 }
3756 }
3757
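/* Illustrative sketch only (hypothetical type and helper names): the
   list-building pattern used here.  elf32_arm_next_input_section pushes each
   input section onto the head of a per-output-section list, so the list ends
   up in reverse link order, and group_sections later pops it to restore the
   original order, reusing the same pointer field as the forward link.  */

struct example_node
{
  struct example_node *prev;
};

static struct example_node *
example_reverse_list (struct example_node *tail)
{
  struct example_node *head = NULL;

  while (tail != NULL)
    {
      /* Pop from tail.  */
      struct example_node *item = tail;
      tail = item->prev;

      /* Push on head, reusing the "prev" field as the "next" link,
	 exactly as NEXT_SEC is defined to be PREV_SEC below.  */
      item->prev = head;
      head = item;
    }

  return head;
}
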
3758 /* See whether we can group stub sections together. Grouping stub
3759 sections may result in fewer stubs. More importantly, we need to
3760 put all .init* and .fini* stubs at the end of the .init or
3761 .fini output sections respectively, because glibc splits the
3762 _init and _fini functions into multiple parts. Putting a stub in
3763 the middle of a function is not a good idea. */
3764
3765 static void
3766 group_sections (struct elf32_arm_link_hash_table *htab,
3767 bfd_size_type stub_group_size,
3768 bfd_boolean stubs_always_after_branch)
3769 {
3770 asection **list = htab->input_list;
3771
3772 do
3773 {
3774 asection *tail = *list;
3775 asection *head;
3776
3777 if (tail == bfd_abs_section_ptr)
3778 continue;
3779
3780 /* Reverse the list: we must avoid placing stubs at the
3781 beginning of the section because the beginning of the text
3782 section may be required for an interrupt vector in bare metal
3783 code. */
3784 #define NEXT_SEC PREV_SEC
3785 head = NULL;
3786 while (tail != NULL)
3787 {
3788 /* Pop from tail. */
3789 asection *item = tail;
3790 tail = PREV_SEC (item);
3791
3792 /* Push on head. */
3793 NEXT_SEC (item) = head;
3794 head = item;
3795 }
3796
3797 while (head != NULL)
3798 {
3799 asection *curr;
3800 asection *next;
3801 bfd_vma stub_group_start = head->output_offset;
3802 bfd_vma end_of_next;
3803
3804 curr = head;
3805 while (NEXT_SEC (curr) != NULL)
3806 {
3807 next = NEXT_SEC (curr);
3808 end_of_next = next->output_offset + next->size;
3809 if (end_of_next - stub_group_start >= stub_group_size)
3810 /* End of NEXT is too far from start, so stop. */
3811 break;
3812 /* Add NEXT to the group. */
3813 curr = next;
3814 }
3815
3816 /* OK, the size from the start to the start of CURR is less
3817 than stub_group_size and thus can be handled by one stub
3818 section. (Or the head section is itself larger than
3819 stub_group_size, in which case we may be toast.)
3820 We should really be keeping track of the total size of
3821 stubs added here, as stubs contribute to the final output
3822 section size. */
3823 do
3824 {
3825 next = NEXT_SEC (head);
3826 /* Set up this stub group. */
3827 htab->stub_group[head->id].link_sec = curr;
3828 }
3829 while (head != curr && (head = next) != NULL);
3830
3831 /* But wait, there's more! Input sections up to stub_group_size
3832 bytes after the stub section can be handled by it too. */
3833 if (!stubs_always_after_branch)
3834 {
3835 stub_group_start = curr->output_offset + curr->size;
3836
3837 while (next != NULL)
3838 {
3839 end_of_next = next->output_offset + next->size;
3840 if (end_of_next - stub_group_start >= stub_group_size)
3841 /* End of NEXT is too far from stubs, so stop. */
3842 break;
3843 /* Add NEXT to the stub group. */
3844 head = next;
3845 next = NEXT_SEC (head);
3846 htab->stub_group[head->id].link_sec = curr;
3847 }
3848 }
3849 head = next;
3850 }
3851 }
3852 while (list++ != htab->input_list + htab->top_index);
3853
3854 free (htab->input_list);
3855 #undef PREV_SEC
3856 #undef NEXT_SEC
3857 }
3858
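/* Worked example (informative only): with a stub_group_size of 4MB and
   consecutive input sections A (1MB), B (2MB) and C (2MB) in one output
   section, the first loop above groups A and B together (B ends 3MB past the
   start of A, still under 4MB) and records B as their link_sec, so their
   stubs go after B; C ends 5MB past the start of A, so it is not added.  If
   stubs_always_after_branch is false, the second loop then lets C share the
   same stub section, because C ends only 2MB past the end of B; otherwise C
   starts a group of its own.  */
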
3859 /* Comparison function for sorting/searching relocations relating to Cortex-A8
3860 erratum fix. */
3861
3862 static int
3863 a8_reloc_compare (const void *a, const void *b)
3864 {
3865 const struct a8_erratum_reloc *ra = a, *rb = b;
3866
3867 if (ra->from < rb->from)
3868 return -1;
3869 else if (ra->from > rb->from)
3870 return 1;
3871 else
3872 return 0;
3873 }
3874
3875 static struct elf_link_hash_entry *find_thumb_glue (struct bfd_link_info *,
3876 const char *, char **);
3877
3878 /* Helper function to scan code for sequences which might trigger the Cortex-A8
3879 branch/TLB erratum. Fill in the table described by A8_FIXES_P,
3880 NUM_A8_FIXES_P, A8_FIX_TABLE_SIZE_P. Returns true if an error occurs, false
3881 otherwise. */
3882
3883 static bfd_boolean
3884 cortex_a8_erratum_scan (bfd *input_bfd,
3885 struct bfd_link_info *info,
3886 struct a8_erratum_fix **a8_fixes_p,
3887 unsigned int *num_a8_fixes_p,
3888 unsigned int *a8_fix_table_size_p,
3889 struct a8_erratum_reloc *a8_relocs,
3890 unsigned int num_a8_relocs)
3891 {
3892 asection *section;
3893 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
3894 struct a8_erratum_fix *a8_fixes = *a8_fixes_p;
3895 unsigned int num_a8_fixes = *num_a8_fixes_p;
3896 unsigned int a8_fix_table_size = *a8_fix_table_size_p;
3897
3898 for (section = input_bfd->sections;
3899 section != NULL;
3900 section = section->next)
3901 {
3902 bfd_byte *contents = NULL;
3903 struct _arm_elf_section_data *sec_data;
3904 unsigned int span;
3905 bfd_vma base_vma;
3906
3907 if (elf_section_type (section) != SHT_PROGBITS
3908 || (elf_section_flags (section) & SHF_EXECINSTR) == 0
3909 || (section->flags & SEC_EXCLUDE) != 0
3910 || (section->sec_info_type == ELF_INFO_TYPE_JUST_SYMS)
3911 || (section->output_section == bfd_abs_section_ptr))
3912 continue;
3913
3914 base_vma = section->output_section->vma + section->output_offset;
3915
3916 if (elf_section_data (section)->this_hdr.contents != NULL)
3917 contents = elf_section_data (section)->this_hdr.contents;
3918 else if (! bfd_malloc_and_get_section (input_bfd, section, &contents))
3919 return TRUE;
3920
3921 sec_data = elf32_arm_section_data (section);
3922
3923 for (span = 0; span < sec_data->mapcount; span++)
3924 {
3925 unsigned int span_start = sec_data->map[span].vma;
3926 unsigned int span_end = (span == sec_data->mapcount - 1)
3927 ? section->size : sec_data->map[span + 1].vma;
3928 unsigned int i;
3929 char span_type = sec_data->map[span].type;
3930 bfd_boolean last_was_32bit = FALSE, last_was_branch = FALSE;
3931
3932 if (span_type != 't')
3933 continue;
3934
3935 /* Span is entirely within a single 4KB region: skip scanning. */
3936 if (((base_vma + span_start) & ~0xfff)
3937 == ((base_vma + span_end) & ~0xfff))
3938 continue;
3939
3940 /* Scan for 32-bit Thumb-2 branches which span two 4K regions, where:
3941
3942 * The opcode is BLX.W, BL.W, B.W, Bcc.W
3943 * The branch target is in the same 4KB region as the
3944 first half of the branch.
3945 * The instruction before the branch is a 32-bit
3946 length non-branch instruction. */
3947 for (i = span_start; i < span_end;)
3948 {
3949 unsigned int insn = bfd_getl16 (&contents[i]);
3950 bfd_boolean insn_32bit = FALSE, is_blx = FALSE, is_b = FALSE;
3951 bfd_boolean is_bl = FALSE, is_bcc = FALSE, is_32bit_branch;
3952
3953 if ((insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000)
3954 insn_32bit = TRUE;
3955
3956 if (insn_32bit)
3957 {
3958 /* Load the rest of the insn (in manual-friendly order). */
3959 insn = (insn << 16) | bfd_getl16 (&contents[i + 2]);
3960
3961 /* Encoding T4: B<c>.W. */
3962 is_b = (insn & 0xf800d000) == 0xf0009000;
3963 /* Encoding T1: BL<c>.W. */
3964 is_bl = (insn & 0xf800d000) == 0xf000d000;
3965 /* Encoding T2: BLX<c>.W. */
3966 is_blx = (insn & 0xf800d000) == 0xf000c000;
3967 /* Encoding T3: B<c>.W (not permitted in IT block). */
3968 is_bcc = (insn & 0xf800d000) == 0xf0008000
3969 && (insn & 0x07f00000) != 0x03800000;
3970 }
3971
3972 is_32bit_branch = is_b || is_bl || is_blx || is_bcc;
3973
3974 if (((base_vma + i) & 0xfff) == 0xffe
3975 && insn_32bit
3976 && is_32bit_branch
3977 && last_was_32bit
3978 && ! last_was_branch)
3979 {
3980 bfd_signed_vma offset;
3981 bfd_boolean force_target_arm = FALSE;
3982 bfd_boolean force_target_thumb = FALSE;
3983 bfd_vma target;
3984 enum elf32_arm_stub_type stub_type = arm_stub_none;
3985 struct a8_erratum_reloc key, *found;
3986
3987 key.from = base_vma + i;
3988 found = bsearch (&key, a8_relocs, num_a8_relocs,
3989 sizeof (struct a8_erratum_reloc),
3990 &a8_reloc_compare);
3991
3992 if (found)
3993 {
3994 char *error_message = NULL;
3995 struct elf_link_hash_entry *entry;
3996
3997 /* We don't care about the error returned from this
3998 function, only if there is glue or not. */
3999 entry = find_thumb_glue (info, found->sym_name,
4000 &error_message);
4001
4002 if (entry)
4003 found->non_a8_stub = TRUE;
4004
4005 if (found->r_type == R_ARM_THM_CALL
4006 && found->st_type != STT_ARM_TFUNC)
4007 force_target_arm = TRUE;
4008 else if (found->r_type == R_ARM_THM_CALL
4009 && found->st_type == STT_ARM_TFUNC)
4010 force_target_thumb = TRUE;
4011 }
4012
4013 /* Check if we have an offending branch instruction. */
4014
4015 if (found && found->non_a8_stub)
4016 /* We've already made a stub for this instruction, e.g.
4017 it's a long branch or a Thumb->ARM stub. Assume that
4018 stub will suffice to work around the A8 erratum (see
4019 setting of always_after_branch above). */
4020 ;
4021 else if (is_bcc)
4022 {
4023 offset = (insn & 0x7ff) << 1;
4024 offset |= (insn & 0x3f0000) >> 4;
4025 offset |= (insn & 0x2000) ? 0x40000 : 0;
4026 offset |= (insn & 0x800) ? 0x80000 : 0;
4027 offset |= (insn & 0x4000000) ? 0x100000 : 0;
4028 if (offset & 0x100000)
4029 offset |= ~ ((bfd_signed_vma) 0xfffff);
4030 stub_type = arm_stub_a8_veneer_b_cond;
4031 }
4032 else if (is_b || is_bl || is_blx)
4033 {
4034 int s = (insn & 0x4000000) != 0;
4035 int j1 = (insn & 0x2000) != 0;
4036 int j2 = (insn & 0x800) != 0;
4037 int i1 = !(j1 ^ s);
4038 int i2 = !(j2 ^ s);
4039
4040 offset = (insn & 0x7ff) << 1;
4041 offset |= (insn & 0x3ff0000) >> 4;
4042 offset |= i2 << 22;
4043 offset |= i1 << 23;
4044 offset |= s << 24;
4045 if (offset & 0x1000000)
4046 offset |= ~ ((bfd_signed_vma) 0xffffff);
4047
4048 if (is_blx)
4049 offset &= ~ ((bfd_signed_vma) 3);
4050
4051 stub_type = is_blx ? arm_stub_a8_veneer_blx :
4052 is_bl ? arm_stub_a8_veneer_bl : arm_stub_a8_veneer_b;
4053 }
4054
4055 if (stub_type != arm_stub_none)
4056 {
4057 bfd_vma pc_for_insn = base_vma + i + 4;
4058
4059 /* The original instruction is a BL, but the target is
4060 an ARM instruction. If we were not making a stub,
4061 the BL would have been converted to a BLX. Use the
4062 BLX stub instead in that case. */
4063 if (htab->use_blx && force_target_arm
4064 && stub_type == arm_stub_a8_veneer_bl)
4065 {
4066 stub_type = arm_stub_a8_veneer_blx;
4067 is_blx = TRUE;
4068 is_bl = FALSE;
4069 }
4070 /* Conversely, if the original instruction was
4071 BLX but the target is Thumb mode, use the BL
4072 stub. */
4073 else if (force_target_thumb
4074 && stub_type == arm_stub_a8_veneer_blx)
4075 {
4076 stub_type = arm_stub_a8_veneer_bl;
4077 is_blx = FALSE;
4078 is_bl = TRUE;
4079 }
4080
4081 if (is_blx)
4082 pc_for_insn &= ~ ((bfd_vma) 3);
4083
4084 /* If we found a relocation, use the proper destination,
4085 not the offset in the (unrelocated) instruction.
4086 Note this is always done if we switched the stub type
4087 above. */
4088 if (found)
4089 offset =
4090 (bfd_signed_vma) (found->destination - pc_for_insn);
4091
4092 target = pc_for_insn + offset;
4093
4094 /* The BLX stub is ARM-mode code. Adjust the offset to
4095 take the different PC value (+8 instead of +4) into
4096 account. */
4097 if (stub_type == arm_stub_a8_veneer_blx)
4098 offset += 4;
4099
4100 if (((base_vma + i) & ~0xfff) == (target & ~0xfff))
4101 {
4102 char *stub_name;
4103
4104 if (num_a8_fixes == a8_fix_table_size)
4105 {
4106 a8_fix_table_size *= 2;
4107 a8_fixes = bfd_realloc (a8_fixes,
4108 sizeof (struct a8_erratum_fix)
4109 * a8_fix_table_size);
4110 }
4111
4112 stub_name = bfd_malloc (8 + 1 + 8 + 1);
4113 if (stub_name != NULL)
4114 sprintf (stub_name, "%x:%x", section->id, i);
4115
4116 a8_fixes[num_a8_fixes].input_bfd = input_bfd;
4117 a8_fixes[num_a8_fixes].section = section;
4118 a8_fixes[num_a8_fixes].offset = i;
4119 a8_fixes[num_a8_fixes].addend = offset;
4120 a8_fixes[num_a8_fixes].orig_insn = insn;
4121 a8_fixes[num_a8_fixes].stub_name = stub_name;
4122 a8_fixes[num_a8_fixes].stub_type = stub_type;
4123
4124 num_a8_fixes++;
4125 }
4126 }
4127 }
4128
4129 i += insn_32bit ? 4 : 2;
4130 last_was_32bit = insn_32bit;
4131 last_was_branch = is_32bit_branch;
4132 }
4133 }
4134
4135 if (elf_section_data (section)->this_hdr.contents == NULL)
4136 free (contents);
4137 }
4138
4139 *a8_fixes_p = a8_fixes;
4140 *num_a8_fixes_p = num_a8_fixes;
4141 *a8_fix_table_size_p = a8_fix_table_size;
4142
4143 return FALSE;
4144 }
4145
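/* Illustrative sketch only: a standalone version of the branch-offset
   decoding performed above for the 32-bit Thumb-2 B.W/BL/BLX encodings
   (T4/T1/T2), reconstructing I1 and I2 from S, J1 and J2 and sign-extending
   the 25-bit result.  The helper name is an assumption made for this example;
   INSN is the combined 32-bit encoding with the first halfword in the high
   16 bits.  */

static bfd_signed_vma
example_decode_thumb2_branch_offset (unsigned long insn)
{
  int s = (insn & 0x4000000) != 0;
  int j1 = (insn & 0x2000) != 0;
  int j2 = (insn & 0x800) != 0;
  int i1 = !(j1 ^ s);
  int i2 = !(j2 ^ s);
  bfd_signed_vma offset;

  offset = (insn & 0x7ff) << 1;		/* imm11 -> bits 11:1.  */
  offset |= (insn & 0x3ff0000) >> 4;	/* imm10 -> bits 21:12.  */
  offset |= (bfd_signed_vma) i2 << 22;
  offset |= (bfd_signed_vma) i1 << 23;
  offset |= (bfd_signed_vma) s << 24;

  /* Sign-extend from bit 24.  */
  if (offset & 0x1000000)
    offset |= ~ ((bfd_signed_vma) 0xffffff);

  return offset;
}
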
4146 /* Determine and set the size of the stub section for a final link.
4147
4148 The basic idea here is to examine all the relocations looking for
4149 PC-relative calls to a target that is unreachable with a "bl"
4150 instruction. */
4151
4152 bfd_boolean
4153 elf32_arm_size_stubs (bfd *output_bfd,
4154 bfd *stub_bfd,
4155 struct bfd_link_info *info,
4156 bfd_signed_vma group_size,
4157 asection * (*add_stub_section) (const char *, asection *),
4158 void (*layout_sections_again) (void))
4159 {
4160 bfd_size_type stub_group_size;
4161 bfd_boolean stubs_always_after_branch;
4162 bfd_boolean stub_changed = 0;
4163 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
4164 struct a8_erratum_fix *a8_fixes = NULL;
4165 unsigned int num_a8_fixes = 0, prev_num_a8_fixes = 0, a8_fix_table_size = 10;
4166 struct a8_erratum_reloc *a8_relocs = NULL;
4167 unsigned int num_a8_relocs = 0, a8_reloc_table_size = 10, i;
4168
4169 if (htab->fix_cortex_a8)
4170 {
4171 a8_fixes = bfd_zmalloc (sizeof (struct a8_erratum_fix)
4172 * a8_fix_table_size);
4173 a8_relocs = bfd_zmalloc (sizeof (struct a8_erratum_reloc)
4174 * a8_reloc_table_size);
4175 }
4176
4177 /* Propagate mach to stub bfd, because it may not have been
4178 finalized when we created stub_bfd. */
4179 bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
4180 bfd_get_mach (output_bfd));
4181
4182 /* Stash our params away. */
4183 htab->stub_bfd = stub_bfd;
4184 htab->add_stub_section = add_stub_section;
4185 htab->layout_sections_again = layout_sections_again;
4186 stubs_always_after_branch = group_size < 0;
4187
4188 /* The Cortex-A8 erratum fix depends on stubs not being in the same 4K page
4189 as the first half of a 32-bit branch straddling two 4K pages. This is a
4190 crude way of enforcing that. */
4191 if (htab->fix_cortex_a8)
4192 stubs_always_after_branch = 1;
4193
4194 if (group_size < 0)
4195 stub_group_size = -group_size;
4196 else
4197 stub_group_size = group_size;
4198
4199 if (stub_group_size == 1)
4200 {
4201 /* Default values. */
4202       /* The Thumb branch range of +/-4MB has to be used as the default
4203          maximum size (a given section can contain both ARM and Thumb
4204          code, so the worst case has to be taken into account).
4205
4206          This value is roughly 24K less than 4MB (4194304 - 4170000 =
4207          24304 bytes), which leaves room for about 2025 12-byte stubs.
4208          If we exceed that, then we will fail to link.  The user will
4209          have to relink with an explicit group size option.  */
4210 stub_group_size = 4170000;
4211 }
4212
4213 group_sections (htab, stub_group_size, stubs_always_after_branch);
4214
4215 while (1)
4216 {
4217 bfd *input_bfd;
4218 unsigned int bfd_indx;
4219 asection *stub_sec;
4220
4221 num_a8_fixes = 0;
4222
4223 for (input_bfd = info->input_bfds, bfd_indx = 0;
4224 input_bfd != NULL;
4225 input_bfd = input_bfd->link_next, bfd_indx++)
4226 {
4227 Elf_Internal_Shdr *symtab_hdr;
4228 asection *section;
4229 Elf_Internal_Sym *local_syms = NULL;
4230
4231 num_a8_relocs = 0;
4232
4233 /* We'll need the symbol table in a second. */
4234 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
4235 if (symtab_hdr->sh_info == 0)
4236 continue;
4237
4238 /* Walk over each section attached to the input bfd. */
4239 for (section = input_bfd->sections;
4240 section != NULL;
4241 section = section->next)
4242 {
4243 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
4244
4245 /* If there aren't any relocs, then there's nothing more
4246 to do. */
4247 if ((section->flags & SEC_RELOC) == 0
4248 || section->reloc_count == 0
4249 || (section->flags & SEC_CODE) == 0)
4250 continue;
4251
4252 /* If this section is a link-once section that will be
4253 discarded, then don't create any stubs. */
4254 if (section->output_section == NULL
4255 || section->output_section->owner != output_bfd)
4256 continue;
4257
4258 /* Get the relocs. */
4259 internal_relocs
4260 = _bfd_elf_link_read_relocs (input_bfd, section, NULL,
4261 NULL, info->keep_memory);
4262 if (internal_relocs == NULL)
4263 goto error_ret_free_local;
4264
4265 /* Now examine each relocation. */
4266 irela = internal_relocs;
4267 irelaend = irela + section->reloc_count;
4268 for (; irela < irelaend; irela++)
4269 {
4270 unsigned int r_type, r_indx;
4271 enum elf32_arm_stub_type stub_type;
4272 struct elf32_arm_stub_hash_entry *stub_entry;
4273 asection *sym_sec;
4274 bfd_vma sym_value;
4275 bfd_vma destination;
4276 struct elf32_arm_link_hash_entry *hash;
4277 const char *sym_name;
4278 char *stub_name;
4279 const asection *id_sec;
4280 unsigned char st_type;
4281 bfd_boolean created_stub = FALSE;
4282
4283 r_type = ELF32_R_TYPE (irela->r_info);
4284 r_indx = ELF32_R_SYM (irela->r_info);
4285
4286 if (r_type >= (unsigned int) R_ARM_max)
4287 {
4288 bfd_set_error (bfd_error_bad_value);
4289 error_ret_free_internal:
4290 if (elf_section_data (section)->relocs == NULL)
4291 free (internal_relocs);
4292 goto error_ret_free_local;
4293 }
4294
4295 /* Only look for stubs on branch instructions. */
4296 if ((r_type != (unsigned int) R_ARM_CALL)
4297 && (r_type != (unsigned int) R_ARM_THM_CALL)
4298 && (r_type != (unsigned int) R_ARM_JUMP24)
4299 && (r_type != (unsigned int) R_ARM_THM_JUMP19)
4300 && (r_type != (unsigned int) R_ARM_THM_XPC22)
4301 && (r_type != (unsigned int) R_ARM_THM_JUMP24)
4302 && (r_type != (unsigned int) R_ARM_PLT32))
4303 continue;
4304
4305 		  /* Now determine the call target and its name, value
4306 		     and section.  */
4307 sym_sec = NULL;
4308 sym_value = 0;
4309 destination = 0;
4310 hash = NULL;
4311 sym_name = NULL;
4312 if (r_indx < symtab_hdr->sh_info)
4313 {
4314 /* It's a local symbol. */
4315 Elf_Internal_Sym *sym;
4316 Elf_Internal_Shdr *hdr;
4317
4318 if (local_syms == NULL)
4319 {
4320 local_syms
4321 = (Elf_Internal_Sym *) symtab_hdr->contents;
4322 if (local_syms == NULL)
4323 local_syms
4324 = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
4325 symtab_hdr->sh_info, 0,
4326 NULL, NULL, NULL);
4327 if (local_syms == NULL)
4328 goto error_ret_free_internal;
4329 }
4330
4331 sym = local_syms + r_indx;
4332 hdr = elf_elfsections (input_bfd)[sym->st_shndx];
4333 sym_sec = hdr->bfd_section;
4334 if (ELF_ST_TYPE (sym->st_info) != STT_SECTION)
4335 sym_value = sym->st_value;
4336 destination = (sym_value + irela->r_addend
4337 + sym_sec->output_offset
4338 + sym_sec->output_section->vma);
4339 st_type = ELF_ST_TYPE (sym->st_info);
4340 sym_name
4341 = bfd_elf_string_from_elf_section (input_bfd,
4342 symtab_hdr->sh_link,
4343 sym->st_name);
4344 }
4345 else
4346 {
4347 /* It's an external symbol. */
4348 int e_indx;
4349
4350 e_indx = r_indx - symtab_hdr->sh_info;
4351 hash = ((struct elf32_arm_link_hash_entry *)
4352 elf_sym_hashes (input_bfd)[e_indx]);
4353
4354 while (hash->root.root.type == bfd_link_hash_indirect
4355 || hash->root.root.type == bfd_link_hash_warning)
4356 hash = ((struct elf32_arm_link_hash_entry *)
4357 hash->root.root.u.i.link);
4358
4359 if (hash->root.root.type == bfd_link_hash_defined
4360 || hash->root.root.type == bfd_link_hash_defweak)
4361 {
4362 sym_sec = hash->root.root.u.def.section;
4363 sym_value = hash->root.root.u.def.value;
4364 if (sym_sec->output_section != NULL)
4365 destination = (sym_value + irela->r_addend
4366 + sym_sec->output_offset
4367 + sym_sec->output_section->vma);
4368 }
4369 else if ((hash->root.root.type == bfd_link_hash_undefined)
4370 || (hash->root.root.type == bfd_link_hash_undefweak))
4371 {
4372 			  /* For a shared library, use the PLT stub as the
4373 			     target address to decide whether a long branch
4374 			     stub is needed.
4375 			     For absolute code, such symbols cannot be handled.  */
4376 struct elf32_arm_link_hash_table *globals =
4377 elf32_arm_hash_table (info);
4378
4379 if (globals->splt != NULL && hash != NULL
4380 && hash->root.plt.offset != (bfd_vma) -1)
4381 {
4382 sym_sec = globals->splt;
4383 sym_value = hash->root.plt.offset;
4384 if (sym_sec->output_section != NULL)
4385 destination = (sym_value
4386 + sym_sec->output_offset
4387 + sym_sec->output_section->vma);
4388 }
4389 else
4390 continue;
4391 }
4392 else
4393 {
4394 bfd_set_error (bfd_error_bad_value);
4395 goto error_ret_free_internal;
4396 }
4397 st_type = ELF_ST_TYPE (hash->root.type);
4398 sym_name = hash->root.root.root.string;
4399 }
4400
4401 do
4402 {
4403 /* Determine what (if any) linker stub is needed. */
4404 stub_type = arm_type_of_stub (info, section, irela,
4405 st_type, hash,
4406 destination, sym_sec,
4407 input_bfd, sym_name);
4408 if (stub_type == arm_stub_none)
4409 break;
4410
4411 /* Support for grouping stub sections. */
4412 id_sec = htab->stub_group[section->id].link_sec;
4413
4414 /* Get the name of this stub. */
4415 stub_name = elf32_arm_stub_name (id_sec, sym_sec, hash,
4416 irela);
4417 if (!stub_name)
4418 goto error_ret_free_internal;
4419
4420 /* We've either created a stub for this reloc already,
4421 or we are about to. */
4422 created_stub = TRUE;
4423
4424 stub_entry = arm_stub_hash_lookup
4425 (&htab->stub_hash_table, stub_name,
4426 FALSE, FALSE);
4427 if (stub_entry != NULL)
4428 {
4429 /* The proper stub has already been created. */
4430 free (stub_name);
4431 break;
4432 }
4433
4434 stub_entry = elf32_arm_add_stub (stub_name, section,
4435 htab);
4436 if (stub_entry == NULL)
4437 {
4438 free (stub_name);
4439 goto error_ret_free_internal;
4440 }
4441
4442 stub_entry->target_value = sym_value;
4443 stub_entry->target_section = sym_sec;
4444 stub_entry->stub_type = stub_type;
4445 stub_entry->h = hash;
4446 stub_entry->st_type = st_type;
4447
4448 if (sym_name == NULL)
4449 sym_name = "unnamed";
4450 stub_entry->output_name
4451 = bfd_alloc (htab->stub_bfd,
4452 sizeof (THUMB2ARM_GLUE_ENTRY_NAME)
4453 + strlen (sym_name));
4454 if (stub_entry->output_name == NULL)
4455 {
4456 free (stub_name);
4457 goto error_ret_free_internal;
4458 }
4459
4460 /* For historical reasons, use the existing names for
4461 ARM-to-Thumb and Thumb-to-ARM stubs. */
4462 if ( ((r_type == (unsigned int) R_ARM_THM_CALL)
4463 || (r_type == (unsigned int) R_ARM_THM_JUMP24))
4464 && st_type != STT_ARM_TFUNC)
4465 sprintf (stub_entry->output_name,
4466 THUMB2ARM_GLUE_ENTRY_NAME, sym_name);
4467 else if ( ((r_type == (unsigned int) R_ARM_CALL)
4468 || (r_type == (unsigned int) R_ARM_JUMP24))
4469 && st_type == STT_ARM_TFUNC)
4470 sprintf (stub_entry->output_name,
4471 ARM2THUMB_GLUE_ENTRY_NAME, sym_name);
4472 else
4473 sprintf (stub_entry->output_name, STUB_ENTRY_NAME,
4474 sym_name);
4475
4476 stub_changed = TRUE;
4477 }
4478 while (0);
4479
4480 /* Look for relocations which might trigger Cortex-A8
4481 erratum. */
4482 if (htab->fix_cortex_a8
4483 && (r_type == (unsigned int) R_ARM_THM_JUMP24
4484 || r_type == (unsigned int) R_ARM_THM_JUMP19
4485 || r_type == (unsigned int) R_ARM_THM_CALL
4486 || r_type == (unsigned int) R_ARM_THM_XPC22))
4487 {
4488 bfd_vma from = section->output_section->vma
4489 + section->output_offset
4490 + irela->r_offset;
4491
4492 if ((from & 0xfff) == 0xffe)
4493 {
4494 /* Found a candidate. Note we haven't checked the
4495 destination is within 4K here: if we do so (and
4496 don't create an entry in a8_relocs) we can't tell
4497 that a branch should have been relocated when
4498 scanning later. */
4499 if (num_a8_relocs == a8_reloc_table_size)
4500 {
4501 a8_reloc_table_size *= 2;
4502 a8_relocs = bfd_realloc (a8_relocs,
4503 sizeof (struct a8_erratum_reloc)
4504 * a8_reloc_table_size);
4505 }
4506
4507 a8_relocs[num_a8_relocs].from = from;
4508 a8_relocs[num_a8_relocs].destination = destination;
4509 a8_relocs[num_a8_relocs].r_type = r_type;
4510 a8_relocs[num_a8_relocs].st_type = st_type;
4511 a8_relocs[num_a8_relocs].sym_name = sym_name;
4512 a8_relocs[num_a8_relocs].non_a8_stub = created_stub;
4513
4514 num_a8_relocs++;
4515 }
4516 }
4517 }
4518
4519 /* We're done with the internal relocs, free them. */
4520 if (elf_section_data (section)->relocs == NULL)
4521 free (internal_relocs);
4522 }
4523
4524 if (htab->fix_cortex_a8)
4525 {
4526 /* Sort relocs which might apply to Cortex-A8 erratum. */
4527 qsort (a8_relocs, num_a8_relocs, sizeof (struct a8_erratum_reloc),
4528 &a8_reloc_compare);
4529
4530 /* Scan for branches which might trigger Cortex-A8 erratum. */
4531 if (cortex_a8_erratum_scan (input_bfd, info, &a8_fixes,
4532 &num_a8_fixes, &a8_fix_table_size,
4533 a8_relocs, num_a8_relocs) != 0)
4534 goto error_ret_free_local;
4535 }
4536 }
4537
4538 if (htab->fix_cortex_a8 && num_a8_fixes != prev_num_a8_fixes)
4539 stub_changed = TRUE;
4540
4541 if (!stub_changed)
4542 break;
4543
4544 /* OK, we've added some stubs. Find out the new size of the
4545 stub sections. */
4546 for (stub_sec = htab->stub_bfd->sections;
4547 stub_sec != NULL;
4548 stub_sec = stub_sec->next)
4549 {
4550 /* Ignore non-stub sections. */
4551 if (!strstr (stub_sec->name, STUB_SUFFIX))
4552 continue;
4553
4554 stub_sec->size = 0;
4555 }
4556
4557 bfd_hash_traverse (&htab->stub_hash_table, arm_size_one_stub, htab);
4558
4559 /* Add Cortex-A8 erratum veneers to stub section sizes too. */
4560 if (htab->fix_cortex_a8)
4561 for (i = 0; i < num_a8_fixes; i++)
4562 {
4563 stub_sec = elf32_arm_create_or_find_stub_sec (NULL,
4564 a8_fixes[i].section, htab);
4565
4566 if (stub_sec == NULL)
4567 goto error_ret_free_local;
4568
4569 stub_sec->size
4570 += find_stub_size_and_template (a8_fixes[i].stub_type, NULL,
4571 NULL);
4572 }
4573
4574
4575 /* Ask the linker to do its stuff. */
4576 (*htab->layout_sections_again) ();
4577 stub_changed = FALSE;
4578 prev_num_a8_fixes = num_a8_fixes;
4579 }
4580
4581 /* Add stubs for Cortex-A8 erratum fixes now. */
4582 if (htab->fix_cortex_a8)
4583 {
4584 for (i = 0; i < num_a8_fixes; i++)
4585 {
4586 struct elf32_arm_stub_hash_entry *stub_entry;
4587 char *stub_name = a8_fixes[i].stub_name;
4588 asection *section = a8_fixes[i].section;
4589 unsigned int section_id = a8_fixes[i].section->id;
4590 asection *link_sec = htab->stub_group[section_id].link_sec;
4591 asection *stub_sec = htab->stub_group[section_id].stub_sec;
4592 const insn_sequence *template;
4593 int template_size, size = 0;
4594
4595 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
4596 TRUE, FALSE);
4597 if (stub_entry == NULL)
4598 {
4599 	      (*_bfd_error_handler) (_("%B: cannot create stub entry %s"),
4600 				     section->owner,
4601 				     stub_name);
4602 return FALSE;
4603 }
4604
4605 stub_entry->stub_sec = stub_sec;
4606 stub_entry->stub_offset = 0;
4607 stub_entry->id_sec = link_sec;
4608 stub_entry->stub_type = a8_fixes[i].stub_type;
4609 stub_entry->target_section = a8_fixes[i].section;
4610 stub_entry->target_value = a8_fixes[i].offset;
4611 stub_entry->target_addend = a8_fixes[i].addend;
4612 stub_entry->orig_insn = a8_fixes[i].orig_insn;
4613 stub_entry->st_type = STT_ARM_TFUNC;
4614
4615 size = find_stub_size_and_template (a8_fixes[i].stub_type, &template,
4616 &template_size);
4617
4618 stub_entry->stub_size = size;
4619 stub_entry->stub_template = template;
4620 stub_entry->stub_template_size = template_size;
4621 }
4622
4623 /* Stash the Cortex-A8 erratum fix array for use later in
4624 elf32_arm_write_section(). */
4625 htab->a8_erratum_fixes = a8_fixes;
4626 htab->num_a8_erratum_fixes = num_a8_fixes;
4627 }
4628 else
4629 {
4630 htab->a8_erratum_fixes = NULL;
4631 htab->num_a8_erratum_fixes = 0;
4632 }
4633 return TRUE;
4634
4635 error_ret_free_local:
4636 return FALSE;
4637 }
4638
4639 /* Build all the stubs associated with the current output file. The
4640 stubs are kept in a hash table attached to the main linker hash
4641 table. We also set up the .plt entries for statically linked PIC
4642 functions here. This function is called via arm_elf_finish in the
4643 linker. */
4644
4645 bfd_boolean
4646 elf32_arm_build_stubs (struct bfd_link_info *info)
4647 {
4648 asection *stub_sec;
4649 struct bfd_hash_table *table;
4650 struct elf32_arm_link_hash_table *htab;
4651
4652 htab = elf32_arm_hash_table (info);
4653
4654 for (stub_sec = htab->stub_bfd->sections;
4655 stub_sec != NULL;
4656 stub_sec = stub_sec->next)
4657 {
4658 bfd_size_type size;
4659
4660 /* Ignore non-stub sections. */
4661 if (!strstr (stub_sec->name, STUB_SUFFIX))
4662 continue;
4663
4664 /* Allocate memory to hold the linker stubs. */
4665 size = stub_sec->size;
4666 stub_sec->contents = bfd_zalloc (htab->stub_bfd, size);
4667 if (stub_sec->contents == NULL && size != 0)
4668 return FALSE;
4669 stub_sec->size = 0;
4670 }
4671
4672 /* Build the stubs as directed by the stub hash table. */
4673 table = &htab->stub_hash_table;
4674 bfd_hash_traverse (table, arm_build_one_stub, info);
4675
4676 return TRUE;
4677 }
4678
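/* Informative sketch of the intended calling sequence (cf. the comments above
   mentioning arm_elf_finish and ld/emultempl/armelf.em); the surrounding
   driver code in ld is not reproduced here:

     1. elf32_arm_setup_section_lists (output_bfd, info) -- returns 1 if
	stubs may be needed, 0 if none will be, -1 on error;
     2. elf32_arm_next_input_section (info, isec) for each input section,
	in link order;
     3. elf32_arm_size_stubs (...) with the add-stub-section and relayout
	callbacks, which loops internally until the stub sections stop
	changing size;
     4. after final layout, elf32_arm_build_stubs (info) to emit the stub
	contents.  */
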
4679 /* Locate the Thumb encoded calling stub for NAME. */
4680
4681 static struct elf_link_hash_entry *
4682 find_thumb_glue (struct bfd_link_info *link_info,
4683 const char *name,
4684 char **error_message)
4685 {
4686 char *tmp_name;
4687 struct elf_link_hash_entry *hash;
4688 struct elf32_arm_link_hash_table *hash_table;
4689
4690 /* We need a pointer to the armelf specific hash table. */
4691 hash_table = elf32_arm_hash_table (link_info);
4692
4693 tmp_name = bfd_malloc ((bfd_size_type) strlen (name)
4694 + strlen (THUMB2ARM_GLUE_ENTRY_NAME) + 1);
4695
4696 BFD_ASSERT (tmp_name);
4697
4698 sprintf (tmp_name, THUMB2ARM_GLUE_ENTRY_NAME, name);
4699
4700 hash = elf_link_hash_lookup
4701 (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
4702
4703 if (hash == NULL
4704 && asprintf (error_message, _("unable to find THUMB glue '%s' for '%s'"),
4705 tmp_name, name) == -1)
4706 *error_message = (char *) bfd_errmsg (bfd_error_system_call);
4707
4708 free (tmp_name);
4709
4710 return hash;
4711 }
4712
4713 /* Locate the ARM encoded calling stub for NAME. */
4714
4715 static struct elf_link_hash_entry *
4716 find_arm_glue (struct bfd_link_info *link_info,
4717 const char *name,
4718 char **error_message)
4719 {
4720 char *tmp_name;
4721 struct elf_link_hash_entry *myh;
4722 struct elf32_arm_link_hash_table *hash_table;
4723
4724 /* We need a pointer to the elfarm specific hash table. */
4725 hash_table = elf32_arm_hash_table (link_info);
4726
4727 tmp_name = bfd_malloc ((bfd_size_type) strlen (name)
4728 + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
4729
4730 BFD_ASSERT (tmp_name);
4731
4732 sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);
4733
4734 myh = elf_link_hash_lookup
4735 (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
4736
4737 if (myh == NULL
4738 && asprintf (error_message, _("unable to find ARM glue '%s' for '%s'"),
4739 tmp_name, name) == -1)
4740 *error_message = (char *) bfd_errmsg (bfd_error_system_call);
4741
4742 free (tmp_name);
4743
4744 return myh;
4745 }
4746
4747 /* ARM->Thumb glue (static images):
4748
4749 .arm
4750 __func_from_arm:
4751 ldr r12, __func_addr
4752 bx r12
4753 __func_addr:
4754      .word func    @ behave as if you saw an ARM_32 reloc.
4755
4756 (v5t static images)
4757 .arm
4758 __func_from_arm:
4759 ldr pc, __func_addr
4760 __func_addr:
4761      .word func    @ behave as if you saw an ARM_32 reloc.
4762
4763 (relocatable images)
4764 .arm
4765 __func_from_arm:
4766 ldr r12, __func_offset
4767 add r12, r12, pc
4768 bx r12
4769 __func_offset:
4770 .word func - . */
4771
4772 #define ARM2THUMB_STATIC_GLUE_SIZE 12
4773 static const insn32 a2t1_ldr_insn = 0xe59fc000;
4774 static const insn32 a2t2_bx_r12_insn = 0xe12fff1c;
4775 static const insn32 a2t3_func_addr_insn = 0x00000001;
4776
4777 #define ARM2THUMB_V5_STATIC_GLUE_SIZE 8
4778 static const insn32 a2t1v5_ldr_insn = 0xe51ff004;
4779 static const insn32 a2t2v5_func_addr_insn = 0x00000001;
4780
4781 #define ARM2THUMB_PIC_GLUE_SIZE 16
4782 static const insn32 a2t1p_ldr_insn = 0xe59fc004;
4783 static const insn32 a2t2p_add_pc_insn = 0xe08cc00f;
4784 static const insn32 a2t3p_bx_r12_insn = 0xe12fff1c;
4785
4786 /* Thumb->ARM: Thumb->(non-interworking aware) ARM
4787
4788 .thumb .thumb
4789 .align 2 .align 2
4790 __func_from_thumb: __func_from_thumb:
4791 bx pc push {r6, lr}
4792 nop ldr r6, __func_addr
4793 .arm mov lr, pc
4794 b func bx r6
4795 .arm
4796 ;; back_to_thumb
4797 ldmia r13! {r6, lr}
4798 bx lr
4799 __func_addr:
4800 .word func */
4801
4802 #define THUMB2ARM_GLUE_SIZE 8
4803 static const insn16 t2a1_bx_pc_insn = 0x4778;
4804 static const insn16 t2a2_noop_insn = 0x46c0;
4805 static const insn32 t2a3_b_insn = 0xea000000;
4806
4807 #define VFP11_ERRATUM_VENEER_SIZE 8
4808
4809 #define ARM_BX_VENEER_SIZE 12
4810 static const insn32 armbx1_tst_insn = 0xe3100001;
4811 static const insn32 armbx2_moveq_insn = 0x01a0f000;
4812 static const insn32 armbx3_bx_insn = 0xe12fff10;
4813
4814 #ifndef ELFARM_NABI_C_INCLUDED
4815 static void
4816 arm_allocate_glue_section_space (bfd * abfd, bfd_size_type size, const char * name)
4817 {
4818 asection * s;
4819 bfd_byte * contents;
4820
4821 if (size == 0)
4822 {
4823 /* Do not include empty glue sections in the output. */
4824 if (abfd != NULL)
4825 {
4826 s = bfd_get_section_by_name (abfd, name);
4827 if (s != NULL)
4828 s->flags |= SEC_EXCLUDE;
4829 }
4830 return;
4831 }
4832
4833 BFD_ASSERT (abfd != NULL);
4834
4835 s = bfd_get_section_by_name (abfd, name);
4836 BFD_ASSERT (s != NULL);
4837
4838 contents = bfd_alloc (abfd, size);
4839
4840 BFD_ASSERT (s->size == size);
4841 s->contents = contents;
4842 }
4843
4844 bfd_boolean
4845 bfd_elf32_arm_allocate_interworking_sections (struct bfd_link_info * info)
4846 {
4847 struct elf32_arm_link_hash_table * globals;
4848
4849 globals = elf32_arm_hash_table (info);
4850 BFD_ASSERT (globals != NULL);
4851
4852 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
4853 globals->arm_glue_size,
4854 ARM2THUMB_GLUE_SECTION_NAME);
4855
4856 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
4857 globals->thumb_glue_size,
4858 THUMB2ARM_GLUE_SECTION_NAME);
4859
4860 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
4861 globals->vfp11_erratum_glue_size,
4862 VFP11_ERRATUM_VENEER_SECTION_NAME);
4863
4864 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
4865 globals->bx_glue_size,
4866 ARM_BX_GLUE_SECTION_NAME);
4867
4868 return TRUE;
4869 }
4870
4871 /* Allocate space and symbols for calling a Thumb function from ARM mode.
4872    Returns the symbol identifying the stub.  */
4873
4874 static struct elf_link_hash_entry *
4875 record_arm_to_thumb_glue (struct bfd_link_info * link_info,
4876 struct elf_link_hash_entry * h)
4877 {
4878 const char * name = h->root.root.string;
4879 asection * s;
4880 char * tmp_name;
4881 struct elf_link_hash_entry * myh;
4882 struct bfd_link_hash_entry * bh;
4883 struct elf32_arm_link_hash_table * globals;
4884 bfd_vma val;
4885 bfd_size_type size;
4886
4887 globals = elf32_arm_hash_table (link_info);
4888
4889 BFD_ASSERT (globals != NULL);
4890 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
4891
4892 s = bfd_get_section_by_name
4893 (globals->bfd_of_glue_owner, ARM2THUMB_GLUE_SECTION_NAME);
4894
4895 BFD_ASSERT (s != NULL);
4896
4897 tmp_name = bfd_malloc ((bfd_size_type) strlen (name) + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
4898
4899 BFD_ASSERT (tmp_name);
4900
4901 sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);
4902
4903 myh = elf_link_hash_lookup
4904 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
4905
4906 if (myh != NULL)
4907 {
4908 /* We've already seen this guy. */
4909 free (tmp_name);
4910 return myh;
4911 }
4912
4913   /* The only trick here is using globals->arm_glue_size as the value.
4914      Even though the section isn't allocated yet, this is where we will be
4915      putting it.  The +1 on the value marks that the stub has not been
4916      output yet - not that it is a Thumb function.  */
4917 bh = NULL;
4918 val = globals->arm_glue_size + 1;
4919 _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
4920 tmp_name, BSF_GLOBAL, s, val,
4921 NULL, TRUE, FALSE, &bh);
4922
4923 myh = (struct elf_link_hash_entry *) bh;
4924 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
4925 myh->forced_local = 1;
4926
4927 free (tmp_name);
4928
4929 if (link_info->shared || globals->root.is_relocatable_executable
4930 || globals->pic_veneer)
4931 size = ARM2THUMB_PIC_GLUE_SIZE;
4932 else if (globals->use_blx)
4933 size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
4934 else
4935 size = ARM2THUMB_STATIC_GLUE_SIZE;
4936
4937 s->size += size;
4938 globals->arm_glue_size += size;
4939
4940 return myh;
4941 }
4942
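/* Informative note: the size chosen above matches the glue sequences shown
   earlier -- ARM2THUMB_PIC_GLUE_SIZE (16 bytes: ldr/add/bx plus the offset
   word) for shared or PIC links, ARM2THUMB_V5_STATIC_GLUE_SIZE (8 bytes:
   ldr pc plus the address word) when BLX is available, and
   ARM2THUMB_STATIC_GLUE_SIZE (12 bytes: ldr/bx plus the address word)
   otherwise.  */
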
4943 /* Allocate space for ARMv4 BX veneers. */
4944
4945 static void
4946 record_arm_bx_glue (struct bfd_link_info * link_info, int reg)
4947 {
4948 asection * s;
4949 struct elf32_arm_link_hash_table *globals;
4950 char *tmp_name;
4951 struct elf_link_hash_entry *myh;
4952 struct bfd_link_hash_entry *bh;
4953 bfd_vma val;
4954
4955 /* BX PC does not need a veneer. */
4956 if (reg == 15)
4957 return;
4958
4959 globals = elf32_arm_hash_table (link_info);
4960
4961 BFD_ASSERT (globals != NULL);
4962 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
4963
4964 /* Check if this veneer has already been allocated. */
4965 if (globals->bx_glue_offset[reg])
4966 return;
4967
4968 s = bfd_get_section_by_name
4969 (globals->bfd_of_glue_owner, ARM_BX_GLUE_SECTION_NAME);
4970
4971 BFD_ASSERT (s != NULL);
4972
4973 /* Add symbol for veneer. */
4974 tmp_name = bfd_malloc ((bfd_size_type) strlen (ARM_BX_GLUE_ENTRY_NAME) + 1);
4975
4976 BFD_ASSERT (tmp_name);
4977
4978 sprintf (tmp_name, ARM_BX_GLUE_ENTRY_NAME, reg);
4979
4980 myh = elf_link_hash_lookup
4981 (&(globals)->root, tmp_name, FALSE, FALSE, FALSE);
4982
4983 BFD_ASSERT (myh == NULL);
4984
4985 bh = NULL;
4986 val = globals->bx_glue_size;
4987 _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
4988 tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
4989 NULL, TRUE, FALSE, &bh);
4990
4991 myh = (struct elf_link_hash_entry *) bh;
4992 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
4993 myh->forced_local = 1;
4994
4995 s->size += ARM_BX_VENEER_SIZE;
4996 globals->bx_glue_offset[reg] = globals->bx_glue_size | 2;
4997 globals->bx_glue_size += ARM_BX_VENEER_SIZE;
4998 }
4999
5000
5001 /* Add an entry to the code/data map for section SEC. */
5002
5003 static void
5004 elf32_arm_section_map_add (asection *sec, char type, bfd_vma vma)
5005 {
5006 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
5007 unsigned int newidx;
5008
5009 if (sec_data->map == NULL)
5010 {
5011 sec_data->map = bfd_malloc (sizeof (elf32_arm_section_map));
5012 sec_data->mapcount = 0;
5013 sec_data->mapsize = 1;
5014 }
5015
5016 newidx = sec_data->mapcount++;
5017
5018 if (sec_data->mapcount > sec_data->mapsize)
5019 {
5020 sec_data->mapsize *= 2;
5021 sec_data->map = bfd_realloc_or_free (sec_data->map, sec_data->mapsize
5022 * sizeof (elf32_arm_section_map));
5023 }
5024
5025 if (sec_data->map)
5026 {
5027 sec_data->map[newidx].vma = vma;
5028 sec_data->map[newidx].type = type;
5029 }
5030 }
5031
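/* Informative note: the map array grows geometrically (1, 2, 4, 8, ...
   entries), so recording N mapping symbols costs only O(log N) calls to
   bfd_realloc_or_free and O(N) copying overall.  */
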
5032
5033 /* Record information about a VFP11 denorm-erratum veneer. Only ARM-mode
5034 veneers are handled for now. */
5035
5036 static bfd_vma
5037 record_vfp11_erratum_veneer (struct bfd_link_info *link_info,
5038 elf32_vfp11_erratum_list *branch,
5039 bfd *branch_bfd,
5040 asection *branch_sec,
5041 unsigned int offset)
5042 {
5043 asection *s;
5044 struct elf32_arm_link_hash_table *hash_table;
5045 char *tmp_name;
5046 struct elf_link_hash_entry *myh;
5047 struct bfd_link_hash_entry *bh;
5048 bfd_vma val;
5049 struct _arm_elf_section_data *sec_data;
5050 int errcount;
5051 elf32_vfp11_erratum_list *newerr;
5052
5053 hash_table = elf32_arm_hash_table (link_info);
5054
5055 BFD_ASSERT (hash_table != NULL);
5056 BFD_ASSERT (hash_table->bfd_of_glue_owner != NULL);
5057
5058 s = bfd_get_section_by_name
5059 (hash_table->bfd_of_glue_owner, VFP11_ERRATUM_VENEER_SECTION_NAME);
5060
5061 sec_data = elf32_arm_section_data (s);
5062
5063 BFD_ASSERT (s != NULL);
5064
5065 tmp_name = bfd_malloc ((bfd_size_type) strlen
5066 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
5067
5068 BFD_ASSERT (tmp_name);
5069
5070 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
5071 hash_table->num_vfp11_fixes);
5072
5073 myh = elf_link_hash_lookup
5074 (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
5075
5076 BFD_ASSERT (myh == NULL);
5077
5078 bh = NULL;
5079 val = hash_table->vfp11_erratum_glue_size;
5080 _bfd_generic_link_add_one_symbol (link_info, hash_table->bfd_of_glue_owner,
5081 tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
5082 NULL, TRUE, FALSE, &bh);
5083
5084 myh = (struct elf_link_hash_entry *) bh;
5085 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5086 myh->forced_local = 1;
5087
5088 /* Link veneer back to calling location. */
5089 errcount = ++(sec_data->erratumcount);
5090 newerr = bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
5091
5092 newerr->type = VFP11_ERRATUM_ARM_VENEER;
5093 newerr->vma = -1;
5094 newerr->u.v.branch = branch;
5095 newerr->u.v.id = hash_table->num_vfp11_fixes;
5096 branch->u.b.veneer = newerr;
5097
5098 newerr->next = sec_data->erratumlist;
5099 sec_data->erratumlist = newerr;
5100
5101 /* A symbol for the return from the veneer. */
5102 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
5103 hash_table->num_vfp11_fixes);
5104
5105 myh = elf_link_hash_lookup
5106 (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
5107
5108 if (myh != NULL)
5109 abort ();
5110
5111 bh = NULL;
5112 val = offset + 4;
5113 _bfd_generic_link_add_one_symbol (link_info, branch_bfd, tmp_name, BSF_LOCAL,
5114 branch_sec, val, NULL, TRUE, FALSE, &bh);
5115
5116 myh = (struct elf_link_hash_entry *) bh;
5117 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5118 myh->forced_local = 1;
5119
5120 free (tmp_name);
5121
5122 /* Generate a mapping symbol for the veneer section, and explicitly add an
5123 entry for that symbol to the code/data map for the section. */
5124 if (hash_table->vfp11_erratum_glue_size == 0)
5125 {
5126 bh = NULL;
5127 /* FIXME: Creates an ARM symbol. Thumb mode will need attention if it
5128 ever requires this erratum fix. */
5129 _bfd_generic_link_add_one_symbol (link_info,
5130 hash_table->bfd_of_glue_owner, "$a",
5131 BSF_LOCAL, s, 0, NULL,
5132 TRUE, FALSE, &bh);
5133
5134 myh = (struct elf_link_hash_entry *) bh;
5135 myh->type = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
5136 myh->forced_local = 1;
5137
5138 /* The elf32_arm_init_maps function only cares about symbols from input
5139 BFDs. We must make a note of this generated mapping symbol
5140 ourselves so that code byteswapping works properly in
5141 elf32_arm_write_section. */
5142 elf32_arm_section_map_add (s, 'a', 0);
5143 }
5144
5145 s->size += VFP11_ERRATUM_VENEER_SIZE;
5146 hash_table->vfp11_erratum_glue_size += VFP11_ERRATUM_VENEER_SIZE;
5147 hash_table->num_vfp11_fixes++;
5148
5149 /* The offset of the veneer. */
5150 return val;
5151 }
5152
5153 #define ARM_GLUE_SECTION_FLAGS \
5154 (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_CODE \
5155 | SEC_READONLY | SEC_LINKER_CREATED)
5156
5157 /* Create a fake section for use by the ARM backend of the linker. */
5158
5159 static bfd_boolean
5160 arm_make_glue_section (bfd * abfd, const char * name)
5161 {
5162 asection * sec;
5163
5164 sec = bfd_get_section_by_name (abfd, name);
5165 if (sec != NULL)
5166 /* Already made. */
5167 return TRUE;
5168
5169 sec = bfd_make_section_with_flags (abfd, name, ARM_GLUE_SECTION_FLAGS);
5170
5171 if (sec == NULL
5172 || !bfd_set_section_alignment (abfd, sec, 2))
5173 return FALSE;
5174
5175 /* Set the gc mark to prevent the section from being removed by garbage
5176 collection, despite the fact that no relocs refer to this section. */
5177 sec->gc_mark = 1;
5178
5179 return TRUE;
5180 }
5181
5182 /* Add the glue sections to ABFD. This function is called from the
5183 linker scripts in ld/emultempl/{armelf}.em. */
5184
5185 bfd_boolean
5186 bfd_elf32_arm_add_glue_sections_to_bfd (bfd *abfd,
5187 struct bfd_link_info *info)
5188 {
5189 /* If we are only performing a partial
5190 link do not bother adding the glue. */
5191 if (info->relocatable)
5192 return TRUE;
5193
5194 return arm_make_glue_section (abfd, ARM2THUMB_GLUE_SECTION_NAME)
5195 && arm_make_glue_section (abfd, THUMB2ARM_GLUE_SECTION_NAME)
5196 && arm_make_glue_section (abfd, VFP11_ERRATUM_VENEER_SECTION_NAME)
5197 && arm_make_glue_section (abfd, ARM_BX_GLUE_SECTION_NAME);
5198 }
5199
5200 /* Select a BFD to be used to hold the sections used by the glue code.
5201 This function is called from the linker scripts in ld/emultempl/
5202 {armelf/pe}.em. */
5203
5204 bfd_boolean
5205 bfd_elf32_arm_get_bfd_for_interworking (bfd *abfd, struct bfd_link_info *info)
5206 {
5207 struct elf32_arm_link_hash_table *globals;
5208
5209 /* If we are only performing a partial link
5210 do not bother getting a bfd to hold the glue. */
5211 if (info->relocatable)
5212 return TRUE;
5213
5214 /* Make sure we don't attach the glue sections to a dynamic object. */
5215 BFD_ASSERT (!(abfd->flags & DYNAMIC));
5216
5217 globals = elf32_arm_hash_table (info);
5218
5219 BFD_ASSERT (globals != NULL);
5220
5221 if (globals->bfd_of_glue_owner != NULL)
5222 return TRUE;
5223
5224 /* Save the bfd for later use. */
5225 globals->bfd_of_glue_owner = abfd;
5226
5227 return TRUE;
5228 }
5229
5230 static void
5231 check_use_blx (struct elf32_arm_link_hash_table *globals)
5232 {
5233 if (bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
5234 Tag_CPU_arch) > 2)
5235 globals->use_blx = 1;
5236 }
5237
5238 bfd_boolean
5239 bfd_elf32_arm_process_before_allocation (bfd *abfd,
5240 struct bfd_link_info *link_info)
5241 {
5242 Elf_Internal_Shdr *symtab_hdr;
5243 Elf_Internal_Rela *internal_relocs = NULL;
5244 Elf_Internal_Rela *irel, *irelend;
5245 bfd_byte *contents = NULL;
5246
5247 asection *sec;
5248 struct elf32_arm_link_hash_table *globals;
5249
5250 /* If we are only performing a partial link do not bother
5251 to construct any glue. */
5252 if (link_info->relocatable)
5253 return TRUE;
5254
5255   /* Here we have a bfd that is to be included in the link.  We have a
5256      hook to do reloc rummaging, before section sizes are nailed down.  */
5257 globals = elf32_arm_hash_table (link_info);
5258
5259 BFD_ASSERT (globals != NULL);
5260
5261 check_use_blx (globals);
5262
5263 if (globals->byteswap_code && !bfd_big_endian (abfd))
5264 {
5265 _bfd_error_handler (_("%B: BE8 images only valid in big-endian mode."),
5266 abfd);
5267 return FALSE;
5268 }
5269
5270 /* PR 5398: If we have not decided to include any loadable sections in
5271 the output then we will not have a glue owner bfd. This is OK, it
5272 just means that there is nothing else for us to do here. */
5273 if (globals->bfd_of_glue_owner == NULL)
5274 return TRUE;
5275
5276 /* Rummage around all the relocs and map the glue vectors. */
5277 sec = abfd->sections;
5278
5279 if (sec == NULL)
5280 return TRUE;
5281
5282 for (; sec != NULL; sec = sec->next)
5283 {
5284 if (sec->reloc_count == 0)
5285 continue;
5286
5287 if ((sec->flags & SEC_EXCLUDE) != 0)
5288 continue;
5289
5290 symtab_hdr = & elf_symtab_hdr (abfd);
5291
5292 /* Load the relocs. */
5293 internal_relocs
5294 = _bfd_elf_link_read_relocs (abfd, sec, NULL, NULL, FALSE);
5295
5296 if (internal_relocs == NULL)
5297 goto error_return;
5298
5299 irelend = internal_relocs + sec->reloc_count;
5300 for (irel = internal_relocs; irel < irelend; irel++)
5301 {
5302 long r_type;
5303 unsigned long r_index;
5304
5305 struct elf_link_hash_entry *h;
5306
5307 r_type = ELF32_R_TYPE (irel->r_info);
5308 r_index = ELF32_R_SYM (irel->r_info);
5309
5310 /* These are the only relocation types we care about. */
5311 if ( r_type != R_ARM_PC24
5312 && (r_type != R_ARM_V4BX || globals->fix_v4bx < 2))
5313 continue;
5314
5315 /* Get the section contents if we haven't done so already. */
5316 if (contents == NULL)
5317 {
5318 /* Get cached copy if it exists. */
5319 if (elf_section_data (sec)->this_hdr.contents != NULL)
5320 contents = elf_section_data (sec)->this_hdr.contents;
5321 else
5322 {
5323 /* Go get them off disk. */
5324 if (! bfd_malloc_and_get_section (abfd, sec, &contents))
5325 goto error_return;
5326 }
5327 }
5328
5329 if (r_type == R_ARM_V4BX)
5330 {
5331 int reg;
5332
5333 reg = bfd_get_32 (abfd, contents + irel->r_offset) & 0xf;
5334 record_arm_bx_glue (link_info, reg);
5335 continue;
5336 }
5337
5338 /* If the relocation is not against a symbol it cannot concern us. */
5339 h = NULL;
5340
5341 /* We don't care about local symbols. */
5342 if (r_index < symtab_hdr->sh_info)
5343 continue;
5344
5345 /* This is an external symbol. */
5346 r_index -= symtab_hdr->sh_info;
5347 h = (struct elf_link_hash_entry *)
5348 elf_sym_hashes (abfd)[r_index];
5349
5350 /* If the relocation is against a static symbol it must be within
5351 the current section and so cannot be a cross ARM/Thumb relocation. */
5352 if (h == NULL)
5353 continue;
5354
5355 /* If the call will go through a PLT entry then we do not need
5356 glue. */
5357 if (globals->splt != NULL && h->plt.offset != (bfd_vma) -1)
5358 continue;
5359
5360 switch (r_type)
5361 {
5362 case R_ARM_PC24:
5363 	      /* This one is a call from ARM code.  We need to look up
5364 	         the target of the call.  If it is a Thumb target, we
5365 	         insert glue.  */
5366 if (ELF_ST_TYPE (h->type) == STT_ARM_TFUNC)
5367 record_arm_to_thumb_glue (link_info, h);
5368 break;
5369
5370 default:
5371 abort ();
5372 }
5373 }
5374
5375 if (contents != NULL
5376 && elf_section_data (sec)->this_hdr.contents != contents)
5377 free (contents);
5378 contents = NULL;
5379
5380 if (internal_relocs != NULL
5381 && elf_section_data (sec)->relocs != internal_relocs)
5382 free (internal_relocs);
5383 internal_relocs = NULL;
5384 }
5385
5386 return TRUE;
5387
5388 error_return:
5389 if (contents != NULL
5390 && elf_section_data (sec)->this_hdr.contents != contents)
5391 free (contents);
5392 if (internal_relocs != NULL
5393 && elf_section_data (sec)->relocs != internal_relocs)
5394 free (internal_relocs);
5395
5396 return FALSE;
5397 }
5398 #endif
5399
5400
5401 /* Initialise maps of ARM/Thumb/data for input BFDs. */
5402
5403 void
5404 bfd_elf32_arm_init_maps (bfd *abfd)
5405 {
5406 Elf_Internal_Sym *isymbuf;
5407 Elf_Internal_Shdr *hdr;
5408 unsigned int i, localsyms;
5409
5410 /* PR 7093: Make sure that we are dealing with an arm elf binary. */
5411 if (! is_arm_elf (abfd))
5412 return;
5413
5414 if ((abfd->flags & DYNAMIC) != 0)
5415 return;
5416
5417 hdr = & elf_symtab_hdr (abfd);
5418 localsyms = hdr->sh_info;
5419
5420 /* Obtain a buffer full of symbols for this BFD. The hdr->sh_info field
5421 should contain the number of local symbols, which should come before any
5422 global symbols. Mapping symbols are always local. */
5423 isymbuf = bfd_elf_get_elf_syms (abfd, hdr, localsyms, 0, NULL, NULL,
5424 NULL);
5425
5426 /* No internal symbols read? Skip this BFD. */
5427 if (isymbuf == NULL)
5428 return;
5429
5430 for (i = 0; i < localsyms; i++)
5431 {
5432 Elf_Internal_Sym *isym = &isymbuf[i];
5433 asection *sec = bfd_section_from_elf_index (abfd, isym->st_shndx);
5434 const char *name;
5435
5436 if (sec != NULL
5437 && ELF_ST_BIND (isym->st_info) == STB_LOCAL)
5438 {
5439 name = bfd_elf_string_from_elf_section (abfd,
5440 hdr->sh_link, isym->st_name);
5441
5442 if (bfd_is_arm_special_symbol_name (name,
5443 BFD_ARM_SPECIAL_SYM_TYPE_MAP))
5444 elf32_arm_section_map_add (sec, name[1], isym->st_value);
5445 }
5446 }
5447 }
5448
5449
5450 /* Auto-select enabling of Cortex-A8 erratum fix if the user didn't explicitly
5451 say what they wanted. */
5452
5453 void
5454 bfd_elf32_arm_set_cortex_a8_fix (bfd *obfd, struct bfd_link_info *link_info)
5455 {
5456 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
5457 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
5458
5459 if (globals->fix_cortex_a8 == -1)
5460 {
5461 /* Turn on Cortex-A8 erratum workaround for ARMv7-A. */
5462 if (out_attr[Tag_CPU_arch].i == TAG_CPU_ARCH_V7
5463 && (out_attr[Tag_CPU_arch_profile].i == 'A'
5464 || out_attr[Tag_CPU_arch_profile].i == 0))
5465 globals->fix_cortex_a8 = 1;
5466 else
5467 globals->fix_cortex_a8 = 0;
5468 }
5469 }
5470
5471
5472 void
5473 bfd_elf32_arm_set_vfp11_fix (bfd *obfd, struct bfd_link_info *link_info)
5474 {
5475 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
5476 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
5477
5478 /* We assume that ARMv7+ does not need the VFP11 denorm erratum fix. */
5479 if (out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V7)
5480 {
5481 switch (globals->vfp11_fix)
5482 {
5483 case BFD_ARM_VFP11_FIX_DEFAULT:
5484 case BFD_ARM_VFP11_FIX_NONE:
5485 globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
5486 break;
5487
5488 default:
5489 /* Give a warning, but do as the user requests anyway. */
5490 (*_bfd_error_handler) (_("%B: warning: selected VFP11 erratum "
5491 "workaround is not necessary for target architecture"), obfd);
5492 }
5493 }
5494 else if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_DEFAULT)
5495 /* For earlier architectures, we might need the workaround, but do not
5496    enable it by default. If the user is running with broken hardware, they
5497 must enable the erratum fix explicitly. */
5498 globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
5499 }
5500
5501
5502 enum bfd_arm_vfp11_pipe
5503 {
5504 VFP11_FMAC,
5505 VFP11_LS,
5506 VFP11_DS,
5507 VFP11_BAD
5508 };
5509
5510 /* Return a VFP register number. This is encoded as RX:X for single-precision
5511 registers, or X:RX for double-precision registers, where RX is the group of
5512 four bits in the instruction encoding and X is the single extension bit.
5513 RX and X fields are specified using their lowest (starting) bit. The return
5514 value is:
5515
5516 0...31: single-precision registers s0...s31
5517 32...63: double-precision registers d0...d31.
5518
5519 Although X should be zero for VFP11 (encoding d0...d15 only), we might
5520 encounter VFP3 instructions, so we allow the full range for DP registers. */
5521
5522 static unsigned int
5523 bfd_arm_vfp11_regno (unsigned int insn, bfd_boolean is_double, unsigned int rx,
5524 unsigned int x)
5525 {
5526 if (is_double)
5527 return (((insn >> rx) & 0xf) | (((insn >> x) & 1) << 4)) + 32;
5528 else
5529 return (((insn >> rx) & 0xf) << 1) | ((insn >> x) & 1);
5530 }
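
/* A worked example of the encoding above, using the Fd operand with RX at
   bit 12 and X at bit 22 (as the callers below do): if bits 12-15 hold 5
   and bit 22 is set, the single-precision result is (5 << 1) | 1 == 11
   (s11), while the double-precision result is (5 | (1 << 4)) + 32 == 53,
   i.e. d21.  */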
5531
5532 /* Set bits in *WMASK according to a register number REG as encoded by
5533 bfd_arm_vfp11_regno(). Ignore d16-d31. */
5534
5535 static void
5536 bfd_arm_vfp11_write_mask (unsigned int *wmask, unsigned int reg)
5537 {
5538 if (reg < 32)
5539 *wmask |= 1 << reg;
5540 else if (reg < 48)
5541 *wmask |= 3 << ((reg - 32) * 2);
5542 }
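
/* For instance, register 5 (s5) sets just bit 5, while register 35 (d3)
   sets bits 6 and 7 because d3 aliases s6/s7; registers 48 and above
   (d16-d31) are deliberately left out of the mask.  */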
5543
5544 /* Return TRUE if WMASK overwrites anything in REGS. */
5545
5546 static bfd_boolean
5547 bfd_arm_vfp11_antidependency (unsigned int wmask, int *regs, int numregs)
5548 {
5549 int i;
5550
5551 for (i = 0; i < numregs; i++)
5552 {
5553 unsigned int reg = regs[i];
5554
5555 if (reg < 32 && (wmask & (1 << reg)) != 0)
5556 return TRUE;
5557
5558 reg -= 32;
5559
5560 if (reg >= 16)
5561 continue;
5562
5563 if ((wmask & (3 << (reg * 2))) != 0)
5564 return TRUE;
5565 }
5566
5567 return FALSE;
5568 }
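
/* Example: if an earlier FMAC-pipeline instruction read s6 and s7
   (REGS == {6, 7}) and a later instruction's write mask has bit 6 set
   (it wrote s6, or d3 which aliases s6/s7), this returns TRUE and the
   scanner below will plant a veneer.  */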
5569
5570 /* In this function, we're interested in two things: finding input registers
5571 for VFP data-processing instructions, and finding the set of registers which
5572 arbitrary VFP instructions may write to. We use a 32-bit unsigned int to
5573 hold the written set, so FLDM etc. are easy to deal with (we're only
5574    interested in 32 SP registers or 16 DP registers, due to the VFP version
5575    implemented by the chip in question). DP registers are marked by setting
5576    both SP registers in the write mask.  */
5577
5578 static enum bfd_arm_vfp11_pipe
5579 bfd_arm_vfp11_insn_decode (unsigned int insn, unsigned int *destmask, int *regs,
5580 int *numregs)
5581 {
5582 enum bfd_arm_vfp11_pipe pipe = VFP11_BAD;
5583 bfd_boolean is_double = ((insn & 0xf00) == 0xb00) ? 1 : 0;
5584
5585 if ((insn & 0x0f000e10) == 0x0e000a00) /* A data-processing insn. */
5586 {
5587 unsigned int pqrs;
5588 unsigned int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
5589 unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);
5590
5591 pqrs = ((insn & 0x00800000) >> 20)
5592 | ((insn & 0x00300000) >> 19)
5593 | ((insn & 0x00000040) >> 6);
5594
5595 switch (pqrs)
5596 {
5597 case 0: /* fmac[sd]. */
5598 case 1: /* fnmac[sd]. */
5599 case 2: /* fmsc[sd]. */
5600 case 3: /* fnmsc[sd]. */
5601 pipe = VFP11_FMAC;
5602 bfd_arm_vfp11_write_mask (destmask, fd);
5603 regs[0] = fd;
5604 regs[1] = bfd_arm_vfp11_regno (insn, is_double, 16, 7); /* Fn. */
5605 regs[2] = fm;
5606 *numregs = 3;
5607 break;
5608
5609 case 4: /* fmul[sd]. */
5610 case 5: /* fnmul[sd]. */
5611 case 6: /* fadd[sd]. */
5612 case 7: /* fsub[sd]. */
5613 pipe = VFP11_FMAC;
5614 goto vfp_binop;
5615
5616 case 8: /* fdiv[sd]. */
5617 pipe = VFP11_DS;
5618 vfp_binop:
5619 bfd_arm_vfp11_write_mask (destmask, fd);
5620 regs[0] = bfd_arm_vfp11_regno (insn, is_double, 16, 7); /* Fn. */
5621 regs[1] = fm;
5622 *numregs = 2;
5623 break;
5624
5625 case 15: /* extended opcode. */
5626 {
5627 unsigned int extn = ((insn >> 15) & 0x1e)
5628 | ((insn >> 7) & 1);
5629
5630 switch (extn)
5631 {
5632 case 0: /* fcpy[sd]. */
5633 case 1: /* fabs[sd]. */
5634 case 2: /* fneg[sd]. */
5635 case 8: /* fcmp[sd]. */
5636 case 9: /* fcmpe[sd]. */
5637 case 10: /* fcmpz[sd]. */
5638 case 11: /* fcmpez[sd]. */
5639 case 16: /* fuito[sd]. */
5640 case 17: /* fsito[sd]. */
5641 case 24: /* ftoui[sd]. */
5642 case 25: /* ftouiz[sd]. */
5643 case 26: /* ftosi[sd]. */
5644 case 27: /* ftosiz[sd]. */
5645 /* These instructions will not bounce due to underflow. */
5646 *numregs = 0;
5647 pipe = VFP11_FMAC;
5648 break;
5649
5650 case 3: /* fsqrt[sd]. */
5651 /* fsqrt cannot underflow, but it can (perhaps) overwrite
5652 registers to cause the erratum in previous instructions. */
5653 bfd_arm_vfp11_write_mask (destmask, fd);
5654 pipe = VFP11_DS;
5655 break;
5656
5657 case 15: /* fcvt{ds,sd}. */
5658 {
5659 int rnum = 0;
5660
5661 bfd_arm_vfp11_write_mask (destmask, fd);
5662
5663 /* Only FCVTSD can underflow. */
5664 if ((insn & 0x100) != 0)
5665 regs[rnum++] = fm;
5666
5667 *numregs = rnum;
5668
5669 pipe = VFP11_FMAC;
5670 }
5671 break;
5672
5673 default:
5674 return VFP11_BAD;
5675 }
5676 }
5677 break;
5678
5679 default:
5680 return VFP11_BAD;
5681 }
5682 }
5683 /* Two-register transfer. */
5684 else if ((insn & 0x0fe00ed0) == 0x0c400a10)
5685 {
5686 unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);
5687
5688 if ((insn & 0x100000) == 0)
5689 {
5690 if (is_double)
5691 bfd_arm_vfp11_write_mask (destmask, fm);
5692 else
5693 {
5694 bfd_arm_vfp11_write_mask (destmask, fm);
5695 bfd_arm_vfp11_write_mask (destmask, fm + 1);
5696 }
5697 }
5698
5699 pipe = VFP11_LS;
5700 }
5701 else if ((insn & 0x0e100e00) == 0x0c100a00) /* A load insn. */
5702 {
5703 int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
5704 unsigned int puw = ((insn >> 21) & 0x1) | (((insn >> 23) & 3) << 1);
5705
5706 switch (puw)
5707 {
5708 case 0: /* Two-reg transfer. We should catch these above. */
5709 abort ();
5710
5711 case 2: /* fldm[sdx]. */
5712 case 3:
5713 case 5:
5714 {
5715 unsigned int i, offset = insn & 0xff;
5716
5717 if (is_double)
5718 offset >>= 1;
5719
5720 for (i = fd; i < fd + offset; i++)
5721 bfd_arm_vfp11_write_mask (destmask, i);
5722 }
5723 break;
5724
5725 case 4: /* fld[sd]. */
5726 case 6:
5727 bfd_arm_vfp11_write_mask (destmask, fd);
5728 break;
5729
5730 default:
5731 return VFP11_BAD;
5732 }
5733
5734 pipe = VFP11_LS;
5735 }
5736 /* Single-register transfer. Note L==0. */
5737 else if ((insn & 0x0f100e10) == 0x0e000a10)
5738 {
5739 unsigned int opcode = (insn >> 21) & 7;
5740 unsigned int fn = bfd_arm_vfp11_regno (insn, is_double, 16, 7);
5741
5742 switch (opcode)
5743 {
5744 case 0: /* fmsr/fmdlr. */
5745 case 1: /* fmdhr. */
5746 /* Mark fmdhr and fmdlr as writing to the whole of the DP
5747 destination register. I don't know if this is exactly right,
5748 but it is the conservative choice. */
5749 bfd_arm_vfp11_write_mask (destmask, fn);
5750 break;
5751
5752 case 7: /* fmxr. */
5753 break;
5754 }
5755
5756 pipe = VFP11_LS;
5757 }
5758
5759 return pipe;
5760 }
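
/* A minimal sketch of how the erratum scanner below consumes this decoder
   (INSN stands for a hypothetical instruction word):

     unsigned int writemask = 0;
     int regs[3], numregs = 0;
     enum bfd_arm_vfp11_pipe p
       = bfd_arm_vfp11_insn_decode (insn, &writemask, regs, &numregs);

   When P is VFP11_FMAC or VFP11_DS the scanner remembers
   REGS[0..NUMREGS-1] as live input operands; WRITEMASK of later
   instructions is then tested against those inputs with
   bfd_arm_vfp11_antidependency.  */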
5761
5762
5763 static int elf32_arm_compare_mapping (const void * a, const void * b);
5764
5765
5766 /* Look for potentially-troublesome code sequences which might trigger the
5767 VFP11 denormal/antidependency erratum. See, e.g., the ARM1136 errata sheet
5768 (available from ARM) for details of the erratum. A short version is
5769 described in ld.texinfo. */
5770
5771 bfd_boolean
5772 bfd_elf32_arm_vfp11_erratum_scan (bfd *abfd, struct bfd_link_info *link_info)
5773 {
5774 asection *sec;
5775 bfd_byte *contents = NULL;
5776 int state = 0;
5777 int regs[3], numregs = 0;
5778 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
5779 int use_vector = (globals->vfp11_fix == BFD_ARM_VFP11_FIX_VECTOR);
5780
5781 /* We use a simple FSM to match troublesome VFP11 instruction sequences.
5782 The states transition as follows:
5783
5784 0 -> 1 (vector) or 0 -> 2 (scalar)
5785 A VFP FMAC-pipeline instruction has been seen. Fill
5786 regs[0]..regs[numregs-1] with its input operands. Remember this
5787 instruction in 'first_fmac'.
5788
5789 1 -> 2
5790 Any instruction, except for a VFP instruction which overwrites
5791 regs[*].
5792
5793 1 -> 3 [ -> 0 ] or
5794 2 -> 3 [ -> 0 ]
5795 A VFP instruction has been seen which overwrites any of regs[*].
5796 We must make a veneer! Reset state to 0 before examining next
5797 instruction.
5798
5799 2 -> 0
5800 If we fail to match anything in state 2, reset to state 0 and reset
5801 the instruction pointer to the instruction after 'first_fmac'.
5802
5803 If the VFP11 vector mode is in use, there must be at least two unrelated
5804 instructions between anti-dependent VFP11 instructions to properly avoid
5805 triggering the erratum, hence the use of the extra state 1. */
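
  /* A concrete scalar-mode trace of the FSM above: "fmuls s1, s2, s3"
     decodes to the FMAC pipe with inputs {s2, s3}, taking us to state 2
     and recording the instruction as first_fmac.  If the next instruction
     is "fmuls s2, s8, s9", its write mask covers s2, which overlaps the
     recorded inputs, so we move to state 3 and a veneer is recorded for
     the first instruction.  (The mnemonics here are purely
     illustrative.)  */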
5806
5807 /* If we are only performing a partial link do not bother
5808 to construct any glue. */
5809 if (link_info->relocatable)
5810 return TRUE;
5811
5812 /* Skip if this bfd does not correspond to an ELF image. */
5813 if (! is_arm_elf (abfd))
5814 return TRUE;
5815
5816 /* We should have chosen a fix type by the time we get here. */
5817 BFD_ASSERT (globals->vfp11_fix != BFD_ARM_VFP11_FIX_DEFAULT);
5818
5819 if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_NONE)
5820 return TRUE;
5821
5822 /* Skip this BFD if it corresponds to an executable or dynamic object. */
5823 if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
5824 return TRUE;
5825
5826 for (sec = abfd->sections; sec != NULL; sec = sec->next)
5827 {
5828 unsigned int i, span, first_fmac = 0, veneer_of_insn = 0;
5829 struct _arm_elf_section_data *sec_data;
5830
5831 /* If we don't have executable progbits, we're not interested in this
5832 section. Also skip if section is to be excluded. */
5833 if (elf_section_type (sec) != SHT_PROGBITS
5834 || (elf_section_flags (sec) & SHF_EXECINSTR) == 0
5835 || (sec->flags & SEC_EXCLUDE) != 0
5836 || sec->sec_info_type == ELF_INFO_TYPE_JUST_SYMS
5837 || sec->output_section == bfd_abs_section_ptr
5838 || strcmp (sec->name, VFP11_ERRATUM_VENEER_SECTION_NAME) == 0)
5839 continue;
5840
5841 sec_data = elf32_arm_section_data (sec);
5842
5843 if (sec_data->mapcount == 0)
5844 continue;
5845
5846 if (elf_section_data (sec)->this_hdr.contents != NULL)
5847 contents = elf_section_data (sec)->this_hdr.contents;
5848 else if (! bfd_malloc_and_get_section (abfd, sec, &contents))
5849 goto error_return;
5850
5851 qsort (sec_data->map, sec_data->mapcount, sizeof (elf32_arm_section_map),
5852 elf32_arm_compare_mapping);
5853
5854 for (span = 0; span < sec_data->mapcount; span++)
5855 {
5856 unsigned int span_start = sec_data->map[span].vma;
5857 unsigned int span_end = (span == sec_data->mapcount - 1)
5858 ? sec->size : sec_data->map[span + 1].vma;
5859 char span_type = sec_data->map[span].type;
5860
5861 /* FIXME: Only ARM mode is supported at present. We may need to
5862 support Thumb-2 mode also at some point. */
5863 if (span_type != 'a')
5864 continue;
5865
5866 for (i = span_start; i < span_end;)
5867 {
5868 unsigned int next_i = i + 4;
5869 unsigned int insn = bfd_big_endian (abfd)
5870 ? (contents[i] << 24)
5871 | (contents[i + 1] << 16)
5872 | (contents[i + 2] << 8)
5873 | contents[i + 3]
5874 : (contents[i + 3] << 24)
5875 | (contents[i + 2] << 16)
5876 | (contents[i + 1] << 8)
5877 | contents[i];
5878 unsigned int writemask = 0;
5879 enum bfd_arm_vfp11_pipe pipe;
5880
5881 switch (state)
5882 {
5883 case 0:
5884 pipe = bfd_arm_vfp11_insn_decode (insn, &writemask, regs,
5885 &numregs);
5886 /* I'm assuming the VFP11 erratum can trigger with denorm
5887 operands on either the FMAC or the DS pipeline. This might
5888 lead to slightly overenthusiastic veneer insertion. */
5889 if (pipe == VFP11_FMAC || pipe == VFP11_DS)
5890 {
5891 state = use_vector ? 1 : 2;
5892 first_fmac = i;
5893 veneer_of_insn = insn;
5894 }
5895 break;
5896
5897 case 1:
5898 {
5899 int other_regs[3], other_numregs;
5900 pipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
5901 other_regs,
5902 &other_numregs);
5903 if (pipe != VFP11_BAD
5904 && bfd_arm_vfp11_antidependency (writemask, regs,
5905 numregs))
5906 state = 3;
5907 else
5908 state = 2;
5909 }
5910 break;
5911
5912 case 2:
5913 {
5914 int other_regs[3], other_numregs;
5915 pipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
5916 other_regs,
5917 &other_numregs);
5918 if (pipe != VFP11_BAD
5919 && bfd_arm_vfp11_antidependency (writemask, regs,
5920 numregs))
5921 state = 3;
5922 else
5923 {
5924 state = 0;
5925 next_i = first_fmac + 4;
5926 }
5927 }
5928 break;
5929
5930 case 3:
5931 abort (); /* Should be unreachable. */
5932 }
5933
5934 if (state == 3)
5935 {
5936 elf32_vfp11_erratum_list *newerr
5937 = bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
5938 int errcount;
5939
5940 errcount = ++(elf32_arm_section_data (sec)->erratumcount);
5941
5942 newerr->u.b.vfp_insn = veneer_of_insn;
5943
5944 switch (span_type)
5945 {
5946 case 'a':
5947 newerr->type = VFP11_ERRATUM_BRANCH_TO_ARM_VENEER;
5948 break;
5949
5950 default:
5951 abort ();
5952 }
5953
5954 record_vfp11_erratum_veneer (link_info, newerr, abfd, sec,
5955 first_fmac);
5956
5957 newerr->vma = -1;
5958
5959 newerr->next = sec_data->erratumlist;
5960 sec_data->erratumlist = newerr;
5961
5962 state = 0;
5963 }
5964
5965 i = next_i;
5966 }
5967 }
5968
5969 if (contents != NULL
5970 && elf_section_data (sec)->this_hdr.contents != contents)
5971 free (contents);
5972 contents = NULL;
5973 }
5974
5975 return TRUE;
5976
5977 error_return:
5978 if (contents != NULL
5979 && elf_section_data (sec)->this_hdr.contents != contents)
5980 free (contents);
5981
5982 return FALSE;
5983 }
5984
5985 /* Find virtual-memory addresses for VFP11 erratum veneers and return locations
5986 after sections have been laid out, using specially-named symbols. */
5987
5988 void
5989 bfd_elf32_arm_vfp11_fix_veneer_locations (bfd *abfd,
5990 struct bfd_link_info *link_info)
5991 {
5992 asection *sec;
5993 struct elf32_arm_link_hash_table *globals;
5994 char *tmp_name;
5995
5996 if (link_info->relocatable)
5997 return;
5998
5999 /* Skip if this bfd does not correspond to an ELF image. */
6000 if (! is_arm_elf (abfd))
6001 return;
6002
6003 globals = elf32_arm_hash_table (link_info);
6004
6005 tmp_name = bfd_malloc ((bfd_size_type) strlen
6006 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
6007
6008 for (sec = abfd->sections; sec != NULL; sec = sec->next)
6009 {
6010 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
6011 elf32_vfp11_erratum_list *errnode = sec_data->erratumlist;
6012
6013 for (; errnode != NULL; errnode = errnode->next)
6014 {
6015 struct elf_link_hash_entry *myh;
6016 bfd_vma vma;
6017
6018 switch (errnode->type)
6019 {
6020 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
6021 case VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER:
6022 /* Find veneer symbol. */
6023 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
6024 errnode->u.b.veneer->u.v.id);
6025
6026 myh = elf_link_hash_lookup
6027 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
6028
6029 if (myh == NULL)
6030 (*_bfd_error_handler) (_("%B: unable to find VFP11 veneer "
6031 "`%s'"), abfd, tmp_name);
6032
6033 vma = myh->root.u.def.section->output_section->vma
6034 + myh->root.u.def.section->output_offset
6035 + myh->root.u.def.value;
6036
6037 errnode->u.b.veneer->vma = vma;
6038 break;
6039
6040 case VFP11_ERRATUM_ARM_VENEER:
6041 case VFP11_ERRATUM_THUMB_VENEER:
6042 /* Find return location. */
6043 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
6044 errnode->u.v.id);
6045
6046 myh = elf_link_hash_lookup
6047 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
6048
6049 if (myh == NULL)
6050 (*_bfd_error_handler) (_("%B: unable to find VFP11 veneer "
6051 "`%s'"), abfd, tmp_name);
6052
6053 vma = myh->root.u.def.section->output_section->vma
6054 + myh->root.u.def.section->output_offset
6055 + myh->root.u.def.value;
6056
6057 errnode->u.v.branch->vma = vma;
6058 break;
6059
6060 default:
6061 abort ();
6062 }
6063 }
6064 }
6065
6066 free (tmp_name);
6067 }
6068
6069
6070 /* Set target relocation values needed during linking. */
6071
6072 void
6073 bfd_elf32_arm_set_target_relocs (struct bfd *output_bfd,
6074 struct bfd_link_info *link_info,
6075 int target1_is_rel,
6076 char * target2_type,
6077 int fix_v4bx,
6078 int use_blx,
6079 bfd_arm_vfp11_fix vfp11_fix,
6080 int no_enum_warn, int no_wchar_warn,
6081 int pic_veneer, int fix_cortex_a8)
6082 {
6083 struct elf32_arm_link_hash_table *globals;
6084
6085 globals = elf32_arm_hash_table (link_info);
6086
6087 globals->target1_is_rel = target1_is_rel;
6088 if (strcmp (target2_type, "rel") == 0)
6089 globals->target2_reloc = R_ARM_REL32;
6090 else if (strcmp (target2_type, "abs") == 0)
6091 globals->target2_reloc = R_ARM_ABS32;
6092 else if (strcmp (target2_type, "got-rel") == 0)
6093 globals->target2_reloc = R_ARM_GOT_PREL;
6094 else
6095 {
6096 _bfd_error_handler (_("Invalid TARGET2 relocation type '%s'."),
6097 target2_type);
6098 }
6099 globals->fix_v4bx = fix_v4bx;
6100 globals->use_blx |= use_blx;
6101 globals->vfp11_fix = vfp11_fix;
6102 globals->pic_veneer = pic_veneer;
6103 globals->fix_cortex_a8 = fix_cortex_a8;
6104
6105 BFD_ASSERT (is_arm_elf (output_bfd));
6106 elf_arm_tdata (output_bfd)->no_enum_size_warning = no_enum_warn;
6107 elf_arm_tdata (output_bfd)->no_wchar_size_warning = no_wchar_warn;
6108 }
6109
6110 /* Replace the target offset of a Thumb bl or b.w instruction. */
6111
6112 static void
6113 insert_thumb_branch (bfd *abfd, long int offset, bfd_byte *insn)
6114 {
6115 bfd_vma upper;
6116 bfd_vma lower;
6117 int reloc_sign;
6118
6119 BFD_ASSERT ((offset & 1) == 0);
6120
6121 upper = bfd_get_16 (abfd, insn);
6122 lower = bfd_get_16 (abfd, insn + 2);
6123 reloc_sign = (offset < 0) ? 1 : 0;
6124 upper = (upper & ~(bfd_vma) 0x7ff)
6125 | ((offset >> 12) & 0x3ff)
6126 | (reloc_sign << 10);
6127 lower = (lower & ~(bfd_vma) 0x2fff)
6128 | (((!((offset >> 23) & 1)) ^ reloc_sign) << 13)
6129 | (((!((offset >> 22) & 1)) ^ reloc_sign) << 11)
6130 | ((offset >> 1) & 0x7ff);
6131 bfd_put_16 (abfd, upper, insn);
6132 bfd_put_16 (abfd, lower, insn + 2);
6133 }
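
/* As a check on the bit-twiddling above: with OFFSET == 0x1000 and a BL
   pair that initially encodes a zero offset (0xf000, 0xf800), the
   rewritten halfwords are 0xf001 and 0xf800 -- imm10 becomes 1, the
   J1/J2 bits stay set (positive offset), and imm11 stays zero.  */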
6134
6135 /* Thumb code calling an ARM function. */
6136
6137 static int
6138 elf32_thumb_to_arm_stub (struct bfd_link_info * info,
6139 const char * name,
6140 bfd * input_bfd,
6141 bfd * output_bfd,
6142 asection * input_section,
6143 bfd_byte * hit_data,
6144 asection * sym_sec,
6145 bfd_vma offset,
6146 bfd_signed_vma addend,
6147 bfd_vma val,
6148 char **error_message)
6149 {
6150 asection * s = 0;
6151 bfd_vma my_offset;
6152 long int ret_offset;
6153 struct elf_link_hash_entry * myh;
6154 struct elf32_arm_link_hash_table * globals;
6155
6156 myh = find_thumb_glue (info, name, error_message);
6157 if (myh == NULL)
6158 return FALSE;
6159
6160 globals = elf32_arm_hash_table (info);
6161
6162 BFD_ASSERT (globals != NULL);
6163 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6164
6165 my_offset = myh->root.u.def.value;
6166
6167 s = bfd_get_section_by_name (globals->bfd_of_glue_owner,
6168 THUMB2ARM_GLUE_SECTION_NAME);
6169
6170 BFD_ASSERT (s != NULL);
6171 BFD_ASSERT (s->contents != NULL);
6172 BFD_ASSERT (s->output_section != NULL);
6173
6174 if ((my_offset & 0x01) == 0x01)
6175 {
6176 if (sym_sec != NULL
6177 && sym_sec->owner != NULL
6178 && !INTERWORK_FLAG (sym_sec->owner))
6179 {
6180 (*_bfd_error_handler)
6181 (_("%B(%s): warning: interworking not enabled.\n"
6182 " first occurrence: %B: thumb call to arm"),
6183 sym_sec->owner, input_bfd, name);
6184
6185 return FALSE;
6186 }
6187
6188 --my_offset;
6189 myh->root.u.def.value = my_offset;
6190
6191 put_thumb_insn (globals, output_bfd, (bfd_vma) t2a1_bx_pc_insn,
6192 s->contents + my_offset);
6193
6194 put_thumb_insn (globals, output_bfd, (bfd_vma) t2a2_noop_insn,
6195 s->contents + my_offset + 2);
6196
6197 ret_offset =
6198 /* Address of destination of the stub. */
6199 ((bfd_signed_vma) val)
6200 - ((bfd_signed_vma)
6201 /* Offset from the start of the current section
6202 to the start of the stubs. */
6203 (s->output_offset
6204 /* Offset of the start of this stub from the start of the stubs. */
6205 + my_offset
6206 /* Address of the start of the current section. */
6207 + s->output_section->vma)
6208 /* The branch instruction is 4 bytes into the stub. */
6209 + 4
6210 /* ARM branches work from the pc of the instruction + 8. */
6211 + 8);
6212
6213 put_arm_insn (globals, output_bfd,
6214 (bfd_vma) t2a3_b_insn | ((ret_offset >> 2) & 0x00FFFFFF),
6215 s->contents + my_offset + 4);
6216 }
6217
6218 BFD_ASSERT (my_offset <= globals->thumb_glue_size);
6219
6220 /* Now go back and fix up the original BL insn to point to here. */
6221 ret_offset =
6222 /* Address of where the stub is located. */
6223 (s->output_section->vma + s->output_offset + my_offset)
6224 /* Address of where the BL is located. */
6225 - (input_section->output_section->vma + input_section->output_offset
6226 + offset)
6227 /* Addend in the relocation. */
6228 - addend
6229     /* Biasing for PC-relative addressing. */
6230 - 8;
6231
6232 insert_thumb_branch (input_bfd, ret_offset, hit_data - input_section->vma);
6233
6234 return TRUE;
6235 }
6236
6237 /* Populate an Arm to Thumb stub. Returns the stub symbol. */
6238
6239 static struct elf_link_hash_entry *
6240 elf32_arm_create_thumb_stub (struct bfd_link_info * info,
6241 const char * name,
6242 bfd * input_bfd,
6243 bfd * output_bfd,
6244 asection * sym_sec,
6245 bfd_vma val,
6246 asection * s,
6247 char ** error_message)
6248 {
6249 bfd_vma my_offset;
6250 long int ret_offset;
6251 struct elf_link_hash_entry * myh;
6252 struct elf32_arm_link_hash_table * globals;
6253
6254 myh = find_arm_glue (info, name, error_message);
6255 if (myh == NULL)
6256 return NULL;
6257
6258 globals = elf32_arm_hash_table (info);
6259
6260 BFD_ASSERT (globals != NULL);
6261 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6262
6263 my_offset = myh->root.u.def.value;
6264
6265 if ((my_offset & 0x01) == 0x01)
6266 {
6267 if (sym_sec != NULL
6268 && sym_sec->owner != NULL
6269 && !INTERWORK_FLAG (sym_sec->owner))
6270 {
6271 (*_bfd_error_handler)
6272 (_("%B(%s): warning: interworking not enabled.\n"
6273 " first occurrence: %B: arm call to thumb"),
6274 sym_sec->owner, input_bfd, name);
6275 }
6276
6277 --my_offset;
6278 myh->root.u.def.value = my_offset;
6279
6280 if (info->shared || globals->root.is_relocatable_executable
6281 || globals->pic_veneer)
6282 {
6283 /* For relocatable objects we can't use absolute addresses,
6284 so construct the address from a relative offset. */
6285 /* TODO: If the offset is small it's probably worth
6286 constructing the address with adds. */
6287 put_arm_insn (globals, output_bfd, (bfd_vma) a2t1p_ldr_insn,
6288 s->contents + my_offset);
6289 put_arm_insn (globals, output_bfd, (bfd_vma) a2t2p_add_pc_insn,
6290 s->contents + my_offset + 4);
6291 put_arm_insn (globals, output_bfd, (bfd_vma) a2t3p_bx_r12_insn,
6292 s->contents + my_offset + 8);
6293 /* Adjust the offset by 4 for the position of the add,
6294 and 8 for the pipeline offset. */
6295 ret_offset = (val - (s->output_offset
6296 + s->output_section->vma
6297 + my_offset + 12))
6298 | 1;
6299 bfd_put_32 (output_bfd, ret_offset,
6300 s->contents + my_offset + 12);
6301 }
6302 else if (globals->use_blx)
6303 {
6304 put_arm_insn (globals, output_bfd, (bfd_vma) a2t1v5_ldr_insn,
6305 s->contents + my_offset);
6306
6307 /* It's a thumb address. Add the low order bit. */
6308 bfd_put_32 (output_bfd, val | a2t2v5_func_addr_insn,
6309 s->contents + my_offset + 4);
6310 }
6311 else
6312 {
6313 put_arm_insn (globals, output_bfd, (bfd_vma) a2t1_ldr_insn,
6314 s->contents + my_offset);
6315
6316 put_arm_insn (globals, output_bfd, (bfd_vma) a2t2_bx_r12_insn,
6317 s->contents + my_offset + 4);
6318
6319 /* It's a thumb address. Add the low order bit. */
6320 bfd_put_32 (output_bfd, val | a2t3_func_addr_insn,
6321 s->contents + my_offset + 8);
6322
6323 my_offset += 12;
6324 }
6325 }
6326
6327 BFD_ASSERT (my_offset <= globals->arm_glue_size);
6328
6329 return myh;
6330 }
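
/* In the plain (non-PIC, no-BLX) case above, the stub laid out at
   MY_OFFSET is, in effect (the exact operands depend on the a2t*_insn
   definitions elsewhere in this file):

       ldr   ip, [pc]       @ a2t1_ldr_insn
       bx    ip             @ a2t2_bx_r12_insn
       .word target + 1     @ Thumb address; a2t3_func_addr_insn adds
                            @ the low bit.  */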
6331
6332 /* Arm code calling a Thumb function. */
6333
6334 static int
6335 elf32_arm_to_thumb_stub (struct bfd_link_info * info,
6336 const char * name,
6337 bfd * input_bfd,
6338 bfd * output_bfd,
6339 asection * input_section,
6340 bfd_byte * hit_data,
6341 asection * sym_sec,
6342 bfd_vma offset,
6343 bfd_signed_vma addend,
6344 bfd_vma val,
6345 char **error_message)
6346 {
6347 unsigned long int tmp;
6348 bfd_vma my_offset;
6349 asection * s;
6350 long int ret_offset;
6351 struct elf_link_hash_entry * myh;
6352 struct elf32_arm_link_hash_table * globals;
6353
6354 globals = elf32_arm_hash_table (info);
6355
6356 BFD_ASSERT (globals != NULL);
6357 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6358
6359 s = bfd_get_section_by_name (globals->bfd_of_glue_owner,
6360 ARM2THUMB_GLUE_SECTION_NAME);
6361 BFD_ASSERT (s != NULL);
6362 BFD_ASSERT (s->contents != NULL);
6363 BFD_ASSERT (s->output_section != NULL);
6364
6365 myh = elf32_arm_create_thumb_stub (info, name, input_bfd, output_bfd,
6366 sym_sec, val, s, error_message);
6367 if (!myh)
6368 return FALSE;
6369
6370 my_offset = myh->root.u.def.value;
6371 tmp = bfd_get_32 (input_bfd, hit_data);
6372 tmp = tmp & 0xFF000000;
6373
6374   /* The branch offset is relative to the ARM PC, which reads as the
         address of the instruction plus 8, so subtract 8 here.  */
6375 ret_offset = (s->output_offset
6376 + my_offset
6377 + s->output_section->vma
6378 - (input_section->output_offset
6379 + input_section->output_section->vma
6380 + offset + addend)
6381 - 8);
6382
6383 tmp = tmp | ((ret_offset >> 2) & 0x00FFFFFF);
6384
6385 bfd_put_32 (output_bfd, (bfd_vma) tmp, hit_data - input_section->vma);
6386
6387 return TRUE;
6388 }
6389
6390 /* Populate Arm stub for an exported Thumb function. */
6391
6392 static bfd_boolean
6393 elf32_arm_to_thumb_export_stub (struct elf_link_hash_entry *h, void * inf)
6394 {
6395 struct bfd_link_info * info = (struct bfd_link_info *) inf;
6396 asection * s;
6397 struct elf_link_hash_entry * myh;
6398 struct elf32_arm_link_hash_entry *eh;
6399 struct elf32_arm_link_hash_table * globals;
6400 asection *sec;
6401 bfd_vma val;
6402 char *error_message;
6403
6404 eh = elf32_arm_hash_entry (h);
6405 /* Allocate stubs for exported Thumb functions on v4t. */
6406 if (eh->export_glue == NULL)
6407 return TRUE;
6408
6409 globals = elf32_arm_hash_table (info);
6410
6411 BFD_ASSERT (globals != NULL);
6412 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6413
6414 s = bfd_get_section_by_name (globals->bfd_of_glue_owner,
6415 ARM2THUMB_GLUE_SECTION_NAME);
6416 BFD_ASSERT (s != NULL);
6417 BFD_ASSERT (s->contents != NULL);
6418 BFD_ASSERT (s->output_section != NULL);
6419
6420 sec = eh->export_glue->root.u.def.section;
6421
6422 BFD_ASSERT (sec->output_section != NULL);
6423
6424 val = eh->export_glue->root.u.def.value + sec->output_offset
6425 + sec->output_section->vma;
6426
6427 myh = elf32_arm_create_thumb_stub (info, h->root.root.string,
6428 h->root.u.def.section->owner,
6429 globals->obfd, sec, val, s,
6430 &error_message);
6431 BFD_ASSERT (myh);
6432 return TRUE;
6433 }
6434
6435 /* Populate ARMv4 BX veneers. Returns the absolute address of the veneer. */
6436
6437 static bfd_vma
6438 elf32_arm_bx_glue (struct bfd_link_info * info, int reg)
6439 {
6440 bfd_byte *p;
6441 bfd_vma glue_addr;
6442 asection *s;
6443 struct elf32_arm_link_hash_table *globals;
6444
6445 globals = elf32_arm_hash_table (info);
6446
6447 BFD_ASSERT (globals != NULL);
6448 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6449
6450 s = bfd_get_section_by_name (globals->bfd_of_glue_owner,
6451 ARM_BX_GLUE_SECTION_NAME);
6452 BFD_ASSERT (s != NULL);
6453 BFD_ASSERT (s->contents != NULL);
6454 BFD_ASSERT (s->output_section != NULL);
6455
6456 BFD_ASSERT (globals->bx_glue_offset[reg] & 2);
6457
6458 glue_addr = globals->bx_glue_offset[reg] & ~(bfd_vma)3;
6459
6460 if ((globals->bx_glue_offset[reg] & 1) == 0)
6461 {
6462 p = s->contents + glue_addr;
6463 bfd_put_32 (globals->obfd, armbx1_tst_insn + (reg << 16), p);
6464 bfd_put_32 (globals->obfd, armbx2_moveq_insn + reg, p + 4);
6465 bfd_put_32 (globals->obfd, armbx3_bx_insn + reg, p + 8);
6466 globals->bx_glue_offset[reg] |= 1;
6467 }
6468
6469 return glue_addr + s->output_section->vma + s->output_offset;
6470 }
6471
6472 /* Generate Arm stubs for exported Thumb symbols. */
6473 static void
6474 elf32_arm_begin_write_processing (bfd *abfd ATTRIBUTE_UNUSED,
6475 struct bfd_link_info *link_info)
6476 {
6477 struct elf32_arm_link_hash_table * globals;
6478
6479 if (link_info == NULL)
6480 /* Ignore this if we are not called by the ELF backend linker. */
6481 return;
6482
6483 globals = elf32_arm_hash_table (link_info);
6484 /* If blx is available then exported Thumb symbols are OK and there is
6485 nothing to do. */
6486 if (globals->use_blx)
6487 return;
6488
6489 elf_link_hash_traverse (&globals->root, elf32_arm_to_thumb_export_stub,
6490 link_info);
6491 }
6492
6493 /* Some relocations map to different relocations depending on the
6494 target. Return the real relocation. */
6495
6496 static int
6497 arm_real_reloc_type (struct elf32_arm_link_hash_table * globals,
6498 int r_type)
6499 {
6500 switch (r_type)
6501 {
6502 case R_ARM_TARGET1:
6503 if (globals->target1_is_rel)
6504 return R_ARM_REL32;
6505 else
6506 return R_ARM_ABS32;
6507
6508 case R_ARM_TARGET2:
6509 return globals->target2_reloc;
6510
6511 default:
6512 return r_type;
6513 }
6514 }
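
/* For example, if the linker earlier passed "got-rel" as TARGET2_TYPE to
   bfd_elf32_arm_set_target_relocs, an incoming R_ARM_TARGET2 is handled
   below exactly as if it had been R_ARM_GOT_PREL.  */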
6515
6516 /* Return the base VMA address which should be subtracted from real addresses
6517 when resolving @dtpoff relocation.
6518 This is PT_TLS segment p_vaddr. */
6519
6520 static bfd_vma
6521 dtpoff_base (struct bfd_link_info *info)
6522 {
6523 /* If tls_sec is NULL, we should have signalled an error already. */
6524 if (elf_hash_table (info)->tls_sec == NULL)
6525 return 0;
6526 return elf_hash_table (info)->tls_sec->vma;
6527 }
6528
6529 /* Return the relocation value for @tpoff relocation
6530 if STT_TLS virtual address is ADDRESS. */
6531
6532 static bfd_vma
6533 tpoff (struct bfd_link_info *info, bfd_vma address)
6534 {
6535 struct elf_link_hash_table *htab = elf_hash_table (info);
6536 bfd_vma base;
6537
6538 /* If tls_sec is NULL, we should have signalled an error already. */
6539 if (htab->tls_sec == NULL)
6540 return 0;
6541 base = align_power ((bfd_vma) TCB_SIZE, htab->tls_sec->alignment_power);
6542 return address - htab->tls_sec->vma + base;
6543 }
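
/* As an illustration (assuming the 8-byte ARM TCB and a TLS segment
   aligned to at most 8 bytes, so BASE == 8): a thread-local variable
   placed 0x10 bytes into the PT_TLS segment gets a @tpoff value of
   0x18.  */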
6544
6545 /* Perform an R_ARM_ABS12 relocation on the field pointed to by DATA.
6546 VALUE is the relocation value. */
6547
6548 static bfd_reloc_status_type
6549 elf32_arm_abs12_reloc (bfd *abfd, void *data, bfd_vma value)
6550 {
6551 if (value > 0xfff)
6552 return bfd_reloc_overflow;
6553
6554 value |= bfd_get_32 (abfd, data) & 0xfffff000;
6555 bfd_put_32 (abfd, value, data);
6556 return bfd_reloc_ok;
6557 }
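
/* For instance, a computed value of 0x123 is merged into the low twelve
   bits of the instruction word, leaving bits 12-31 untouched, while
   anything above 0xfff (say 0x1001) is reported as an overflow.  */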
6558
6559 /* For a given value of n, calculate the value of G_n as required to
6560 deal with group relocations. We return it in the form of an
6561 encoded constant-and-rotation, together with the final residual. If n is
6562 specified as less than zero, then final_residual is filled with the
6563 input value and no further action is performed. */
6564
6565 static bfd_vma
6566 calculate_group_reloc_mask (bfd_vma value, int n, bfd_vma *final_residual)
6567 {
6568 int current_n;
6569 bfd_vma g_n;
6570 bfd_vma encoded_g_n = 0;
6571 bfd_vma residual = value; /* Also known as Y_n. */
6572
6573 for (current_n = 0; current_n <= n; current_n++)
6574 {
6575 int shift;
6576
6577 /* Calculate which part of the value to mask. */
6578 if (residual == 0)
6579 shift = 0;
6580 else
6581 {
6582 int msb;
6583
6584 /* Determine the most significant bit in the residual and
6585 align the resulting value to a 2-bit boundary. */
6586 for (msb = 30; msb >= 0; msb -= 2)
6587 if (residual & (3 << msb))
6588 break;
6589
6590 /* The desired shift is now (msb - 6), or zero, whichever
6591 is the greater. */
6592 shift = msb - 6;
6593 if (shift < 0)
6594 shift = 0;
6595 }
6596
6597 /* Calculate g_n in 32-bit as well as encoded constant+rotation form. */
6598 g_n = residual & (0xff << shift);
6599 encoded_g_n = (g_n >> shift)
6600 | ((g_n <= 0xff ? 0 : (32 - shift) / 2) << 8);
6601
6602 /* Calculate the residual for the next time around. */
6603 residual &= ~g_n;
6604 }
6605
6606 *final_residual = residual;
6607
6608 return encoded_g_n;
6609 }
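
/* A worked example with VALUE == 0x12345 and N == 1: the first pass masks
   bits 10-17, giving G_0 == 0x12000 (encoded as 0xb48, i.e. immediate
   0x48 with rotation field 11) and residual 0x345; the second pass masks
   bits 2-9, giving G_1 == 0x344 (encoded as 0xfd1) and a final residual
   of 1, which is what the caller receives along with the return value
   0xfd1.  */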
6610
6611 /* Given an ARM instruction, determine whether it is an ADD or a SUB.
6612 Returns 1 if it is an ADD, -1 if it is a SUB, and 0 otherwise. */
6613
6614 static int
6615 identify_add_or_sub (bfd_vma insn)
6616 {
6617 int opcode = insn & 0x1e00000;
6618
6619 if (opcode == 1 << 23) /* ADD */
6620 return 1;
6621
6622 if (opcode == 1 << 22) /* SUB */
6623 return -1;
6624
6625 return 0;
6626 }
6627
6628 /* Perform a relocation as part of a final link. */
6629
6630 static bfd_reloc_status_type
6631 elf32_arm_final_link_relocate (reloc_howto_type * howto,
6632 bfd * input_bfd,
6633 bfd * output_bfd,
6634 asection * input_section,
6635 bfd_byte * contents,
6636 Elf_Internal_Rela * rel,
6637 bfd_vma value,
6638 struct bfd_link_info * info,
6639 asection * sym_sec,
6640 const char * sym_name,
6641 int sym_flags,
6642 struct elf_link_hash_entry * h,
6643 bfd_boolean * unresolved_reloc_p,
6644 char ** error_message)
6645 {
6646 unsigned long r_type = howto->type;
6647 unsigned long r_symndx;
6648 bfd_byte * hit_data = contents + rel->r_offset;
6649 bfd * dynobj = NULL;
6650 Elf_Internal_Shdr * symtab_hdr;
6651 struct elf_link_hash_entry ** sym_hashes;
6652 bfd_vma * local_got_offsets;
6653 asection * sgot = NULL;
6654 asection * splt = NULL;
6655 asection * sreloc = NULL;
6656 bfd_vma addend;
6657 bfd_signed_vma signed_addend;
6658 struct elf32_arm_link_hash_table * globals;
6659
6660 globals = elf32_arm_hash_table (info);
6661
6662 BFD_ASSERT (is_arm_elf (input_bfd));
6663
6664 /* Some relocation types map to different relocations depending on the
6665 target. We pick the right one here. */
6666 r_type = arm_real_reloc_type (globals, r_type);
6667 if (r_type != howto->type)
6668 howto = elf32_arm_howto_from_type (r_type);
6669
6670 /* If the start address has been set, then set the EF_ARM_HASENTRY
6671 flag. Setting this more than once is redundant, but the cost is
6672 not too high, and it keeps the code simple.
6673
6674 The test is done here, rather than somewhere else, because the
6675 start address is only set just before the final link commences.
6676
6677 Note - if the user deliberately sets a start address of 0, the
6678 flag will not be set. */
6679 if (bfd_get_start_address (output_bfd) != 0)
6680 elf_elfheader (output_bfd)->e_flags |= EF_ARM_HASENTRY;
6681
6682 dynobj = elf_hash_table (info)->dynobj;
6683 if (dynobj)
6684 {
6685 sgot = bfd_get_section_by_name (dynobj, ".got");
6686 splt = bfd_get_section_by_name (dynobj, ".plt");
6687 }
6688 symtab_hdr = & elf_symtab_hdr (input_bfd);
6689 sym_hashes = elf_sym_hashes (input_bfd);
6690 local_got_offsets = elf_local_got_offsets (input_bfd);
6691 r_symndx = ELF32_R_SYM (rel->r_info);
6692
6693 if (globals->use_rel)
6694 {
6695 addend = bfd_get_32 (input_bfd, hit_data) & howto->src_mask;
6696
6697 if (addend & ((howto->src_mask + 1) >> 1))
6698 {
6699 signed_addend = -1;
6700 signed_addend &= ~ howto->src_mask;
6701 signed_addend |= addend;
6702 }
6703 else
6704 signed_addend = addend;
6705 }
6706 else
6707 addend = signed_addend = rel->r_addend;
6708
6709 switch (r_type)
6710 {
6711 case R_ARM_NONE:
6712 /* We don't need to find a value for this symbol. It's just a
6713 marker. */
6714 *unresolved_reloc_p = FALSE;
6715 return bfd_reloc_ok;
6716
6717 case R_ARM_ABS12:
6718 if (!globals->vxworks_p)
6719 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
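      /* Fall through.  */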
6720
6721 case R_ARM_PC24:
6722 case R_ARM_ABS32:
6723 case R_ARM_ABS32_NOI:
6724 case R_ARM_REL32:
6725 case R_ARM_REL32_NOI:
6726 case R_ARM_CALL:
6727 case R_ARM_JUMP24:
6728 case R_ARM_XPC25:
6729 case R_ARM_PREL31:
6730 case R_ARM_PLT32:
6731 /* Handle relocations which should use the PLT entry. ABS32/REL32
6732 will use the symbol's value, which may point to a PLT entry, but we
6733 don't need to handle that here. If we created a PLT entry, all
6734 branches in this object should go to it, except if the PLT is too
6735 far away, in which case a long branch stub should be inserted. */
6736 if ((r_type != R_ARM_ABS32 && r_type != R_ARM_REL32
6737 && r_type != R_ARM_ABS32_NOI && r_type != R_ARM_REL32_NOI
6738 && r_type != R_ARM_CALL
6739 && r_type != R_ARM_JUMP24
6740 && r_type != R_ARM_PLT32)
6741 && h != NULL
6742 && splt != NULL
6743 && h->plt.offset != (bfd_vma) -1)
6744 {
6745 /* If we've created a .plt section, and assigned a PLT entry to
6746 this function, it should not be known to bind locally. If
6747 it were, we would have cleared the PLT entry. */
6748 BFD_ASSERT (!SYMBOL_CALLS_LOCAL (info, h));
6749
6750 value = (splt->output_section->vma
6751 + splt->output_offset
6752 + h->plt.offset);
6753 *unresolved_reloc_p = FALSE;
6754 return _bfd_final_link_relocate (howto, input_bfd, input_section,
6755 contents, rel->r_offset, value,
6756 rel->r_addend);
6757 }
6758
6759 /* When generating a shared object or relocatable executable, these
6760 relocations are copied into the output file to be resolved at
6761 run time. */
6762 if ((info->shared || globals->root.is_relocatable_executable)
6763 && (input_section->flags & SEC_ALLOC)
6764 && !(elf32_arm_hash_table (info)->vxworks_p
6765 && strcmp (input_section->output_section->name,
6766 ".tls_vars") == 0)
6767 && ((r_type != R_ARM_REL32 && r_type != R_ARM_REL32_NOI)
6768 || !SYMBOL_CALLS_LOCAL (info, h))
6769 && (h == NULL
6770 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
6771 || h->root.type != bfd_link_hash_undefweak)
6772 && r_type != R_ARM_PC24
6773 && r_type != R_ARM_CALL
6774 && r_type != R_ARM_JUMP24
6775 && r_type != R_ARM_PREL31
6776 && r_type != R_ARM_PLT32)
6777 {
6778 Elf_Internal_Rela outrel;
6779 bfd_byte *loc;
6780 bfd_boolean skip, relocate;
6781
6782 *unresolved_reloc_p = FALSE;
6783
6784 if (sreloc == NULL)
6785 {
6786 sreloc = _bfd_elf_get_dynamic_reloc_section (input_bfd, input_section,
6787 ! globals->use_rel);
6788
6789 if (sreloc == NULL)
6790 return bfd_reloc_notsupported;
6791 }
6792
6793 skip = FALSE;
6794 relocate = FALSE;
6795
6796 outrel.r_addend = addend;
6797 outrel.r_offset =
6798 _bfd_elf_section_offset (output_bfd, info, input_section,
6799 rel->r_offset);
6800 if (outrel.r_offset == (bfd_vma) -1)
6801 skip = TRUE;
6802 else if (outrel.r_offset == (bfd_vma) -2)
6803 skip = TRUE, relocate = TRUE;
6804 outrel.r_offset += (input_section->output_section->vma
6805 + input_section->output_offset);
6806
6807 if (skip)
6808 memset (&outrel, 0, sizeof outrel);
6809 else if (h != NULL
6810 && h->dynindx != -1
6811 && (!info->shared
6812 || !info->symbolic
6813 || !h->def_regular))
6814 outrel.r_info = ELF32_R_INFO (h->dynindx, r_type);
6815 else
6816 {
6817 int symbol;
6818
6819 /* This symbol is local, or marked to become local. */
6820 if (sym_flags == STT_ARM_TFUNC)
6821 value |= 1;
6822 if (globals->symbian_p)
6823 {
6824 asection *osec;
6825
6826 		  /* On Symbian OS, the data segment and text segment
6827 can be relocated independently. Therefore, we
6828 must indicate the segment to which this
6829 relocation is relative. The BPABI allows us to
6830 use any symbol in the right segment; we just use
6831 the section symbol as it is convenient. (We
6832 cannot use the symbol given by "h" directly as it
6833 will not appear in the dynamic symbol table.)
6834
6835 Note that the dynamic linker ignores the section
6836 symbol value, so we don't subtract osec->vma
6837 from the emitted reloc addend. */
6838 if (sym_sec)
6839 osec = sym_sec->output_section;
6840 else
6841 osec = input_section->output_section;
6842 symbol = elf_section_data (osec)->dynindx;
6843 if (symbol == 0)
6844 {
6845 struct elf_link_hash_table *htab = elf_hash_table (info);
6846
6847 if ((osec->flags & SEC_READONLY) == 0
6848 && htab->data_index_section != NULL)
6849 osec = htab->data_index_section;
6850 else
6851 osec = htab->text_index_section;
6852 symbol = elf_section_data (osec)->dynindx;
6853 }
6854 BFD_ASSERT (symbol != 0);
6855 }
6856 else
6857 /* On SVR4-ish systems, the dynamic loader cannot
6858 relocate the text and data segments independently,
6859 so the symbol does not matter. */
6860 symbol = 0;
6861 outrel.r_info = ELF32_R_INFO (symbol, R_ARM_RELATIVE);
6862 if (globals->use_rel)
6863 relocate = TRUE;
6864 else
6865 outrel.r_addend += value;
6866 }
6867
6868 loc = sreloc->contents;
6869 loc += sreloc->reloc_count++ * RELOC_SIZE (globals);
6870 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
6871
6872 /* If this reloc is against an external symbol, we do not want to
6873 fiddle with the addend. Otherwise, we need to include the symbol
6874 value so that it becomes an addend for the dynamic reloc. */
6875 if (! relocate)
6876 return bfd_reloc_ok;
6877
6878 return _bfd_final_link_relocate (howto, input_bfd, input_section,
6879 contents, rel->r_offset, value,
6880 (bfd_vma) 0);
6881 }
6882 else switch (r_type)
6883 {
6884 case R_ARM_ABS12:
6885 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
6886
6887 case R_ARM_XPC25: /* Arm BLX instruction. */
6888 case R_ARM_CALL:
6889 case R_ARM_JUMP24:
6890 case R_ARM_PC24: /* Arm B/BL instruction. */
6891 case R_ARM_PLT32:
6892 {
6893 bfd_vma from;
6894 bfd_signed_vma branch_offset;
6895 struct elf32_arm_stub_hash_entry *stub_entry = NULL;
6896
6897 if (r_type == R_ARM_XPC25)
6898 {
6899 /* Check for Arm calling Arm function. */
6900 /* FIXME: Should we translate the instruction into a BL
6901 instruction instead ? */
6902 if (sym_flags != STT_ARM_TFUNC)
6903 (*_bfd_error_handler)
6904 		(_("%B: Warning: Arm BLX instruction targets Arm function '%s'."),
6905 input_bfd,
6906 h ? h->root.root.string : "(local)");
6907 }
6908 else if (r_type == R_ARM_PC24)
6909 {
6910 /* Check for Arm calling Thumb function. */
6911 if (sym_flags == STT_ARM_TFUNC)
6912 {
6913 if (elf32_arm_to_thumb_stub (info, sym_name, input_bfd,
6914 output_bfd, input_section,
6915 hit_data, sym_sec, rel->r_offset,
6916 signed_addend, value,
6917 error_message))
6918 return bfd_reloc_ok;
6919 else
6920 return bfd_reloc_dangerous;
6921 }
6922 }
6923
6924 /* Check if a stub has to be inserted because the
6925 destination is too far or we are changing mode. */
6926 if ( r_type == R_ARM_CALL
6927 || r_type == R_ARM_JUMP24
6928 || r_type == R_ARM_PLT32)
6929 {
6930 /* If the call goes through a PLT entry, make sure to
6931 check distance to the right destination address. */
6932 if (h != NULL && splt != NULL && h->plt.offset != (bfd_vma) -1)
6933 {
6934 value = (splt->output_section->vma
6935 + splt->output_offset
6936 + h->plt.offset);
6937 *unresolved_reloc_p = FALSE;
6938 }
6939
6940 from = (input_section->output_section->vma
6941 + input_section->output_offset
6942 + rel->r_offset);
6943 branch_offset = (bfd_signed_vma)(value - from);
6944
6945 if (branch_offset > ARM_MAX_FWD_BRANCH_OFFSET
6946 || branch_offset < ARM_MAX_BWD_BRANCH_OFFSET
6947 || ((sym_flags == STT_ARM_TFUNC)
6948 && (((r_type == R_ARM_CALL) && !globals->use_blx)
6949 || (r_type == R_ARM_JUMP24)
6950 || (r_type == R_ARM_PLT32) ))
6951 )
6952 {
6953 /* The target is out of reach, so redirect the
6954 branch to the local stub for this function. */
6955
6956 stub_entry = elf32_arm_get_stub_entry (input_section,
6957 sym_sec, h,
6958 rel, globals);
6959 if (stub_entry != NULL)
6960 value = (stub_entry->stub_offset
6961 + stub_entry->stub_sec->output_offset
6962 + stub_entry->stub_sec->output_section->vma);
6963 }
6964 }
6965
6966 /* The ARM ELF ABI says that this reloc is computed as: S - P + A
6967 where:
6968 S is the address of the symbol in the relocation.
6969 P is address of the instruction being relocated.
6970 A is the addend (extracted from the instruction) in bytes.
6971
6972 S is held in 'value'.
6973 P is the base address of the section containing the
6974 instruction plus the offset of the reloc into that
6975 section, ie:
6976 (input_section->output_section->vma +
6977 input_section->output_offset +
6978 rel->r_offset).
6979 A is the addend, converted into bytes, ie:
6980 (signed_addend * 4)
6981
6982 Note: None of these operations have knowledge of the pipeline
6983 size of the processor, thus it is up to the assembler to
6984 encode this information into the addend. */
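	    /* A quick numeric check of the formula (REL case): with
	       S == 0x8000, P == 0x8010 and a stored imm24 of 0xfffffe
	       (an addend of -8 bytes), VALUE becomes 0x8000 - 0x8010 - 8
	       == -0x18, which shifts down to -6 words (imm24 0xfffffa);
	       at run time the processor computes P + 8 + 4 * -6 == 0x8000,
	       the intended target.  These numbers are illustrative only.  */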
6985 value -= (input_section->output_section->vma
6986 + input_section->output_offset);
6987 value -= rel->r_offset;
6988 if (globals->use_rel)
6989 value += (signed_addend << howto->size);
6990 else
6991 /* RELA addends do not have to be adjusted by howto->size. */
6992 value += signed_addend;
6993
6994 signed_addend = value;
6995 signed_addend >>= howto->rightshift;
6996
6997 /* A branch to an undefined weak symbol is turned into a jump to
6998 the next instruction unless a PLT entry will be created. */
6999 if (h && h->root.type == bfd_link_hash_undefweak
7000 && !(splt != NULL && h->plt.offset != (bfd_vma) -1))
7001 {
7002 value = (bfd_get_32 (input_bfd, hit_data) & 0xf0000000)
7003 | 0x0affffff;
7004 }
7005 else
7006 {
7007 /* Perform a signed range check. */
7008 if ( signed_addend > ((bfd_signed_vma) (howto->dst_mask >> 1))
7009 || signed_addend < - ((bfd_signed_vma) ((howto->dst_mask + 1) >> 1)))
7010 return bfd_reloc_overflow;
7011
7012 addend = (value & 2);
7013
7014 value = (signed_addend & howto->dst_mask)
7015 | (bfd_get_32 (input_bfd, hit_data) & (~ howto->dst_mask));
7016
7017 if (r_type == R_ARM_CALL)
7018 {
7019 /* Set the H bit in the BLX instruction. */
7020 if (sym_flags == STT_ARM_TFUNC)
7021 {
7022 if (addend)
7023 value |= (1 << 24);
7024 else
7025 value &= ~(bfd_vma)(1 << 24);
7026 }
7027
7028 /* Select the correct instruction (BL or BLX). */
7029 /* Only if we are not handling a BL to a stub. In this
7030 case, mode switching is performed by the stub. */
7031 if (sym_flags == STT_ARM_TFUNC && !stub_entry)
7032 value |= (1 << 28);
7033 else
7034 {
7035 value &= ~(bfd_vma)(1 << 28);
7036 value |= (1 << 24);
7037 }
7038 }
7039 }
7040 }
7041 break;
7042
7043 case R_ARM_ABS32:
7044 value += addend;
7045 if (sym_flags == STT_ARM_TFUNC)
7046 value |= 1;
7047 break;
7048
7049 case R_ARM_ABS32_NOI:
7050 value += addend;
7051 break;
7052
7053 case R_ARM_REL32:
7054 value += addend;
7055 if (sym_flags == STT_ARM_TFUNC)
7056 value |= 1;
7057 value -= (input_section->output_section->vma
7058 + input_section->output_offset + rel->r_offset);
7059 break;
7060
7061 case R_ARM_REL32_NOI:
7062 value += addend;
7063 value -= (input_section->output_section->vma
7064 + input_section->output_offset + rel->r_offset);
7065 break;
7066
7067 case R_ARM_PREL31:
7068 value -= (input_section->output_section->vma
7069 + input_section->output_offset + rel->r_offset);
7070 value += signed_addend;
7071 if (! h || h->root.type != bfd_link_hash_undefweak)
7072 {
7073 /* Check for overflow. */
7074 if ((value ^ (value >> 1)) & (1 << 30))
7075 return bfd_reloc_overflow;
7076 }
7077 value &= 0x7fffffff;
7078 value |= (bfd_get_32 (input_bfd, hit_data) & 0x80000000);
7079 if (sym_flags == STT_ARM_TFUNC)
7080 value |= 1;
7081 break;
7082 }
7083
7084 bfd_put_32 (input_bfd, value, hit_data);
7085 return bfd_reloc_ok;
7086
7087 case R_ARM_ABS8:
7088 value += addend;
7089 if ((long) value > 0x7f || (long) value < -0x80)
7090 return bfd_reloc_overflow;
7091
7092 bfd_put_8 (input_bfd, value, hit_data);
7093 return bfd_reloc_ok;
7094
7095 case R_ARM_ABS16:
7096 value += addend;
7097
7098 if ((long) value > 0x7fff || (long) value < -0x8000)
7099 return bfd_reloc_overflow;
7100
7101 bfd_put_16 (input_bfd, value, hit_data);
7102 return bfd_reloc_ok;
7103
7104 case R_ARM_THM_ABS5:
7105 /* Support ldr and str instructions for the thumb. */
7106 if (globals->use_rel)
7107 {
7108 /* Need to refetch addend. */
7109 addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
7110 /* ??? Need to determine shift amount from operand size. */
7111 addend >>= howto->rightshift;
7112 }
7113 value += addend;
7114
7115 /* ??? Isn't value unsigned? */
7116 if ((long) value > 0x1f || (long) value < -0x10)
7117 return bfd_reloc_overflow;
7118
7119 /* ??? Value needs to be properly shifted into place first. */
7120 value |= bfd_get_16 (input_bfd, hit_data) & 0xf83f;
7121 bfd_put_16 (input_bfd, value, hit_data);
7122 return bfd_reloc_ok;
7123
7124 case R_ARM_THM_ALU_PREL_11_0:
7125     /* Corresponds to: addw reg, pc, #offset (and similarly for subw). */
7126 {
7127 bfd_vma insn;
7128 bfd_signed_vma relocation;
7129
7130 insn = (bfd_get_16 (input_bfd, hit_data) << 16)
7131 | bfd_get_16 (input_bfd, hit_data + 2);
7132
7133 if (globals->use_rel)
7134 {
7135 signed_addend = (insn & 0xff) | ((insn & 0x7000) >> 4)
7136 | ((insn & (1 << 26)) >> 15);
7137 if (insn & 0xf00000)
7138 signed_addend = -signed_addend;
7139 }
7140
7141 relocation = value + signed_addend;
7142 relocation -= (input_section->output_section->vma
7143 + input_section->output_offset
7144 + rel->r_offset);
7145
7146 value = abs (relocation);
7147
7148 if (value >= 0x1000)
7149 return bfd_reloc_overflow;
7150
7151 insn = (insn & 0xfb0f8f00) | (value & 0xff)
7152 | ((value & 0x700) << 4)
7153 | ((value & 0x800) << 15);
7154 if (relocation < 0)
7155 insn |= 0xa00000;
7156
7157 bfd_put_16 (input_bfd, insn >> 16, hit_data);
7158 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
7159
7160 return bfd_reloc_ok;
7161 }
7162
7163 case R_ARM_THM_PC12:
7164 /* Corresponds to: ldr.w reg, [pc, #offset]. */
7165 {
7166 bfd_vma insn;
7167 bfd_signed_vma relocation;
7168
7169 insn = (bfd_get_16 (input_bfd, hit_data) << 16)
7170 | bfd_get_16 (input_bfd, hit_data + 2);
7171
7172 if (globals->use_rel)
7173 {
7174 signed_addend = insn & 0xfff;
7175 if (!(insn & (1 << 23)))
7176 signed_addend = -signed_addend;
7177 }
7178
7179 relocation = value + signed_addend;
7180 relocation -= (input_section->output_section->vma
7181 + input_section->output_offset
7182 + rel->r_offset);
7183
7184 value = abs (relocation);
7185
7186 if (value >= 0x1000)
7187 return bfd_reloc_overflow;
7188
7189 insn = (insn & 0xff7ff000) | value;
7190 if (relocation >= 0)
7191 insn |= (1 << 23);
7192
7193 bfd_put_16 (input_bfd, insn >> 16, hit_data);
7194 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
7195
7196 return bfd_reloc_ok;
7197 }
7198
7199 case R_ARM_THM_XPC22:
7200 case R_ARM_THM_CALL:
7201 case R_ARM_THM_JUMP24:
7202 /* Thumb BL (branch long instruction). */
7203 {
7204 bfd_vma relocation;
7205 bfd_vma reloc_sign;
7206 bfd_boolean overflow = FALSE;
7207 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
7208 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
7209 bfd_signed_vma reloc_signed_max;
7210 bfd_signed_vma reloc_signed_min;
7211 bfd_vma check;
7212 bfd_signed_vma signed_check;
7213 int bitsize;
7214 int thumb2 = using_thumb2 (globals);
7215
7216 /* A branch to an undefined weak symbol is turned into a jump to
7217 the next instruction unless a PLT entry will be created. */
7218 if (h && h->root.type == bfd_link_hash_undefweak
7219 && !(splt != NULL && h->plt.offset != (bfd_vma) -1))
7220 {
7221 bfd_put_16 (input_bfd, 0xe000, hit_data);
7222 bfd_put_16 (input_bfd, 0xbf00, hit_data + 2);
7223 return bfd_reloc_ok;
7224 }
7225
7226 /* Fetch the addend. We use the Thumb-2 encoding (backwards compatible
7227 with Thumb-1) involving the J1 and J2 bits. */
7228 if (globals->use_rel)
7229 {
7230 bfd_vma s = (upper_insn & (1 << 10)) >> 10;
7231 bfd_vma upper = upper_insn & 0x3ff;
7232 bfd_vma lower = lower_insn & 0x7ff;
7233 bfd_vma j1 = (lower_insn & (1 << 13)) >> 13;
7234 bfd_vma j2 = (lower_insn & (1 << 11)) >> 11;
7235 bfd_vma i1 = j1 ^ s ? 0 : 1;
7236 bfd_vma i2 = j2 ^ s ? 0 : 1;
7237
7238 addend = (i1 << 23) | (i2 << 22) | (upper << 12) | (lower << 1);
7239 /* Sign extend. */
7240 addend = (addend | ((s ? 0 : 1) << 24)) - (1 << 24);
7241
7242 signed_addend = addend;
7243 }
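
	  /* Decoding check for the block above: an encoded halfword pair
	     of 0xf7ff/0xfffe (S == 1, imm10 == 0x3ff, J1 == J2 == 1,
	     imm11 == 0x7fe) yields I1 == I2 == 1 and hence a sign-extended
	     SIGNED_ADDEND of -4.  */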
7244
7245 if (r_type == R_ARM_THM_XPC22)
7246 {
7247 /* Check for Thumb to Thumb call. */
7248 /* FIXME: Should we translate the instruction into a BL
7249 instruction instead ? */
7250 if (sym_flags == STT_ARM_TFUNC)
7251 (*_bfd_error_handler)
7252 (_("%B: Warning: Thumb BLX instruction targets thumb function '%s'."),
7253 input_bfd,
7254 h ? h->root.root.string : "(local)");
7255 }
7256 else
7257 {
7258 /* If it is not a call to Thumb, assume call to Arm.
7259 If it is a call relative to a section name, then it is not a
7260 function call at all, but rather a long jump. Calls through
7261 the PLT do not require stubs. */
7262 if (sym_flags != STT_ARM_TFUNC && sym_flags != STT_SECTION
7263 && (h == NULL || splt == NULL
7264 || h->plt.offset == (bfd_vma) -1))
7265 {
7266 if (globals->use_blx && r_type == R_ARM_THM_CALL)
7267 {
7268 /* Convert BL to BLX. */
7269 lower_insn = (lower_insn & ~0x1000) | 0x0800;
7270 }
7271 else if (( r_type != R_ARM_THM_CALL)
7272 && (r_type != R_ARM_THM_JUMP24))
7273 {
7274 if (elf32_thumb_to_arm_stub
7275 (info, sym_name, input_bfd, output_bfd, input_section,
7276 hit_data, sym_sec, rel->r_offset, signed_addend, value,
7277 error_message))
7278 return bfd_reloc_ok;
7279 else
7280 return bfd_reloc_dangerous;
7281 }
7282 }
7283 else if (sym_flags == STT_ARM_TFUNC && globals->use_blx
7284 && r_type == R_ARM_THM_CALL)
7285 {
7286 /* Make sure this is a BL. */
7287 lower_insn |= 0x1800;
7288 }
7289 }
7290
7291 /* Handle calls via the PLT. */
7292 if (h != NULL && splt != NULL && h->plt.offset != (bfd_vma) -1)
7293 {
7294 value = (splt->output_section->vma
7295 + splt->output_offset
7296 + h->plt.offset);
7297 if (globals->use_blx && r_type == R_ARM_THM_CALL)
7298 {
7299 /* If the Thumb BLX instruction is available, convert the
7300 BL to a BLX instruction to call the ARM-mode PLT entry. */
7301 lower_insn = (lower_insn & ~0x1000) | 0x0800;
7302 }
7303 else
7304 /* Target the Thumb stub before the ARM PLT entry. */
7305 value -= PLT_THUMB_STUB_SIZE;
7306 *unresolved_reloc_p = FALSE;
7307 }
7308
7309 if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24)
7310 {
7311 /* Check if a stub has to be inserted because the destination
7312 is too far. */
7313 bfd_vma from;
7314 bfd_signed_vma branch_offset;
7315 struct elf32_arm_stub_hash_entry *stub_entry = NULL;
7316
7317 from = (input_section->output_section->vma
7318 + input_section->output_offset
7319 + rel->r_offset);
7320 branch_offset = (bfd_signed_vma)(value - from);
7321
7322 if ((!thumb2
7323 && (branch_offset > THM_MAX_FWD_BRANCH_OFFSET
7324 || (branch_offset < THM_MAX_BWD_BRANCH_OFFSET)))
7325 ||
7326 (thumb2
7327 && (branch_offset > THM2_MAX_FWD_BRANCH_OFFSET
7328 || (branch_offset < THM2_MAX_BWD_BRANCH_OFFSET)))
7329 || ((sym_flags != STT_ARM_TFUNC)
7330 && (((r_type == R_ARM_THM_CALL) && !globals->use_blx)
7331 || r_type == R_ARM_THM_JUMP24)))
7332 {
7333 /* The target is out of reach or we are changing modes, so
7334 redirect the branch to the local stub for this
7335 function. */
7336 stub_entry = elf32_arm_get_stub_entry (input_section,
7337 sym_sec, h,
7338 rel, globals);
7339 if (stub_entry != NULL)
7340 value = (stub_entry->stub_offset
7341 + stub_entry->stub_sec->output_offset
7342 + stub_entry->stub_sec->output_section->vma);
7343
7344 /* If this call becomes a call to Arm, force BLX. */
7345 if (globals->use_blx && (r_type == R_ARM_THM_CALL))
7346 {
7347 if ((stub_entry
7348 && !arm_stub_is_thumb (stub_entry->stub_type))
7349 || (sym_flags != STT_ARM_TFUNC))
7350 lower_insn = (lower_insn & ~0x1000) | 0x0800;
7351 }
7352 }
7353 }
7354
7355 relocation = value + signed_addend;
7356
7357 relocation -= (input_section->output_section->vma
7358 + input_section->output_offset
7359 + rel->r_offset);
7360
7361 check = relocation >> howto->rightshift;
7362
7363 /* If this is a signed value, the rightshift just dropped
7364 leading 1 bits (assuming twos complement). */
7365 if ((bfd_signed_vma) relocation >= 0)
7366 signed_check = check;
7367 else
7368 signed_check = check | ~((bfd_vma) -1 >> howto->rightshift);
7369
7370 /* Calculate the permissible maximum and minimum values for
7371 this relocation according to whether we're relocating for
7372 Thumb-2 or not. */
7373 bitsize = howto->bitsize;
7374 if (!thumb2)
7375 bitsize -= 2;
7376 reloc_signed_max = ((1 << (bitsize - 1)) - 1) >> howto->rightshift;
7377 reloc_signed_min = ~reloc_signed_max;
7378
7379 /* Assumes two's complement. */
7380 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
7381 overflow = TRUE;
7382
7383 if ((lower_insn & 0x5000) == 0x4000)
7384 /* For a BLX instruction, make sure that the relocation is rounded up
7385 to a word boundary. This follows the semantics of the instruction
7386 which specifies that bit 1 of the target address will come from bit
7387 1 of the base address. */
7388 relocation = (relocation + 2) & ~ 3;
7389
7390 /* Put RELOCATION back into the insn. Assumes two's complement.
7391 We use the Thumb-2 encoding, which is safe even if dealing with
7392 a Thumb-1 instruction by virtue of our overflow check above. */
7393 reloc_sign = (signed_check < 0) ? 1 : 0;
7394 upper_insn = (upper_insn & ~(bfd_vma) 0x7ff)
7395 | ((relocation >> 12) & 0x3ff)
7396 | (reloc_sign << 10);
7397 lower_insn = (lower_insn & ~(bfd_vma) 0x2fff)
7398 | (((!((relocation >> 23) & 1)) ^ reloc_sign) << 13)
7399 | (((!((relocation >> 22) & 1)) ^ reloc_sign) << 11)
7400 | ((relocation >> 1) & 0x7ff);
7401
7402 /* Put the relocated value back in the object file: */
7403 bfd_put_16 (input_bfd, upper_insn, hit_data);
7404 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
7405
7406 return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
7407 }
7408 break;
7409
7410 case R_ARM_THM_JUMP19:
7411 /* Thumb32 conditional branch instruction. */
7412 {
7413 bfd_vma relocation;
7414 bfd_boolean overflow = FALSE;
7415 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
7416 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
7417 bfd_signed_vma reloc_signed_max = 0xffffe;
7418 bfd_signed_vma reloc_signed_min = -0x100000;
7419 bfd_signed_vma signed_check;
7420
7421 /* Need to refetch the addend, reconstruct the top three bits
7422 (S, J1 and J2), and combine the 6-bit and 11-bit immediate fields. */
7423 if (globals->use_rel)
7424 {
7425 bfd_vma S = (upper_insn & 0x0400) >> 10;
7426 bfd_vma upper = (upper_insn & 0x003f);
7427 bfd_vma J1 = (lower_insn & 0x2000) >> 13;
7428 bfd_vma J2 = (lower_insn & 0x0800) >> 11;
7429 bfd_vma lower = (lower_insn & 0x07ff);
7430
7431 upper |= J1 << 6;
7432 upper |= J2 << 7;
7433 upper |= (!S) << 8;
7434 upper -= 0x0100; /* Sign extend. */
7435
7436 addend = (upper << 12) | (lower << 1);
7437 signed_addend = addend;
7438 }
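/* Illustrative worked example (added; values chosen for exposition): a
   forward conditional branch of 0x40 bytes encodes S = 0, J1 = J2 = 0,
   imm6 = 0 and imm11 = 0x20; the code above builds upper = 0x100,
   subtracts 0x100 to sign-extend, and obtains
   addend = (0 << 12) | (0x20 << 1) = 0x40. */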
7439
7440 /* Handle calls via the PLT. */
7441 if (h != NULL && splt != NULL && h->plt.offset != (bfd_vma) -1)
7442 {
7443 value = (splt->output_section->vma
7444 + splt->output_offset
7445 + h->plt.offset);
7446 /* Target the Thumb stub before the ARM PLT entry. */
7447 value -= PLT_THUMB_STUB_SIZE;
7448 *unresolved_reloc_p = FALSE;
7449 }
7450
7451 /* ??? Should handle interworking? GCC might someday try to
7452 use this for tail calls. */
7453
7454 relocation = value + signed_addend;
7455 relocation -= (input_section->output_section->vma
7456 + input_section->output_offset
7457 + rel->r_offset);
7458 signed_check = (bfd_signed_vma) relocation;
7459
7460 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
7461 overflow = TRUE;
7462
7463 /* Put RELOCATION back into the insn. */
7464 {
7465 bfd_vma S = (relocation & 0x00100000) >> 20;
7466 bfd_vma J2 = (relocation & 0x00080000) >> 19;
7467 bfd_vma J1 = (relocation & 0x00040000) >> 18;
7468 bfd_vma hi = (relocation & 0x0003f000) >> 12;
7469 bfd_vma lo = (relocation & 0x00000ffe) >> 1;
7470
7471 upper_insn = (upper_insn & 0xfbc0) | (S << 10) | hi;
7472 lower_insn = (lower_insn & 0xd000) | (J1 << 13) | (J2 << 11) | lo;
7473 }
7474
7475 /* Put the relocated value back in the object file: */
7476 bfd_put_16 (input_bfd, upper_insn, hit_data);
7477 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
7478
7479 return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
7480 }
7481
7482 case R_ARM_THM_JUMP11:
7483 case R_ARM_THM_JUMP8:
7484 case R_ARM_THM_JUMP6:
7485 /* Thumb B (branch) instruction. */
7486 {
7487 bfd_signed_vma relocation;
7488 bfd_signed_vma reloc_signed_max = (1 << (howto->bitsize - 1)) - 1;
7489 bfd_signed_vma reloc_signed_min = ~ reloc_signed_max;
7490 bfd_signed_vma signed_check;
7491
7492 /* CBZ/CBNZ (compare and branch on zero) cannot jump backward. */
7493 if (r_type == R_ARM_THM_JUMP6)
7494 reloc_signed_min = 0;
7495
7496 if (globals->use_rel)
7497 {
7498 /* Need to refetch addend. */
7499 addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
7500 if (addend & ((howto->src_mask + 1) >> 1))
7501 {
7502 signed_addend = -1;
7503 signed_addend &= ~ howto->src_mask;
7504 signed_addend |= addend;
7505 }
7506 else
7507 signed_addend = addend;
7508 /* The value in the insn has been right shifted. We need to
7509 undo this, so that we can perform the address calculation
7510 in terms of bytes. */
7511 signed_addend <<= howto->rightshift;
7512 }
7513 relocation = value + signed_addend;
7514
7515 relocation -= (input_section->output_section->vma
7516 + input_section->output_offset
7517 + rel->r_offset);
7518
7519 relocation >>= howto->rightshift;
7520 signed_check = relocation;
7521
7522 if (r_type == R_ARM_THM_JUMP6)
7523 relocation = ((relocation & 0x0020) << 4) | ((relocation & 0x001f) << 3);
7524 else
7525 relocation &= howto->dst_mask;
7526 relocation |= (bfd_get_16 (input_bfd, hit_data) & (~ howto->dst_mask));
7527
7528 bfd_put_16 (input_bfd, relocation, hit_data);
7529
7530 /* Assumes two's complement. */
7531 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
7532 return bfd_reloc_overflow;
7533
7534 return bfd_reloc_ok;
7535 }
7536
7537 case R_ARM_ALU_PCREL7_0:
7538 case R_ARM_ALU_PCREL15_8:
7539 case R_ARM_ALU_PCREL23_15:
7540 {
7541 bfd_vma insn;
7542 bfd_vma relocation;
7543
7544 insn = bfd_get_32 (input_bfd, hit_data);
7545 if (globals->use_rel)
7546 {
7547 /* Extract the addend. */
7548 addend = (insn & 0xff) << ((insn & 0xf00) >> 7);
7549 signed_addend = addend;
7550 }
7551 relocation = value + signed_addend;
7552
7553 relocation -= (input_section->output_section->vma
7554 + input_section->output_offset
7555 + rel->r_offset);
7556 insn = (insn & ~0xfff)
7557 | ((howto->bitpos << 7) & 0xf00)
7558 | ((relocation >> howto->bitpos) & 0xff);
7559 bfd_put_32 (input_bfd, insn, hit_data);
7560 }
7561 return bfd_reloc_ok;
7562
7563 case R_ARM_GNU_VTINHERIT:
7564 case R_ARM_GNU_VTENTRY:
7565 return bfd_reloc_ok;
7566
7567 case R_ARM_GOTOFF32:
7568 /* Relocation is relative to the start of the
7569 global offset table. */
7570
7571 BFD_ASSERT (sgot != NULL);
7572 if (sgot == NULL)
7573 return bfd_reloc_notsupported;
7574
7575 /* If we are addressing a Thumb function, we need to adjust the
7576 address by one, so that attempts to call the function pointer will
7577 correctly interpret it as Thumb code. */
7578 if (sym_flags == STT_ARM_TFUNC)
7579 value += 1;
7580
7581 /* Note that sgot->output_offset is not involved in this
7582 calculation. We always want the start of .got. If we
7583 define _GLOBAL_OFFSET_TABLE in a different way, as is
7584 permitted by the ABI, we might have to change this
7585 calculation. */
7586 value -= sgot->output_section->vma;
7587 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7588 contents, rel->r_offset, value,
7589 rel->r_addend);
7590
7591 case R_ARM_GOTPC:
7592 /* Use global offset table as symbol value. */
7593 BFD_ASSERT (sgot != NULL);
7594
7595 if (sgot == NULL)
7596 return bfd_reloc_notsupported;
7597
7598 *unresolved_reloc_p = FALSE;
7599 value = sgot->output_section->vma;
7600 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7601 contents, rel->r_offset, value,
7602 rel->r_addend);
7603
7604 case R_ARM_GOT32:
7605 case R_ARM_GOT_PREL:
7606 /* Relocation is to the entry for this symbol in the
7607 global offset table. */
7608 if (sgot == NULL)
7609 return bfd_reloc_notsupported;
7610
7611 if (h != NULL)
7612 {
7613 bfd_vma off;
7614 bfd_boolean dyn;
7615
7616 off = h->got.offset;
7617 BFD_ASSERT (off != (bfd_vma) -1);
7618 dyn = globals->root.dynamic_sections_created;
7619
7620 if (! WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
7621 || (info->shared
7622 && SYMBOL_REFERENCES_LOCAL (info, h))
7623 || (ELF_ST_VISIBILITY (h->other)
7624 && h->root.type == bfd_link_hash_undefweak))
7625 {
7626 /* This is actually a static link, or it is a -Bsymbolic link
7627 and the symbol is defined locally. We must initialize this
7628 entry in the global offset table. Since the offset must
7629 always be a multiple of 4, we use the least significant bit
7630 to record whether we have initialized it already.
7631
7632 When doing a dynamic link, we create a .rel(a).got relocation
7633 entry to initialize the value. This is done in the
7634 finish_dynamic_symbol routine. */
7635 if ((off & 1) != 0)
7636 off &= ~1;
7637 else
7638 {
7639 /* If we are addressing a Thumb function, we need to
7640 adjust the address by one, so that attempts to
7641 call the function pointer will correctly
7642 interpret it as Thumb code. */
7643 if (sym_flags == STT_ARM_TFUNC)
7644 value |= 1;
7645
7646 bfd_put_32 (output_bfd, value, sgot->contents + off);
7647 h->got.offset |= 1;
7648 }
7649 }
7650 else
7651 *unresolved_reloc_p = FALSE;
7652
7653 value = sgot->output_offset + off;
7654 }
7655 else
7656 {
7657 bfd_vma off;
7658
7659 BFD_ASSERT (local_got_offsets != NULL
7660 && local_got_offsets[r_symndx] != (bfd_vma) -1);
7661
7662 off = local_got_offsets[r_symndx];
7663
7664 /* The offset must always be a multiple of 4. We use the
7665 least significant bit to record whether we have already
7666 generated the necessary reloc. */
7667 if ((off & 1) != 0)
7668 off &= ~1;
7669 else
7670 {
7671 /* If we are addressing a Thumb function, we need to
7672 adjust the address by one, so that attempts to
7673 call the function pointer will correctly
7674 interpret it as Thumb code. */
7675 if (sym_flags == STT_ARM_TFUNC)
7676 value |= 1;
7677
7678 if (globals->use_rel)
7679 bfd_put_32 (output_bfd, value, sgot->contents + off);
7680
7681 if (info->shared)
7682 {
7683 asection * srelgot;
7684 Elf_Internal_Rela outrel;
7685 bfd_byte *loc;
7686
7687 srelgot = (bfd_get_section_by_name
7688 (dynobj, RELOC_SECTION (globals, ".got")));
7689 BFD_ASSERT (srelgot != NULL);
7690
7691 outrel.r_addend = addend + value;
7692 outrel.r_offset = (sgot->output_section->vma
7693 + sgot->output_offset
7694 + off);
7695 outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
7696 loc = srelgot->contents;
7697 loc += srelgot->reloc_count++ * RELOC_SIZE (globals);
7698 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
7699 }
7700
7701 local_got_offsets[r_symndx] |= 1;
7702 }
7703
7704 value = sgot->output_offset + off;
7705 }
7706 if (r_type != R_ARM_GOT32)
7707 value += sgot->output_section->vma;
7708
7709 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7710 contents, rel->r_offset, value,
7711 rel->r_addend);
7712
7713 case R_ARM_TLS_LDO32:
7714 value = value - dtpoff_base (info);
7715
7716 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7717 contents, rel->r_offset, value,
7718 rel->r_addend);
7719
7720 case R_ARM_TLS_LDM32:
7721 {
7722 bfd_vma off;
7723
7724 if (globals->sgot == NULL)
7725 abort ();
7726
7727 off = globals->tls_ldm_got.offset;
7728
7729 if ((off & 1) != 0)
7730 off &= ~1;
7731 else
7732 {
7733 /* If we don't know the module number, create a relocation
7734 for it. */
7735 if (info->shared)
7736 {
7737 Elf_Internal_Rela outrel;
7738 bfd_byte *loc;
7739
7740 if (globals->srelgot == NULL)
7741 abort ();
7742
7743 outrel.r_addend = 0;
7744 outrel.r_offset = (globals->sgot->output_section->vma
7745 + globals->sgot->output_offset + off);
7746 outrel.r_info = ELF32_R_INFO (0, R_ARM_TLS_DTPMOD32);
7747
7748 if (globals->use_rel)
7749 bfd_put_32 (output_bfd, outrel.r_addend,
7750 globals->sgot->contents + off);
7751
7752 loc = globals->srelgot->contents;
7753 loc += globals->srelgot->reloc_count++ * RELOC_SIZE (globals);
7754 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
7755 }
7756 else
7757 bfd_put_32 (output_bfd, 1, globals->sgot->contents + off);
7758
7759 globals->tls_ldm_got.offset |= 1;
7760 }
7761
7762 value = globals->sgot->output_section->vma + globals->sgot->output_offset + off
7763 - (input_section->output_section->vma + input_section->output_offset + rel->r_offset);
7764
7765 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7766 contents, rel->r_offset, value,
7767 rel->r_addend);
7768 }
7769
7770 case R_ARM_TLS_GD32:
7771 case R_ARM_TLS_IE32:
7772 {
7773 bfd_vma off;
7774 int indx;
7775 char tls_type;
7776
7777 if (globals->sgot == NULL)
7778 abort ();
7779
7780 indx = 0;
7781 if (h != NULL)
7782 {
7783 bfd_boolean dyn;
7784 dyn = globals->root.dynamic_sections_created;
7785 if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
7786 && (!info->shared
7787 || !SYMBOL_REFERENCES_LOCAL (info, h)))
7788 {
7789 *unresolved_reloc_p = FALSE;
7790 indx = h->dynindx;
7791 }
7792 off = h->got.offset;
7793 tls_type = ((struct elf32_arm_link_hash_entry *) h)->tls_type;
7794 }
7795 else
7796 {
7797 if (local_got_offsets == NULL)
7798 abort ();
7799 off = local_got_offsets[r_symndx];
7800 tls_type = elf32_arm_local_got_tls_type (input_bfd)[r_symndx];
7801 }
7802
7803 if (tls_type == GOT_UNKNOWN)
7804 abort ();
7805
7806 if ((off & 1) != 0)
7807 off &= ~1;
7808 else
7809 {
7810 bfd_boolean need_relocs = FALSE;
7811 Elf_Internal_Rela outrel;
7812 bfd_byte *loc = NULL;
7813 int cur_off = off;
7814
7815 /* The GOT entries have not been initialized yet. Do it
7816 now, and emit any relocations. If both an IE GOT and a
7817 GD GOT are necessary, we emit the GD first. */
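/* Layout note (added for clarity): a GD reference occupies two
   consecutive GOT words, the module index (R_ARM_TLS_DTPMOD32)
   followed by the offset within that module's TLS block
   (R_ARM_TLS_DTPOFF32), while an IE reference is a single word
   holding the TP-relative offset (R_ARM_TLS_TPOFF32).  Hence
   cur_off advances by 8 for GD and by 4 for IE below. */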
7818
7819 if ((info->shared || indx != 0)
7820 && (h == NULL
7821 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
7822 || h->root.type != bfd_link_hash_undefweak))
7823 {
7824 need_relocs = TRUE;
7825 if (globals->srelgot == NULL)
7826 abort ();
7827 loc = globals->srelgot->contents;
7828 loc += globals->srelgot->reloc_count * RELOC_SIZE (globals);
7829 }
7830
7831 if (tls_type & GOT_TLS_GD)
7832 {
7833 if (need_relocs)
7834 {
7835 outrel.r_addend = 0;
7836 outrel.r_offset = (globals->sgot->output_section->vma
7837 + globals->sgot->output_offset
7838 + cur_off);
7839 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_DTPMOD32);
7840
7841 if (globals->use_rel)
7842 bfd_put_32 (output_bfd, outrel.r_addend,
7843 globals->sgot->contents + cur_off);
7844
7845 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
7846 globals->srelgot->reloc_count++;
7847 loc += RELOC_SIZE (globals);
7848
7849 if (indx == 0)
7850 bfd_put_32 (output_bfd, value - dtpoff_base (info),
7851 globals->sgot->contents + cur_off + 4);
7852 else
7853 {
7854 outrel.r_addend = 0;
7855 outrel.r_info = ELF32_R_INFO (indx,
7856 R_ARM_TLS_DTPOFF32);
7857 outrel.r_offset += 4;
7858
7859 if (globals->use_rel)
7860 bfd_put_32 (output_bfd, outrel.r_addend,
7861 globals->sgot->contents + cur_off + 4);
7862
7863
7864 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
7865 globals->srelgot->reloc_count++;
7866 loc += RELOC_SIZE (globals);
7867 }
7868 }
7869 else
7870 {
7871 /* If we are not emitting relocations for a
7872 general dynamic reference, then we must be in a
7873 static link or an executable link with the
7874 symbol binding locally. Mark it as belonging
7875 to module 1, the executable. */
7876 bfd_put_32 (output_bfd, 1,
7877 globals->sgot->contents + cur_off);
7878 bfd_put_32 (output_bfd, value - dtpoff_base (info),
7879 globals->sgot->contents + cur_off + 4);
7880 }
7881
7882 cur_off += 8;
7883 }
7884
7885 if (tls_type & GOT_TLS_IE)
7886 {
7887 if (need_relocs)
7888 {
7889 if (indx == 0)
7890 outrel.r_addend = value - dtpoff_base (info);
7891 else
7892 outrel.r_addend = 0;
7893 outrel.r_offset = (globals->sgot->output_section->vma
7894 + globals->sgot->output_offset
7895 + cur_off);
7896 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_TPOFF32);
7897
7898 if (globals->use_rel)
7899 bfd_put_32 (output_bfd, outrel.r_addend,
7900 globals->sgot->contents + cur_off);
7901
7902 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
7903 globals->srelgot->reloc_count++;
7904 loc += RELOC_SIZE (globals);
7905 }
7906 else
7907 bfd_put_32 (output_bfd, tpoff (info, value),
7908 globals->sgot->contents + cur_off);
7909 cur_off += 4;
7910 }
7911
7912 if (h != NULL)
7913 h->got.offset |= 1;
7914 else
7915 local_got_offsets[r_symndx] |= 1;
7916 }
7917
7918 if ((tls_type & GOT_TLS_GD) && r_type != R_ARM_TLS_GD32)
7919 off += 8;
7920 value = globals->sgot->output_section->vma + globals->sgot->output_offset + off
7921 - (input_section->output_section->vma + input_section->output_offset + rel->r_offset);
7922
7923 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7924 contents, rel->r_offset, value,
7925 rel->r_addend);
7926 }
7927
7928 case R_ARM_TLS_LE32:
7929 if (info->shared)
7930 {
7931 (*_bfd_error_handler)
7932 (_("%B(%A+0x%lx): %s relocation not permitted in shared object"),
7933 input_bfd, input_section,
7934 (long) rel->r_offset, howto->name);
7935 return bfd_reloc_notsupported;
7936 }
7937 else
7938 value = tpoff (info, value);
7939
7940 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7941 contents, rel->r_offset, value,
7942 rel->r_addend);
7943
7944 case R_ARM_V4BX:
7945 if (globals->fix_v4bx)
7946 {
7947 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
7948
7949 /* Ensure that we have a BX instruction. */
7950 BFD_ASSERT ((insn & 0x0ffffff0) == 0x012fff10);
7951
7952 if (globals->fix_v4bx == 2 && (insn & 0xf) != 0xf)
7953 {
7954 /* Branch to veneer. */
7955 bfd_vma glue_addr;
7956 glue_addr = elf32_arm_bx_glue (info, insn & 0xf);
7957 glue_addr -= input_section->output_section->vma
7958 + input_section->output_offset
7959 + rel->r_offset + 8;
7960 insn = (insn & 0xf0000000) | 0x0a000000
7961 | ((glue_addr >> 2) & 0x00ffffff);
7962 }
7963 else
7964 {
7965 /* Preserve Rm (lowest four bits) and the condition code
7966 (highest four bits). Other bits encode MOV PC,Rm. */
7967 insn = (insn & 0xf000000f) | 0x01a0f000;
7968 }
7969
7970 bfd_put_32 (input_bfd, insn, hit_data);
7971 }
7972 return bfd_reloc_ok;
7973
7974 case R_ARM_MOVW_ABS_NC:
7975 case R_ARM_MOVT_ABS:
7976 case R_ARM_MOVW_PREL_NC:
7977 case R_ARM_MOVT_PREL:
7978 /* Until we properly support segment-base-relative addressing then
7979 we assume the segment base to be zero, as for the group relocations.
7980 Thus R_ARM_MOVW_BREL_NC has the same semantics as R_ARM_MOVW_ABS_NC
7981 and R_ARM_MOVT_BREL has the same semantics as R_ARM_MOVT_ABS. */
7982 case R_ARM_MOVW_BREL_NC:
7983 case R_ARM_MOVW_BREL:
7984 case R_ARM_MOVT_BREL:
7985 {
7986 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
7987
7988 if (globals->use_rel)
7989 {
7990 addend = ((insn >> 4) & 0xf000) | (insn & 0xfff);
7991 signed_addend = (addend ^ 0x8000) - 0x8000;
7992 }
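/* Illustrative example (added; values chosen for exposition): for
   MOVW r0, #0x1234 the instruction stores imm4 = 0x1 in bits 16-19
   and imm12 = 0x234 in bits 0-11, so the extraction above yields
   addend = 0x1234; the XOR/subtract idiom then sign-extends it as a
   16-bit quantity. */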
7993
7994 value += signed_addend;
7995
7996 if (r_type == R_ARM_MOVW_PREL_NC || r_type == R_ARM_MOVT_PREL)
7997 value -= (input_section->output_section->vma
7998 + input_section->output_offset + rel->r_offset);
7999
8000 if (r_type == R_ARM_MOVW_BREL && value >= 0x10000)
8001 return bfd_reloc_overflow;
8002
8003 if (sym_flags == STT_ARM_TFUNC)
8004 value |= 1;
8005
8006 if (r_type == R_ARM_MOVT_ABS || r_type == R_ARM_MOVT_PREL
8007 || r_type == R_ARM_MOVT_BREL)
8008 value >>= 16;
8009
8010 insn &= 0xfff0f000;
8011 insn |= value & 0xfff;
8012 insn |= (value & 0xf000) << 4;
8013 bfd_put_32 (input_bfd, insn, hit_data);
8014 }
8015 return bfd_reloc_ok;
8016
8017 case R_ARM_THM_MOVW_ABS_NC:
8018 case R_ARM_THM_MOVT_ABS:
8019 case R_ARM_THM_MOVW_PREL_NC:
8020 case R_ARM_THM_MOVT_PREL:
8021 /* Until we properly support segment-base-relative addressing then
8022 we assume the segment base to be zero, as for the above relocations.
8023 Thus R_ARM_THM_MOVW_BREL_NC has the same semantics as
8024 R_ARM_THM_MOVW_ABS_NC and R_ARM_THM_MOVT_BREL has the same semantics
8025 as R_ARM_THM_MOVT_ABS. */
8026 case R_ARM_THM_MOVW_BREL_NC:
8027 case R_ARM_THM_MOVW_BREL:
8028 case R_ARM_THM_MOVT_BREL:
8029 {
8030 bfd_vma insn;
8031
8032 insn = bfd_get_16 (input_bfd, hit_data) << 16;
8033 insn |= bfd_get_16 (input_bfd, hit_data + 2);
8034
8035 if (globals->use_rel)
8036 {
8037 addend = ((insn >> 4) & 0xf000)
8038 | ((insn >> 15) & 0x0800)
8039 | ((insn >> 4) & 0x0700)
8040 | (insn & 0x00ff);
8041 signed_addend = (addend ^ 0x8000) - 0x8000;
8042 }
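/* Field-layout note (added for clarity): in the combined 32-bit
   Thumb-2 MOVW/MOVT encoding assembled above, imm4 occupies bits
   16-19, i bit 26, imm3 bits 12-14 and imm8 bits 0-7; the extraction
   reassembles them as imm4:i:imm3:imm8 to recover the 16-bit addend
   before sign-extending it. */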
8043
8044 value += signed_addend;
8045
8046 if (r_type == R_ARM_THM_MOVW_PREL_NC || r_type == R_ARM_THM_MOVT_PREL)
8047 value -= (input_section->output_section->vma
8048 + input_section->output_offset + rel->r_offset);
8049
8050 if (r_type == R_ARM_THM_MOVW_BREL && value >= 0x10000)
8051 return bfd_reloc_overflow;
8052
8053 if (sym_flags == STT_ARM_TFUNC)
8054 value |= 1;
8055
8056 if (r_type == R_ARM_THM_MOVT_ABS || r_type == R_ARM_THM_MOVT_PREL
8057 || r_type == R_ARM_THM_MOVT_BREL)
8058 value >>= 16;
8059
8060 insn &= 0xfbf08f00;
8061 insn |= (value & 0xf000) << 4;
8062 insn |= (value & 0x0800) << 15;
8063 insn |= (value & 0x0700) << 4;
8064 insn |= (value & 0x00ff);
8065
8066 bfd_put_16 (input_bfd, insn >> 16, hit_data);
8067 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
8068 }
8069 return bfd_reloc_ok;
8070
8071 case R_ARM_ALU_PC_G0_NC:
8072 case R_ARM_ALU_PC_G1_NC:
8073 case R_ARM_ALU_PC_G0:
8074 case R_ARM_ALU_PC_G1:
8075 case R_ARM_ALU_PC_G2:
8076 case R_ARM_ALU_SB_G0_NC:
8077 case R_ARM_ALU_SB_G1_NC:
8078 case R_ARM_ALU_SB_G0:
8079 case R_ARM_ALU_SB_G1:
8080 case R_ARM_ALU_SB_G2:
8081 {
8082 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8083 bfd_vma pc = input_section->output_section->vma
8084 + input_section->output_offset + rel->r_offset;
8085 /* sb should be the origin of the *segment* containing the symbol.
8086 It is not clear how to obtain this OS-dependent value, so we
8087 make an arbitrary choice of zero. */
8088 bfd_vma sb = 0;
8089 bfd_vma residual;
8090 bfd_vma g_n;
8091 bfd_signed_vma signed_value;
8092 int group = 0;
8093
8094 /* Determine which group of bits to select. */
8095 switch (r_type)
8096 {
8097 case R_ARM_ALU_PC_G0_NC:
8098 case R_ARM_ALU_PC_G0:
8099 case R_ARM_ALU_SB_G0_NC:
8100 case R_ARM_ALU_SB_G0:
8101 group = 0;
8102 break;
8103
8104 case R_ARM_ALU_PC_G1_NC:
8105 case R_ARM_ALU_PC_G1:
8106 case R_ARM_ALU_SB_G1_NC:
8107 case R_ARM_ALU_SB_G1:
8108 group = 1;
8109 break;
8110
8111 case R_ARM_ALU_PC_G2:
8112 case R_ARM_ALU_SB_G2:
8113 group = 2;
8114 break;
8115
8116 default:
8117 abort ();
8118 }
8119
8120 /* If REL, extract the addend from the insn. If RELA, it will
8121 have already been fetched for us. */
8122 if (globals->use_rel)
8123 {
8124 int negative;
8125 bfd_vma constant = insn & 0xff;
8126 bfd_vma rotation = (insn & 0xf00) >> 8;
8127
8128 if (rotation == 0)
8129 signed_addend = constant;
8130 else
8131 {
8132 /* Compensate for the fact that in the instruction, the
8133 rotation is stored in multiples of 2 bits. */
8134 rotation *= 2;
8135
8136 /* Rotate "constant" right by "rotation" bits. */
8137 signed_addend = (constant >> rotation) |
8138 (constant << (8 * sizeof (bfd_vma) - rotation));
8139 }
8140
8141 /* Determine if the instruction is an ADD or a SUB.
8142 (For REL, this determines the sign of the addend.) */
8143 negative = identify_add_or_sub (insn);
8144 if (negative == 0)
8145 {
8146 (*_bfd_error_handler)
8147 (_("%B(%A+0x%lx): Only ADD or SUB instructions are allowed for ALU group relocations"),
8148 input_bfd, input_section,
8149 (long) rel->r_offset, howto->name);
8150 return bfd_reloc_overflow;
8151 }
8152
8153 signed_addend *= negative;
8154 }
8155
8156 /* Compute the value (X) to go in the place. */
8157 if (r_type == R_ARM_ALU_PC_G0_NC
8158 || r_type == R_ARM_ALU_PC_G1_NC
8159 || r_type == R_ARM_ALU_PC_G0
8160 || r_type == R_ARM_ALU_PC_G1
8161 || r_type == R_ARM_ALU_PC_G2)
8162 /* PC relative. */
8163 signed_value = value - pc + signed_addend;
8164 else
8165 /* Section base relative. */
8166 signed_value = value - sb + signed_addend;
8167
8168 /* If the target symbol is a Thumb function, then set the
8169 Thumb bit in the address. */
8170 if (sym_flags == STT_ARM_TFUNC)
8171 signed_value |= 1;
8172
8173 /* Calculate the value of the relevant G_n, in encoded
8174 constant-with-rotation format. */
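/* Illustrative example (added; values chosen for exposition): splitting
   0xAB0000CD gives G0 = 0xAB000000 (residual 0xCD), G1 = 0xCD
   (residual 0) and G2 = 0; each group is an 8-bit constant rotated
   right by an even amount, and the checked (non-NC) variants below
   require the final residual to be zero. */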
8175 g_n = calculate_group_reloc_mask (abs (signed_value), group,
8176 &residual);
8177
8178 /* Check for overflow if required. */
8179 if ((r_type == R_ARM_ALU_PC_G0
8180 || r_type == R_ARM_ALU_PC_G1
8181 || r_type == R_ARM_ALU_PC_G2
8182 || r_type == R_ARM_ALU_SB_G0
8183 || r_type == R_ARM_ALU_SB_G1
8184 || r_type == R_ARM_ALU_SB_G2) && residual != 0)
8185 {
8186 (*_bfd_error_handler)
8187 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
8188 input_bfd, input_section,
8189 (long) rel->r_offset, abs (signed_value), howto->name);
8190 return bfd_reloc_overflow;
8191 }
8192
8193 /* Mask out the value and the ADD/SUB part of the opcode; take care
8194 not to destroy the S bit. */
8195 insn &= 0xff1ff000;
8196
8197 /* Set the opcode according to whether the value to go in the
8198 place is negative. */
8199 if (signed_value < 0)
8200 insn |= 1 << 22;
8201 else
8202 insn |= 1 << 23;
8203
8204 /* Encode the offset. */
8205 insn |= g_n;
8206
8207 bfd_put_32 (input_bfd, insn, hit_data);
8208 }
8209 return bfd_reloc_ok;
8210
8211 case R_ARM_LDR_PC_G0:
8212 case R_ARM_LDR_PC_G1:
8213 case R_ARM_LDR_PC_G2:
8214 case R_ARM_LDR_SB_G0:
8215 case R_ARM_LDR_SB_G1:
8216 case R_ARM_LDR_SB_G2:
8217 {
8218 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8219 bfd_vma pc = input_section->output_section->vma
8220 + input_section->output_offset + rel->r_offset;
8221 bfd_vma sb = 0; /* See note above. */
8222 bfd_vma residual;
8223 bfd_signed_vma signed_value;
8224 int group = 0;
8225
8226 /* Determine which groups of bits to calculate. */
8227 switch (r_type)
8228 {
8229 case R_ARM_LDR_PC_G0:
8230 case R_ARM_LDR_SB_G0:
8231 group = 0;
8232 break;
8233
8234 case R_ARM_LDR_PC_G1:
8235 case R_ARM_LDR_SB_G1:
8236 group = 1;
8237 break;
8238
8239 case R_ARM_LDR_PC_G2:
8240 case R_ARM_LDR_SB_G2:
8241 group = 2;
8242 break;
8243
8244 default:
8245 abort ();
8246 }
8247
8248 /* If REL, extract the addend from the insn. If RELA, it will
8249 have already been fetched for us. */
8250 if (globals->use_rel)
8251 {
8252 int negative = (insn & (1 << 23)) ? 1 : -1;
8253 signed_addend = negative * (insn & 0xfff);
8254 }
8255
8256 /* Compute the value (X) to go in the place. */
8257 if (r_type == R_ARM_LDR_PC_G0
8258 || r_type == R_ARM_LDR_PC_G1
8259 || r_type == R_ARM_LDR_PC_G2)
8260 /* PC relative. */
8261 signed_value = value - pc + signed_addend;
8262 else
8263 /* Section base relative. */
8264 signed_value = value - sb + signed_addend;
8265
8266 /* Calculate the value of the relevant G_{n-1} to obtain
8267 the residual at that stage. */
8268 calculate_group_reloc_mask (abs (signed_value), group - 1, &residual);
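/* Clarifying note (added): for Gn with n > 0, the preceding groups
   G0..G(n-1) are assumed to have been applied by earlier ALU
   instructions in the addressing sequence; this LDR encodes the
   residual left after removing them (for G0 the residual is the whole
   value), and the check below ensures it fits in the instruction's
   12-bit offset field. */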
8269
8270 /* Check for overflow. */
8271 if (residual >= 0x1000)
8272 {
8273 (*_bfd_error_handler)
8274 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
8275 input_bfd, input_section,
8276 (long) rel->r_offset, abs (signed_value), howto->name);
8277 return bfd_reloc_overflow;
8278 }
8279
8280 /* Mask out the value and U bit. */
8281 insn &= 0xff7ff000;
8282
8283 /* Set the U bit if the value to go in the place is non-negative. */
8284 if (signed_value >= 0)
8285 insn |= 1 << 23;
8286
8287 /* Encode the offset. */
8288 insn |= residual;
8289
8290 bfd_put_32 (input_bfd, insn, hit_data);
8291 }
8292 return bfd_reloc_ok;
8293
8294 case R_ARM_LDRS_PC_G0:
8295 case R_ARM_LDRS_PC_G1:
8296 case R_ARM_LDRS_PC_G2:
8297 case R_ARM_LDRS_SB_G0:
8298 case R_ARM_LDRS_SB_G1:
8299 case R_ARM_LDRS_SB_G2:
8300 {
8301 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8302 bfd_vma pc = input_section->output_section->vma
8303 + input_section->output_offset + rel->r_offset;
8304 bfd_vma sb = 0; /* See note above. */
8305 bfd_vma residual;
8306 bfd_signed_vma signed_value;
8307 int group = 0;
8308
8309 /* Determine which groups of bits to calculate. */
8310 switch (r_type)
8311 {
8312 case R_ARM_LDRS_PC_G0:
8313 case R_ARM_LDRS_SB_G0:
8314 group = 0;
8315 break;
8316
8317 case R_ARM_LDRS_PC_G1:
8318 case R_ARM_LDRS_SB_G1:
8319 group = 1;
8320 break;
8321
8322 case R_ARM_LDRS_PC_G2:
8323 case R_ARM_LDRS_SB_G2:
8324 group = 2;
8325 break;
8326
8327 default:
8328 abort ();
8329 }
8330
8331 /* If REL, extract the addend from the insn. If RELA, it will
8332 have already been fetched for us. */
8333 if (globals->use_rel)
8334 {
8335 int negative = (insn & (1 << 23)) ? 1 : -1;
8336 signed_addend = negative * (((insn & 0xf00) >> 4) + (insn & 0xf));
8337 }
8338
8339 /* Compute the value (X) to go in the place. */
8340 if (r_type == R_ARM_LDRS_PC_G0
8341 || r_type == R_ARM_LDRS_PC_G1
8342 || r_type == R_ARM_LDRS_PC_G2)
8343 /* PC relative. */
8344 signed_value = value - pc + signed_addend;
8345 else
8346 /* Section base relative. */
8347 signed_value = value - sb + signed_addend;
8348
8349 /* Calculate the value of the relevant G_{n-1} to obtain
8350 the residual at that stage. */
8351 calculate_group_reloc_mask (abs (signed_value), group - 1, &residual);
8352
8353 /* Check for overflow. */
8354 if (residual >= 0x100)
8355 {
8356 (*_bfd_error_handler)
8357 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
8358 input_bfd, input_section,
8359 (long) rel->r_offset, abs (signed_value), howto->name);
8360 return bfd_reloc_overflow;
8361 }
8362
8363 /* Mask out the value and U bit. */
8364 insn &= 0xff7ff0f0;
8365
8366 /* Set the U bit if the value to go in the place is non-negative. */
8367 if (signed_value >= 0)
8368 insn |= 1 << 23;
8369
8370 /* Encode the offset. */
8371 insn |= ((residual & 0xf0) << 4) | (residual & 0xf);
8372
8373 bfd_put_32 (input_bfd, insn, hit_data);
8374 }
8375 return bfd_reloc_ok;
8376
8377 case R_ARM_LDC_PC_G0:
8378 case R_ARM_LDC_PC_G1:
8379 case R_ARM_LDC_PC_G2:
8380 case R_ARM_LDC_SB_G0:
8381 case R_ARM_LDC_SB_G1:
8382 case R_ARM_LDC_SB_G2:
8383 {
8384 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8385 bfd_vma pc = input_section->output_section->vma
8386 + input_section->output_offset + rel->r_offset;
8387 bfd_vma sb = 0; /* See note above. */
8388 bfd_vma residual;
8389 bfd_signed_vma signed_value;
8390 int group = 0;
8391
8392 /* Determine which groups of bits to calculate. */
8393 switch (r_type)
8394 {
8395 case R_ARM_LDC_PC_G0:
8396 case R_ARM_LDC_SB_G0:
8397 group = 0;
8398 break;
8399
8400 case R_ARM_LDC_PC_G1:
8401 case R_ARM_LDC_SB_G1:
8402 group = 1;
8403 break;
8404
8405 case R_ARM_LDC_PC_G2:
8406 case R_ARM_LDC_SB_G2:
8407 group = 2;
8408 break;
8409
8410 default:
8411 abort ();
8412 }
8413
8414 /* If REL, extract the addend from the insn. If RELA, it will
8415 have already been fetched for us. */
8416 if (globals->use_rel)
8417 {
8418 int negative = (insn & (1 << 23)) ? 1 : -1;
8419 signed_addend = negative * ((insn & 0xff) << 2);
8420 }
8421
8422 /* Compute the value (X) to go in the place. */
8423 if (r_type == R_ARM_LDC_PC_G0
8424 || r_type == R_ARM_LDC_PC_G1
8425 || r_type == R_ARM_LDC_PC_G2)
8426 /* PC relative. */
8427 signed_value = value - pc + signed_addend;
8428 else
8429 /* Section base relative. */
8430 signed_value = value - sb + signed_addend;
8431
8432 /* Calculate the value of the relevant G_{n-1} to obtain
8433 the residual at that stage. */
8434 calculate_group_reloc_mask (abs (signed_value), group - 1, &residual);
8435
8436 /* Check for overflow. (The absolute value to go in the place must be
8437 divisible by four and, after having been divided by four, must
8438 fit in eight bits.) */
8439 if ((residual & 0x3) != 0 || residual >= 0x400)
8440 {
8441 (*_bfd_error_handler)
8442 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
8443 input_bfd, input_section,
8444 (long) rel->r_offset, abs (signed_value), howto->name);
8445 return bfd_reloc_overflow;
8446 }
8447
8448 /* Mask out the value and U bit. */
8449 insn &= 0xff7fff00;
8450
8451 /* Set the U bit if the value to go in the place is non-negative. */
8452 if (signed_value >= 0)
8453 insn |= 1 << 23;
8454
8455 /* Encode the offset. */
8456 insn |= residual >> 2;
8457
8458 bfd_put_32 (input_bfd, insn, hit_data);
8459 }
8460 return bfd_reloc_ok;
8461
8462 default:
8463 return bfd_reloc_notsupported;
8464 }
8465 }
8466
8467 /* Add INCREMENT to the reloc (of type HOWTO) at ADDRESS. */
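/* Usage note (added for clarity): this helper is only needed for
   REL-style targets during relocatable (ld -r) links, where the addend
   lives in the instruction itself and therefore has to be patched in
   place when a section symbol moves within its output section; see the
   call in elf32_arm_relocate_section below. */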
8468 static void
8469 arm_add_to_rel (bfd * abfd,
8470 bfd_byte * address,
8471 reloc_howto_type * howto,
8472 bfd_signed_vma increment)
8473 {
8474 bfd_signed_vma addend;
8475
8476 if (howto->type == R_ARM_THM_CALL
8477 || howto->type == R_ARM_THM_JUMP24)
8478 {
8479 int upper_insn, lower_insn;
8480 int upper, lower;
8481
8482 upper_insn = bfd_get_16 (abfd, address);
8483 lower_insn = bfd_get_16 (abfd, address + 2);
8484 upper = upper_insn & 0x7ff;
8485 lower = lower_insn & 0x7ff;
8486
8487 addend = (upper << 12) | (lower << 1);
8488 addend += increment;
8489 addend >>= 1;
8490
8491 upper_insn = (upper_insn & 0xf800) | ((addend >> 11) & 0x7ff);
8492 lower_insn = (lower_insn & 0xf800) | (addend & 0x7ff);
8493
8494 bfd_put_16 (abfd, (bfd_vma) upper_insn, address);
8495 bfd_put_16 (abfd, (bfd_vma) lower_insn, address + 2);
8496 }
8497 else
8498 {
8499 bfd_vma contents;
8500
8501 contents = bfd_get_32 (abfd, address);
8502
8503 /* Get the (signed) value from the instruction. */
8504 addend = contents & howto->src_mask;
8505 if (addend & ((howto->src_mask + 1) >> 1))
8506 {
8507 bfd_signed_vma mask;
8508
8509 mask = -1;
8510 mask &= ~ howto->src_mask;
8511 addend |= mask;
8512 }
8513
8514 /* Add in the increment, (which is a byte value). */
8515 switch (howto->type)
8516 {
8517 default:
8518 addend += increment;
8519 break;
8520
8521 case R_ARM_PC24:
8522 case R_ARM_PLT32:
8523 case R_ARM_CALL:
8524 case R_ARM_JUMP24:
8525 addend <<= howto->size;
8526 addend += increment;
8527
8528 /* Should we check for overflow here ? */
8529
8530 /* Drop any undesired bits. */
8531 addend >>= howto->rightshift;
8532 break;
8533 }
8534
8535 contents = (contents & ~ howto->dst_mask) | (addend & howto->dst_mask);
8536
8537 bfd_put_32 (abfd, contents, address);
8538 }
8539 }
8540
8541 #define IS_ARM_TLS_RELOC(R_TYPE) \
8542 ((R_TYPE) == R_ARM_TLS_GD32 \
8543 || (R_TYPE) == R_ARM_TLS_LDO32 \
8544 || (R_TYPE) == R_ARM_TLS_LDM32 \
8545 || (R_TYPE) == R_ARM_TLS_DTPOFF32 \
8546 || (R_TYPE) == R_ARM_TLS_DTPMOD32 \
8547 || (R_TYPE) == R_ARM_TLS_TPOFF32 \
8548 || (R_TYPE) == R_ARM_TLS_LE32 \
8549 || (R_TYPE) == R_ARM_TLS_IE32)
8550
8551 /* Relocate an ARM ELF section. */
8552
8553 static bfd_boolean
8554 elf32_arm_relocate_section (bfd * output_bfd,
8555 struct bfd_link_info * info,
8556 bfd * input_bfd,
8557 asection * input_section,
8558 bfd_byte * contents,
8559 Elf_Internal_Rela * relocs,
8560 Elf_Internal_Sym * local_syms,
8561 asection ** local_sections)
8562 {
8563 Elf_Internal_Shdr *symtab_hdr;
8564 struct elf_link_hash_entry **sym_hashes;
8565 Elf_Internal_Rela *rel;
8566 Elf_Internal_Rela *relend;
8567 const char *name;
8568 struct elf32_arm_link_hash_table * globals;
8569
8570 globals = elf32_arm_hash_table (info);
8571
8572 symtab_hdr = & elf_symtab_hdr (input_bfd);
8573 sym_hashes = elf_sym_hashes (input_bfd);
8574
8575 rel = relocs;
8576 relend = relocs + input_section->reloc_count;
8577 for (; rel < relend; rel++)
8578 {
8579 int r_type;
8580 reloc_howto_type * howto;
8581 unsigned long r_symndx;
8582 Elf_Internal_Sym * sym;
8583 asection * sec;
8584 struct elf_link_hash_entry * h;
8585 bfd_vma relocation;
8586 bfd_reloc_status_type r;
8587 arelent bfd_reloc;
8588 char sym_type;
8589 bfd_boolean unresolved_reloc = FALSE;
8590 char *error_message = NULL;
8591
8592 r_symndx = ELF32_R_SYM (rel->r_info);
8593 r_type = ELF32_R_TYPE (rel->r_info);
8594 r_type = arm_real_reloc_type (globals, r_type);
8595
8596 if ( r_type == R_ARM_GNU_VTENTRY
8597 || r_type == R_ARM_GNU_VTINHERIT)
8598 continue;
8599
8600 bfd_reloc.howto = elf32_arm_howto_from_type (r_type);
8601 howto = bfd_reloc.howto;
8602
8603 h = NULL;
8604 sym = NULL;
8605 sec = NULL;
8606
8607 if (r_symndx < symtab_hdr->sh_info)
8608 {
8609 sym = local_syms + r_symndx;
8610 sym_type = ELF32_ST_TYPE (sym->st_info);
8611 sec = local_sections[r_symndx];
8612 if (globals->use_rel)
8613 {
8614 relocation = (sec->output_section->vma
8615 + sec->output_offset
8616 + sym->st_value);
8617 if (!info->relocatable
8618 && (sec->flags & SEC_MERGE)
8619 && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
8620 {
8621 asection *msec;
8622 bfd_vma addend, value;
8623
8624 switch (r_type)
8625 {
8626 case R_ARM_MOVW_ABS_NC:
8627 case R_ARM_MOVT_ABS:
8628 value = bfd_get_32 (input_bfd, contents + rel->r_offset);
8629 addend = ((value & 0xf0000) >> 4) | (value & 0xfff);
8630 addend = (addend ^ 0x8000) - 0x8000;
8631 break;
8632
8633 case R_ARM_THM_MOVW_ABS_NC:
8634 case R_ARM_THM_MOVT_ABS:
8635 value = bfd_get_16 (input_bfd, contents + rel->r_offset)
8636 << 16;
8637 value |= bfd_get_16 (input_bfd,
8638 contents + rel->r_offset + 2);
8639 addend = ((value & 0xf7000) >> 4) | (value & 0xff)
8640 | ((value & 0x04000000) >> 15);
8641 addend = (addend ^ 0x8000) - 0x8000;
8642 break;
8643
8644 default:
8645 if (howto->rightshift
8646 || (howto->src_mask & (howto->src_mask + 1)))
8647 {
8648 (*_bfd_error_handler)
8649 (_("%B(%A+0x%lx): %s relocation against SEC_MERGE section"),
8650 input_bfd, input_section,
8651 (long) rel->r_offset, howto->name);
8652 return FALSE;
8653 }
8654
8655 value = bfd_get_32 (input_bfd, contents + rel->r_offset);
8656
8657 /* Get the (signed) value from the instruction. */
8658 addend = value & howto->src_mask;
8659 if (addend & ((howto->src_mask + 1) >> 1))
8660 {
8661 bfd_signed_vma mask;
8662
8663 mask = -1;
8664 mask &= ~ howto->src_mask;
8665 addend |= mask;
8666 }
8667 break;
8668 }
8669
8670 msec = sec;
8671 addend =
8672 _bfd_elf_rel_local_sym (output_bfd, sym, &msec, addend)
8673 - relocation;
8674 addend += msec->output_section->vma + msec->output_offset;
8675
8676 /* Cases here must match those in the preceding
8677 switch statement. */
8678 switch (r_type)
8679 {
8680 case R_ARM_MOVW_ABS_NC:
8681 case R_ARM_MOVT_ABS:
8682 value = (value & 0xfff0f000) | ((addend & 0xf000) << 4)
8683 | (addend & 0xfff);
8684 bfd_put_32 (input_bfd, value, contents + rel->r_offset);
8685 break;
8686
8687 case R_ARM_THM_MOVW_ABS_NC:
8688 case R_ARM_THM_MOVT_ABS:
8689 value = (value & 0xfbf08f00) | ((addend & 0xf700) << 4)
8690 | (addend & 0xff) | ((addend & 0x0800) << 15);
8691 bfd_put_16 (input_bfd, value >> 16,
8692 contents + rel->r_offset);
8693 bfd_put_16 (input_bfd, value,
8694 contents + rel->r_offset + 2);
8695 break;
8696
8697 default:
8698 value = (value & ~ howto->dst_mask)
8699 | (addend & howto->dst_mask);
8700 bfd_put_32 (input_bfd, value, contents + rel->r_offset);
8701 break;
8702 }
8703 }
8704 }
8705 else
8706 relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
8707 }
8708 else
8709 {
8710 bfd_boolean warned;
8711
8712 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
8713 r_symndx, symtab_hdr, sym_hashes,
8714 h, sec, relocation,
8715 unresolved_reloc, warned);
8716
8717 sym_type = h->type;
8718 }
8719
8720 if (sec != NULL && elf_discarded_section (sec))
8721 {
8722 /* For relocs against symbols from removed linkonce sections,
8723 or sections discarded by a linker script, we just want the
8724 section contents zeroed. Avoid any special processing. */
8725 _bfd_clear_contents (howto, input_bfd, contents + rel->r_offset);
8726 rel->r_info = 0;
8727 rel->r_addend = 0;
8728 continue;
8729 }
8730
8731 if (info->relocatable)
8732 {
8733 /* This is a relocatable link. We don't have to change
8734 anything, unless the reloc is against a section symbol,
8735 in which case we have to adjust according to where the
8736 section symbol winds up in the output section. */
8737 if (sym != NULL && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
8738 {
8739 if (globals->use_rel)
8740 arm_add_to_rel (input_bfd, contents + rel->r_offset,
8741 howto, (bfd_signed_vma) sec->output_offset);
8742 else
8743 rel->r_addend += sec->output_offset;
8744 }
8745 continue;
8746 }
8747
8748 if (h != NULL)
8749 name = h->root.root.string;
8750 else
8751 {
8752 name = (bfd_elf_string_from_elf_section
8753 (input_bfd, symtab_hdr->sh_link, sym->st_name));
8754 if (name == NULL || *name == '\0')
8755 name = bfd_section_name (input_bfd, sec);
8756 }
8757
8758 if (r_symndx != 0
8759 && r_type != R_ARM_NONE
8760 && (h == NULL
8761 || h->root.type == bfd_link_hash_defined
8762 || h->root.type == bfd_link_hash_defweak)
8763 && IS_ARM_TLS_RELOC (r_type) != (sym_type == STT_TLS))
8764 {
8765 (*_bfd_error_handler)
8766 ((sym_type == STT_TLS
8767 ? _("%B(%A+0x%lx): %s used with TLS symbol %s")
8768 : _("%B(%A+0x%lx): %s used with non-TLS symbol %s")),
8769 input_bfd,
8770 input_section,
8771 (long) rel->r_offset,
8772 howto->name,
8773 name);
8774 }
8775
8776 r = elf32_arm_final_link_relocate (howto, input_bfd, output_bfd,
8777 input_section, contents, rel,
8778 relocation, info, sec, name,
8779 (h ? ELF_ST_TYPE (h->type) :
8780 ELF_ST_TYPE (sym->st_info)), h,
8781 &unresolved_reloc, &error_message);
8782
8783 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
8784 because such sections are not SEC_ALLOC and thus ld.so will
8785 not process them. */
8786 if (unresolved_reloc
8787 && !((input_section->flags & SEC_DEBUGGING) != 0
8788 && h->def_dynamic))
8789 {
8790 (*_bfd_error_handler)
8791 (_("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"),
8792 input_bfd,
8793 input_section,
8794 (long) rel->r_offset,
8795 howto->name,
8796 h->root.root.string);
8797 return FALSE;
8798 }
8799
8800 if (r != bfd_reloc_ok)
8801 {
8802 switch (r)
8803 {
8804 case bfd_reloc_overflow:
8805 /* If the overflowing reloc was to an undefined symbol,
8806 we have already printed one error message and there
8807 is no point complaining again. */
8808 if ((! h ||
8809 h->root.type != bfd_link_hash_undefined)
8810 && (!((*info->callbacks->reloc_overflow)
8811 (info, (h ? &h->root : NULL), name, howto->name,
8812 (bfd_vma) 0, input_bfd, input_section,
8813 rel->r_offset))))
8814 return FALSE;
8815 break;
8816
8817 case bfd_reloc_undefined:
8818 if (!((*info->callbacks->undefined_symbol)
8819 (info, name, input_bfd, input_section,
8820 rel->r_offset, TRUE)))
8821 return FALSE;
8822 break;
8823
8824 case bfd_reloc_outofrange:
8825 error_message = _("out of range");
8826 goto common_error;
8827
8828 case bfd_reloc_notsupported:
8829 error_message = _("unsupported relocation");
8830 goto common_error;
8831
8832 case bfd_reloc_dangerous:
8833 /* error_message should already be set. */
8834 goto common_error;
8835
8836 default:
8837 error_message = _("unknown error");
8838 /* Fall through. */
8839
8840 common_error:
8841 BFD_ASSERT (error_message != NULL);
8842 if (!((*info->callbacks->reloc_dangerous)
8843 (info, error_message, input_bfd, input_section,
8844 rel->r_offset)))
8845 return FALSE;
8846 break;
8847 }
8848 }
8849 }
8850
8851 return TRUE;
8852 }
8853
8854 /* Add a new unwind edit to the list described by HEAD, TAIL. If INDEX is zero,
8855 adds the edit to the start of the list. (The list must be built in order of
8856 ascending INDEX: the function's callers are primarily responsible for
8857 maintaining that condition). */
8858
8859 static void
8860 add_unwind_table_edit (arm_unwind_table_edit **head,
8861 arm_unwind_table_edit **tail,
8862 arm_unwind_edit_type type,
8863 asection *linked_section,
8864 unsigned int index)
8865 {
8866 arm_unwind_table_edit *new_edit = xmalloc (sizeof (arm_unwind_table_edit));
8867
8868 new_edit->type = type;
8869 new_edit->linked_section = linked_section;
8870 new_edit->index = index;
8871
8872 if (index > 0)
8873 {
8874 new_edit->next = NULL;
8875
8876 if (*tail)
8877 (*tail)->next = new_edit;
8878
8879 (*tail) = new_edit;
8880
8881 if (!*head)
8882 (*head) = new_edit;
8883 }
8884 else
8885 {
8886 new_edit->next = *head;
8887
8888 if (!*tail)
8889 *tail = new_edit;
8890
8891 *head = new_edit;
8892 }
8893 }
8894
8895 static _arm_elf_section_data *get_arm_elf_section_data (asection *);
8896
8897 /* Adjust the size of EXIDX_SEC by ADJUST bytes; ADJUST may be negative. */
8898 static void
8899 adjust_exidx_size (asection *exidx_sec, int adjust)
8900 {
8901 asection *out_sec;
8902
8903 if (!exidx_sec->rawsize)
8904 exidx_sec->rawsize = exidx_sec->size;
8905
8906 bfd_set_section_size (exidx_sec->owner, exidx_sec, exidx_sec->size + adjust);
8907 out_sec = exidx_sec->output_section;
8908 /* Adjust size of output section. */
8909 bfd_set_section_size (out_sec->owner, out_sec, out_sec->size + adjust);
8910 }
8911
8912 /* Insert an EXIDX_CANTUNWIND marker at the end of a section. */
8913 static void
8914 insert_cantunwind_after (asection *text_sec, asection *exidx_sec)
8915 {
8916 struct _arm_elf_section_data *exidx_arm_data;
8917
8918 exidx_arm_data = get_arm_elf_section_data (exidx_sec);
8919 add_unwind_table_edit (
8920 &exidx_arm_data->u.exidx.unwind_edit_list,
8921 &exidx_arm_data->u.exidx.unwind_edit_tail,
8922 INSERT_EXIDX_CANTUNWIND_AT_END, text_sec, UINT_MAX);
8923
8924 adjust_exidx_size (exidx_sec, 8);
8925 }
8926
8927 /* Scan .ARM.exidx tables, and create a list describing edits which should be
8928 made to those tables, such that:
8929
8930 1. Regions without unwind data are marked with EXIDX_CANTUNWIND entries.
8931 2. Duplicate entries are merged together (EXIDX_CANTUNWIND, or unwind
8932 codes which have been inlined into the index).
8933
8934 The edits are applied when the tables are written
8935 (in elf32_arm_write_section).
8936 */
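/* Example of the resulting edit list (added, illustrative): if two
   consecutive text sections both end up described by EXIDX_CANTUNWIND
   entries, the second entry is given a DELETE_EXIDX_ENTRY edit; a text
   section with no unwind data that follows one which has some gets an
   INSERT_EXIDX_CANTUNWIND_AT_END edit attached to the previous
   section's .ARM.exidx; and a terminating CANTUNWIND entry is added at
   the very end unless one is already present. */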
8937
8938 bfd_boolean
8939 elf32_arm_fix_exidx_coverage (asection **text_section_order,
8940 unsigned int num_text_sections,
8941 struct bfd_link_info *info)
8942 {
8943 bfd *inp;
8944 unsigned int last_second_word = 0, i;
8945 asection *last_exidx_sec = NULL;
8946 asection *last_text_sec = NULL;
8947 int last_unwind_type = -1;
8948
8949 /* Walk over all EXIDX sections, and create backlinks from the corresponding
8950 text sections. */
8951 for (inp = info->input_bfds; inp != NULL; inp = inp->link_next)
8952 {
8953 asection *sec;
8954
8955 for (sec = inp->sections; sec != NULL; sec = sec->next)
8956 {
8957 struct bfd_elf_section_data *elf_sec = elf_section_data (sec);
8958 Elf_Internal_Shdr *hdr = &elf_sec->this_hdr;
8959
8960 if (!hdr || hdr->sh_type != SHT_ARM_EXIDX)
8961 continue;
8962
8963 if (elf_sec->linked_to)
8964 {
8965 Elf_Internal_Shdr *linked_hdr
8966 = &elf_section_data (elf_sec->linked_to)->this_hdr;
8967 struct _arm_elf_section_data *linked_sec_arm_data
8968 = get_arm_elf_section_data (linked_hdr->bfd_section);
8969
8970 if (linked_sec_arm_data == NULL)
8971 continue;
8972
8973 /* Link this .ARM.exidx section back from the text section it
8974 describes. */
8975 linked_sec_arm_data->u.text.arm_exidx_sec = sec;
8976 }
8977 }
8978 }
8979
8980 /* Walk all text sections in order of increasing VMA. Eliminate duplicate
8981 index table entries (EXIDX_CANTUNWIND and inlined unwind opcodes),
8982 and add EXIDX_CANTUNWIND entries for sections with no unwind table data.
8983 */
8984
8985 for (i = 0; i < num_text_sections; i++)
8986 {
8987 asection *sec = text_section_order[i];
8988 asection *exidx_sec;
8989 struct _arm_elf_section_data *arm_data = get_arm_elf_section_data (sec);
8990 struct _arm_elf_section_data *exidx_arm_data;
8991 bfd_byte *contents = NULL;
8992 int deleted_exidx_bytes = 0;
8993 bfd_vma j;
8994 arm_unwind_table_edit *unwind_edit_head = NULL;
8995 arm_unwind_table_edit *unwind_edit_tail = NULL;
8996 Elf_Internal_Shdr *hdr;
8997 bfd *ibfd;
8998
8999 if (arm_data == NULL)
9000 continue;
9001
9002 exidx_sec = arm_data->u.text.arm_exidx_sec;
9003 if (exidx_sec == NULL)
9004 {
9005 /* Section has no unwind data. */
9006 if (last_unwind_type == 0 || !last_exidx_sec)
9007 continue;
9008
9009 /* Ignore zero sized sections. */
9010 if (sec->size == 0)
9011 continue;
9012
9013 insert_cantunwind_after (last_text_sec, last_exidx_sec);
9014 last_unwind_type = 0;
9015 continue;
9016 }
9017
9018 /* Skip /DISCARD/ sections. */
9019 if (bfd_is_abs_section (exidx_sec->output_section))
9020 continue;
9021
9022 hdr = &elf_section_data (exidx_sec)->this_hdr;
9023 if (hdr->sh_type != SHT_ARM_EXIDX)
9024 continue;
9025
9026 exidx_arm_data = get_arm_elf_section_data (exidx_sec);
9027 if (exidx_arm_data == NULL)
9028 continue;
9029
9030 ibfd = exidx_sec->owner;
9031
9032 if (hdr->contents != NULL)
9033 contents = hdr->contents;
9034 else if (! bfd_malloc_and_get_section (ibfd, exidx_sec, &contents))
9035 /* An error? */
9036 continue;
9037
9038 for (j = 0; j < hdr->sh_size; j += 8)
9039 {
9040 unsigned int second_word = bfd_get_32 (ibfd, contents + j + 4);
9041 int unwind_type;
9042 int elide = 0;
9043
9044 /* An EXIDX_CANTUNWIND entry. */
9045 if (second_word == 1)
9046 {
9047 if (last_unwind_type == 0)
9048 elide = 1;
9049 unwind_type = 0;
9050 }
9051 /* Inlined unwinding data. Merge if equal to previous. */
9052 else if ((second_word & 0x80000000) != 0)
9053 {
9054 if (last_second_word == second_word && last_unwind_type == 1)
9055 elide = 1;
9056 unwind_type = 1;
9057 last_second_word = second_word;
9058 }
9059 /* Normal table entry. In theory we could merge these too,
9060 but duplicate entries are likely to be much less common. */
9061 else
9062 unwind_type = 2;
9063
9064 if (elide)
9065 {
9066 add_unwind_table_edit (&unwind_edit_head, &unwind_edit_tail,
9067 DELETE_EXIDX_ENTRY, NULL, j / 8);
9068
9069 deleted_exidx_bytes += 8;
9070 }
9071
9072 last_unwind_type = unwind_type;
9073 }
9074
9075 /* Free contents if we allocated it ourselves. */
9076 if (contents != hdr->contents)
9077 free (contents);
9078
9079 /* Record edits to be applied later (in elf32_arm_write_section). */
9080 exidx_arm_data->u.exidx.unwind_edit_list = unwind_edit_head;
9081 exidx_arm_data->u.exidx.unwind_edit_tail = unwind_edit_tail;
9082
9083 if (deleted_exidx_bytes > 0)
9084 adjust_exidx_size (exidx_sec, -deleted_exidx_bytes);
9085
9086 last_exidx_sec = exidx_sec;
9087 last_text_sec = sec;
9088 }
9089
9090 /* Add terminating CANTUNWIND entry. */
9091 if (last_exidx_sec && last_unwind_type != 0)
9092 insert_cantunwind_after (last_text_sec, last_exidx_sec);
9093
9094 return TRUE;
9095 }
9096
9097 static bfd_boolean
9098 elf32_arm_output_glue_section (struct bfd_link_info *info, bfd *obfd,
9099 bfd *ibfd, const char *name)
9100 {
9101 asection *sec, *osec;
9102
9103 sec = bfd_get_section_by_name (ibfd, name);
9104 if (sec == NULL || (sec->flags & SEC_EXCLUDE) != 0)
9105 return TRUE;
9106
9107 osec = sec->output_section;
9108 if (elf32_arm_write_section (obfd, info, sec, sec->contents))
9109 return TRUE;
9110
9111 if (! bfd_set_section_contents (obfd, osec, sec->contents,
9112 sec->output_offset, sec->size))
9113 return FALSE;
9114
9115 return TRUE;
9116 }
9117
9118 static bfd_boolean
9119 elf32_arm_final_link (bfd *abfd, struct bfd_link_info *info)
9120 {
9121 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
9122
9123 /* Invoke the regular ELF backend linker to do all the work. */
9124 if (!bfd_elf_final_link (abfd, info))
9125 return FALSE;
9126
9127 /* Write out any glue sections now that we have created all the
9128 stubs. */
9129 if (globals->bfd_of_glue_owner != NULL)
9130 {
9131 if (! elf32_arm_output_glue_section (info, abfd,
9132 globals->bfd_of_glue_owner,
9133 ARM2THUMB_GLUE_SECTION_NAME))
9134 return FALSE;
9135
9136 if (! elf32_arm_output_glue_section (info, abfd,
9137 globals->bfd_of_glue_owner,
9138 THUMB2ARM_GLUE_SECTION_NAME))
9139 return FALSE;
9140
9141 if (! elf32_arm_output_glue_section (info, abfd,
9142 globals->bfd_of_glue_owner,
9143 VFP11_ERRATUM_VENEER_SECTION_NAME))
9144 return FALSE;
9145
9146 if (! elf32_arm_output_glue_section (info, abfd,
9147 globals->bfd_of_glue_owner,
9148 ARM_BX_GLUE_SECTION_NAME))
9149 return FALSE;
9150 }
9151
9152 return TRUE;
9153 }
9154
9155 /* Set the right machine number. */
9156
9157 static bfd_boolean
9158 elf32_arm_object_p (bfd *abfd)
9159 {
9160 unsigned int mach;
9161
9162 mach = bfd_arm_get_mach_from_notes (abfd, ARM_NOTE_SECTION);
9163
9164 if (mach != bfd_mach_arm_unknown)
9165 bfd_default_set_arch_mach (abfd, bfd_arch_arm, mach);
9166
9167 else if (elf_elfheader (abfd)->e_flags & EF_ARM_MAVERICK_FLOAT)
9168 bfd_default_set_arch_mach (abfd, bfd_arch_arm, bfd_mach_arm_ep9312);
9169
9170 else
9171 bfd_default_set_arch_mach (abfd, bfd_arch_arm, mach);
9172
9173 return TRUE;
9174 }
9175
9176 /* Function to keep ARM specific flags in the ELF header. */
9177
9178 static bfd_boolean
9179 elf32_arm_set_private_flags (bfd *abfd, flagword flags)
9180 {
9181 if (elf_flags_init (abfd)
9182 && elf_elfheader (abfd)->e_flags != flags)
9183 {
9184 if (EF_ARM_EABI_VERSION (flags) == EF_ARM_EABI_UNKNOWN)
9185 {
9186 if (flags & EF_ARM_INTERWORK)
9187 (*_bfd_error_handler)
9188 (_("Warning: Not setting interworking flag of %B since it has already been specified as non-interworking"),
9189 abfd);
9190 else
9191 _bfd_error_handler
9192 (_("Warning: Clearing the interworking flag of %B due to outside request"),
9193 abfd);
9194 }
9195 }
9196 else
9197 {
9198 elf_elfheader (abfd)->e_flags = flags;
9199 elf_flags_init (abfd) = TRUE;
9200 }
9201
9202 return TRUE;
9203 }
9204
9205 /* Copy backend specific data from one object module to another. */
9206
9207 static bfd_boolean
9208 elf32_arm_copy_private_bfd_data (bfd *ibfd, bfd *obfd)
9209 {
9210 flagword in_flags;
9211 flagword out_flags;
9212
9213 if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
9214 return TRUE;
9215
9216 in_flags = elf_elfheader (ibfd)->e_flags;
9217 out_flags = elf_elfheader (obfd)->e_flags;
9218
9219 if (elf_flags_init (obfd)
9220 && EF_ARM_EABI_VERSION (out_flags) == EF_ARM_EABI_UNKNOWN
9221 && in_flags != out_flags)
9222 {
9223 /* Cannot mix APCS26 and APCS32 code. */
9224 if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
9225 return FALSE;
9226
9227 /* Cannot mix float APCS and non-float APCS code. */
9228 if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
9229 return FALSE;
9230
9231 /* If the src and dest have different interworking flags
9232 then turn off the interworking bit. */
9233 if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
9234 {
9235 if (out_flags & EF_ARM_INTERWORK)
9236 _bfd_error_handler
9237 (_("Warning: Clearing the interworking flag of %B because non-interworking code in %B has been linked with it"),
9238 obfd, ibfd);
9239
9240 in_flags &= ~EF_ARM_INTERWORK;
9241 }
9242
9243 /* Likewise for PIC, though don't warn for this case. */
9244 if ((in_flags & EF_ARM_PIC) != (out_flags & EF_ARM_PIC))
9245 in_flags &= ~EF_ARM_PIC;
9246 }
9247
9248 elf_elfheader (obfd)->e_flags = in_flags;
9249 elf_flags_init (obfd) = TRUE;
9250
9251 /* Also copy the EI_OSABI field. */
9252 elf_elfheader (obfd)->e_ident[EI_OSABI] =
9253 elf_elfheader (ibfd)->e_ident[EI_OSABI];
9254
9255 /* Copy object attributes. */
9256 _bfd_elf_copy_obj_attributes (ibfd, obfd);
9257
9258 return TRUE;
9259 }
9260
9261 /* Values for Tag_ABI_PCS_R9_use. */
9262 enum
9263 {
9264 AEABI_R9_V6,
9265 AEABI_R9_SB,
9266 AEABI_R9_TLS,
9267 AEABI_R9_unused
9268 };
9269
9270 /* Values for Tag_ABI_PCS_RW_data. */
9271 enum
9272 {
9273 AEABI_PCS_RW_data_absolute,
9274 AEABI_PCS_RW_data_PCrel,
9275 AEABI_PCS_RW_data_SBrel,
9276 AEABI_PCS_RW_data_unused
9277 };
9278
9279 /* Values for Tag_ABI_enum_size. */
9280 enum
9281 {
9282 AEABI_enum_unused,
9283 AEABI_enum_short,
9284 AEABI_enum_wide,
9285 AEABI_enum_forced_wide
9286 };
9287
9288 /* Determine whether an object attribute tag takes an integer, a
9289 string or both. */
9290
9291 static int
9292 elf32_arm_obj_attrs_arg_type (int tag)
9293 {
9294 if (tag == Tag_compatibility)
9295 return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_STR_VAL;
9296 else if (tag == Tag_nodefaults)
9297 return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_NO_DEFAULT;
9298 else if (tag == Tag_CPU_raw_name || tag == Tag_CPU_name)
9299 return ATTR_TYPE_FLAG_STR_VAL;
9300 else if (tag < 32)
9301 return ATTR_TYPE_FLAG_INT_VAL;
9302 else
9303 return (tag & 1) != 0 ? ATTR_TYPE_FLAG_STR_VAL : ATTR_TYPE_FLAG_INT_VAL;
9304 }
9305
9306 /* The ABI defines that Tag_conformance should be emitted first, and that
9307 Tag_nodefaults should be second (if either is defined). This sets those
9308 two positions, and bumps up the position of all the remaining tags to
9309 compensate. */
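/* A concrete illustration (assuming the usual tag numbering, with
   Tag_nodefaults == 64 and Tag_conformance == 67): positions 4 and 5
   return tags 67 and 64, positions 6 through 65 return tags 4 through 63,
   positions 66 and 67 return tags 65 and 66, and tags from 68 upwards
   keep their own positions.  */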
9310 static int
9311 elf32_arm_obj_attrs_order (int num)
9312 {
9313 if (num == 4)
9314 return Tag_conformance;
9315 if (num == 5)
9316 return Tag_nodefaults;
9317 if ((num - 2) < Tag_nodefaults)
9318 return num - 2;
9319 if ((num - 1) < Tag_conformance)
9320 return num - 1;
9321 return num;
9322 }
9323
9324 /* Read the architecture from the Tag_also_compatible_with attribute, if any.
9325 Returns -1 if no architecture could be read. */
9326
9327 static int
9328 get_secondary_compatible_arch (bfd *abfd)
9329 {
9330 obj_attribute *attr =
9331 &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
9332
9333 /* Note: the tag and its argument below are uleb128 values, though
9334 currently-defined values fit in one byte for each. */
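/* In other words, a recognisable encoding looks like
   { Tag_CPU_arch, <arch>, 0 }, which matches what
   set_secondary_compatible_arch below writes.  */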
9335 if (attr->s
9336 && attr->s[0] == Tag_CPU_arch
9337 && (attr->s[1] & 128) != 128
9338 && attr->s[2] == 0)
9339 return attr->s[1];
9340
9341 /* This tag is "safely ignorable", so don't complain if it looks funny. */
9342 return -1;
9343 }
9344
9345 /* Set, or unset, the architecture of the Tag_also_compatible_with attribute.
9346 The tag is removed if ARCH is -1. */
9347
9348 static void
9349 set_secondary_compatible_arch (bfd *abfd, int arch)
9350 {
9351 obj_attribute *attr =
9352 &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
9353
9354 if (arch == -1)
9355 {
9356 attr->s = NULL;
9357 return;
9358 }
9359
9360 /* Note: the tag and its argument below are uleb128 values, though
9361 currently-defined values fit in one byte for each. */
9362 if (!attr->s)
9363 attr->s = bfd_alloc (abfd, 3);
9364 attr->s[0] = Tag_CPU_arch;
9365 attr->s[1] = arch;
9366 attr->s[2] = '\0';
9367 }
9368
9369 /* Combine two values for Tag_CPU_arch, taking secondary compatibility tags
9370 into account. */
9371
9372 static int
9373 tag_cpu_arch_combine (bfd *ibfd, int oldtag, int *secondary_compat_out,
9374 int newtag, int secondary_compat)
9375 {
9376 #define T(X) TAG_CPU_ARCH_##X
9377 int tagl, tagh, result;
9378 const int v6t2[] =
9379 {
9380 T(V6T2), /* PRE_V4. */
9381 T(V6T2), /* V4. */
9382 T(V6T2), /* V4T. */
9383 T(V6T2), /* V5T. */
9384 T(V6T2), /* V5TE. */
9385 T(V6T2), /* V5TEJ. */
9386 T(V6T2), /* V6. */
9387 T(V7), /* V6KZ. */
9388 T(V6T2) /* V6T2. */
9389 };
9390 const int v6k[] =
9391 {
9392 T(V6K), /* PRE_V4. */
9393 T(V6K), /* V4. */
9394 T(V6K), /* V4T. */
9395 T(V6K), /* V5T. */
9396 T(V6K), /* V5TE. */
9397 T(V6K), /* V5TEJ. */
9398 T(V6K), /* V6. */
9399 T(V6KZ), /* V6KZ. */
9400 T(V7), /* V6T2. */
9401 T(V6K) /* V6K. */
9402 };
9403 const int v7[] =
9404 {
9405 T(V7), /* PRE_V4. */
9406 T(V7), /* V4. */
9407 T(V7), /* V4T. */
9408 T(V7), /* V5T. */
9409 T(V7), /* V5TE. */
9410 T(V7), /* V5TEJ. */
9411 T(V7), /* V6. */
9412 T(V7), /* V6KZ. */
9413 T(V7), /* V6T2. */
9414 T(V7), /* V6K. */
9415 T(V7) /* V7. */
9416 };
9417 const int v6_m[] =
9418 {
9419 -1, /* PRE_V4. */
9420 -1, /* V4. */
9421 T(V6K), /* V4T. */
9422 T(V6K), /* V5T. */
9423 T(V6K), /* V5TE. */
9424 T(V6K), /* V5TEJ. */
9425 T(V6K), /* V6. */
9426 T(V6KZ), /* V6KZ. */
9427 T(V7), /* V6T2. */
9428 T(V6K), /* V6K. */
9429 T(V7), /* V7. */
9430 T(V6_M) /* V6_M. */
9431 };
9432 const int v6s_m[] =
9433 {
9434 -1, /* PRE_V4. */
9435 -1, /* V4. */
9436 T(V6K), /* V4T. */
9437 T(V6K), /* V5T. */
9438 T(V6K), /* V5TE. */
9439 T(V6K), /* V5TEJ. */
9440 T(V6K), /* V6. */
9441 T(V6KZ), /* V6KZ. */
9442 T(V7), /* V6T2. */
9443 T(V6K), /* V6K. */
9444 T(V7), /* V7. */
9445 T(V6S_M), /* V6_M. */
9446 T(V6S_M) /* V6S_M. */
9447 };
9448 const int v4t_plus_v6_m[] =
9449 {
9450 -1, /* PRE_V4. */
9451 -1, /* V4. */
9452 T(V4T), /* V4T. */
9453 T(V5T), /* V5T. */
9454 T(V5TE), /* V5TE. */
9455 T(V5TEJ), /* V5TEJ. */
9456 T(V6), /* V6. */
9457 T(V6KZ), /* V6KZ. */
9458 T(V6T2), /* V6T2. */
9459 T(V6K), /* V6K. */
9460 T(V7), /* V7. */
9461 T(V6_M), /* V6_M. */
9462 T(V6S_M), /* V6S_M. */
9463 T(V4T_PLUS_V6_M) /* V4T plus V6_M. */
9464 };
9465 const int *comb[] =
9466 {
9467 v6t2,
9468 v6k,
9469 v7,
9470 v6_m,
9471 v6s_m,
9472 /* Pseudo-architecture. */
9473 v4t_plus_v6_m
9474 };
9475
9476 /* Check we've not got a higher architecture than we know about. */
9477
9478 if (oldtag >= MAX_TAG_CPU_ARCH || newtag >= MAX_TAG_CPU_ARCH)
9479 {
9480 _bfd_error_handler (_("error: %B: Unknown CPU architecture"), ibfd);
9481 return -1;
9482 }
9483
9484 /* Override old tag if we have a Tag_also_compatible_with on the output. */
9485
9486 if ((oldtag == T(V6_M) && *secondary_compat_out == T(V4T))
9487 || (oldtag == T(V4T) && *secondary_compat_out == T(V6_M)))
9488 oldtag = T(V4T_PLUS_V6_M);
9489
9490 /* And override the new tag if we have a Tag_also_compatible_with on the
9491 input. */
9492
9493 if ((newtag == T(V6_M) && secondary_compat == T(V4T))
9494 || (newtag == T(V4T) && secondary_compat == T(V6_M)))
9495 newtag = T(V4T_PLUS_V6_M);
9496
9497 tagl = (oldtag < newtag) ? oldtag : newtag;
9498 result = tagh = (oldtag > newtag) ? oldtag : newtag;
9499
9500 /* Architectures before V6KZ add features monotonically. */
9501 if (tagh <= TAG_CPU_ARCH_V6KZ)
9502 return result;
9503
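/* For example, merging V6T2 with V6K looks up the v6k row at its V6T2
   entry, which the table above records as V7.  */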
9504 result = comb[tagh - T(V6T2)][tagl];
9505
9506 /* Use Tag_CPU_arch == V4T and Tag_also_compatible_with (Tag_CPU_arch V6_M)
9507 as the canonical version. */
9508 if (result == T(V4T_PLUS_V6_M))
9509 {
9510 result = T(V4T);
9511 *secondary_compat_out = T(V6_M);
9512 }
9513 else
9514 *secondary_compat_out = -1;
9515
9516 if (result == -1)
9517 {
9518 _bfd_error_handler (_("error: %B: Conflicting CPU architectures %d/%d"),
9519 ibfd, oldtag, newtag);
9520 return -1;
9521 }
9522
9523 return result;
9524 #undef T
9525 }
9526
9527 /* Merge EABI object attributes from IBFD into OBFD. Raise an error if there
9528 are conflicting attributes. */
9529
9530 static bfd_boolean
9531 elf32_arm_merge_eabi_attributes (bfd *ibfd, bfd *obfd)
9532 {
9533 obj_attribute *in_attr;
9534 obj_attribute *out_attr;
9535 obj_attribute_list *in_list;
9536 obj_attribute_list *out_list;
9537 obj_attribute_list **out_listp;
9538 /* Some tags have 0 = don't care, 1 = strong requirement,
9539 2 = weak requirement. */
9540 static const int order_021[3] = {0, 2, 1};
9541 /* For use with Tag_VFP_arch. */
9542 static const int order_01243[5] = {0, 1, 2, 4, 3};
9543 int i;
9544 bfd_boolean result = TRUE;
9545
9546 /* Skip the linker stubs file. This preserves previous behavior
9547 of accepting unknown attributes in the first input file - but
9548 is that a bug? */
9549 if (ibfd->flags & BFD_LINKER_CREATED)
9550 return TRUE;
9551
9552 if (!elf_known_obj_attributes_proc (obfd)[0].i)
9553 {
9554 /* This is the first object. Copy the attributes. */
9555 _bfd_elf_copy_obj_attributes (ibfd, obfd);
9556
9557 /* Use the Tag_null value to indicate the attributes have been
9558 initialized. */
9559 elf_known_obj_attributes_proc (obfd)[0].i = 1;
9560
9561 return TRUE;
9562 }
9563
9564 in_attr = elf_known_obj_attributes_proc (ibfd);
9565 out_attr = elf_known_obj_attributes_proc (obfd);
9566 /* This needs to happen before Tag_ABI_FP_number_model is merged. */
9567 if (in_attr[Tag_ABI_VFP_args].i != out_attr[Tag_ABI_VFP_args].i)
9568 {
9569 /* Ignore mismatches if the object doesn't use floating point. */
9570 if (out_attr[Tag_ABI_FP_number_model].i == 0)
9571 out_attr[Tag_ABI_VFP_args].i = in_attr[Tag_ABI_VFP_args].i;
9572 else if (in_attr[Tag_ABI_FP_number_model].i != 0)
9573 {
9574 _bfd_error_handler
9575 (_("error: %B uses VFP register arguments, %B does not"),
9576 ibfd, obfd);
9577 result = FALSE;
9578 }
9579 }
9580
9581 for (i = 4; i < NUM_KNOWN_OBJ_ATTRIBUTES; i++)
9582 {
9583 /* Merge this attribute with existing attributes. */
9584 switch (i)
9585 {
9586 case Tag_CPU_raw_name:
9587 case Tag_CPU_name:
9588 /* These are merged after Tag_CPU_arch. */
9589 break;
9590
9591 case Tag_ABI_optimization_goals:
9592 case Tag_ABI_FP_optimization_goals:
9593 /* Use the first value seen. */
9594 break;
9595
9596 case Tag_CPU_arch:
9597 {
9598 int secondary_compat = -1, secondary_compat_out = -1;
9599 unsigned int saved_out_attr = out_attr[i].i;
9600 static const char *name_table[] = {
9601 /* These aren't real CPU names, but we can't guess
9602 that from the architecture version alone. */
9603 "Pre v4",
9604 "ARM v4",
9605 "ARM v4T",
9606 "ARM v5T",
9607 "ARM v5TE",
9608 "ARM v5TEJ",
9609 "ARM v6",
9610 "ARM v6KZ",
9611 "ARM v6T2",
9612 "ARM v6K",
9613 "ARM v7",
9614 "ARM v6-M",
9615 "ARM v6S-M"
9616 };
9617
9618 /* Merge Tag_CPU_arch and Tag_also_compatible_with. */
9619 secondary_compat = get_secondary_compatible_arch (ibfd);
9620 secondary_compat_out = get_secondary_compatible_arch (obfd);
9621 out_attr[i].i = tag_cpu_arch_combine (ibfd, out_attr[i].i,
9622 &secondary_compat_out,
9623 in_attr[i].i,
9624 secondary_compat);
9625 set_secondary_compatible_arch (obfd, secondary_compat_out);
9626
9627 /* Merge Tag_CPU_name and Tag_CPU_raw_name. */
9628 if (out_attr[i].i == saved_out_attr)
9629 ; /* Leave the names alone. */
9630 else if (out_attr[i].i == in_attr[i].i)
9631 {
9632 /* The output architecture has been changed to match the
9633 input architecture. Use the input names. */
9634 out_attr[Tag_CPU_name].s = in_attr[Tag_CPU_name].s
9635 ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_name].s)
9636 : NULL;
9637 out_attr[Tag_CPU_raw_name].s = in_attr[Tag_CPU_raw_name].s
9638 ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_raw_name].s)
9639 : NULL;
9640 }
9641 else
9642 {
9643 out_attr[Tag_CPU_name].s = NULL;
9644 out_attr[Tag_CPU_raw_name].s = NULL;
9645 }
9646
9647 /* If we still don't have a value for Tag_CPU_name,
9648 make one up now. Tag_CPU_raw_name remains blank. */
9649 if (out_attr[Tag_CPU_name].s == NULL
9650 && out_attr[i].i < ARRAY_SIZE (name_table))
9651 out_attr[Tag_CPU_name].s =
9652 _bfd_elf_attr_strdup (obfd, name_table[out_attr[i].i]);
9653 }
9654 break;
9655
9656 case Tag_ARM_ISA_use:
9657 case Tag_THUMB_ISA_use:
9658 case Tag_WMMX_arch:
9659 case Tag_Advanced_SIMD_arch:
9660 /* ??? Do Advanced_SIMD (NEON) and WMMX conflict? */
9661 case Tag_ABI_FP_rounding:
9662 case Tag_ABI_FP_exceptions:
9663 case Tag_ABI_FP_user_exceptions:
9664 case Tag_ABI_FP_number_model:
9665 case Tag_VFP_HP_extension:
9666 case Tag_CPU_unaligned_access:
9667 case Tag_T2EE_use:
9668 case Tag_Virtualization_use:
9669 case Tag_MPextension_use:
9670 /* Use the largest value specified. */
9671 if (in_attr[i].i > out_attr[i].i)
9672 out_attr[i].i = in_attr[i].i;
9673 break;
9674
9675 case Tag_ABI_align8_preserved:
9676 case Tag_ABI_PCS_RO_data:
9677 /* Use the smallest value specified. */
9678 if (in_attr[i].i < out_attr[i].i)
9679 out_attr[i].i = in_attr[i].i;
9680 break;
9681
9682 case Tag_ABI_align8_needed:
9683 if ((in_attr[i].i > 0 || out_attr[i].i > 0)
9684 && (in_attr[Tag_ABI_align8_preserved].i == 0
9685 || out_attr[Tag_ABI_align8_preserved].i == 0))
9686 {
9687 /* This error message should be enabled once all non-conformant
9688 binaries in the toolchain have had the attributes set
9689 properly.
9690 _bfd_error_handler
9691 (_("error: %B: 8-byte data alignment conflicts with %B"),
9692 obfd, ibfd);
9693 result = FALSE; */
9694 }
9695 /* Fall through. */
9696 case Tag_ABI_FP_denormal:
9697 case Tag_ABI_PCS_GOT_use:
9698 /* Use the "greatest" from the sequence 0, 2, 1, or the largest
9699 value if greater than 2 (for future-proofing). */
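/* For instance, an input of 1 (strong) merged with an output of 2 (weak)
   yields 1, because order_021[1] (== 2) outranks order_021[2] (== 1).  */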
9700 if ((in_attr[i].i > 2 && in_attr[i].i > out_attr[i].i)
9701 || (in_attr[i].i <= 2 && out_attr[i].i <= 2
9702 && order_021[in_attr[i].i] > order_021[out_attr[i].i]))
9703 out_attr[i].i = in_attr[i].i;
9704 break;
9705
9706
9707 case Tag_CPU_arch_profile:
9708 if (out_attr[i].i != in_attr[i].i)
9709 {
9710 /* 0 will merge with anything.
9711 'A' and 'S' merge to 'A'.
9712 'R' and 'S' merge to 'R'.
9713 'M' and 'A|R|S' is an error. */
9714 if (out_attr[i].i == 0
9715 || (out_attr[i].i == 'S'
9716 && (in_attr[i].i == 'A' || in_attr[i].i == 'R')))
9717 out_attr[i].i = in_attr[i].i;
9718 else if (in_attr[i].i == 0
9719 || (in_attr[i].i == 'S'
9720 && (out_attr[i].i == 'A' || out_attr[i].i == 'R')))
9721 ; /* Do nothing. */
9722 else
9723 {
9724 _bfd_error_handler
9725 (_("error: %B: Conflicting architecture profiles %c/%c"),
9726 ibfd,
9727 in_attr[i].i ? in_attr[i].i : '0',
9728 out_attr[i].i ? out_attr[i].i : '0');
9729 result = FALSE;
9730 }
9731 }
9732 break;
9733 case Tag_VFP_arch:
9734 /* Use the "greatest" from the sequence 0, 1, 2, 4, 3, or the
9735 largest value if greater than 4 (for future-proofing). */
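/* So an input of 4 merged with an output of 3 leaves 3 in place, since
   order_01243[3] (== 4) outranks order_01243[4] (== 3).  */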
9736 if ((in_attr[i].i > 4 && in_attr[i].i > out_attr[i].i)
9737 || (in_attr[i].i <= 4 && out_attr[i].i <= 4
9738 && order_01243[in_attr[i].i] > order_01243[out_attr[i].i]))
9739 out_attr[i].i = in_attr[i].i;
9740 break;
9741 case Tag_PCS_config:
9742 if (out_attr[i].i == 0)
9743 out_attr[i].i = in_attr[i].i;
9744 else if (in_attr[i].i != 0 && out_attr[i].i != 0)
9745 {
9746 /* It's sometimes ok to mix different configs, so this is only
9747 a warning. */
9748 _bfd_error_handler
9749 (_("Warning: %B: Conflicting platform configuration"), ibfd);
9750 }
9751 break;
9752 case Tag_ABI_PCS_R9_use:
9753 if (in_attr[i].i != out_attr[i].i
9754 && out_attr[i].i != AEABI_R9_unused
9755 && in_attr[i].i != AEABI_R9_unused)
9756 {
9757 _bfd_error_handler
9758 (_("error: %B: Conflicting use of R9"), ibfd);
9759 result = FALSE;
9760 }
9761 if (out_attr[i].i == AEABI_R9_unused)
9762 out_attr[i].i = in_attr[i].i;
9763 break;
9764 case Tag_ABI_PCS_RW_data:
9765 if (in_attr[i].i == AEABI_PCS_RW_data_SBrel
9766 && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_SB
9767 && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_unused)
9768 {
9769 _bfd_error_handler
9770 (_("error: %B: SB relative addressing conflicts with use of R9"),
9771 ibfd);
9772 result = FALSE;
9773 }
9774 /* Use the smallest value specified. */
9775 if (in_attr[i].i < out_attr[i].i)
9776 out_attr[i].i = in_attr[i].i;
9777 break;
9778 case Tag_ABI_PCS_wchar_t:
9779 if (out_attr[i].i && in_attr[i].i && out_attr[i].i != in_attr[i].i
9780 && !elf_arm_tdata (obfd)->no_wchar_size_warning)
9781 {
9782 _bfd_error_handler
9783 (_("warning: %B uses %u-byte wchar_t yet the output is to use %u-byte wchar_t; use of wchar_t values across objects may fail"),
9784 ibfd, in_attr[i].i, out_attr[i].i);
9785 }
9786 else if (in_attr[i].i && !out_attr[i].i)
9787 out_attr[i].i = in_attr[i].i;
9788 break;
9789 case Tag_ABI_enum_size:
9790 if (in_attr[i].i != AEABI_enum_unused)
9791 {
9792 if (out_attr[i].i == AEABI_enum_unused
9793 || out_attr[i].i == AEABI_enum_forced_wide)
9794 {
9795 /* The existing object is compatible with anything.
9796 Use whatever requirements the new object has. */
9797 out_attr[i].i = in_attr[i].i;
9798 }
9799 else if (in_attr[i].i != AEABI_enum_forced_wide
9800 && out_attr[i].i != in_attr[i].i
9801 && !elf_arm_tdata (obfd)->no_enum_size_warning)
9802 {
9803 static const char *aeabi_enum_names[] =
9804 { "", "variable-size", "32-bit", "" };
9805 const char *in_name =
9806 in_attr[i].i < ARRAY_SIZE(aeabi_enum_names)
9807 ? aeabi_enum_names[in_attr[i].i]
9808 : "<unknown>";
9809 const char *out_name =
9810 out_attr[i].i < ARRAY_SIZE(aeabi_enum_names)
9811 ? aeabi_enum_names[out_attr[i].i]
9812 : "<unknown>";
9813 _bfd_error_handler
9814 (_("warning: %B uses %s enums yet the output is to use %s enums; use of enum values across objects may fail"),
9815 ibfd, in_name, out_name);
9816 }
9817 }
9818 break;
9819 case Tag_ABI_VFP_args:
9820 /* Already done. */
9821 break;
9822 case Tag_ABI_WMMX_args:
9823 if (in_attr[i].i != out_attr[i].i)
9824 {
9825 _bfd_error_handler
9826 (_("error: %B uses iWMMXt register arguments, %B does not"),
9827 ibfd, obfd);
9828 result = FALSE;
9829 }
9830 break;
9831 case Tag_compatibility:
9832 /* Merged in target-independent code. */
9833 break;
9834 case Tag_ABI_HardFP_use:
9835 /* 1 (SP) and 2 (DP) conflict, so combine to 3 (SP & DP). */
9836 if ((in_attr[i].i == 1 && out_attr[i].i == 2)
9837 || (in_attr[i].i == 2 && out_attr[i].i == 1))
9838 out_attr[i].i = 3;
9839 else if (in_attr[i].i > out_attr[i].i)
9840 out_attr[i].i = in_attr[i].i;
9841 break;
9842 case Tag_ABI_FP_16bit_format:
9843 if (in_attr[i].i != 0 && out_attr[i].i != 0)
9844 {
9845 if (in_attr[i].i != out_attr[i].i)
9846 {
9847 _bfd_error_handler
9848 (_("error: fp16 format mismatch between %B and %B"),
9849 ibfd, obfd);
9850 result = FALSE;
9851 }
9852 }
9853 if (in_attr[i].i != 0)
9854 out_attr[i].i = in_attr[i].i;
9855 break;
9856
9857 case Tag_nodefaults:
9858 /* This tag is set if it exists, but the value is unused (and is
9859 typically zero). We don't actually need to do anything here -
9860 the merge happens automatically when the type flags are merged
9861 below. */
9862 break;
9863 case Tag_also_compatible_with:
9864 /* Already done in Tag_CPU_arch. */
9865 break;
9866 case Tag_conformance:
9867 /* Keep the attribute if it matches. Throw it away otherwise.
9868 No attribute means no claim to conform. */
9869 if (!in_attr[i].s || !out_attr[i].s
9870 || strcmp (in_attr[i].s, out_attr[i].s) != 0)
9871 out_attr[i].s = NULL;
9872 break;
9873
9874 default:
9875 {
9876 bfd *err_bfd = NULL;
9877
9878 /* The "known_obj_attributes" table does contain some undefined
9879 attributes. Ensure that they are unused. */
9880 if (out_attr[i].i != 0 || out_attr[i].s != NULL)
9881 err_bfd = obfd;
9882 else if (in_attr[i].i != 0 || in_attr[i].s != NULL)
9883 err_bfd = ibfd;
9884
9885 if (err_bfd != NULL)
9886 {
9887 /* Attribute numbers >=64 (mod 128) can be safely ignored. */
9888 if ((i & 127) < 64)
9889 {
9890 _bfd_error_handler
9891 (_("%B: Unknown mandatory EABI object attribute %d"),
9892 err_bfd, i);
9893 bfd_set_error (bfd_error_bad_value);
9894 result = FALSE;
9895 }
9896 else
9897 {
9898 _bfd_error_handler
9899 (_("Warning: %B: Unknown EABI object attribute %d"),
9900 err_bfd, i);
9901 }
9902 }
9903
9904 /* Only pass on attributes that match in both inputs. */
9905 if (in_attr[i].i != out_attr[i].i
9906 || in_attr[i].s != out_attr[i].s
9907 || (in_attr[i].s != NULL && out_attr[i].s != NULL
9908 && strcmp (in_attr[i].s, out_attr[i].s) != 0))
9909 {
9910 out_attr[i].i = 0;
9911 out_attr[i].s = NULL;
9912 }
9913 }
9914 }
9915
9916 /* If out_attr was copied from in_attr then it won't have a type yet. */
9917 if (in_attr[i].type && !out_attr[i].type)
9918 out_attr[i].type = in_attr[i].type;
9919 }
9920
9921 /* Merge Tag_compatibility attributes and any common GNU ones. */
9922 _bfd_elf_merge_object_attributes (ibfd, obfd);
9923
9924 /* Check for any attributes not known on ARM. */
9925 in_list = elf_other_obj_attributes_proc (ibfd);
9926 out_listp = &elf_other_obj_attributes_proc (obfd);
9927 out_list = *out_listp;
9928
9929 for (; in_list || out_list; )
9930 {
9931 bfd *err_bfd = NULL;
9932 int err_tag = 0;
9933
9934 /* The tags for each list are in numerical order. */
9935 /* If the tags are equal, then merge. */
9936 if (out_list && (!in_list || in_list->tag > out_list->tag))
9937 {
9938 /* This attribute only exists in obfd. We can't merge, and we don't
9939 know what the tag means, so delete it. */
9940 err_bfd = obfd;
9941 err_tag = out_list->tag;
9942 *out_listp = out_list->next;
9943 out_list = *out_listp;
9944 }
9945 else if (in_list && (!out_list || in_list->tag < out_list->tag))
9946 {
9947 /* This attribute only exists in ibfd. We can't merge, and we don't
9948 know what the tag means, so ignore it. */
9949 err_bfd = ibfd;
9950 err_tag = in_list->tag;
9951 in_list = in_list->next;
9952 }
9953 else /* The tags are equal. */
9954 {
9955 /* At present, all attributes in the list are unknown, and
9956 therefore can't be merged meaningfully. */
9957 err_bfd = obfd;
9958 err_tag = out_list->tag;
9959
9960 /* Only pass on attributes that match in both inputs. */
9961 if (in_list->attr.i != out_list->attr.i
9962 || in_list->attr.s != out_list->attr.s
9963 || (in_list->attr.s && out_list->attr.s
9964 && strcmp (in_list->attr.s, out_list->attr.s) != 0))
9965 {
9966 /* No match. Delete the attribute. */
9967 *out_listp = out_list->next;
9968 out_list = *out_listp;
9969 }
9970 else
9971 {
9972 /* Matched. Keep the attribute and move to the next. */
9973 out_list = out_list->next;
9974 in_list = in_list->next;
9975 }
9976 }
9977
9978 if (err_bfd)
9979 {
9980 /* Attribute numbers >=64 (mod 128) can be safely ignored. */
9981 if ((err_tag & 127) < 64)
9982 {
9983 _bfd_error_handler
9984 (_("%B: Unknown mandatory EABI object attribute %d"),
9985 err_bfd, err_tag);
9986 bfd_set_error (bfd_error_bad_value);
9987 result = FALSE;
9988 }
9989 else
9990 {
9991 _bfd_error_handler
9992 (_("Warning: %B: Unknown EABI object attribute %d"),
9993 err_bfd, err_tag);
9994 }
9995 }
9996 }
9997 return result;
9998 }
9999
10000
10001 /* Return TRUE if the two EABI versions are incompatible. */
10002
10003 static bfd_boolean
10004 elf32_arm_versions_compatible (unsigned iver, unsigned over)
10005 {
10006 /* v4 and v5 are the same spec before and after it was released,
10007 so allow mixing them. */
10008 if ((iver == EF_ARM_EABI_VER4 && over == EF_ARM_EABI_VER5)
10009 || (iver == EF_ARM_EABI_VER5 && over == EF_ARM_EABI_VER4))
10010 return TRUE;
10011
10012 return (iver == over);
10013 }
10014
10015 /* Merge backend specific data from an object file to the output
10016 object file when linking. */
10017
10018 static bfd_boolean
10019 elf32_arm_merge_private_bfd_data (bfd * ibfd, bfd * obfd)
10020 {
10021 flagword out_flags;
10022 flagword in_flags;
10023 bfd_boolean flags_compatible = TRUE;
10024 asection *sec;
10025
10026 /* Check if we have the same endianness. */
10027 if (! _bfd_generic_verify_endian_match (ibfd, obfd))
10028 return FALSE;
10029
10030 if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
10031 return TRUE;
10032
10033 if (!elf32_arm_merge_eabi_attributes (ibfd, obfd))
10034 return FALSE;
10035
10036 /* The input BFD must have had its flags initialised. */
10037 /* The following seems bogus to me -- The flags are initialized in
10038 the assembler but I don't think an elf_flags_init field is
10039 written into the object. */
10040 /* BFD_ASSERT (elf_flags_init (ibfd)); */
10041
10042 in_flags = elf_elfheader (ibfd)->e_flags;
10043 out_flags = elf_elfheader (obfd)->e_flags;
10044
10045 /* In theory there is no reason why we couldn't handle this. However
10046 in practice it isn't even close to working and there is no real
10047 reason to want it. */
10048 if (EF_ARM_EABI_VERSION (in_flags) >= EF_ARM_EABI_VER4
10049 && !(ibfd->flags & DYNAMIC)
10050 && (in_flags & EF_ARM_BE8))
10051 {
10052 _bfd_error_handler (_("error: %B is already in final BE8 format"),
10053 ibfd);
10054 return FALSE;
10055 }
10056
10057 if (!elf_flags_init (obfd))
10058 {
10059 /* If the input is the default architecture and had the default
10060 flags then do not bother setting the flags for the output
10061 architecture, instead allow future merges to do this. If no
10062 future merges ever set these flags then they will retain their
10063 uninitialised values which, surprise surprise, correspond
10064 to the default values. */
10065 if (bfd_get_arch_info (ibfd)->the_default
10066 && elf_elfheader (ibfd)->e_flags == 0)
10067 return TRUE;
10068
10069 elf_flags_init (obfd) = TRUE;
10070 elf_elfheader (obfd)->e_flags = in_flags;
10071
10072 if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
10073 && bfd_get_arch_info (obfd)->the_default)
10074 return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd), bfd_get_mach (ibfd));
10075
10076 return TRUE;
10077 }
10078
10079 /* Determine what should happen if the input ARM architecture
10080 does not match the output ARM architecture. */
10081 if (! bfd_arm_merge_machines (ibfd, obfd))
10082 return FALSE;
10083
10084 /* Identical flags must be compatible. */
10085 if (in_flags == out_flags)
10086 return TRUE;
10087
10088 /* Check to see if the input BFD actually contains any sections. If
10089 not, its flags may not have been initialised either, but it
10090 cannot actually cause any incompatibility. Do not short-circuit
10091 dynamic objects; their section list may be emptied by
10092 elf_link_add_object_symbols.
10093
10094 Also check to see if there are no code sections in the input.
10095 In this case there is no need to check for code specific flags.
10096 XXX - do we need to worry about floating-point format compatibility
10097 in data sections ? */
10098 if (!(ibfd->flags & DYNAMIC))
10099 {
10100 bfd_boolean null_input_bfd = TRUE;
10101 bfd_boolean only_data_sections = TRUE;
10102
10103 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
10104 {
10105 /* Ignore synthetic glue sections. */
10106 if (strcmp (sec->name, ".glue_7")
10107 && strcmp (sec->name, ".glue_7t"))
10108 {
10109 if ((bfd_get_section_flags (ibfd, sec)
10110 & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
10111 == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
10112 only_data_sections = FALSE;
10113
10114 null_input_bfd = FALSE;
10115 break;
10116 }
10117 }
10118
10119 if (null_input_bfd || only_data_sections)
10120 return TRUE;
10121 }
10122
10123 /* Complain about various flag mismatches. */
10124 if (!elf32_arm_versions_compatible (EF_ARM_EABI_VERSION (in_flags),
10125 EF_ARM_EABI_VERSION (out_flags)))
10126 {
10127 _bfd_error_handler
10128 (_("error: Source object %B has EABI version %d, but target %B has EABI version %d"),
10129 ibfd, obfd,
10130 (in_flags & EF_ARM_EABIMASK) >> 24,
10131 (out_flags & EF_ARM_EABIMASK) >> 24);
10132 return FALSE;
10133 }
10134
10135 /* Not sure what needs to be checked for EABI versions >= 1. */
10136 /* VxWorks libraries do not use these flags. */
10137 if (get_elf_backend_data (obfd) != &elf32_arm_vxworks_bed
10138 && get_elf_backend_data (ibfd) != &elf32_arm_vxworks_bed
10139 && EF_ARM_EABI_VERSION (in_flags) == EF_ARM_EABI_UNKNOWN)
10140 {
10141 if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
10142 {
10143 _bfd_error_handler
10144 (_("error: %B is compiled for APCS-%d, whereas target %B uses APCS-%d"),
10145 ibfd, obfd,
10146 in_flags & EF_ARM_APCS_26 ? 26 : 32,
10147 out_flags & EF_ARM_APCS_26 ? 26 : 32);
10148 flags_compatible = FALSE;
10149 }
10150
10151 if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
10152 {
10153 if (in_flags & EF_ARM_APCS_FLOAT)
10154 _bfd_error_handler
10155 (_("error: %B passes floats in float registers, whereas %B passes them in integer registers"),
10156 ibfd, obfd);
10157 else
10158 _bfd_error_handler
10159 (_("error: %B passes floats in integer registers, whereas %B passes them in float registers"),
10160 ibfd, obfd);
10161
10162 flags_compatible = FALSE;
10163 }
10164
10165 if ((in_flags & EF_ARM_VFP_FLOAT) != (out_flags & EF_ARM_VFP_FLOAT))
10166 {
10167 if (in_flags & EF_ARM_VFP_FLOAT)
10168 _bfd_error_handler
10169 (_("error: %B uses VFP instructions, whereas %B does not"),
10170 ibfd, obfd);
10171 else
10172 _bfd_error_handler
10173 (_("error: %B uses FPA instructions, whereas %B does not"),
10174 ibfd, obfd);
10175
10176 flags_compatible = FALSE;
10177 }
10178
10179 if ((in_flags & EF_ARM_MAVERICK_FLOAT) != (out_flags & EF_ARM_MAVERICK_FLOAT))
10180 {
10181 if (in_flags & EF_ARM_MAVERICK_FLOAT)
10182 _bfd_error_handler
10183 (_("error: %B uses Maverick instructions, whereas %B does not"),
10184 ibfd, obfd);
10185 else
10186 _bfd_error_handler
10187 (_("error: %B does not use Maverick instructions, whereas %B does"),
10188 ibfd, obfd);
10189
10190 flags_compatible = FALSE;
10191 }
10192
10193 #ifdef EF_ARM_SOFT_FLOAT
10194 if ((in_flags & EF_ARM_SOFT_FLOAT) != (out_flags & EF_ARM_SOFT_FLOAT))
10195 {
10196 /* We can allow interworking between code that is VFP format
10197 layout, and uses either soft float or integer regs for
10198 passing floating point arguments and results. We already
10199 know that the APCS_FLOAT flags match; similarly for VFP
10200 flags. */
10201 if ((in_flags & EF_ARM_APCS_FLOAT) != 0
10202 || (in_flags & EF_ARM_VFP_FLOAT) == 0)
10203 {
10204 if (in_flags & EF_ARM_SOFT_FLOAT)
10205 _bfd_error_handler
10206 (_("error: %B uses software FP, whereas %B uses hardware FP"),
10207 ibfd, obfd);
10208 else
10209 _bfd_error_handler
10210 (_("error: %B uses hardware FP, whereas %B uses software FP"),
10211 ibfd, obfd);
10212
10213 flags_compatible = FALSE;
10214 }
10215 }
10216 #endif
10217
10218 /* Interworking mismatch is only a warning. */
10219 if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
10220 {
10221 if (in_flags & EF_ARM_INTERWORK)
10222 {
10223 _bfd_error_handler
10224 (_("Warning: %B supports interworking, whereas %B does not"),
10225 ibfd, obfd);
10226 }
10227 else
10228 {
10229 _bfd_error_handler
10230 (_("Warning: %B does not support interworking, whereas %B does"),
10231 ibfd, obfd);
10232 }
10233 }
10234 }
10235
10236 return flags_compatible;
10237 }
10238
10239 /* Display the flags field. */
10240
10241 static bfd_boolean
10242 elf32_arm_print_private_bfd_data (bfd *abfd, void * ptr)
10243 {
10244 FILE * file = (FILE *) ptr;
10245 unsigned long flags;
10246
10247 BFD_ASSERT (abfd != NULL && ptr != NULL);
10248
10249 /* Print normal ELF private data. */
10250 _bfd_elf_print_private_bfd_data (abfd, ptr);
10251
10252 flags = elf_elfheader (abfd)->e_flags;
10253 /* Ignore init flag - it may not be set, despite the flags field
10254 containing valid data. */
10255
10256 /* xgettext:c-format */
10257 fprintf (file, _("private flags = %lx:"), elf_elfheader (abfd)->e_flags);
10258
10259 switch (EF_ARM_EABI_VERSION (flags))
10260 {
10261 case EF_ARM_EABI_UNKNOWN:
10262 /* The following flag bits are GNU extensions and not part of the
10263 official ARM ELF extended ABI. Hence they are only decoded if
10264 the EABI version is not set. */
10265 if (flags & EF_ARM_INTERWORK)
10266 fprintf (file, _(" [interworking enabled]"));
10267
10268 if (flags & EF_ARM_APCS_26)
10269 fprintf (file, " [APCS-26]");
10270 else
10271 fprintf (file, " [APCS-32]");
10272
10273 if (flags & EF_ARM_VFP_FLOAT)
10274 fprintf (file, _(" [VFP float format]"));
10275 else if (flags & EF_ARM_MAVERICK_FLOAT)
10276 fprintf (file, _(" [Maverick float format]"));
10277 else
10278 fprintf (file, _(" [FPA float format]"));
10279
10280 if (flags & EF_ARM_APCS_FLOAT)
10281 fprintf (file, _(" [floats passed in float registers]"));
10282
10283 if (flags & EF_ARM_PIC)
10284 fprintf (file, _(" [position independent]"));
10285
10286 if (flags & EF_ARM_NEW_ABI)
10287 fprintf (file, _(" [new ABI]"));
10288
10289 if (flags & EF_ARM_OLD_ABI)
10290 fprintf (file, _(" [old ABI]"));
10291
10292 if (flags & EF_ARM_SOFT_FLOAT)
10293 fprintf (file, _(" [software FP]"));
10294
10295 flags &= ~(EF_ARM_INTERWORK | EF_ARM_APCS_26 | EF_ARM_APCS_FLOAT
10296 | EF_ARM_PIC | EF_ARM_NEW_ABI | EF_ARM_OLD_ABI
10297 | EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT
10298 | EF_ARM_MAVERICK_FLOAT);
10299 break;
10300
10301 case EF_ARM_EABI_VER1:
10302 fprintf (file, _(" [Version1 EABI]"));
10303
10304 if (flags & EF_ARM_SYMSARESORTED)
10305 fprintf (file, _(" [sorted symbol table]"));
10306 else
10307 fprintf (file, _(" [unsorted symbol table]"));
10308
10309 flags &= ~ EF_ARM_SYMSARESORTED;
10310 break;
10311
10312 case EF_ARM_EABI_VER2:
10313 fprintf (file, _(" [Version2 EABI]"));
10314
10315 if (flags & EF_ARM_SYMSARESORTED)
10316 fprintf (file, _(" [sorted symbol table]"));
10317 else
10318 fprintf (file, _(" [unsorted symbol table]"));
10319
10320 if (flags & EF_ARM_DYNSYMSUSESEGIDX)
10321 fprintf (file, _(" [dynamic symbols use segment index]"));
10322
10323 if (flags & EF_ARM_MAPSYMSFIRST)
10324 fprintf (file, _(" [mapping symbols precede others]"));
10325
10326 flags &= ~(EF_ARM_SYMSARESORTED | EF_ARM_DYNSYMSUSESEGIDX
10327 | EF_ARM_MAPSYMSFIRST);
10328 break;
10329
10330 case EF_ARM_EABI_VER3:
10331 fprintf (file, _(" [Version3 EABI]"));
10332 break;
10333
10334 case EF_ARM_EABI_VER4:
10335 fprintf (file, _(" [Version4 EABI]"));
10336 goto eabi;
10337
10338 case EF_ARM_EABI_VER5:
10339 fprintf (file, _(" [Version5 EABI]"));
10340 eabi:
10341 if (flags & EF_ARM_BE8)
10342 fprintf (file, _(" [BE8]"));
10343
10344 if (flags & EF_ARM_LE8)
10345 fprintf (file, _(" [LE8]"));
10346
10347 flags &= ~(EF_ARM_LE8 | EF_ARM_BE8);
10348 break;
10349
10350 default:
10351 fprintf (file, _(" <EABI version unrecognised>"));
10352 break;
10353 }
10354
10355 flags &= ~ EF_ARM_EABIMASK;
10356
10357 if (flags & EF_ARM_RELEXEC)
10358 fprintf (file, _(" [relocatable executable]"));
10359
10360 if (flags & EF_ARM_HASENTRY)
10361 fprintf (file, _(" [has entry point]"));
10362
10363 flags &= ~ (EF_ARM_RELEXEC | EF_ARM_HASENTRY);
10364
10365 if (flags)
10366 fprintf (file, _("<Unrecognised flag bits set>"));
10367
10368 fputc ('\n', file);
10369
10370 return TRUE;
10371 }
10372
10373 static int
10374 elf32_arm_get_symbol_type (Elf_Internal_Sym * elf_sym, int type)
10375 {
10376 switch (ELF_ST_TYPE (elf_sym->st_info))
10377 {
10378 case STT_ARM_TFUNC:
10379 return ELF_ST_TYPE (elf_sym->st_info);
10380
10381 case STT_ARM_16BIT:
10382 /* If the symbol is not an object, return the STT_ARM_16BIT flag.
10383 This allows us to distinguish between data used by Thumb instructions
10384 and non-data (which is probably code) inside Thumb regions of an
10385 executable. */
10386 if (type != STT_OBJECT && type != STT_TLS)
10387 return ELF_ST_TYPE (elf_sym->st_info);
10388 break;
10389
10390 default:
10391 break;
10392 }
10393
10394 return type;
10395 }
10396
10397 static asection *
10398 elf32_arm_gc_mark_hook (asection *sec,
10399 struct bfd_link_info *info,
10400 Elf_Internal_Rela *rel,
10401 struct elf_link_hash_entry *h,
10402 Elf_Internal_Sym *sym)
10403 {
10404 if (h != NULL)
10405 switch (ELF32_R_TYPE (rel->r_info))
10406 {
10407 case R_ARM_GNU_VTINHERIT:
10408 case R_ARM_GNU_VTENTRY:
10409 return NULL;
10410 }
10411
10412 return _bfd_elf_gc_mark_hook (sec, info, rel, h, sym);
10413 }
10414
10415 /* Update the got entry reference counts for the section being removed. */
10416
10417 static bfd_boolean
10418 elf32_arm_gc_sweep_hook (bfd * abfd,
10419 struct bfd_link_info * info,
10420 asection * sec,
10421 const Elf_Internal_Rela * relocs)
10422 {
10423 Elf_Internal_Shdr *symtab_hdr;
10424 struct elf_link_hash_entry **sym_hashes;
10425 bfd_signed_vma *local_got_refcounts;
10426 const Elf_Internal_Rela *rel, *relend;
10427 struct elf32_arm_link_hash_table * globals;
10428
10429 if (info->relocatable)
10430 return TRUE;
10431
10432 globals = elf32_arm_hash_table (info);
10433
10434 elf_section_data (sec)->local_dynrel = NULL;
10435
10436 symtab_hdr = & elf_symtab_hdr (abfd);
10437 sym_hashes = elf_sym_hashes (abfd);
10438 local_got_refcounts = elf_local_got_refcounts (abfd);
10439
10440 check_use_blx (globals);
10441
10442 relend = relocs + sec->reloc_count;
10443 for (rel = relocs; rel < relend; rel++)
10444 {
10445 unsigned long r_symndx;
10446 struct elf_link_hash_entry *h = NULL;
10447 int r_type;
10448
10449 r_symndx = ELF32_R_SYM (rel->r_info);
10450 if (r_symndx >= symtab_hdr->sh_info)
10451 {
10452 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
10453 while (h->root.type == bfd_link_hash_indirect
10454 || h->root.type == bfd_link_hash_warning)
10455 h = (struct elf_link_hash_entry *) h->root.u.i.link;
10456 }
10457
10458 r_type = ELF32_R_TYPE (rel->r_info);
10459 r_type = arm_real_reloc_type (globals, r_type);
10460 switch (r_type)
10461 {
10462 case R_ARM_GOT32:
10463 case R_ARM_GOT_PREL:
10464 case R_ARM_TLS_GD32:
10465 case R_ARM_TLS_IE32:
10466 if (h != NULL)
10467 {
10468 if (h->got.refcount > 0)
10469 h->got.refcount -= 1;
10470 }
10471 else if (local_got_refcounts != NULL)
10472 {
10473 if (local_got_refcounts[r_symndx] > 0)
10474 local_got_refcounts[r_symndx] -= 1;
10475 }
10476 break;
10477
10478 case R_ARM_TLS_LDM32:
10479 elf32_arm_hash_table (info)->tls_ldm_got.refcount -= 1;
10480 break;
10481
10482 case R_ARM_ABS32:
10483 case R_ARM_ABS32_NOI:
10484 case R_ARM_REL32:
10485 case R_ARM_REL32_NOI:
10486 case R_ARM_PC24:
10487 case R_ARM_PLT32:
10488 case R_ARM_CALL:
10489 case R_ARM_JUMP24:
10490 case R_ARM_PREL31:
10491 case R_ARM_THM_CALL:
10492 case R_ARM_THM_JUMP24:
10493 case R_ARM_THM_JUMP19:
10494 case R_ARM_MOVW_ABS_NC:
10495 case R_ARM_MOVT_ABS:
10496 case R_ARM_MOVW_PREL_NC:
10497 case R_ARM_MOVT_PREL:
10498 case R_ARM_THM_MOVW_ABS_NC:
10499 case R_ARM_THM_MOVT_ABS:
10500 case R_ARM_THM_MOVW_PREL_NC:
10501 case R_ARM_THM_MOVT_PREL:
10502 /* Should the interworking branches be here also? */
10503
10504 if (h != NULL)
10505 {
10506 struct elf32_arm_link_hash_entry *eh;
10507 struct elf32_arm_relocs_copied **pp;
10508 struct elf32_arm_relocs_copied *p;
10509
10510 eh = (struct elf32_arm_link_hash_entry *) h;
10511
10512 if (h->plt.refcount > 0)
10513 {
10514 h->plt.refcount -= 1;
10515 if (r_type == R_ARM_THM_CALL)
10516 eh->plt_maybe_thumb_refcount--;
10517
10518 if (r_type == R_ARM_THM_JUMP24
10519 || r_type == R_ARM_THM_JUMP19)
10520 eh->plt_thumb_refcount--;
10521 }
10522
10523 if (r_type == R_ARM_ABS32
10524 || r_type == R_ARM_REL32
10525 || r_type == R_ARM_ABS32_NOI
10526 || r_type == R_ARM_REL32_NOI)
10527 {
10528 for (pp = &eh->relocs_copied; (p = *pp) != NULL;
10529 pp = &p->next)
10530 if (p->section == sec)
10531 {
10532 p->count -= 1;
10533 if (ELF32_R_TYPE (rel->r_info) == R_ARM_REL32
10534 || ELF32_R_TYPE (rel->r_info) == R_ARM_REL32_NOI)
10535 p->pc_count -= 1;
10536 if (p->count == 0)
10537 *pp = p->next;
10538 break;
10539 }
10540 }
10541 }
10542 break;
10543
10544 default:
10545 break;
10546 }
10547 }
10548
10549 return TRUE;
10550 }
10551
10552 /* Look through the relocs for a section during the first phase. */
10553
10554 static bfd_boolean
10555 elf32_arm_check_relocs (bfd *abfd, struct bfd_link_info *info,
10556 asection *sec, const Elf_Internal_Rela *relocs)
10557 {
10558 Elf_Internal_Shdr *symtab_hdr;
10559 struct elf_link_hash_entry **sym_hashes;
10560 const Elf_Internal_Rela *rel;
10561 const Elf_Internal_Rela *rel_end;
10562 bfd *dynobj;
10563 asection *sreloc;
10564 bfd_vma *local_got_offsets;
10565 struct elf32_arm_link_hash_table *htab;
10566 bfd_boolean needs_plt;
10567 unsigned long nsyms;
10568
10569 if (info->relocatable)
10570 return TRUE;
10571
10572 BFD_ASSERT (is_arm_elf (abfd));
10573
10574 htab = elf32_arm_hash_table (info);
10575 sreloc = NULL;
10576
10577 /* Create dynamic sections for relocatable executables so that we can
10578 copy relocations. */
10579 if (htab->root.is_relocatable_executable
10580 && ! htab->root.dynamic_sections_created)
10581 {
10582 if (! _bfd_elf_link_create_dynamic_sections (abfd, info))
10583 return FALSE;
10584 }
10585
10586 dynobj = elf_hash_table (info)->dynobj;
10587 local_got_offsets = elf_local_got_offsets (abfd);
10588
10589 symtab_hdr = & elf_symtab_hdr (abfd);
10590 sym_hashes = elf_sym_hashes (abfd);
10591 nsyms = NUM_SHDR_ENTRIES (symtab_hdr);
10592
10593 rel_end = relocs + sec->reloc_count;
10594 for (rel = relocs; rel < rel_end; rel++)
10595 {
10596 struct elf_link_hash_entry *h;
10597 struct elf32_arm_link_hash_entry *eh;
10598 unsigned long r_symndx;
10599 int r_type;
10600
10601 r_symndx = ELF32_R_SYM (rel->r_info);
10602 r_type = ELF32_R_TYPE (rel->r_info);
10603 r_type = arm_real_reloc_type (htab, r_type);
10604
10605 if (r_symndx >= nsyms
10606 /* PR 9934: It is possible to have relocations that do not
10607 refer to symbols, thus it is also possible to have an
10608 object file containing relocations but no symbol table. */
10609 && (r_symndx > 0 || nsyms > 0))
10610 {
10611 (*_bfd_error_handler) (_("%B: bad symbol index: %d"), abfd,
10612 r_symndx);
10613 return FALSE;
10614 }
10615
10616 if (nsyms == 0 || r_symndx < symtab_hdr->sh_info)
10617 h = NULL;
10618 else
10619 {
10620 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
10621 while (h->root.type == bfd_link_hash_indirect
10622 || h->root.type == bfd_link_hash_warning)
10623 h = (struct elf_link_hash_entry *) h->root.u.i.link;
10624 }
10625
10626 eh = (struct elf32_arm_link_hash_entry *) h;
10627
10628 switch (r_type)
10629 {
10630 case R_ARM_GOT32:
10631 case R_ARM_GOT_PREL:
10632 case R_ARM_TLS_GD32:
10633 case R_ARM_TLS_IE32:
10634 /* This symbol requires a global offset table entry. */
10635 {
10636 int tls_type, old_tls_type;
10637
10638 switch (r_type)
10639 {
10640 case R_ARM_TLS_GD32: tls_type = GOT_TLS_GD; break;
10641 case R_ARM_TLS_IE32: tls_type = GOT_TLS_IE; break;
10642 default: tls_type = GOT_NORMAL; break;
10643 }
10644
10645 if (h != NULL)
10646 {
10647 h->got.refcount++;
10648 old_tls_type = elf32_arm_hash_entry (h)->tls_type;
10649 }
10650 else
10651 {
10652 bfd_signed_vma *local_got_refcounts;
10653
10654 /* This is a global offset table entry for a local symbol. */
10655 local_got_refcounts = elf_local_got_refcounts (abfd);
10656 if (local_got_refcounts == NULL)
10657 {
10658 bfd_size_type size;
10659
10660 size = symtab_hdr->sh_info;
10661 size *= (sizeof (bfd_signed_vma) + sizeof (char));
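/* The single allocation holds one refcount per local symbol followed by
   one GOT TLS type byte per local symbol; the assignment below points
   elf32_arm_local_got_tls_type at that trailing byte array.  */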
10662 local_got_refcounts = bfd_zalloc (abfd, size);
10663 if (local_got_refcounts == NULL)
10664 return FALSE;
10665 elf_local_got_refcounts (abfd) = local_got_refcounts;
10666 elf32_arm_local_got_tls_type (abfd)
10667 = (char *) (local_got_refcounts + symtab_hdr->sh_info);
10668 }
10669 local_got_refcounts[r_symndx] += 1;
10670 old_tls_type = elf32_arm_local_got_tls_type (abfd) [r_symndx];
10671 }
10672
10673 /* We will already have issued an error message if there is a
10674 TLS / non-TLS mismatch, based on the symbol type. We don't
10675 support any linker relaxations. So just combine any TLS
10676 types needed. */
10677 if (old_tls_type != GOT_UNKNOWN && old_tls_type != GOT_NORMAL
10678 && tls_type != GOT_NORMAL)
10679 tls_type |= old_tls_type;
10680
10681 if (old_tls_type != tls_type)
10682 {
10683 if (h != NULL)
10684 elf32_arm_hash_entry (h)->tls_type = tls_type;
10685 else
10686 elf32_arm_local_got_tls_type (abfd) [r_symndx] = tls_type;
10687 }
10688 }
10689 /* Fall through. */
10690
10691 case R_ARM_TLS_LDM32:
10692 if (r_type == R_ARM_TLS_LDM32)
10693 htab->tls_ldm_got.refcount++;
10694 /* Fall through. */
10695
10696 case R_ARM_GOTOFF32:
10697 case R_ARM_GOTPC:
10698 if (htab->sgot == NULL)
10699 {
10700 if (htab->root.dynobj == NULL)
10701 htab->root.dynobj = abfd;
10702 if (!create_got_section (htab->root.dynobj, info))
10703 return FALSE;
10704 }
10705 break;
10706
10707 case R_ARM_ABS12:
10708 /* VxWorks uses dynamic R_ARM_ABS12 relocations for
10709 ldr __GOTT_INDEX__ offsets. */
10710 if (!htab->vxworks_p)
10711 break;
10712 /* Fall through. */
10713
10714 case R_ARM_PC24:
10715 case R_ARM_PLT32:
10716 case R_ARM_CALL:
10717 case R_ARM_JUMP24:
10718 case R_ARM_PREL31:
10719 case R_ARM_THM_CALL:
10720 case R_ARM_THM_JUMP24:
10721 case R_ARM_THM_JUMP19:
10722 needs_plt = 1;
10723 goto normal_reloc;
10724
10725 case R_ARM_MOVW_ABS_NC:
10726 case R_ARM_MOVT_ABS:
10727 case R_ARM_THM_MOVW_ABS_NC:
10728 case R_ARM_THM_MOVT_ABS:
10729 if (info->shared)
10730 {
10731 (*_bfd_error_handler)
10732 (_("%B: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"),
10733 abfd, elf32_arm_howto_table_1[r_type].name,
10734 (h) ? h->root.root.string : "a local symbol");
10735 bfd_set_error (bfd_error_bad_value);
10736 return FALSE;
10737 }
10738
10739 /* Fall through. */
10740 case R_ARM_ABS32:
10741 case R_ARM_ABS32_NOI:
10742 case R_ARM_REL32:
10743 case R_ARM_REL32_NOI:
10744 case R_ARM_MOVW_PREL_NC:
10745 case R_ARM_MOVT_PREL:
10746 case R_ARM_THM_MOVW_PREL_NC:
10747 case R_ARM_THM_MOVT_PREL:
10748 needs_plt = 0;
10749 normal_reloc:
10750
10751 /* Should the interworking branches be listed here? */
10752 if (h != NULL)
10753 {
10754 /* If this reloc is in a read-only section, we might
10755 need a copy reloc. We can't check reliably at this
10756 stage whether the section is read-only, as input
10757 sections have not yet been mapped to output sections.
10758 Tentatively set the flag for now, and correct in
10759 adjust_dynamic_symbol. */
10760 if (!info->shared)
10761 h->non_got_ref = 1;
10762
10763 /* We may need a .plt entry if the function this reloc
10764 refers to is in a different object. We can't tell for
10765 sure yet, because something later might force the
10766 symbol local. */
10767 if (needs_plt)
10768 h->needs_plt = 1;
10769
10770 /* If we create a PLT entry, this relocation will reference
10771 it, even if it's an ABS32 relocation. */
10772 h->plt.refcount += 1;
10773
10774 /* It's too early to use htab->use_blx here, so we have to
10775 record possible blx references separately from
10776 relocs that definitely need a thumb stub. */
10777
10778 if (r_type == R_ARM_THM_CALL)
10779 eh->plt_maybe_thumb_refcount += 1;
10780
10781 if (r_type == R_ARM_THM_JUMP24
10782 || r_type == R_ARM_THM_JUMP19)
10783 eh->plt_thumb_refcount += 1;
10784 }
10785
10786 /* If we are creating a shared library or relocatable executable,
10787 and this is a reloc against a global symbol, or a non PC
10788 relative reloc against a local symbol, then we need to copy
10789 the reloc into the shared library. However, if we are linking
10790 with -Bsymbolic, we do not need to copy a reloc against a
10791 global symbol which is defined in an object we are
10792 including in the link (i.e., DEF_REGULAR is set). At
10793 this point we have not seen all the input files, so it is
10794 possible that DEF_REGULAR is not set now but will be set
10795 later (it is never cleared). We account for that
10796 possibility below by storing information in the
10797 relocs_copied field of the hash table entry. */
10798 if ((info->shared || htab->root.is_relocatable_executable)
10799 && (sec->flags & SEC_ALLOC) != 0
10800 && ((r_type == R_ARM_ABS32 || r_type == R_ARM_ABS32_NOI)
10801 || (h != NULL && ! h->needs_plt
10802 && (! info->symbolic || ! h->def_regular))))
10803 {
10804 struct elf32_arm_relocs_copied *p, **head;
10805
10806 /* When creating a shared object, we must copy these
10807 reloc types into the output file. We create a reloc
10808 section in dynobj and make room for this reloc. */
10809 if (sreloc == NULL)
10810 {
10811 sreloc = _bfd_elf_make_dynamic_reloc_section
10812 (sec, dynobj, 2, abfd, ! htab->use_rel);
10813
10814 if (sreloc == NULL)
10815 return FALSE;
10816
10817 /* BPABI objects never have dynamic relocations mapped. */
10818 if (htab->symbian_p)
10819 {
10820 flagword flags;
10821
10822 flags = bfd_get_section_flags (dynobj, sreloc);
10823 flags &= ~(SEC_LOAD | SEC_ALLOC);
10824 bfd_set_section_flags (dynobj, sreloc, flags);
10825 }
10826 }
10827
10828 /* If this is a global symbol, we count the number of
10829 relocations we need for this symbol. */
10830 if (h != NULL)
10831 {
10832 head = &((struct elf32_arm_link_hash_entry *) h)->relocs_copied;
10833 }
10834 else
10835 {
10836 /* Track dynamic relocs needed for local syms too.
10837 We really need local syms available to do this
10838 easily. Oh well. */
10839 asection *s;
10840 void *vpp;
10841 Elf_Internal_Sym *isym;
10842
10843 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
10844 abfd, r_symndx);
10845 if (isym == NULL)
10846 return FALSE;
10847
10848 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
10849 if (s == NULL)
10850 s = sec;
10851
10852 vpp = &elf_section_data (s)->local_dynrel;
10853 head = (struct elf32_arm_relocs_copied **) vpp;
10854 }
10855
10856 p = *head;
10857 if (p == NULL || p->section != sec)
10858 {
10859 bfd_size_type amt = sizeof *p;
10860
10861 p = bfd_alloc (htab->root.dynobj, amt);
10862 if (p == NULL)
10863 return FALSE;
10864 p->next = *head;
10865 *head = p;
10866 p->section = sec;
10867 p->count = 0;
10868 p->pc_count = 0;
10869 }
10870
10871 if (r_type == R_ARM_REL32 || r_type == R_ARM_REL32_NOI)
10872 p->pc_count += 1;
10873 p->count += 1;
10874 }
10875 break;
10876
10877 /* This relocation describes the C++ object vtable hierarchy.
10878 Reconstruct it for later use during GC. */
10879 case R_ARM_GNU_VTINHERIT:
10880 if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
10881 return FALSE;
10882 break;
10883
10884 /* This relocation describes which C++ vtable entries are actually
10885 used. Record for later use during GC. */
10886 case R_ARM_GNU_VTENTRY:
10887 BFD_ASSERT (h != NULL);
10888 if (h != NULL
10889 && !bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_offset))
10890 return FALSE;
10891 break;
10892 }
10893 }
10894
10895 return TRUE;
10896 }
10897
10898 /* Unwinding tables are not referenced directly. This pass marks them as
10899 required if the corresponding code section is marked. */
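/* The sh_link field of an SHT_ARM_EXIDX section gives the section header
   index of the code section it describes, which is what the loop below
   checks.  */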
10900
10901 static bfd_boolean
10902 elf32_arm_gc_mark_extra_sections (struct bfd_link_info *info,
10903 elf_gc_mark_hook_fn gc_mark_hook)
10904 {
10905 bfd *sub;
10906 Elf_Internal_Shdr **elf_shdrp;
10907 bfd_boolean again;
10908
10909 /* Marking EH data may cause additional code sections to be marked,
10910 requiring multiple passes. */
10911 again = TRUE;
10912 while (again)
10913 {
10914 again = FALSE;
10915 for (sub = info->input_bfds; sub != NULL; sub = sub->link_next)
10916 {
10917 asection *o;
10918
10919 if (! is_arm_elf (sub))
10920 continue;
10921
10922 elf_shdrp = elf_elfsections (sub);
10923 for (o = sub->sections; o != NULL; o = o->next)
10924 {
10925 Elf_Internal_Shdr *hdr;
10926
10927 hdr = &elf_section_data (o)->this_hdr;
10928 if (hdr->sh_type == SHT_ARM_EXIDX
10929 && hdr->sh_link
10930 && hdr->sh_link < elf_numsections (sub)
10931 && !o->gc_mark
10932 && elf_shdrp[hdr->sh_link]->bfd_section->gc_mark)
10933 {
10934 again = TRUE;
10935 if (!_bfd_elf_gc_mark (info, o, gc_mark_hook))
10936 return FALSE;
10937 }
10938 }
10939 }
10940 }
10941
10942 return TRUE;
10943 }
10944
10945 /* Treat mapping symbols as special target symbols. */
10946
10947 static bfd_boolean
10948 elf32_arm_is_target_special_symbol (bfd * abfd ATTRIBUTE_UNUSED, asymbol * sym)
10949 {
10950 return bfd_is_arm_special_symbol_name (sym->name,
10951 BFD_ARM_SPECIAL_SYM_TYPE_ANY);
10952 }
10953
10954 /* This is a copy of elf_find_function() from elf.c except that
10955 ARM mapping symbols are ignored when looking for function names
10956 and STT_ARM_TFUNC is considered to be a function type. */
10957
10958 static bfd_boolean
10959 arm_elf_find_function (bfd * abfd ATTRIBUTE_UNUSED,
10960 asection * section,
10961 asymbol ** symbols,
10962 bfd_vma offset,
10963 const char ** filename_ptr,
10964 const char ** functionname_ptr)
10965 {
10966 const char * filename = NULL;
10967 asymbol * func = NULL;
10968 bfd_vma low_func = 0;
10969 asymbol ** p;
10970
10971 for (p = symbols; *p != NULL; p++)
10972 {
10973 elf_symbol_type *q;
10974
10975 q = (elf_symbol_type *) *p;
10976
10977 switch (ELF_ST_TYPE (q->internal_elf_sym.st_info))
10978 {
10979 default:
10980 break;
10981 case STT_FILE:
10982 filename = bfd_asymbol_name (&q->symbol);
10983 break;
10984 case STT_FUNC:
10985 case STT_ARM_TFUNC:
10986 case STT_NOTYPE:
10987 /* Skip mapping symbols. */
10988 if ((q->symbol.flags & BSF_LOCAL)
10989 && bfd_is_arm_special_symbol_name (q->symbol.name,
10990 BFD_ARM_SPECIAL_SYM_TYPE_ANY))
10991 continue;
10992 /* Fall through. */
10993 if (bfd_get_section (&q->symbol) == section
10994 && q->symbol.value >= low_func
10995 && q->symbol.value <= offset)
10996 {
10997 func = (asymbol *) q;
10998 low_func = q->symbol.value;
10999 }
11000 break;
11001 }
11002 }
11003
11004 if (func == NULL)
11005 return FALSE;
11006
11007 if (filename_ptr)
11008 *filename_ptr = filename;
11009 if (functionname_ptr)
11010 *functionname_ptr = bfd_asymbol_name (func);
11011
11012 return TRUE;
11013 }
11014
11015
11016 /* Find the nearest line to a particular section and offset, for error
11017 reporting. This code is a duplicate of the code in elf.c, except
11018 that it uses arm_elf_find_function. */
11019
11020 static bfd_boolean
11021 elf32_arm_find_nearest_line (bfd * abfd,
11022 asection * section,
11023 asymbol ** symbols,
11024 bfd_vma offset,
11025 const char ** filename_ptr,
11026 const char ** functionname_ptr,
11027 unsigned int * line_ptr)
11028 {
11029 bfd_boolean found = FALSE;
11030
11031 /* We skip _bfd_dwarf1_find_nearest_line since no known ARM toolchain uses it. */
11032
11033 if (_bfd_dwarf2_find_nearest_line (abfd, section, symbols, offset,
11034 filename_ptr, functionname_ptr,
11035 line_ptr, 0,
11036 & elf_tdata (abfd)->dwarf2_find_line_info))
11037 {
11038 if (!*functionname_ptr)
11039 arm_elf_find_function (abfd, section, symbols, offset,
11040 *filename_ptr ? NULL : filename_ptr,
11041 functionname_ptr);
11042
11043 return TRUE;
11044 }
11045
11046 if (! _bfd_stab_section_find_nearest_line (abfd, symbols, section, offset,
11047 & found, filename_ptr,
11048 functionname_ptr, line_ptr,
11049 & elf_tdata (abfd)->line_info))
11050 return FALSE;
11051
11052 if (found && (*functionname_ptr || *line_ptr))
11053 return TRUE;
11054
11055 if (symbols == NULL)
11056 return FALSE;
11057
11058 if (! arm_elf_find_function (abfd, section, symbols, offset,
11059 filename_ptr, functionname_ptr))
11060 return FALSE;
11061
11062 *line_ptr = 0;
11063 return TRUE;
11064 }
11065
11066 static bfd_boolean
11067 elf32_arm_find_inliner_info (bfd * abfd,
11068 const char ** filename_ptr,
11069 const char ** functionname_ptr,
11070 unsigned int * line_ptr)
11071 {
11072 bfd_boolean found;
11073 found = _bfd_dwarf2_find_inliner_info (abfd, filename_ptr,
11074 functionname_ptr, line_ptr,
11075 & elf_tdata (abfd)->dwarf2_find_line_info);
11076 return found;
11077 }
11078
11079 /* Adjust a symbol defined by a dynamic object and referenced by a
11080 regular object. The current definition is in some section of the
11081 dynamic object, but we're not including those sections. We have to
11082 change the definition to something the rest of the link can
11083 understand. */
11084
11085 static bfd_boolean
11086 elf32_arm_adjust_dynamic_symbol (struct bfd_link_info * info,
11087 struct elf_link_hash_entry * h)
11088 {
11089 bfd * dynobj;
11090 asection * s;
11091 struct elf32_arm_link_hash_entry * eh;
11092 struct elf32_arm_link_hash_table *globals;
11093
11094 globals = elf32_arm_hash_table (info);
11095 dynobj = elf_hash_table (info)->dynobj;
11096
11097 /* Make sure we know what is going on here. */
11098 BFD_ASSERT (dynobj != NULL
11099 && (h->needs_plt
11100 || h->u.weakdef != NULL
11101 || (h->def_dynamic
11102 && h->ref_regular
11103 && !h->def_regular)));
11104
11105 eh = (struct elf32_arm_link_hash_entry *) h;
11106
11107 /* If this is a function, put it in the procedure linkage table. We
11108 will fill in the contents of the procedure linkage table later,
11109 when we know the address of the .got section. */
11110 if (h->type == STT_FUNC || h->type == STT_ARM_TFUNC
11111 || h->needs_plt)
11112 {
11113 if (h->plt.refcount <= 0
11114 || SYMBOL_CALLS_LOCAL (info, h)
11115 || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
11116 && h->root.type == bfd_link_hash_undefweak))
11117 {
11118 /* This case can occur if we saw a PLT32 reloc in an input
11119 file, but the symbol was never referred to by a dynamic
11120 object, or if all references were garbage collected. In
11121 such a case, we don't actually need to build a procedure
11122 linkage table, and we can just do a PC24 reloc instead. */
11123 h->plt.offset = (bfd_vma) -1;
11124 eh->plt_thumb_refcount = 0;
11125 eh->plt_maybe_thumb_refcount = 0;
11126 h->needs_plt = 0;
11127 }
11128
11129 return TRUE;
11130 }
11131 else
11132 {
11133 /* It's possible that we incorrectly decided a .plt reloc was
11134 needed for an R_ARM_PC24 or similar reloc to a non-function sym
11135 in check_relocs. We can't decide accurately between function
11136 	    and non-function syms in check_relocs; objects loaded later in
11137 	    the link may change h->type.  So fix it now.  */
11138 h->plt.offset = (bfd_vma) -1;
11139 eh->plt_thumb_refcount = 0;
11140 eh->plt_maybe_thumb_refcount = 0;
11141 }
11142
11143 /* If this is a weak symbol, and there is a real definition, the
11144 processor independent code will have arranged for us to see the
11145 real definition first, and we can just use the same value. */
11146 if (h->u.weakdef != NULL)
11147 {
11148 BFD_ASSERT (h->u.weakdef->root.type == bfd_link_hash_defined
11149 || h->u.weakdef->root.type == bfd_link_hash_defweak);
11150 h->root.u.def.section = h->u.weakdef->root.u.def.section;
11151 h->root.u.def.value = h->u.weakdef->root.u.def.value;
11152 return TRUE;
11153 }
11154
11155 /* If there are no non-GOT references, we do not need a copy
11156 relocation. */
11157 if (!h->non_got_ref)
11158 return TRUE;
11159
11160 /* This is a reference to a symbol defined by a dynamic object which
11161 is not a function. */
11162
11163 /* If we are creating a shared library, we must presume that the
11164 only references to the symbol are via the global offset table.
11165 For such cases we need not do anything here; the relocations will
11166 be handled correctly by relocate_section. Relocatable executables
11167 can reference data in shared objects directly, so we don't need to
11168 do anything here. */
11169 if (info->shared || globals->root.is_relocatable_executable)
11170 return TRUE;
11171
11172 if (h->size == 0)
11173 {
11174 (*_bfd_error_handler) (_("dynamic variable `%s' is zero size"),
11175 h->root.root.string);
11176 return TRUE;
11177 }
11178
11179 /* We must allocate the symbol in our .dynbss section, which will
11180 become part of the .bss section of the executable. There will be
11181 an entry for this symbol in the .dynsym section. The dynamic
11182 object will contain position independent code, so all references
11183 from the dynamic object to this symbol will go through the global
11184 offset table. The dynamic linker will use the .dynsym entry to
11185 determine the address it must put in the global offset table, so
11186 both the dynamic object and the regular object will refer to the
11187 same memory location for the variable. */
11188 s = bfd_get_section_by_name (dynobj, ".dynbss");
11189 BFD_ASSERT (s != NULL);
11190
11191 /* We must generate a R_ARM_COPY reloc to tell the dynamic linker to
11192 copy the initial value out of the dynamic object and into the
11193 runtime process image. We need to remember the offset into the
11194 .rel(a).bss section we are going to use. */
11195 if ((h->root.u.def.section->flags & SEC_ALLOC) != 0)
11196 {
11197 asection *srel;
11198
11199 srel = bfd_get_section_by_name (dynobj, RELOC_SECTION (globals, ".bss"));
11200 BFD_ASSERT (srel != NULL);
11201 srel->size += RELOC_SIZE (globals);
11202 h->needs_copy = 1;
11203 }
11204
11205 return _bfd_elf_adjust_dynamic_copy (h, s);
11206 }
11207
11208 /* Allocate space in .plt, .got and associated reloc sections for
11209 dynamic relocs. */
11210
11211 static bfd_boolean
11212 allocate_dynrelocs (struct elf_link_hash_entry *h, void * inf)
11213 {
11214 struct bfd_link_info *info;
11215 struct elf32_arm_link_hash_table *htab;
11216 struct elf32_arm_link_hash_entry *eh;
11217 struct elf32_arm_relocs_copied *p;
11218 bfd_signed_vma thumb_refs;
11219
11220 eh = (struct elf32_arm_link_hash_entry *) h;
11221
11222 if (h->root.type == bfd_link_hash_indirect)
11223 return TRUE;
11224
11225 if (h->root.type == bfd_link_hash_warning)
11226 /* When warning symbols are created, they **replace** the "real"
11227 entry in the hash table, thus we never get to see the real
11228 symbol in a hash traversal. So look at it now. */
11229 h = (struct elf_link_hash_entry *) h->root.u.i.link;
11230
11231 info = (struct bfd_link_info *) inf;
11232 htab = elf32_arm_hash_table (info);
11233
11234 if (htab->root.dynamic_sections_created
11235 && h->plt.refcount > 0)
11236 {
11237 /* Make sure this symbol is output as a dynamic symbol.
11238 Undefined weak syms won't yet be marked as dynamic. */
11239 if (h->dynindx == -1
11240 && !h->forced_local)
11241 {
11242 if (! bfd_elf_link_record_dynamic_symbol (info, h))
11243 return FALSE;
11244 }
11245
11246 if (info->shared
11247 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
11248 {
11249 asection *s = htab->splt;
11250
11251 /* If this is the first .plt entry, make room for the special
11252 first entry. */
11253 if (s->size == 0)
11254 s->size += htab->plt_header_size;
11255
11256 h->plt.offset = s->size;
11257
11258 /* If we will insert a Thumb trampoline before this PLT, leave room
11259 for it. */
11260 thumb_refs = eh->plt_thumb_refcount;
11261 if (!htab->use_blx)
11262 thumb_refs += eh->plt_maybe_thumb_refcount;
11263
11264 if (thumb_refs > 0)
11265 {
11266 h->plt.offset += PLT_THUMB_STUB_SIZE;
11267 s->size += PLT_THUMB_STUB_SIZE;
11268 }
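	  /* The stub reserved here occupies the bytes immediately before
	     h->plt.offset; elf32_arm_finish_dynamic_symbol writes it at
	     ptr - 4 and ptr - 2 relative to the start of the ARM entry.  */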
11269
11270 /* If this symbol is not defined in a regular file, and we are
11271 not generating a shared library, then set the symbol to this
11272 location in the .plt. This is required to make function
11273 pointers compare as equal between the normal executable and
11274 the shared library. */
11275 if (! info->shared
11276 && !h->def_regular)
11277 {
11278 h->root.u.def.section = s;
11279 h->root.u.def.value = h->plt.offset;
11280
11281 /* Make sure the function is not marked as Thumb, in case
11282 it is the target of an ABS32 relocation, which will
11283 point to the PLT entry. */
11284 if (ELF_ST_TYPE (h->type) == STT_ARM_TFUNC)
11285 h->type = ELF_ST_INFO (ELF_ST_BIND (h->type), STT_FUNC);
11286 }
11287
11288 /* Make room for this entry. */
11289 s->size += htab->plt_entry_size;
11290
11291 if (!htab->symbian_p)
11292 {
11293 /* We also need to make an entry in the .got.plt section, which
11294 will be placed in the .got section by the linker script. */
11295 eh->plt_got_offset = htab->sgotplt->size;
11296 htab->sgotplt->size += 4;
11297 }
11298
11299 /* We also need to make an entry in the .rel(a).plt section. */
11300 htab->srelplt->size += RELOC_SIZE (htab);
11301
11302 /* VxWorks executables have a second set of relocations for
11303 each PLT entry. They go in a separate relocation section,
11304 which is processed by the kernel loader. */
11305 if (htab->vxworks_p && !info->shared)
11306 {
11307 /* There is a relocation for the initial PLT entry:
11308 an R_ARM_32 relocation for _GLOBAL_OFFSET_TABLE_. */
11309 if (h->plt.offset == htab->plt_header_size)
11310 htab->srelplt2->size += RELOC_SIZE (htab);
11311
11312 /* There are two extra relocations for each subsequent
11313 PLT entry: an R_ARM_32 relocation for the GOT entry,
11314 and an R_ARM_32 relocation for the PLT entry. */
11315 htab->srelplt2->size += RELOC_SIZE (htab) * 2;
11316 }
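	  /* In that VxWorks executable case, for N PLT entries the
	     .rela.plt.unloaded section therefore ends up holding
	     1 + 2 * N relocations: slot 0 for the PLT header and a
	     GOT/PLT pair per entry.  That is the layout consumed by
	     elf32_arm_finish_dynamic_symbol and by the index fix-up loop
	     in elf32_arm_finish_dynamic_sections.  */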
11317 }
11318 else
11319 {
11320 h->plt.offset = (bfd_vma) -1;
11321 h->needs_plt = 0;
11322 }
11323 }
11324 else
11325 {
11326 h->plt.offset = (bfd_vma) -1;
11327 h->needs_plt = 0;
11328 }
11329
11330 if (h->got.refcount > 0)
11331 {
11332 asection *s;
11333 bfd_boolean dyn;
11334 int tls_type = elf32_arm_hash_entry (h)->tls_type;
11335 int indx;
11336
11337 /* Make sure this symbol is output as a dynamic symbol.
11338 Undefined weak syms won't yet be marked as dynamic. */
11339 if (h->dynindx == -1
11340 && !h->forced_local)
11341 {
11342 if (! bfd_elf_link_record_dynamic_symbol (info, h))
11343 return FALSE;
11344 }
11345
11346 if (!htab->symbian_p)
11347 {
11348 s = htab->sgot;
11349 h->got.offset = s->size;
11350
11351 if (tls_type == GOT_UNKNOWN)
11352 abort ();
11353
11354 if (tls_type == GOT_NORMAL)
11355 /* Non-TLS symbols need one GOT slot. */
11356 s->size += 4;
11357 else
11358 {
11359 if (tls_type & GOT_TLS_GD)
11360 /* R_ARM_TLS_GD32 needs 2 consecutive GOT slots. */
11361 s->size += 8;
11362 if (tls_type & GOT_TLS_IE)
11363 /* R_ARM_TLS_IE32 needs one GOT slot. */
11364 s->size += 4;
11365 }
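	  /* A sketch of the slot usage assumed here: a GOT_TLS_GD
	     reference reserves a pair of words, conventionally the module
	     index and the offset within that module's TLS block, while a
	     GOT_TLS_IE reference reserves a single word for a tp-relative
	     offset.  The dynamic relocations sized just below fill in
	     whichever of these cannot be computed at link time.  */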
11366
11367 dyn = htab->root.dynamic_sections_created;
11368
11369 indx = 0;
11370 if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
11371 && (!info->shared
11372 || !SYMBOL_REFERENCES_LOCAL (info, h)))
11373 indx = h->dynindx;
11374
11375 if (tls_type != GOT_NORMAL
11376 && (info->shared || indx != 0)
11377 && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
11378 || h->root.type != bfd_link_hash_undefweak))
11379 {
11380 if (tls_type & GOT_TLS_IE)
11381 htab->srelgot->size += RELOC_SIZE (htab);
11382
11383 if (tls_type & GOT_TLS_GD)
11384 htab->srelgot->size += RELOC_SIZE (htab);
11385
11386 if ((tls_type & GOT_TLS_GD) && indx != 0)
11387 htab->srelgot->size += RELOC_SIZE (htab);
11388 }
11389 else if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
11390 || h->root.type != bfd_link_hash_undefweak)
11391 && (info->shared
11392 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
11393 htab->srelgot->size += RELOC_SIZE (htab);
11394 }
11395 }
11396 else
11397 h->got.offset = (bfd_vma) -1;
11398
11399 /* Allocate stubs for exported Thumb functions on v4t. */
11400 if (!htab->use_blx && h->dynindx != -1
11401 && h->def_regular
11402 && ELF_ST_TYPE (h->type) == STT_ARM_TFUNC
11403 && ELF_ST_VISIBILITY (h->other) == STV_DEFAULT)
11404 {
11405 struct elf_link_hash_entry * th;
11406 struct bfd_link_hash_entry * bh;
11407 struct elf_link_hash_entry * myh;
11408 char name[1024];
11409 asection *s;
11410 bh = NULL;
11411 	  /* Create a new symbol to register the real location of the function.  */
11412 s = h->root.u.def.section;
11413 sprintf (name, "__real_%s", h->root.root.string);
11414 _bfd_generic_link_add_one_symbol (info, s->owner,
11415 name, BSF_GLOBAL, s,
11416 h->root.u.def.value,
11417 NULL, TRUE, FALSE, &bh);
11418
11419 myh = (struct elf_link_hash_entry *) bh;
11420 myh->type = ELF_ST_INFO (STB_LOCAL, STT_ARM_TFUNC);
11421 myh->forced_local = 1;
11422 eh->export_glue = myh;
11423 th = record_arm_to_thumb_glue (info, h);
11424 /* Point the symbol at the stub. */
11425 h->type = ELF_ST_INFO (ELF_ST_BIND (h->type), STT_FUNC);
11426 h->root.u.def.section = th->root.u.def.section;
11427 h->root.u.def.value = th->root.u.def.value & ~1;
11428 }
11429
11430 if (eh->relocs_copied == NULL)
11431 return TRUE;
11432
11433 /* In the shared -Bsymbolic case, discard space allocated for
11434 dynamic pc-relative relocs against symbols which turn out to be
11435 defined in regular objects. For the normal shared case, discard
11436 space for pc-relative relocs that have become local due to symbol
11437 visibility changes. */
11438
11439 if (info->shared || htab->root.is_relocatable_executable)
11440 {
11441 /* The only relocs that use pc_count are R_ARM_REL32 and
11442 R_ARM_REL32_NOI, which will appear on something like
11443 ".long foo - .". We want calls to protected symbols to resolve
11444 directly to the function rather than going via the plt. If people
11445 want function pointer comparisons to work as expected then they
11446 should avoid writing assembly like ".long foo - .". */
11447 if (SYMBOL_CALLS_LOCAL (info, h))
11448 {
11449 struct elf32_arm_relocs_copied **pp;
11450
11451 for (pp = &eh->relocs_copied; (p = *pp) != NULL; )
11452 {
11453 p->count -= p->pc_count;
11454 p->pc_count = 0;
11455 if (p->count == 0)
11456 *pp = p->next;
11457 else
11458 pp = &p->next;
11459 }
11460 }
11461
11462 if (elf32_arm_hash_table (info)->vxworks_p)
11463 {
11464 struct elf32_arm_relocs_copied **pp;
11465
11466 for (pp = &eh->relocs_copied; (p = *pp) != NULL; )
11467 {
11468 if (strcmp (p->section->output_section->name, ".tls_vars") == 0)
11469 *pp = p->next;
11470 else
11471 pp = &p->next;
11472 }
11473 }
11474
11475 /* Also discard relocs on undefined weak syms with non-default
11476 visibility. */
11477 if (eh->relocs_copied != NULL
11478 && h->root.type == bfd_link_hash_undefweak)
11479 {
11480 if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
11481 eh->relocs_copied = NULL;
11482
11483 /* Make sure undefined weak symbols are output as a dynamic
11484 symbol in PIEs. */
11485 else if (h->dynindx == -1
11486 && !h->forced_local)
11487 {
11488 if (! bfd_elf_link_record_dynamic_symbol (info, h))
11489 return FALSE;
11490 }
11491 }
11492
11493 else if (htab->root.is_relocatable_executable && h->dynindx == -1
11494 && h->root.type == bfd_link_hash_new)
11495 {
11496 /* Output absolute symbols so that we can create relocations
11497 against them. For normal symbols we output a relocation
11498 against the section that contains them. */
11499 if (! bfd_elf_link_record_dynamic_symbol (info, h))
11500 return FALSE;
11501 }
11502
11503 }
11504 else
11505 {
11506 /* For the non-shared case, discard space for relocs against
11507 symbols which turn out to need copy relocs or are not
11508 dynamic. */
11509
11510 if (!h->non_got_ref
11511 && ((h->def_dynamic
11512 && !h->def_regular)
11513 || (htab->root.dynamic_sections_created
11514 && (h->root.type == bfd_link_hash_undefweak
11515 || h->root.type == bfd_link_hash_undefined))))
11516 {
11517 /* Make sure this symbol is output as a dynamic symbol.
11518 Undefined weak syms won't yet be marked as dynamic. */
11519 if (h->dynindx == -1
11520 && !h->forced_local)
11521 {
11522 if (! bfd_elf_link_record_dynamic_symbol (info, h))
11523 return FALSE;
11524 }
11525
11526 /* If that succeeded, we know we'll be keeping all the
11527 relocs. */
11528 if (h->dynindx != -1)
11529 goto keep;
11530 }
11531
11532 eh->relocs_copied = NULL;
11533
11534 keep: ;
11535 }
11536
11537 /* Finally, allocate space. */
11538 for (p = eh->relocs_copied; p != NULL; p = p->next)
11539 {
11540 asection *sreloc = elf_section_data (p->section)->sreloc;
11541 sreloc->size += p->count * RELOC_SIZE (htab);
11542 }
11543
11544 return TRUE;
11545 }
11546
11547 /* Find any dynamic relocs that apply to read-only sections. */
11548
11549 static bfd_boolean
11550 elf32_arm_readonly_dynrelocs (struct elf_link_hash_entry * h, void * inf)
11551 {
11552 struct elf32_arm_link_hash_entry * eh;
11553 struct elf32_arm_relocs_copied * p;
11554
11555 if (h->root.type == bfd_link_hash_warning)
11556 h = (struct elf_link_hash_entry *) h->root.u.i.link;
11557
11558 eh = (struct elf32_arm_link_hash_entry *) h;
11559 for (p = eh->relocs_copied; p != NULL; p = p->next)
11560 {
11561 asection *s = p->section;
11562
11563 if (s != NULL && (s->flags & SEC_READONLY) != 0)
11564 {
11565 struct bfd_link_info *info = (struct bfd_link_info *) inf;
11566
11567 info->flags |= DF_TEXTREL;
11568
11569 /* Not an error, just cut short the traversal. */
11570 return FALSE;
11571 }
11572 }
11573 return TRUE;
11574 }
11575
11576 void
11577 bfd_elf32_arm_set_byteswap_code (struct bfd_link_info *info,
11578 int byteswap_code)
11579 {
11580 struct elf32_arm_link_hash_table *globals;
11581
11582 globals = elf32_arm_hash_table (info);
11583 globals->byteswap_code = byteswap_code;
11584 }
11585
11586 /* Set the sizes of the dynamic sections. */
11587
11588 static bfd_boolean
11589 elf32_arm_size_dynamic_sections (bfd * output_bfd ATTRIBUTE_UNUSED,
11590 struct bfd_link_info * info)
11591 {
11592 bfd * dynobj;
11593 asection * s;
11594 bfd_boolean plt;
11595 bfd_boolean relocs;
11596 bfd *ibfd;
11597 struct elf32_arm_link_hash_table *htab;
11598
11599 htab = elf32_arm_hash_table (info);
11600 dynobj = elf_hash_table (info)->dynobj;
11601 BFD_ASSERT (dynobj != NULL);
11602 check_use_blx (htab);
11603
11604 if (elf_hash_table (info)->dynamic_sections_created)
11605 {
11606 /* Set the contents of the .interp section to the interpreter. */
11607 if (info->executable)
11608 {
11609 s = bfd_get_section_by_name (dynobj, ".interp");
11610 BFD_ASSERT (s != NULL);
11611 s->size = sizeof ELF_DYNAMIC_INTERPRETER;
11612 s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER;
11613 }
11614 }
11615
11616 /* Set up .got offsets for local syms, and space for local dynamic
11617 relocs. */
11618 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
11619 {
11620 bfd_signed_vma *local_got;
11621 bfd_signed_vma *end_local_got;
11622 char *local_tls_type;
11623 bfd_size_type locsymcount;
11624 Elf_Internal_Shdr *symtab_hdr;
11625 asection *srel;
11626 bfd_boolean is_vxworks = elf32_arm_hash_table (info)->vxworks_p;
11627
11628 if (! is_arm_elf (ibfd))
11629 continue;
11630
11631 for (s = ibfd->sections; s != NULL; s = s->next)
11632 {
11633 struct elf32_arm_relocs_copied *p;
11634
11635 for (p = elf_section_data (s)->local_dynrel; p != NULL; p = p->next)
11636 {
11637 if (!bfd_is_abs_section (p->section)
11638 && bfd_is_abs_section (p->section->output_section))
11639 {
11640 /* Input section has been discarded, either because
11641 it is a copy of a linkonce section or due to
11642 linker script /DISCARD/, so we'll be discarding
11643 the relocs too. */
11644 }
11645 else if (is_vxworks
11646 && strcmp (p->section->output_section->name,
11647 ".tls_vars") == 0)
11648 {
11649 /* Relocations in vxworks .tls_vars sections are
11650 handled specially by the loader. */
11651 }
11652 else if (p->count != 0)
11653 {
11654 srel = elf_section_data (p->section)->sreloc;
11655 srel->size += p->count * RELOC_SIZE (htab);
11656 if ((p->section->output_section->flags & SEC_READONLY) != 0)
11657 info->flags |= DF_TEXTREL;
11658 }
11659 }
11660 }
11661
11662 local_got = elf_local_got_refcounts (ibfd);
11663 if (!local_got)
11664 continue;
11665
11666 symtab_hdr = & elf_symtab_hdr (ibfd);
11667 locsymcount = symtab_hdr->sh_info;
11668 end_local_got = local_got + locsymcount;
11669 local_tls_type = elf32_arm_local_got_tls_type (ibfd);
11670 s = htab->sgot;
11671 srel = htab->srelgot;
11672 for (; local_got < end_local_got; ++local_got, ++local_tls_type)
11673 {
11674 if (*local_got > 0)
11675 {
11676 *local_got = s->size;
11677 if (*local_tls_type & GOT_TLS_GD)
11678 /* TLS_GD relocs need an 8-byte structure in the GOT. */
11679 s->size += 8;
11680 if (*local_tls_type & GOT_TLS_IE)
11681 s->size += 4;
11682 if (*local_tls_type == GOT_NORMAL)
11683 s->size += 4;
11684
11685 if (info->shared || *local_tls_type == GOT_TLS_GD)
11686 srel->size += RELOC_SIZE (htab);
11687 }
11688 else
11689 *local_got = (bfd_vma) -1;
11690 }
11691 }
11692
11693 if (htab->tls_ldm_got.refcount > 0)
11694 {
11695 /* Allocate two GOT entries and one dynamic relocation (if necessary)
11696 for R_ARM_TLS_LDM32 relocations. */
11697 htab->tls_ldm_got.offset = htab->sgot->size;
11698 htab->sgot->size += 8;
11699 if (info->shared)
11700 htab->srelgot->size += RELOC_SIZE (htab);
11701 }
11702 else
11703 htab->tls_ldm_got.offset = -1;
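  /* Conventionally the pair reserved above for R_ARM_TLS_LDM32 holds the
     module index (supplied by a dynamic relocation in shared links, as
     sized above) and zero, since the local-dynamic sequence only needs
     the base of the module's TLS block.  */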
11704
11705 /* Allocate global sym .plt and .got entries, and space for global
11706 sym dynamic relocs. */
11707 elf_link_hash_traverse (& htab->root, allocate_dynrelocs, info);
11708
11709 /* Here we rummage through the found bfds to collect glue information. */
11710 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
11711 {
11712 if (! is_arm_elf (ibfd))
11713 continue;
11714
11715 /* Initialise mapping tables for code/data. */
11716 bfd_elf32_arm_init_maps (ibfd);
11717
11718 if (!bfd_elf32_arm_process_before_allocation (ibfd, info)
11719 || !bfd_elf32_arm_vfp11_erratum_scan (ibfd, info))
11720 /* xgettext:c-format */
11721 _bfd_error_handler (_("Errors encountered processing file %s"),
11722 ibfd->filename);
11723 }
11724
11725 /* Allocate space for the glue sections now that we've sized them. */
11726 bfd_elf32_arm_allocate_interworking_sections (info);
11727
11728 /* The check_relocs and adjust_dynamic_symbol entry points have
11729 determined the sizes of the various dynamic sections. Allocate
11730 memory for them. */
11731 plt = FALSE;
11732 relocs = FALSE;
11733 for (s = dynobj->sections; s != NULL; s = s->next)
11734 {
11735 const char * name;
11736
11737 if ((s->flags & SEC_LINKER_CREATED) == 0)
11738 continue;
11739
11740 /* It's OK to base decisions on the section name, because none
11741 of the dynobj section names depend upon the input files. */
11742 name = bfd_get_section_name (dynobj, s);
11743
11744 if (strcmp (name, ".plt") == 0)
11745 {
11746 /* Remember whether there is a PLT. */
11747 plt = s->size != 0;
11748 }
11749 else if (CONST_STRNEQ (name, ".rel"))
11750 {
11751 if (s->size != 0)
11752 {
11753 /* Remember whether there are any reloc sections other
11754 than .rel(a).plt and .rela.plt.unloaded. */
11755 if (s != htab->srelplt && s != htab->srelplt2)
11756 relocs = TRUE;
11757
11758 /* We use the reloc_count field as a counter if we need
11759 to copy relocs into the output file. */
11760 s->reloc_count = 0;
11761 }
11762 }
11763 else if (! CONST_STRNEQ (name, ".got")
11764 && strcmp (name, ".dynbss") != 0)
11765 {
11766 /* It's not one of our sections, so don't allocate space. */
11767 continue;
11768 }
11769
11770 if (s->size == 0)
11771 {
11772 /* If we don't need this section, strip it from the
11773 output file. This is mostly to handle .rel(a).bss and
11774 .rel(a).plt. We must create both sections in
11775 create_dynamic_sections, because they must be created
11776 before the linker maps input sections to output
11777 sections. The linker does that before
11778 adjust_dynamic_symbol is called, and it is that
11779 function which decides whether anything needs to go
11780 into these sections. */
11781 s->flags |= SEC_EXCLUDE;
11782 continue;
11783 }
11784
11785 if ((s->flags & SEC_HAS_CONTENTS) == 0)
11786 continue;
11787
11788 /* Allocate memory for the section contents. */
11789 s->contents = bfd_zalloc (dynobj, s->size);
11790 if (s->contents == NULL)
11791 return FALSE;
11792 }
11793
11794 if (elf_hash_table (info)->dynamic_sections_created)
11795 {
11796 /* Add some entries to the .dynamic section. We fill in the
11797 values later, in elf32_arm_finish_dynamic_sections, but we
11798 must add the entries now so that we get the correct size for
11799 the .dynamic section. The DT_DEBUG entry is filled in by the
11800 dynamic linker and used by the debugger. */
11801 #define add_dynamic_entry(TAG, VAL) \
11802 _bfd_elf_add_dynamic_entry (info, TAG, VAL)
11803
11804 if (info->executable)
11805 {
11806 if (!add_dynamic_entry (DT_DEBUG, 0))
11807 return FALSE;
11808 }
11809
11810 if (plt)
11811 {
11812 if ( !add_dynamic_entry (DT_PLTGOT, 0)
11813 || !add_dynamic_entry (DT_PLTRELSZ, 0)
11814 || !add_dynamic_entry (DT_PLTREL,
11815 htab->use_rel ? DT_REL : DT_RELA)
11816 || !add_dynamic_entry (DT_JMPREL, 0))
11817 return FALSE;
11818 }
11819
11820 if (relocs)
11821 {
11822 if (htab->use_rel)
11823 {
11824 if (!add_dynamic_entry (DT_REL, 0)
11825 || !add_dynamic_entry (DT_RELSZ, 0)
11826 || !add_dynamic_entry (DT_RELENT, RELOC_SIZE (htab)))
11827 return FALSE;
11828 }
11829 else
11830 {
11831 if (!add_dynamic_entry (DT_RELA, 0)
11832 || !add_dynamic_entry (DT_RELASZ, 0)
11833 || !add_dynamic_entry (DT_RELAENT, RELOC_SIZE (htab)))
11834 return FALSE;
11835 }
11836 }
11837
11838 /* If any dynamic relocs apply to a read-only section,
11839 then we need a DT_TEXTREL entry. */
11840 if ((info->flags & DF_TEXTREL) == 0)
11841 elf_link_hash_traverse (& htab->root, elf32_arm_readonly_dynrelocs,
11842 info);
11843
11844 if ((info->flags & DF_TEXTREL) != 0)
11845 {
11846 if (!add_dynamic_entry (DT_TEXTREL, 0))
11847 return FALSE;
11848 }
11849 if (htab->vxworks_p
11850 && !elf_vxworks_add_dynamic_entries (output_bfd, info))
11851 return FALSE;
11852 }
11853 #undef add_dynamic_entry
11854
11855 return TRUE;
11856 }
11857
11858 /* Finish up dynamic symbol handling. We set the contents of various
11859 dynamic sections here. */
11860
11861 static bfd_boolean
11862 elf32_arm_finish_dynamic_symbol (bfd * output_bfd,
11863 struct bfd_link_info * info,
11864 struct elf_link_hash_entry * h,
11865 Elf_Internal_Sym * sym)
11866 {
11867 bfd * dynobj;
11868 struct elf32_arm_link_hash_table *htab;
11869 struct elf32_arm_link_hash_entry *eh;
11870
11871 dynobj = elf_hash_table (info)->dynobj;
11872 htab = elf32_arm_hash_table (info);
11873 eh = (struct elf32_arm_link_hash_entry *) h;
11874
11875 if (h->plt.offset != (bfd_vma) -1)
11876 {
11877 asection * splt;
11878 asection * srel;
11879 bfd_byte *loc;
11880 bfd_vma plt_index;
11881 Elf_Internal_Rela rel;
11882
11883 /* This symbol has an entry in the procedure linkage table. Set
11884 it up. */
11885
11886 BFD_ASSERT (h->dynindx != -1);
11887
11888 splt = bfd_get_section_by_name (dynobj, ".plt");
11889 srel = bfd_get_section_by_name (dynobj, RELOC_SECTION (htab, ".plt"));
11890 BFD_ASSERT (splt != NULL && srel != NULL);
11891
11892 /* Fill in the entry in the procedure linkage table. */
11893 if (htab->symbian_p)
11894 {
11895 put_arm_insn (htab, output_bfd,
11896 elf32_arm_symbian_plt_entry[0],
11897 splt->contents + h->plt.offset);
11898 bfd_put_32 (output_bfd,
11899 elf32_arm_symbian_plt_entry[1],
11900 splt->contents + h->plt.offset + 4);
11901
11902 /* Fill in the entry in the .rel.plt section. */
11903 rel.r_offset = (splt->output_section->vma
11904 + splt->output_offset
11905 + h->plt.offset + 4);
11906 rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_GLOB_DAT);
11907
11908 /* Get the index in the procedure linkage table which
11909 corresponds to this symbol. This is the index of this symbol
11910 in all the symbols for which we are making plt entries. The
11911 first entry in the procedure linkage table is reserved. */
11912 plt_index = ((h->plt.offset - htab->plt_header_size)
11913 / htab->plt_entry_size);
11914 }
11915 else
11916 {
11917 bfd_vma got_offset, got_address, plt_address;
11918 bfd_vma got_displacement;
11919 asection * sgot;
11920 bfd_byte * ptr;
11921
11922 sgot = bfd_get_section_by_name (dynobj, ".got.plt");
11923 BFD_ASSERT (sgot != NULL);
11924
11925 /* Get the offset into the .got.plt table of the entry that
11926 corresponds to this function. */
11927 got_offset = eh->plt_got_offset;
11928
11929 /* Get the index in the procedure linkage table which
11930 corresponds to this symbol. This is the index of this symbol
11931 in all the symbols for which we are making plt entries. The
11932 first three entries in .got.plt are reserved; after that
11933 symbols appear in the same order as in .plt. */
11934 plt_index = (got_offset - 12) / 4;
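	  /* For example, got_offset 12 (the first word after the three
	     reserved entries) gives plt_index 0, got_offset 16 gives
	     plt_index 1, and so on.  */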
11935
11936 /* Calculate the address of the GOT entry. */
11937 got_address = (sgot->output_section->vma
11938 + sgot->output_offset
11939 + got_offset);
11940
11941 /* ...and the address of the PLT entry. */
11942 plt_address = (splt->output_section->vma
11943 + splt->output_offset
11944 + h->plt.offset);
11945
11946 ptr = htab->splt->contents + h->plt.offset;
11947 if (htab->vxworks_p && info->shared)
11948 {
11949 unsigned int i;
11950 bfd_vma val;
11951
11952 for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
11953 {
11954 val = elf32_arm_vxworks_shared_plt_entry[i];
11955 if (i == 2)
11956 val |= got_address - sgot->output_section->vma;
11957 if (i == 5)
11958 val |= plt_index * RELOC_SIZE (htab);
11959 if (i == 2 || i == 5)
11960 bfd_put_32 (output_bfd, val, ptr);
11961 else
11962 put_arm_insn (htab, output_bfd, val, ptr);
11963 }
11964 }
11965 else if (htab->vxworks_p)
11966 {
11967 unsigned int i;
11968 bfd_vma val;
11969
11970 for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
11971 {
11972 val = elf32_arm_vxworks_exec_plt_entry[i];
11973 if (i == 2)
11974 val |= got_address;
11975 if (i == 4)
11976 val |= 0xffffff & -((h->plt.offset + i * 4 + 8) >> 2);
11977 if (i == 5)
11978 val |= plt_index * RELOC_SIZE (htab);
11979 if (i == 2 || i == 5)
11980 bfd_put_32 (output_bfd, val, ptr);
11981 else
11982 put_arm_insn (htab, output_bfd, val, ptr);
11983 }
11984
11985 loc = (htab->srelplt2->contents
11986 + (plt_index * 2 + 1) * RELOC_SIZE (htab));
11987
11988 /* Create the .rela.plt.unloaded R_ARM_ABS32 relocation
11989 referencing the GOT for this PLT entry. */
11990 rel.r_offset = plt_address + 8;
11991 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
11992 rel.r_addend = got_offset;
11993 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
11994 loc += RELOC_SIZE (htab);
11995
11996 /* Create the R_ARM_ABS32 relocation referencing the
11997 beginning of the PLT for this GOT entry. */
11998 rel.r_offset = got_address;
11999 rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
12000 rel.r_addend = 0;
12001 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
12002 }
12003 else
12004 {
12005 bfd_signed_vma thumb_refs;
12006 /* Calculate the displacement between the PLT slot and the
12007 entry in the GOT. The eight-byte offset accounts for the
12008 value produced by adding to pc in the first instruction
12009 of the PLT stub. */
12010 got_displacement = got_address - (plt_address + 8);
12011
12012 BFD_ASSERT ((got_displacement & 0xf0000000) == 0);
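	      /* The three instructions emitted below encode the
		 displacement in 8 + 8 + 12 bits (masks 0x0ff00000,
		 0x000ff000 and 0x00000fff), so it must fit in 28 bits;
		 hence the check that the top four bits are clear.  */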
12013
12014 thumb_refs = eh->plt_thumb_refcount;
12015 if (!htab->use_blx)
12016 thumb_refs += eh->plt_maybe_thumb_refcount;
12017
12018 if (thumb_refs > 0)
12019 {
12020 put_thumb_insn (htab, output_bfd,
12021 elf32_arm_plt_thumb_stub[0], ptr - 4);
12022 put_thumb_insn (htab, output_bfd,
12023 elf32_arm_plt_thumb_stub[1], ptr - 2);
12024 }
12025
12026 put_arm_insn (htab, output_bfd,
12027 elf32_arm_plt_entry[0]
12028 | ((got_displacement & 0x0ff00000) >> 20),
12029 ptr + 0);
12030 put_arm_insn (htab, output_bfd,
12031 elf32_arm_plt_entry[1]
12032 | ((got_displacement & 0x000ff000) >> 12),
12033 			    ptr + 4);
12034 put_arm_insn (htab, output_bfd,
12035 elf32_arm_plt_entry[2]
12036 | (got_displacement & 0x00000fff),
12037 ptr + 8);
12038 #ifdef FOUR_WORD_PLT
12039 bfd_put_32 (output_bfd, elf32_arm_plt_entry[3], ptr + 12);
12040 #endif
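	      /* As a rough sketch, assuming the usual add/add/ldr
		 encodings in elf32_arm_plt_entry, the entry just emitted
		 behaves like:

		     add  ip, pc, #(disp & 0x0ff00000)
		     add  ip, ip, #(disp & 0x000ff000)
		     ldr  pc, [ip, #(disp & 0x00000fff)]!

		 i.e. it computes the address of the GOT slot from pc and
		 jumps through it (the FOUR_WORD_PLT variant appends a
		 fourth word).  */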
12041 }
12042
12043 /* Fill in the entry in the global offset table. */
12044 bfd_put_32 (output_bfd,
12045 (splt->output_section->vma
12046 + splt->output_offset),
12047 sgot->contents + got_offset);
12048
12049 /* Fill in the entry in the .rel(a).plt section. */
12050 rel.r_addend = 0;
12051 rel.r_offset = got_address;
12052 rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_JUMP_SLOT);
12053 }
12054
12055 loc = srel->contents + plt_index * RELOC_SIZE (htab);
12056 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
12057
12058 if (!h->def_regular)
12059 {
12060 /* Mark the symbol as undefined, rather than as defined in
12061 the .plt section. Leave the value alone. */
12062 sym->st_shndx = SHN_UNDEF;
12063 /* If the symbol is weak, we do need to clear the value.
12064 Otherwise, the PLT entry would provide a definition for
12065 the symbol even if the symbol wasn't defined anywhere,
12066 and so the symbol would never be NULL. */
12067 if (!h->ref_regular_nonweak)
12068 sym->st_value = 0;
12069 }
12070 }
12071
12072 if (h->got.offset != (bfd_vma) -1
12073 && (elf32_arm_hash_entry (h)->tls_type & GOT_TLS_GD) == 0
12074 && (elf32_arm_hash_entry (h)->tls_type & GOT_TLS_IE) == 0)
12075 {
12076 asection * sgot;
12077 asection * srel;
12078 Elf_Internal_Rela rel;
12079 bfd_byte *loc;
12080 bfd_vma offset;
12081
12082 /* This symbol has an entry in the global offset table. Set it
12083 up. */
12084 sgot = bfd_get_section_by_name (dynobj, ".got");
12085 srel = bfd_get_section_by_name (dynobj, RELOC_SECTION (htab, ".got"));
12086 BFD_ASSERT (sgot != NULL && srel != NULL);
12087
12088 offset = (h->got.offset & ~(bfd_vma) 1);
12089 rel.r_addend = 0;
12090 rel.r_offset = (sgot->output_section->vma
12091 + sgot->output_offset
12092 + offset);
12093
12094 /* If this is a static link, or it is a -Bsymbolic link and the
12095 symbol is defined locally or was forced to be local because
12096 of a version file, we just want to emit a RELATIVE reloc.
12097 The entry in the global offset table will already have been
12098 initialized in the relocate_section function. */
12099 if (info->shared
12100 && SYMBOL_REFERENCES_LOCAL (info, h))
12101 {
12102 BFD_ASSERT ((h->got.offset & 1) != 0);
12103 rel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
12104 if (!htab->use_rel)
12105 {
12106 rel.r_addend = bfd_get_32 (output_bfd, sgot->contents + offset);
12107 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + offset);
12108 }
12109 }
12110 else
12111 {
12112 BFD_ASSERT ((h->got.offset & 1) == 0);
12113 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + offset);
12114 rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_GLOB_DAT);
12115 }
12116
12117 loc = srel->contents + srel->reloc_count++ * RELOC_SIZE (htab);
12118 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
12119 }
12120
12121 if (h->needs_copy)
12122 {
12123 asection * s;
12124 Elf_Internal_Rela rel;
12125 bfd_byte *loc;
12126
12127 /* This symbol needs a copy reloc. Set it up. */
12128 BFD_ASSERT (h->dynindx != -1
12129 && (h->root.type == bfd_link_hash_defined
12130 || h->root.type == bfd_link_hash_defweak));
12131
12132 s = bfd_get_section_by_name (h->root.u.def.section->owner,
12133 RELOC_SECTION (htab, ".bss"));
12134 BFD_ASSERT (s != NULL);
12135
12136 rel.r_addend = 0;
12137 rel.r_offset = (h->root.u.def.value
12138 + h->root.u.def.section->output_section->vma
12139 + h->root.u.def.section->output_offset);
12140 rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_COPY);
12141 loc = s->contents + s->reloc_count++ * RELOC_SIZE (htab);
12142 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
12143 }
12144
12145 /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute. On VxWorks,
12146 the _GLOBAL_OFFSET_TABLE_ symbol is not absolute: it is relative
12147 to the ".got" section. */
12148 if (strcmp (h->root.root.string, "_DYNAMIC") == 0
12149 || (!htab->vxworks_p && h == htab->root.hgot))
12150 sym->st_shndx = SHN_ABS;
12151
12152 return TRUE;
12153 }
12154
12155 /* Finish up the dynamic sections. */
12156
12157 static bfd_boolean
12158 elf32_arm_finish_dynamic_sections (bfd * output_bfd, struct bfd_link_info * info)
12159 {
12160 bfd * dynobj;
12161 asection * sgot;
12162 asection * sdyn;
12163
12164 dynobj = elf_hash_table (info)->dynobj;
12165
12166 sgot = bfd_get_section_by_name (dynobj, ".got.plt");
12167 BFD_ASSERT (elf32_arm_hash_table (info)->symbian_p || sgot != NULL);
12168 sdyn = bfd_get_section_by_name (dynobj, ".dynamic");
12169
12170 if (elf_hash_table (info)->dynamic_sections_created)
12171 {
12172 asection *splt;
12173 Elf32_External_Dyn *dyncon, *dynconend;
12174 struct elf32_arm_link_hash_table *htab;
12175
12176 htab = elf32_arm_hash_table (info);
12177 splt = bfd_get_section_by_name (dynobj, ".plt");
12178 BFD_ASSERT (splt != NULL && sdyn != NULL);
12179
12180 dyncon = (Elf32_External_Dyn *) sdyn->contents;
12181 dynconend = (Elf32_External_Dyn *) (sdyn->contents + sdyn->size);
12182
12183 for (; dyncon < dynconend; dyncon++)
12184 {
12185 Elf_Internal_Dyn dyn;
12186 const char * name;
12187 asection * s;
12188
12189 bfd_elf32_swap_dyn_in (dynobj, dyncon, &dyn);
12190
12191 switch (dyn.d_tag)
12192 {
12193 unsigned int type;
12194
12195 default:
12196 if (htab->vxworks_p
12197 && elf_vxworks_finish_dynamic_entry (output_bfd, &dyn))
12198 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12199 break;
12200
12201 case DT_HASH:
12202 name = ".hash";
12203 goto get_vma_if_bpabi;
12204 case DT_STRTAB:
12205 name = ".dynstr";
12206 goto get_vma_if_bpabi;
12207 case DT_SYMTAB:
12208 name = ".dynsym";
12209 goto get_vma_if_bpabi;
12210 case DT_VERSYM:
12211 name = ".gnu.version";
12212 goto get_vma_if_bpabi;
12213 case DT_VERDEF:
12214 name = ".gnu.version_d";
12215 goto get_vma_if_bpabi;
12216 case DT_VERNEED:
12217 name = ".gnu.version_r";
12218 goto get_vma_if_bpabi;
12219
12220 case DT_PLTGOT:
12221 name = ".got";
12222 goto get_vma;
12223 case DT_JMPREL:
12224 name = RELOC_SECTION (htab, ".plt");
12225 get_vma:
12226 s = bfd_get_section_by_name (output_bfd, name);
12227 BFD_ASSERT (s != NULL);
12228 if (!htab->symbian_p)
12229 dyn.d_un.d_ptr = s->vma;
12230 else
12231 /* In the BPABI, tags in the PT_DYNAMIC section point
12232 at the file offset, not the memory address, for the
12233 convenience of the post linker. */
12234 dyn.d_un.d_ptr = s->filepos;
12235 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12236 break;
12237
12238 get_vma_if_bpabi:
12239 if (htab->symbian_p)
12240 goto get_vma;
12241 break;
12242
12243 case DT_PLTRELSZ:
12244 s = bfd_get_section_by_name (output_bfd,
12245 RELOC_SECTION (htab, ".plt"));
12246 BFD_ASSERT (s != NULL);
12247 dyn.d_un.d_val = s->size;
12248 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12249 break;
12250
12251 case DT_RELSZ:
12252 case DT_RELASZ:
12253 if (!htab->symbian_p)
12254 {
12255 /* My reading of the SVR4 ABI indicates that the
12256 procedure linkage table relocs (DT_JMPREL) should be
12257 included in the overall relocs (DT_REL). This is
12258 what Solaris does. However, UnixWare can not handle
12259 that case. Therefore, we override the DT_RELSZ entry
12260 here to make it not include the JMPREL relocs. Since
12261 the linker script arranges for .rel(a).plt to follow all
12262 other relocation sections, we don't have to worry
12263 about changing the DT_REL entry. */
12264 s = bfd_get_section_by_name (output_bfd,
12265 RELOC_SECTION (htab, ".plt"));
12266 if (s != NULL)
12267 dyn.d_un.d_val -= s->size;
12268 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12269 break;
12270 }
12271 /* Fall through. */
12272
12273 case DT_REL:
12274 case DT_RELA:
12275 /* In the BPABI, the DT_REL tag must point at the file
12276 offset, not the VMA, of the first relocation
12277 section. So, we use code similar to that in
12278 elflink.c, but do not check for SHF_ALLOC on the
12279 	       relocation section, since relocation sections are
12280 	       never allocated under the BPABI.  The comments above
12281 	       about UnixWare notwithstanding, we include all of the
12282 relocations here. */
12283 if (htab->symbian_p)
12284 {
12285 unsigned int i;
12286 type = ((dyn.d_tag == DT_REL || dyn.d_tag == DT_RELSZ)
12287 ? SHT_REL : SHT_RELA);
12288 dyn.d_un.d_val = 0;
12289 for (i = 1; i < elf_numsections (output_bfd); i++)
12290 {
12291 Elf_Internal_Shdr *hdr
12292 = elf_elfsections (output_bfd)[i];
12293 if (hdr->sh_type == type)
12294 {
12295 if (dyn.d_tag == DT_RELSZ
12296 || dyn.d_tag == DT_RELASZ)
12297 dyn.d_un.d_val += hdr->sh_size;
12298 else if ((ufile_ptr) hdr->sh_offset
12299 <= dyn.d_un.d_val - 1)
12300 dyn.d_un.d_val = hdr->sh_offset;
12301 }
12302 }
12303 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12304 }
12305 break;
12306
12307 /* Set the bottom bit of DT_INIT/FINI if the
12308 corresponding function is Thumb. */
12309 case DT_INIT:
12310 name = info->init_function;
12311 goto get_sym;
12312 case DT_FINI:
12313 name = info->fini_function;
12314 get_sym:
12315 /* If it wasn't set by elf_bfd_final_link
12316 then there is nothing to adjust. */
12317 if (dyn.d_un.d_val != 0)
12318 {
12319 struct elf_link_hash_entry * eh;
12320
12321 eh = elf_link_hash_lookup (elf_hash_table (info), name,
12322 FALSE, FALSE, TRUE);
12323 if (eh != NULL
12324 && ELF_ST_TYPE (eh->type) == STT_ARM_TFUNC)
12325 {
12326 dyn.d_un.d_val |= 1;
12327 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12328 }
12329 }
12330 break;
12331 }
12332 }
12333
12334 /* Fill in the first entry in the procedure linkage table. */
12335 if (splt->size > 0 && elf32_arm_hash_table (info)->plt_header_size)
12336 {
12337 const bfd_vma *plt0_entry;
12338 bfd_vma got_address, plt_address, got_displacement;
12339
12340 /* Calculate the addresses of the GOT and PLT. */
12341 got_address = sgot->output_section->vma + sgot->output_offset;
12342 plt_address = splt->output_section->vma + splt->output_offset;
12343
12344 if (htab->vxworks_p)
12345 {
12346 /* The VxWorks GOT is relocated by the dynamic linker.
12347 Therefore, we must emit relocations rather than simply
12348 computing the values now. */
12349 Elf_Internal_Rela rel;
12350
12351 plt0_entry = elf32_arm_vxworks_exec_plt0_entry;
12352 put_arm_insn (htab, output_bfd, plt0_entry[0],
12353 splt->contents + 0);
12354 put_arm_insn (htab, output_bfd, plt0_entry[1],
12355 splt->contents + 4);
12356 put_arm_insn (htab, output_bfd, plt0_entry[2],
12357 splt->contents + 8);
12358 bfd_put_32 (output_bfd, got_address, splt->contents + 12);
12359
12360 /* Generate a relocation for _GLOBAL_OFFSET_TABLE_. */
12361 rel.r_offset = plt_address + 12;
12362 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
12363 rel.r_addend = 0;
12364 SWAP_RELOC_OUT (htab) (output_bfd, &rel,
12365 htab->srelplt2->contents);
12366 }
12367 else
12368 {
12369 got_displacement = got_address - (plt_address + 16);
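	  /* Assuming the usual elf32_arm_plt0_entry sequence, the header
	     instruction that adds this displacement executes at offset 8,
	     where the ARM pc reads eight bytes ahead (plt_address + 16);
	     adding the displacement word stored at offset 16 below then
	     yields got_address.  */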
12370
12371 plt0_entry = elf32_arm_plt0_entry;
12372 put_arm_insn (htab, output_bfd, plt0_entry[0],
12373 splt->contents + 0);
12374 put_arm_insn (htab, output_bfd, plt0_entry[1],
12375 splt->contents + 4);
12376 put_arm_insn (htab, output_bfd, plt0_entry[2],
12377 splt->contents + 8);
12378 put_arm_insn (htab, output_bfd, plt0_entry[3],
12379 splt->contents + 12);
12380
12381 #ifdef FOUR_WORD_PLT
12382 /* The displacement value goes in the otherwise-unused
12383 last word of the second entry. */
12384 bfd_put_32 (output_bfd, got_displacement, splt->contents + 28);
12385 #else
12386 bfd_put_32 (output_bfd, got_displacement, splt->contents + 16);
12387 #endif
12388 }
12389 }
12390
12391 /* UnixWare sets the entsize of .plt to 4, although that doesn't
12392 really seem like the right value. */
12393 if (splt->output_section->owner == output_bfd)
12394 elf_section_data (splt->output_section)->this_hdr.sh_entsize = 4;
12395
12396 if (htab->vxworks_p && !info->shared && htab->splt->size > 0)
12397 {
12398 /* Correct the .rel(a).plt.unloaded relocations. They will have
12399 incorrect symbol indexes. */
12400 int num_plts;
12401 unsigned char *p;
12402
12403 num_plts = ((htab->splt->size - htab->plt_header_size)
12404 / htab->plt_entry_size);
12405 p = htab->srelplt2->contents + RELOC_SIZE (htab);
12406
12407 for (; num_plts; num_plts--)
12408 {
12409 Elf_Internal_Rela rel;
12410
12411 SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
12412 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
12413 SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
12414 p += RELOC_SIZE (htab);
12415
12416 SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
12417 rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
12418 SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
12419 p += RELOC_SIZE (htab);
12420 }
12421 }
12422 }
12423
12424 /* Fill in the first three entries in the global offset table. */
12425 if (sgot)
12426 {
12427 if (sgot->size > 0)
12428 {
12429 if (sdyn == NULL)
12430 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents);
12431 else
12432 bfd_put_32 (output_bfd,
12433 sdyn->output_section->vma + sdyn->output_offset,
12434 sgot->contents);
12435 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 4);
12436 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 8);
12437 }
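      /* GOT[0] now holds the address of .dynamic (or zero).  GOT[1] and
	 GOT[2] are left as zero here; at run time the dynamic linker
	 conventionally fills them in with its link-map pointer and
	 lazy-resolution entry point for use by the PLT header.  */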
12438
12439 elf_section_data (sgot->output_section)->this_hdr.sh_entsize = 4;
12440 }
12441
12442 return TRUE;
12443 }
12444
12445 static void
12446 elf32_arm_post_process_headers (bfd * abfd, struct bfd_link_info * link_info ATTRIBUTE_UNUSED)
12447 {
12448 Elf_Internal_Ehdr * i_ehdrp; /* ELF file header, internal form. */
12449 struct elf32_arm_link_hash_table *globals;
12450
12451 i_ehdrp = elf_elfheader (abfd);
12452
12453 if (EF_ARM_EABI_VERSION (i_ehdrp->e_flags) == EF_ARM_EABI_UNKNOWN)
12454 i_ehdrp->e_ident[EI_OSABI] = ELFOSABI_ARM;
12455 else
12456 i_ehdrp->e_ident[EI_OSABI] = 0;
12457 i_ehdrp->e_ident[EI_ABIVERSION] = ARM_ELF_ABI_VERSION;
12458
12459 if (link_info)
12460 {
12461 globals = elf32_arm_hash_table (link_info);
12462 if (globals->byteswap_code)
12463 i_ehdrp->e_flags |= EF_ARM_BE8;
12464 }
12465 }
12466
12467 static enum elf_reloc_type_class
12468 elf32_arm_reloc_type_class (const Elf_Internal_Rela *rela)
12469 {
12470 switch ((int) ELF32_R_TYPE (rela->r_info))
12471 {
12472 case R_ARM_RELATIVE:
12473 return reloc_class_relative;
12474 case R_ARM_JUMP_SLOT:
12475 return reloc_class_plt;
12476 case R_ARM_COPY:
12477 return reloc_class_copy;
12478 default:
12479 return reloc_class_normal;
12480 }
12481 }
12482
12483 /* Mark ARM ELF note sections link-once so that duplicate notes are merged.  */
12484
12485 static bfd_boolean
12486 elf32_arm_section_flags (flagword *flags, const Elf_Internal_Shdr *hdr)
12487 {
12488 if (hdr->sh_type == SHT_NOTE)
12489 *flags |= SEC_LINK_ONCE | SEC_LINK_DUPLICATES_SAME_CONTENTS;
12490
12491 return TRUE;
12492 }
12493
12494 static void
12495 elf32_arm_final_write_processing (bfd *abfd, bfd_boolean linker ATTRIBUTE_UNUSED)
12496 {
12497 bfd_arm_update_notes (abfd, ARM_NOTE_SECTION);
12498 }
12499
12500 /* Return TRUE if this is an unwinding table entry. */
12501
12502 static bfd_boolean
12503 is_arm_elf_unwind_section_name (bfd * abfd ATTRIBUTE_UNUSED, const char * name)
12504 {
12505 return (CONST_STRNEQ (name, ELF_STRING_ARM_unwind)
12506 || CONST_STRNEQ (name, ELF_STRING_ARM_unwind_once));
12507 }
12508
12509
12510 /* Set the type and flags for an ARM section. We do this by
12511 the section name, which is a hack, but ought to work. */
12512
12513 static bfd_boolean
12514 elf32_arm_fake_sections (bfd * abfd, Elf_Internal_Shdr * hdr, asection * sec)
12515 {
12516 const char * name;
12517
12518 name = bfd_get_section_name (abfd, sec);
12519
12520 if (is_arm_elf_unwind_section_name (abfd, name))
12521 {
12522 hdr->sh_type = SHT_ARM_EXIDX;
12523 hdr->sh_flags |= SHF_LINK_ORDER;
12524 }
12525 return TRUE;
12526 }
12527
12528 /* Handle an ARM specific section when reading an object file. This is
12529 called when bfd_section_from_shdr finds a section with an unknown
12530 type. */
12531
12532 static bfd_boolean
12533 elf32_arm_section_from_shdr (bfd *abfd,
12534 Elf_Internal_Shdr * hdr,
12535 const char *name,
12536 int shindex)
12537 {
12538 /* There ought to be a place to keep ELF backend specific flags, but
12539 at the moment there isn't one. We just keep track of the
12540 sections by their name, instead. Fortunately, the ABI gives
12541 names for all the ARM specific sections, so we will probably get
12542 away with this. */
12543 switch (hdr->sh_type)
12544 {
12545 case SHT_ARM_EXIDX:
12546 case SHT_ARM_PREEMPTMAP:
12547 case SHT_ARM_ATTRIBUTES:
12548 break;
12549
12550 default:
12551 return FALSE;
12552 }
12553
12554 if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
12555 return FALSE;
12556
12557 return TRUE;
12558 }
12559
12560 /* A structure used to record a list of sections, independently
12561 of the next and prev fields in the asection structure. */
12562 typedef struct section_list
12563 {
12564 asection * sec;
12565 struct section_list * next;
12566 struct section_list * prev;
12567 }
12568 section_list;
12569
12570 /* Unfortunately we need to keep a list of sections for which
12571 an _arm_elf_section_data structure has been allocated. This
12572 is because it is possible for functions like elf32_arm_write_section
12573    to be called on a section which has had an elf_section_data structure
12574 allocated for it (and so the used_by_bfd field is valid) but
12575 for which the ARM extended version of this structure - the
12576 _arm_elf_section_data structure - has not been allocated. */
12577 static section_list * sections_with_arm_elf_section_data = NULL;
12578
12579 static void
12580 record_section_with_arm_elf_section_data (asection * sec)
12581 {
12582 struct section_list * entry;
12583
12584 entry = bfd_malloc (sizeof (* entry));
12585 if (entry == NULL)
12586 return;
12587 entry->sec = sec;
12588 entry->next = sections_with_arm_elf_section_data;
12589 entry->prev = NULL;
12590 if (entry->next != NULL)
12591 entry->next->prev = entry;
12592 sections_with_arm_elf_section_data = entry;
12593 }
12594
12595 static struct section_list *
12596 find_arm_elf_section_entry (asection * sec)
12597 {
12598 struct section_list * entry;
12599 static struct section_list * last_entry = NULL;
12600
12601 /* This is a short cut for the typical case where the sections are added
12602 to the sections_with_arm_elf_section_data list in forward order and
12603 then looked up here in backwards order. This makes a real difference
12604 to the ld-srec/sec64k.exp linker test. */
12605 entry = sections_with_arm_elf_section_data;
12606 if (last_entry != NULL)
12607 {
12608 if (last_entry->sec == sec)
12609 entry = last_entry;
12610 else if (last_entry->next != NULL
12611 && last_entry->next->sec == sec)
12612 entry = last_entry->next;
12613 }
12614
12615 for (; entry; entry = entry->next)
12616 if (entry->sec == sec)
12617 break;
12618
12619 if (entry)
12620 /* Record the entry prior to this one - it is the entry we are most
12621 likely to want to locate next time. Also this way if we have been
12622 called from unrecord_section_with_arm_elf_section_data() we will not
12623 be caching a pointer that is about to be freed. */
12624 last_entry = entry->prev;
12625
12626 return entry;
12627 }
12628
12629 static _arm_elf_section_data *
12630 get_arm_elf_section_data (asection * sec)
12631 {
12632 struct section_list * entry;
12633
12634 entry = find_arm_elf_section_entry (sec);
12635
12636 if (entry)
12637 return elf32_arm_section_data (entry->sec);
12638 else
12639 return NULL;
12640 }
12641
12642 static void
12643 unrecord_section_with_arm_elf_section_data (asection * sec)
12644 {
12645 struct section_list * entry;
12646
12647 entry = find_arm_elf_section_entry (sec);
12648
12649 if (entry)
12650 {
12651 if (entry->prev != NULL)
12652 entry->prev->next = entry->next;
12653 if (entry->next != NULL)
12654 entry->next->prev = entry->prev;
12655 if (entry == sections_with_arm_elf_section_data)
12656 sections_with_arm_elf_section_data = entry->next;
12657 free (entry);
12658 }
12659 }
12660
12661
12662 typedef struct
12663 {
12664 void *finfo;
12665 struct bfd_link_info *info;
12666 asection *sec;
12667 int sec_shndx;
12668 int (*func) (void *, const char *, Elf_Internal_Sym *,
12669 asection *, struct elf_link_hash_entry *);
12670 } output_arch_syminfo;
12671
12672 enum map_symbol_type
12673 {
12674 ARM_MAP_ARM,
12675 ARM_MAP_THUMB,
12676 ARM_MAP_DATA
12677 };
12678
12679
12680 /* Output a single mapping symbol. */
12681
12682 static bfd_boolean
12683 elf32_arm_output_map_sym (output_arch_syminfo *osi,
12684 enum map_symbol_type type,
12685 bfd_vma offset)
12686 {
12687 static const char *names[3] = {"$a", "$t", "$d"};
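  /* These are the ARM ELF mapping symbols: "$a" marks the start of ARM
     code, "$t" the start of Thumb code and "$d" the start of data.  The
     array is indexed by enum map_symbol_type, so the ARM_MAP_* values
     must keep their 0, 1 and 2 ordering.  */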
12688 struct elf32_arm_link_hash_table *htab;
12689 Elf_Internal_Sym sym;
12690
12691 htab = elf32_arm_hash_table (osi->info);
12692 sym.st_value = osi->sec->output_section->vma
12693 + osi->sec->output_offset
12694 + offset;
12695 sym.st_size = 0;
12696 sym.st_other = 0;
12697 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
12698 sym.st_shndx = osi->sec_shndx;
12699 return osi->func (osi->finfo, names[type], &sym, osi->sec, NULL) == 1;
12700 }
12701
12702
12703 /* Output mapping symbols for PLT entries associated with H. */
12704
12705 static bfd_boolean
12706 elf32_arm_output_plt_map (struct elf_link_hash_entry *h, void *inf)
12707 {
12708 output_arch_syminfo *osi = (output_arch_syminfo *) inf;
12709 struct elf32_arm_link_hash_table *htab;
12710 struct elf32_arm_link_hash_entry *eh;
12711 bfd_vma addr;
12712
12713 htab = elf32_arm_hash_table (osi->info);
12714
12715 if (h->root.type == bfd_link_hash_indirect)
12716 return TRUE;
12717
12718 if (h->root.type == bfd_link_hash_warning)
12719 /* When warning symbols are created, they **replace** the "real"
12720 entry in the hash table, thus we never get to see the real
12721 symbol in a hash traversal. So look at it now. */
12722 h = (struct elf_link_hash_entry *) h->root.u.i.link;
12723
12724 if (h->plt.offset == (bfd_vma) -1)
12725 return TRUE;
12726
12727 eh = (struct elf32_arm_link_hash_entry *) h;
12728 addr = h->plt.offset;
12729 if (htab->symbian_p)
12730 {
12731 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
12732 return FALSE;
12733 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 4))
12734 return FALSE;
12735 }
12736 else if (htab->vxworks_p)
12737 {
12738 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
12739 return FALSE;
12740 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 8))
12741 return FALSE;
12742 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr + 12))
12743 return FALSE;
12744 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 20))
12745 return FALSE;
12746 }
12747 else
12748 {
12749 bfd_signed_vma thumb_refs;
12750
12751 thumb_refs = eh->plt_thumb_refcount;
12752 if (!htab->use_blx)
12753 thumb_refs += eh->plt_maybe_thumb_refcount;
12754
12755 if (thumb_refs > 0)
12756 {
12757 if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr - 4))
12758 return FALSE;
12759 }
12760 #ifdef FOUR_WORD_PLT
12761 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
12762 return FALSE;
12763 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 12))
12764 return FALSE;
12765 #else
12766 	  /* A three-word PLT with no Thumb thunk contains only ARM code,
12767 	     so we only need to output a mapping symbol for the first PLT
12768 	     entry and for entries with Thumb thunks.  */
12769 if (thumb_refs > 0 || addr == 20)
12770 {
12771 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
12772 return FALSE;
12773 }
12774 #endif
12775 }
12776
12777 return TRUE;
12778 }
12779
12780 /* Output a single local symbol for a generated stub. */
12781
12782 static bfd_boolean
12783 elf32_arm_output_stub_sym (output_arch_syminfo *osi, const char *name,
12784 bfd_vma offset, bfd_vma size)
12785 {
12786 struct elf32_arm_link_hash_table *htab;
12787 Elf_Internal_Sym sym;
12788
12789 htab = elf32_arm_hash_table (osi->info);
12790 sym.st_value = osi->sec->output_section->vma
12791 + osi->sec->output_offset
12792 + offset;
12793 sym.st_size = size;
12794 sym.st_other = 0;
12795 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
12796 sym.st_shndx = osi->sec_shndx;
12797 return osi->func (osi->finfo, name, &sym, osi->sec, NULL) == 1;
12798 }
12799
12800 static bfd_boolean
12801 arm_map_one_stub (struct bfd_hash_entry * gen_entry,
12802 void * in_arg)
12803 {
12804 struct elf32_arm_stub_hash_entry *stub_entry;
12805 struct bfd_link_info *info;
12806 struct elf32_arm_link_hash_table *htab;
12807 asection *stub_sec;
12808 bfd_vma addr;
12809 char *stub_name;
12810 output_arch_syminfo *osi;
12811 const insn_sequence *template;
12812 enum stub_insn_type prev_type;
12813 int size;
12814 int i;
12815 enum map_symbol_type sym_type;
12816
12817 /* Massage our args to the form they really have. */
12818 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
12819 osi = (output_arch_syminfo *) in_arg;
12820
12821 info = osi->info;
12822
12823 htab = elf32_arm_hash_table (info);
12824 stub_sec = stub_entry->stub_sec;
12825
12826 /* Ensure this stub is attached to the current section being
12827 processed. */
12828 if (stub_sec != osi->sec)
12829 return TRUE;
12830
12831 addr = (bfd_vma) stub_entry->stub_offset;
12832 stub_name = stub_entry->output_name;
12833
12834 template = stub_entry->stub_template;
12835 switch (template[0].type)
12836 {
12837 case ARM_TYPE:
12838 if (!elf32_arm_output_stub_sym (osi, stub_name, addr, stub_entry->stub_size))
12839 return FALSE;
12840 break;
12841 case THUMB16_TYPE:
12842 case THUMB32_TYPE:
12843 if (!elf32_arm_output_stub_sym (osi, stub_name, addr | 1,
12844 stub_entry->stub_size))
12845 return FALSE;
12846 break;
12847 default:
12848 BFD_FAIL ();
12849 return 0;
12850 }
12851
12852 prev_type = DATA_TYPE;
12853 size = 0;
12854 for (i = 0; i < stub_entry->stub_template_size; i++)
12855 {
12856 switch (template[i].type)
12857 {
12858 case ARM_TYPE:
12859 sym_type = ARM_MAP_ARM;
12860 break;
12861
12862 case THUMB16_TYPE:
12863 case THUMB32_TYPE:
12864 sym_type = ARM_MAP_THUMB;
12865 break;
12866
12867 case DATA_TYPE:
12868 sym_type = ARM_MAP_DATA;
12869 break;
12870
12871 default:
12872 BFD_FAIL ();
12873 return FALSE;
12874 }
12875
12876 if (template[i].type != prev_type)
12877 {
12878 prev_type = template[i].type;
12879 if (!elf32_arm_output_map_sym (osi, sym_type, addr + size))
12880 return FALSE;
12881 }
12882
12883 switch (template[i].type)
12884 {
12885 case ARM_TYPE:
12886 case THUMB32_TYPE:
12887 size += 4;
12888 break;
12889
12890 case THUMB16_TYPE:
12891 size += 2;
12892 break;
12893
12894 case DATA_TYPE:
12895 size += 4;
12896 break;
12897
12898 default:
12899 BFD_FAIL ();
12900 return FALSE;
12901 }
12902 }
12903
12904 return TRUE;
12905 }
12906
12907 /* Output mapping symbols for linker generated sections. */
12908
12909 static bfd_boolean
12910 elf32_arm_output_arch_local_syms (bfd *output_bfd,
12911 struct bfd_link_info *info,
12912 void *finfo,
12913 int (*func) (void *, const char *,
12914 Elf_Internal_Sym *,
12915 asection *,
12916 struct elf_link_hash_entry *))
12917 {
12918 output_arch_syminfo osi;
12919 struct elf32_arm_link_hash_table *htab;
12920 bfd_vma offset;
12921 bfd_size_type size;
12922
12923 htab = elf32_arm_hash_table (info);
12924 check_use_blx (htab);
12925
12926 osi.finfo = finfo;
12927 osi.info = info;
12928 osi.func = func;
12929
12930 /* ARM->Thumb glue. */
12931 if (htab->arm_glue_size > 0)
12932 {
12933 osi.sec = bfd_get_section_by_name (htab->bfd_of_glue_owner,
12934 ARM2THUMB_GLUE_SECTION_NAME);
12935
12936 osi.sec_shndx = _bfd_elf_section_from_bfd_section
12937 (output_bfd, osi.sec->output_section);
12938 if (info->shared || htab->root.is_relocatable_executable
12939 || htab->pic_veneer)
12940 size = ARM2THUMB_PIC_GLUE_SIZE;
12941 else if (htab->use_blx)
12942 size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
12943 else
12944 size = ARM2THUMB_STATIC_GLUE_SIZE;
12945
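/* Whichever flavour was chosen above, each ARM->Thumb veneer ends with a
   single literal data word, hence the $a at its start and the $d at its
   last word below.  */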
12946 for (offset = 0; offset < htab->arm_glue_size; offset += size)
12947 {
12948 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset);
12949 elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, offset + size - 4);
12950 }
12951 }
12952
12953 /* Thumb->ARM glue. */
12954 if (htab->thumb_glue_size > 0)
12955 {
12956 osi.sec = bfd_get_section_by_name (htab->bfd_of_glue_owner,
12957 THUMB2ARM_GLUE_SECTION_NAME);
12958
12959 osi.sec_shndx = _bfd_elf_section_from_bfd_section
12960 (output_bfd, osi.sec->output_section);
12961 size = THUMB2ARM_GLUE_SIZE;
12962
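/* Each Thumb->ARM veneer is four bytes of Thumb code ("bx pc" plus a
   padding nop) followed by an ARM branch, hence the $t at offset 0 and
   the $a at offset 4 below.  */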
12963 for (offset = 0; offset < htab->thumb_glue_size; offset += size)
12964 {
12965 elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, offset);
12966 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset + 4);
12967 }
12968 }
12969
12970 /* ARMv4 BX veneers. */
12971 if (htab->bx_glue_size > 0)
12972 {
12973 osi.sec = bfd_get_section_by_name (htab->bfd_of_glue_owner,
12974 ARM_BX_GLUE_SECTION_NAME);
12975
12976 osi.sec_shndx = _bfd_elf_section_from_bfd_section
12977 (output_bfd, osi.sec->output_section);
12978
12979 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0);
12980 }
12981
12982 /* Long call stubs. */
12983 if (htab->stub_bfd && htab->stub_bfd->sections)
12984 {
12985 asection* stub_sec;
12986
12987 for (stub_sec = htab->stub_bfd->sections;
12988 stub_sec != NULL;
12989 stub_sec = stub_sec->next)
12990 {
12991 /* Ignore non-stub sections. */
12992 if (!strstr (stub_sec->name, STUB_SUFFIX))
12993 continue;
12994
12995 osi.sec = stub_sec;
12996
12997 osi.sec_shndx = _bfd_elf_section_from_bfd_section
12998 (output_bfd, osi.sec->output_section);
12999
13000 bfd_hash_traverse (&htab->stub_hash_table, arm_map_one_stub, &osi);
13001 }
13002 }
13003
13004 /* Finally, output mapping symbols for the PLT. */
13005 if (!htab->splt || htab->splt->size == 0)
13006 return TRUE;
13007
13008 osi.sec_shndx = _bfd_elf_section_from_bfd_section (output_bfd,
13009 htab->splt->output_section);
13010 osi.sec = htab->splt;
13011 /* Output mapping symbols for the PLT header.  SymbianOS does not have a
13012 PLT header. */
13013 if (htab->vxworks_p)
13014 {
13015 /* VxWorks shared libraries have no PLT header. */
13016 if (!info->shared)
13017 {
13018 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
13019 return FALSE;
13020 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 12))
13021 return FALSE;
13022 }
13023 }
13024 else if (!htab->symbian_p)
13025 {
13026 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
13027 return FALSE;
13028 #ifndef FOUR_WORD_PLT
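/* In the default five-word PLT header the word at offset 16 is a data
   literal (the GOT offset), hence the $d below.  */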
13029 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 16))
13030 return FALSE;
13031 #endif
13032 }
13033
13034 elf_link_hash_traverse (&htab->root, elf32_arm_output_plt_map, (void *) &osi);
13035 return TRUE;
13036 }
13037
13038 /* Allocate target specific section data. */
13039
13040 static bfd_boolean
13041 elf32_arm_new_section_hook (bfd *abfd, asection *sec)
13042 {
13043 if (!sec->used_by_bfd)
13044 {
13045 _arm_elf_section_data *sdata;
13046 bfd_size_type amt = sizeof (*sdata);
13047
13048 sdata = bfd_zalloc (abfd, amt);
13049 if (sdata == NULL)
13050 return FALSE;
13051 sec->used_by_bfd = sdata;
13052 }
13053
13054 record_section_with_arm_elf_section_data (sec);
13055
13056 return _bfd_elf_new_section_hook (abfd, sec);
13057 }
13058
13059
13060 /* Used to order a list of mapping symbols by address. */
13061
13062 static int
13063 elf32_arm_compare_mapping (const void * a, const void * b)
13064 {
13065 const elf32_arm_section_map *amap = (const elf32_arm_section_map *) a;
13066 const elf32_arm_section_map *bmap = (const elf32_arm_section_map *) b;
13067
13068 if (amap->vma > bmap->vma)
13069 return 1;
13070 else if (amap->vma < bmap->vma)
13071 return -1;
13072 else if (amap->type > bmap->type)
13073 /* Ensure results do not depend on the host qsort for objects with
13074 multiple mapping symbols at the same address by sorting on type
13075 after vma. */
13076 return 1;
13077 else if (amap->type < bmap->type)
13078 return -1;
13079 else
13080 return 0;
13081 }
13082
13083 /* Add OFFSET to lower 31 bits of ADDR, leaving other bits unmodified. */
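/* For example, offset_prel31 (0x80000010, 0x20) is 0x80000030: bit 31 is
   preserved and any carry out of bit 30 is discarded.  */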
13084
13085 static unsigned long
13086 offset_prel31 (unsigned long addr, bfd_vma offset)
13087 {
13088 return (addr & ~0x7ffffffful) | ((addr + offset) & 0x7ffffffful);
13089 }
13090
13091 /* Copy an .ARM.exidx table entry, adding OFFSET to (applied) PREL31
13092 relocations. */
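/* Each entry is two words: a PREL31 offset to the function start, then
   either EXIDX_CANTUNWIND (0x1), an inline unwind description with the
   top bit set, or a PREL31 offset to an .ARM.extab entry.  */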
13093
13094 static void
13095 copy_exidx_entry (bfd *output_bfd, bfd_byte *to, bfd_byte *from, bfd_vma offset)
13096 {
13097 unsigned long first_word = bfd_get_32 (output_bfd, from);
13098 unsigned long second_word = bfd_get_32 (output_bfd, from + 4);
13099
13100 /* High bit of first word is supposed to be zero. */
13101 if ((first_word & 0x80000000ul) == 0)
13102 first_word = offset_prel31 (first_word, offset);
13103
13104 /* If the high bit of the second word is clear, and the word is not 0x1
13105 (EXIDX_CANTUNWIND), it is a PREL31 offset to an .ARM.extab entry. */
13106 if ((second_word != 0x1) && ((second_word & 0x80000000ul) == 0))
13107 second_word = offset_prel31 (second_word, offset);
13108
13109 bfd_put_32 (output_bfd, first_word, to);
13110 bfd_put_32 (output_bfd, second_word, to + 4);
13111 }
13112
13113 /* Data for make_branch_to_a8_stub(). */
13114
13115 struct a8_branch_to_stub_data {
13116 asection *writing_section;
13117 bfd_byte *contents;
13118 };
13119
13120
13121 /* Helper to insert branches to Cortex-A8 erratum stubs in the right
13122 places for a particular section. */
13123
13124 static bfd_boolean
13125 make_branch_to_a8_stub (struct bfd_hash_entry *gen_entry,
13126 void *in_arg)
13127 {
13128 struct elf32_arm_stub_hash_entry *stub_entry;
13129 struct a8_branch_to_stub_data *data;
13130 bfd_byte *contents;
13131 unsigned long branch_insn;
13132 bfd_vma veneered_insn_loc, veneer_entry_loc;
13133 bfd_signed_vma branch_offset;
13134 bfd *abfd;
13135 unsigned int index;
13136
13137 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
13138 data = (struct a8_branch_to_stub_data *) in_arg;
13139
13140 if (stub_entry->target_section != data->writing_section
13141 || stub_entry->stub_type < arm_stub_a8_veneer_b_cond)
13142 return TRUE;
13143
13144 contents = data->contents;
13145
13146 veneered_insn_loc = stub_entry->target_section->output_section->vma
13147 + stub_entry->target_section->output_offset
13148 + stub_entry->target_value;
13149
13150 veneer_entry_loc = stub_entry->stub_sec->output_section->vma
13151 + stub_entry->stub_sec->output_offset
13152 + stub_entry->stub_offset;
13153
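/* A Thumb BLX computes its target from the PC aligned down to a word
   boundary, so align the veneered branch's location the same way.  */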
13154 if (stub_entry->stub_type == arm_stub_a8_veneer_blx)
13155 veneered_insn_loc &= ~3u;
13156
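/* A Thumb branch reads the PC as its own address plus 4.  */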
13157 branch_offset = veneer_entry_loc - veneered_insn_loc - 4;
13158
13159 abfd = stub_entry->target_section->owner;
13160 index = stub_entry->target_value;
13161
13162 /* We attempt to avoid this condition by setting stubs_always_after_branch
13163 in elf32_arm_size_stubs if we've enabled the Cortex-A8 erratum workaround.
13164 This check is just to be on the safe side... */
13165 if ((veneered_insn_loc & ~0xfff) == (veneer_entry_loc & ~0xfff))
13166 {
13167 (*_bfd_error_handler) (_("%B: error: Cortex-A8 erratum stub is "
13168 "allocated in unsafe location"), abfd);
13169 return FALSE;
13170 }
13171
13172 switch (stub_entry->stub_type)
13173 {
13174 case arm_stub_a8_veneer_b:
13175 case arm_stub_a8_veneer_b_cond:
13176 branch_insn = 0xf0009000;
13177 goto jump24;
13178
13179 case arm_stub_a8_veneer_blx:
13180 branch_insn = 0xf000e800;
13181 goto jump24;
13182
13183 case arm_stub_a8_veneer_bl:
13184 {
13185 unsigned int i1, j1, i2, j2, s;
13186
13187 branch_insn = 0xf000d000;
13188
13189 jump24:
13190 if (branch_offset < -16777216 || branch_offset > 16777214)
13191 {
13192 /* There's not much we can do apart from complain if this
13193 happens. */
13194 (*_bfd_error_handler) (_("%B: error: Cortex-A8 erratum stub out "
13195 "of range (input file too large)"), abfd);
13196 return FALSE;
13197 }
13198
13199 /* i1 = not(j1 eor s), so:
13200 not i1 = j1 eor s
13201 j1 = (not i1) eor s. */
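/* Field layout: from the halfword-aligned offset, imm11 = bits 11:1,
   imm10 = bits 21:12, I2 = bit 22, I1 = bit 23, S = bit 24.  They are
   packed below as imm11 -> bits 10:0, J2 -> bit 11, J1 -> bit 13,
   imm10 -> bits 25:16 and S -> bit 26 of the encoding (first halfword
   in the upper 16 bits).  */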
13202
13203 branch_insn |= (branch_offset >> 1) & 0x7ff;
13204 branch_insn |= ((branch_offset >> 12) & 0x3ff) << 16;
13205 i2 = (branch_offset >> 22) & 1;
13206 i1 = (branch_offset >> 23) & 1;
13207 s = (branch_offset >> 24) & 1;
13208 j1 = (!i1) ^ s;
13209 j2 = (!i2) ^ s;
13210 branch_insn |= j2 << 11;
13211 branch_insn |= j1 << 13;
13212 branch_insn |= s << 26;
13213 }
13214 break;
13215
13216 default:
13217 BFD_FAIL ();
13218 return FALSE;
13219 }
13220
13221 bfd_put_16 (abfd, (branch_insn >> 16) & 0xffff, &contents[index]);
13222 bfd_put_16 (abfd, branch_insn & 0xffff, &contents[index + 2]);
13223
13224 return TRUE;
13225 }
13226
13227 /* Apply erratum fixes, .ARM.exidx edits and code byteswapping.  Return FALSE
13228 so the section is written out as normal (edited .ARM.exidx returns TRUE). */
13229
13230 static bfd_boolean
13231 elf32_arm_write_section (bfd *output_bfd,
13232 struct bfd_link_info *link_info,
13233 asection *sec,
13234 bfd_byte *contents)
13235 {
13236 unsigned int mapcount, errcount;
13237 _arm_elf_section_data *arm_data;
13238 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
13239 elf32_arm_section_map *map;
13240 elf32_vfp11_erratum_list *errnode;
13241 bfd_vma ptr;
13242 bfd_vma end;
13243 bfd_vma offset = sec->output_section->vma + sec->output_offset;
13244 bfd_byte tmp;
13245 unsigned int i;
13246
13247 /* If this section has not been allocated an _arm_elf_section_data
13248 structure then we cannot record anything. */
13249 arm_data = get_arm_elf_section_data (sec);
13250 if (arm_data == NULL)
13251 return FALSE;
13252
13253 mapcount = arm_data->mapcount;
13254 map = arm_data->map;
13255 errcount = arm_data->erratumcount;
13256
13257 if (errcount != 0)
13258 {
13259 unsigned int endianflip = bfd_big_endian (output_bfd) ? 3 : 0;
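/* The byte offsets below are word-aligned, so XORing them with 3 stores
   each fixed-up word most-significant byte first on big-endian output
   and least-significant byte first otherwise.  */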
13260
13261 for (errnode = arm_data->erratumlist; errnode != 0;
13262 errnode = errnode->next)
13263 {
13264 bfd_vma index = errnode->vma - offset;
13265
13266 switch (errnode->type)
13267 {
13268 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
13269 {
13270 bfd_vma branch_to_veneer;
13271 /* Original condition code of instruction, plus bit mask for
13272 ARM B instruction. */
13273 unsigned int insn = (errnode->u.b.vfp_insn & 0xf0000000)
13274 | 0x0a000000;
13275
13276 /* The instruction is before the label. */
13277 index -= 4;
13278
13279 /* The branch lives at errnode->vma - 4 (see above) and the ARM PC reads as its address + 8, giving the -4 below. */
13280 branch_to_veneer = errnode->u.b.veneer->vma
13281 - errnode->vma - 4;
13282
13283 if ((signed) branch_to_veneer < -(1 << 25)
13284 || (signed) branch_to_veneer >= (1 << 25))
13285 (*_bfd_error_handler) (_("%B: error: VFP11 veneer out of "
13286 "range"), output_bfd);
13287
13288 insn |= (branch_to_veneer >> 2) & 0xffffff;
13289 contents[endianflip ^ index] = insn & 0xff;
13290 contents[endianflip ^ (index + 1)] = (insn >> 8) & 0xff;
13291 contents[endianflip ^ (index + 2)] = (insn >> 16) & 0xff;
13292 contents[endianflip ^ (index + 3)] = (insn >> 24) & 0xff;
13293 }
13294 break;
13295
13296 case VFP11_ERRATUM_ARM_VENEER:
13297 {
13298 bfd_vma branch_from_veneer;
13299 unsigned int insn;
13300
13301 /* The branch back is written 4 bytes into the veneer and the ARM PC reads as its address + 8, hence the -12. */
13302 branch_from_veneer = errnode->u.v.branch->vma
13303 - errnode->vma - 12;
13304
13305 if ((signed) branch_from_veneer < -(1 << 25)
13306 || (signed) branch_from_veneer >= (1 << 25))
13307 (*_bfd_error_handler) (_("%B: error: VFP11 veneer out of "
13308 "range"), output_bfd);
13309
13310 /* Original instruction. */
13311 insn = errnode->u.v.branch->u.b.vfp_insn;
13312 contents[endianflip ^ index] = insn & 0xff;
13313 contents[endianflip ^ (index + 1)] = (insn >> 8) & 0xff;
13314 contents[endianflip ^ (index + 2)] = (insn >> 16) & 0xff;
13315 contents[endianflip ^ (index + 3)] = (insn >> 24) & 0xff;
13316
13317 /* Branch back to insn after original insn. */
13318 insn = 0xea000000 | ((branch_from_veneer >> 2) & 0xffffff);
13319 contents[endianflip ^ (index + 4)] = insn & 0xff;
13320 contents[endianflip ^ (index + 5)] = (insn >> 8) & 0xff;
13321 contents[endianflip ^ (index + 6)] = (insn >> 16) & 0xff;
13322 contents[endianflip ^ (index + 7)] = (insn >> 24) & 0xff;
13323 }
13324 break;
13325
13326 default:
13327 abort ();
13328 }
13329 }
13330 }
13331
13332 if (arm_data->elf.this_hdr.sh_type == SHT_ARM_EXIDX)
13333 {
13334 arm_unwind_table_edit *edit_node
13335 = arm_data->u.exidx.unwind_edit_list;
13336 /* Now, sec->size is the size of the section we will write.  The original
13337 size (before we merged duplicate entries and inserted EXIDX_CANTUNWIND
13338 markers) was sec->rawsize.  (If we performed no edits, rawsize will be
13339 zero and we should use size instead.) */
13340 bfd_byte *edited_contents = bfd_malloc (sec->size);
13341 unsigned int input_size = sec->rawsize ? sec->rawsize : sec->size;
13342 unsigned int in_index, out_index;
13343 bfd_vma add_to_offsets = 0;
13344
13345 for (in_index = 0, out_index = 0; in_index * 8 < input_size || edit_node;)
13346 {
13347 if (edit_node)
13348 {
13349 unsigned int edit_index = edit_node->index;
13350
13351 if (in_index < edit_index && in_index * 8 < input_size)
13352 {
13353 copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
13354 contents + in_index * 8, add_to_offsets);
13355 out_index++;
13356 in_index++;
13357 }
13358 else if (in_index == edit_index
13359 || (in_index * 8 >= input_size
13360 && edit_index == UINT_MAX))
13361 {
13362 switch (edit_node->type)
13363 {
13364 case DELETE_EXIDX_ENTRY:
13365 in_index++;
13366 add_to_offsets += 8;
13367 break;
13368
13369 case INSERT_EXIDX_CANTUNWIND_AT_END:
13370 {
13371 asection *text_sec = edit_node->linked_section;
13372 bfd_vma text_offset = text_sec->output_section->vma
13373 + text_sec->output_offset
13374 + text_sec->size;
13375 bfd_vma exidx_offset = offset + out_index * 8;
13376 unsigned long prel31_offset;
13377
13378 /* Note: this is meant to be equivalent to an
13379 R_ARM_PREL31 relocation. These synthetic
13380 EXIDX_CANTUNWIND markers are not relocated by the
13381 usual BFD method. */
13382 prel31_offset = (text_offset - exidx_offset)
13383 & 0x7ffffffful;
13384
13385 /* First address we can't unwind. */
13386 bfd_put_32 (output_bfd, prel31_offset,
13387 &edited_contents[out_index * 8]);
13388
13389 /* Code for EXIDX_CANTUNWIND. */
13390 bfd_put_32 (output_bfd, 0x1,
13391 &edited_contents[out_index * 8 + 4]);
13392
13393 out_index++;
13394 add_to_offsets -= 8;
13395 }
13396 break;
13397 }
13398
13399 edit_node = edit_node->next;
13400 }
13401 }
13402 else
13403 {
13404 /* No more edits, copy remaining entries verbatim. */
13405 copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
13406 contents + in_index * 8, add_to_offsets);
13407 out_index++;
13408 in_index++;
13409 }
13410 }
13411
13412 if (!(sec->flags & SEC_EXCLUDE) && !(sec->flags & SEC_NEVER_LOAD))
13413 bfd_set_section_contents (output_bfd, sec->output_section,
13414 edited_contents,
13415 (file_ptr) sec->output_offset, sec->size);
13416
13417 return TRUE;
13418 }
13419
13420 /* Fix code to point to Cortex-A8 erratum stubs. */
13421 if (globals->fix_cortex_a8)
13422 {
13423 struct a8_branch_to_stub_data data;
13424
13425 data.writing_section = sec;
13426 data.contents = contents;
13427
13428 bfd_hash_traverse (&globals->stub_hash_table, make_branch_to_a8_stub,
13429 &data);
13430 }
13431
13432 if (mapcount == 0)
13433 return FALSE;
13434
13435 if (globals->byteswap_code)
13436 {
13437 qsort (map, mapcount, sizeof (* map), elf32_arm_compare_mapping);
13438
13439 ptr = map[0].vma;
13440 for (i = 0; i < mapcount; i++)
13441 {
13442 if (i == mapcount - 1)
13443 end = sec->size;
13444 else
13445 end = map[i + 1].vma;
13446
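/* map[i].type is the mapping symbol class for this region: 'a' marks
   ARM code ($a), 't' Thumb code ($t) and 'd' data ($d).  */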
13447 switch (map[i].type)
13448 {
13449 case 'a':
13450 /* Byte swap code words. */
13451 while (ptr + 3 < end)
13452 {
13453 tmp = contents[ptr];
13454 contents[ptr] = contents[ptr + 3];
13455 contents[ptr + 3] = tmp;
13456 tmp = contents[ptr + 1];
13457 contents[ptr + 1] = contents[ptr + 2];
13458 contents[ptr + 2] = tmp;
13459 ptr += 4;
13460 }
13461 break;
13462
13463 case 't':
13464 /* Byte swap code halfwords. */
13465 while (ptr + 1 < end)
13466 {
13467 tmp = contents[ptr];
13468 contents[ptr] = contents[ptr + 1];
13469 contents[ptr + 1] = tmp;
13470 ptr += 2;
13471 }
13472 break;
13473
13474 case 'd':
13475 /* Leave data alone. */
13476 break;
13477 }
13478 ptr = end;
13479 }
13480 }
13481
13482 free (map);
13483 arm_data->mapcount = 0;
13484 arm_data->mapsize = 0;
13485 arm_data->map = NULL;
13486 unrecord_section_with_arm_elf_section_data (sec);
13487
13488 return FALSE;
13489 }
13490
13491 static void
13492 unrecord_section_via_map_over_sections (bfd * abfd ATTRIBUTE_UNUSED,
13493 asection * sec,
13494 void * ignore ATTRIBUTE_UNUSED)
13495 {
13496 unrecord_section_with_arm_elf_section_data (sec);
13497 }
13498
13499 static bfd_boolean
13500 elf32_arm_close_and_cleanup (bfd * abfd)
13501 {
13502 if (abfd->sections)
13503 bfd_map_over_sections (abfd,
13504 unrecord_section_via_map_over_sections,
13505 NULL);
13506
13507 return _bfd_elf_close_and_cleanup (abfd);
13508 }
13509
13510 static bfd_boolean
13511 elf32_arm_bfd_free_cached_info (bfd * abfd)
13512 {
13513 if (abfd->sections)
13514 bfd_map_over_sections (abfd,
13515 unrecord_section_via_map_over_sections,
13516 NULL);
13517
13518 return _bfd_free_cached_info (abfd);
13519 }
13520
13521 /* Display STT_ARM_TFUNC symbols as functions. */
13522
13523 static void
13524 elf32_arm_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED,
13525 asymbol *asym)
13526 {
13527 elf_symbol_type *elfsym = (elf_symbol_type *) asym;
13528
13529 if (ELF_ST_TYPE (elfsym->internal_elf_sym.st_info) == STT_ARM_TFUNC)
13530 elfsym->symbol.flags |= BSF_FUNCTION;
13531 }
13532
13533
13534 /* Mangle thumb function symbols as we read them in. */
13535
13536 static bfd_boolean
13537 elf32_arm_swap_symbol_in (bfd * abfd,
13538 const void *psrc,
13539 const void *pshn,
13540 Elf_Internal_Sym *dst)
13541 {
13542 if (!bfd_elf32_swap_symbol_in (abfd, psrc, pshn, dst))
13543 return FALSE;
13544
13545 /* New EABI objects mark thumb function symbols by setting the low bit of
13546 the address. Turn these into STT_ARM_TFUNC. */
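/* For example, a Thumb function at address 0x8000 arrives here as an
   STT_FUNC symbol with st_value 0x8001; it leaves as STT_ARM_TFUNC
   with st_value 0x8000.  */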
13547 if ((ELF_ST_TYPE (dst->st_info) == STT_FUNC)
13548 && (dst->st_value & 1))
13549 {
13550 dst->st_info = ELF_ST_INFO (ELF_ST_BIND (dst->st_info), STT_ARM_TFUNC);
13551 dst->st_value &= ~(bfd_vma) 1;
13552 }
13553 return TRUE;
13554 }
13555
13556
13557 /* Mangle thumb function symbols as we write them out. */
13558
13559 static void
13560 elf32_arm_swap_symbol_out (bfd *abfd,
13561 const Elf_Internal_Sym *src,
13562 void *cdst,
13563 void *shndx)
13564 {
13565 Elf_Internal_Sym newsym;
13566
13567 /* We convert STT_ARM_TFUNC symbols into STT_FUNC with the low bit
13568 of the address set, as per the new EABI. We do this unconditionally
13569 because objcopy does not set the elf header flags until after
13570 it writes out the symbol table. */
13571 if (ELF_ST_TYPE (src->st_info) == STT_ARM_TFUNC)
13572 {
13573 newsym = *src;
13574 newsym.st_info = ELF_ST_INFO (ELF_ST_BIND (src->st_info), STT_FUNC);
13575 if (newsym.st_shndx != SHN_UNDEF)
13576 {
13577 /* Do this only for defined symbols.  At link time, the static
13578 linker simulates the work of the dynamic linker by resolving
13579 symbols, and carries over the thumbness of the symbols it finds
13580 to the output symbol table.  The thumbness of an undefined
13581 symbol may well be different at runtime, however, and writing
13582 '1' for such symbols would be confusing for users and possibly
13583 for the dynamic linker itself.
13584 */
13585 newsym.st_value |= 1;
13586 }
13587
13588 src = &newsym;
13589 }
13590 bfd_elf32_swap_symbol_out (abfd, src, cdst, shndx);
13591 }
13592
13593 /* Add the PT_ARM_EXIDX program header. */
13594
13595 static bfd_boolean
13596 elf32_arm_modify_segment_map (bfd *abfd,
13597 struct bfd_link_info *info ATTRIBUTE_UNUSED)
13598 {
13599 struct elf_segment_map *m;
13600 asection *sec;
13601
13602 sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
13603 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
13604 {
13605 /* If there is already a PT_ARM_EXIDX header, then we do not
13606 want to add another one. This situation arises when running
13607 "strip"; the input binary already has the header. */
13608 m = elf_tdata (abfd)->segment_map;
13609 while (m && m->p_type != PT_ARM_EXIDX)
13610 m = m->next;
13611 if (!m)
13612 {
13613 m = bfd_zalloc (abfd, sizeof (struct elf_segment_map));
13614 if (m == NULL)
13615 return FALSE;
13616 m->p_type = PT_ARM_EXIDX;
13617 m->count = 1;
13618 m->sections[0] = sec;
13619
13620 m->next = elf_tdata (abfd)->segment_map;
13621 elf_tdata (abfd)->segment_map = m;
13622 }
13623 }
13624
13625 return TRUE;
13626 }
13627
13628 /* We may add a PT_ARM_EXIDX program header. */
13629
13630 static int
13631 elf32_arm_additional_program_headers (bfd *abfd,
13632 struct bfd_link_info *info ATTRIBUTE_UNUSED)
13633 {
13634 asection *sec;
13635
13636 sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
13637 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
13638 return 1;
13639 else
13640 return 0;
13641 }
13642
13643 /* We have two function types: STT_FUNC and STT_ARM_TFUNC. */
13644
13645 static bfd_boolean
13646 elf32_arm_is_function_type (unsigned int type)
13647 {
13648 return (type == STT_FUNC) || (type == STT_ARM_TFUNC);
13649 }
13650
13651 /* We use this to override swap_symbol_in and swap_symbol_out. */
13652 const struct elf_size_info elf32_arm_size_info =
13653 {
13654 sizeof (Elf32_External_Ehdr),
13655 sizeof (Elf32_External_Phdr),
13656 sizeof (Elf32_External_Shdr),
13657 sizeof (Elf32_External_Rel),
13658 sizeof (Elf32_External_Rela),
13659 sizeof (Elf32_External_Sym),
13660 sizeof (Elf32_External_Dyn),
13661 sizeof (Elf_External_Note),
13662 4,
13663 1,
13664 32, 2,
13665 ELFCLASS32, EV_CURRENT,
13666 bfd_elf32_write_out_phdrs,
13667 bfd_elf32_write_shdrs_and_ehdr,
13668 bfd_elf32_checksum_contents,
13669 bfd_elf32_write_relocs,
13670 elf32_arm_swap_symbol_in,
13671 elf32_arm_swap_symbol_out,
13672 bfd_elf32_slurp_reloc_table,
13673 bfd_elf32_slurp_symbol_table,
13674 bfd_elf32_swap_dyn_in,
13675 bfd_elf32_swap_dyn_out,
13676 bfd_elf32_swap_reloc_in,
13677 bfd_elf32_swap_reloc_out,
13678 bfd_elf32_swap_reloca_in,
13679 bfd_elf32_swap_reloca_out
13680 };
13681
13682 #define ELF_ARCH bfd_arch_arm
13683 #define ELF_MACHINE_CODE EM_ARM
13684 #ifdef __QNXTARGET__
13685 #define ELF_MAXPAGESIZE 0x1000
13686 #else
13687 #define ELF_MAXPAGESIZE 0x8000
13688 #endif
13689 #define ELF_MINPAGESIZE 0x1000
13690 #define ELF_COMMONPAGESIZE 0x1000
13691
13692 #define bfd_elf32_mkobject elf32_arm_mkobject
13693
13694 #define bfd_elf32_bfd_copy_private_bfd_data elf32_arm_copy_private_bfd_data
13695 #define bfd_elf32_bfd_merge_private_bfd_data elf32_arm_merge_private_bfd_data
13696 #define bfd_elf32_bfd_set_private_flags elf32_arm_set_private_flags
13697 #define bfd_elf32_bfd_print_private_bfd_data elf32_arm_print_private_bfd_data
13698 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_link_hash_table_create
13699 #define bfd_elf32_bfd_link_hash_table_free elf32_arm_hash_table_free
13700 #define bfd_elf32_bfd_reloc_type_lookup elf32_arm_reloc_type_lookup
13701 #define bfd_elf32_bfd_reloc_name_lookup elf32_arm_reloc_name_lookup
13702 #define bfd_elf32_find_nearest_line elf32_arm_find_nearest_line
13703 #define bfd_elf32_find_inliner_info elf32_arm_find_inliner_info
13704 #define bfd_elf32_new_section_hook elf32_arm_new_section_hook
13705 #define bfd_elf32_bfd_is_target_special_symbol elf32_arm_is_target_special_symbol
13706 #define bfd_elf32_close_and_cleanup elf32_arm_close_and_cleanup
13707 #define bfd_elf32_bfd_free_cached_info elf32_arm_bfd_free_cached_info
13708 #define bfd_elf32_bfd_final_link elf32_arm_final_link
13709
13710 #define elf_backend_get_symbol_type elf32_arm_get_symbol_type
13711 #define elf_backend_gc_mark_hook elf32_arm_gc_mark_hook
13712 #define elf_backend_gc_mark_extra_sections elf32_arm_gc_mark_extra_sections
13713 #define elf_backend_gc_sweep_hook elf32_arm_gc_sweep_hook
13714 #define elf_backend_check_relocs elf32_arm_check_relocs
13715 #define elf_backend_relocate_section elf32_arm_relocate_section
13716 #define elf_backend_write_section elf32_arm_write_section
13717 #define elf_backend_adjust_dynamic_symbol elf32_arm_adjust_dynamic_symbol
13718 #define elf_backend_create_dynamic_sections elf32_arm_create_dynamic_sections
13719 #define elf_backend_finish_dynamic_symbol elf32_arm_finish_dynamic_symbol
13720 #define elf_backend_finish_dynamic_sections elf32_arm_finish_dynamic_sections
13721 #define elf_backend_size_dynamic_sections elf32_arm_size_dynamic_sections
13722 #define elf_backend_init_index_section _bfd_elf_init_2_index_sections
13723 #define elf_backend_post_process_headers elf32_arm_post_process_headers
13724 #define elf_backend_reloc_type_class elf32_arm_reloc_type_class
13725 #define elf_backend_object_p elf32_arm_object_p
13726 #define elf_backend_section_flags elf32_arm_section_flags
13727 #define elf_backend_fake_sections elf32_arm_fake_sections
13728 #define elf_backend_section_from_shdr elf32_arm_section_from_shdr
13729 #define elf_backend_final_write_processing elf32_arm_final_write_processing
13730 #define elf_backend_copy_indirect_symbol elf32_arm_copy_indirect_symbol
13731 #define elf_backend_symbol_processing elf32_arm_symbol_processing
13732 #define elf_backend_size_info elf32_arm_size_info
13733 #define elf_backend_modify_segment_map elf32_arm_modify_segment_map
13734 #define elf_backend_additional_program_headers elf32_arm_additional_program_headers
13735 #define elf_backend_output_arch_local_syms elf32_arm_output_arch_local_syms
13736 #define elf_backend_begin_write_processing elf32_arm_begin_write_processing
13737 #define elf_backend_is_function_type elf32_arm_is_function_type
13738
13739 #define elf_backend_can_refcount 1
13740 #define elf_backend_can_gc_sections 1
13741 #define elf_backend_plt_readonly 1
13742 #define elf_backend_want_got_plt 1
13743 #define elf_backend_want_plt_sym 0
13744 #define elf_backend_may_use_rel_p 1
13745 #define elf_backend_may_use_rela_p 0
13746 #define elf_backend_default_use_rela_p 0
13747
13748 #define elf_backend_got_header_size 12
13749
13750 #undef elf_backend_obj_attrs_vendor
13751 #define elf_backend_obj_attrs_vendor "aeabi"
13752 #undef elf_backend_obj_attrs_section
13753 #define elf_backend_obj_attrs_section ".ARM.attributes"
13754 #undef elf_backend_obj_attrs_arg_type
13755 #define elf_backend_obj_attrs_arg_type elf32_arm_obj_attrs_arg_type
13756 #undef elf_backend_obj_attrs_section_type
13757 #define elf_backend_obj_attrs_section_type SHT_ARM_ATTRIBUTES
13758 #define elf_backend_obj_attrs_order elf32_arm_obj_attrs_order
13759
13760 #include "elf32-target.h"
13761
13762 /* VxWorks Targets. */
13763
13764 #undef TARGET_LITTLE_SYM
13765 #define TARGET_LITTLE_SYM bfd_elf32_littlearm_vxworks_vec
13766 #undef TARGET_LITTLE_NAME
13767 #define TARGET_LITTLE_NAME "elf32-littlearm-vxworks"
13768 #undef TARGET_BIG_SYM
13769 #define TARGET_BIG_SYM bfd_elf32_bigarm_vxworks_vec
13770 #undef TARGET_BIG_NAME
13771 #define TARGET_BIG_NAME "elf32-bigarm-vxworks"
13772
13773 /* Like elf32_arm_link_hash_table_create -- but overrides
13774 appropriately for VxWorks. */
13775
13776 static struct bfd_link_hash_table *
13777 elf32_arm_vxworks_link_hash_table_create (bfd *abfd)
13778 {
13779 struct bfd_link_hash_table *ret;
13780
13781 ret = elf32_arm_link_hash_table_create (abfd);
13782 if (ret)
13783 {
13784 struct elf32_arm_link_hash_table *htab
13785 = (struct elf32_arm_link_hash_table *) ret;
13786 htab->use_rel = 0;
13787 htab->vxworks_p = 1;
13788 }
13789 return ret;
13790 }
13791
13792 static void
13793 elf32_arm_vxworks_final_write_processing (bfd *abfd, bfd_boolean linker)
13794 {
13795 elf32_arm_final_write_processing (abfd, linker);
13796 elf_vxworks_final_write_processing (abfd, linker);
13797 }
13798
13799 #undef elf32_bed
13800 #define elf32_bed elf32_arm_vxworks_bed
13801
13802 #undef bfd_elf32_bfd_link_hash_table_create
13803 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_vxworks_link_hash_table_create
13804 #undef elf_backend_add_symbol_hook
13805 #define elf_backend_add_symbol_hook elf_vxworks_add_symbol_hook
13806 #undef elf_backend_final_write_processing
13807 #define elf_backend_final_write_processing elf32_arm_vxworks_final_write_processing
13808 #undef elf_backend_emit_relocs
13809 #define elf_backend_emit_relocs elf_vxworks_emit_relocs
13810
13811 #undef elf_backend_may_use_rel_p
13812 #define elf_backend_may_use_rel_p 0
13813 #undef elf_backend_may_use_rela_p
13814 #define elf_backend_may_use_rela_p 1
13815 #undef elf_backend_default_use_rela_p
13816 #define elf_backend_default_use_rela_p 1
13817 #undef elf_backend_want_plt_sym
13818 #define elf_backend_want_plt_sym 1
13819 #undef ELF_MAXPAGESIZE
13820 #define ELF_MAXPAGESIZE 0x1000
13821
13822 #include "elf32-target.h"
13823
13824
13825 /* Symbian OS Targets. */
13826
13827 #undef TARGET_LITTLE_SYM
13828 #define TARGET_LITTLE_SYM bfd_elf32_littlearm_symbian_vec
13829 #undef TARGET_LITTLE_NAME
13830 #define TARGET_LITTLE_NAME "elf32-littlearm-symbian"
13831 #undef TARGET_BIG_SYM
13832 #define TARGET_BIG_SYM bfd_elf32_bigarm_symbian_vec
13833 #undef TARGET_BIG_NAME
13834 #define TARGET_BIG_NAME "elf32-bigarm-symbian"
13835
13836 /* Like elf32_arm_link_hash_table_create -- but overrides
13837 appropriately for Symbian OS. */
13838
13839 static struct bfd_link_hash_table *
13840 elf32_arm_symbian_link_hash_table_create (bfd *abfd)
13841 {
13842 struct bfd_link_hash_table *ret;
13843
13844 ret = elf32_arm_link_hash_table_create (abfd);
13845 if (ret)
13846 {
13847 struct elf32_arm_link_hash_table *htab
13848 = (struct elf32_arm_link_hash_table *)ret;
13849 /* There is no PLT header for Symbian OS. */
13850 htab->plt_header_size = 0;
13851 /* The PLT entries are each one instruction and one word. */
13852 htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry);
13853 htab->symbian_p = 1;
13854 /* Symbian uses armv5t or above, so use_blx is always true. */
13855 htab->use_blx = 1;
13856 htab->root.is_relocatable_executable = 1;
13857 }
13858 return ret;
13859 }
13860
13861 static const struct bfd_elf_special_section
13862 elf32_arm_symbian_special_sections[] =
13863 {
13864 /* In a BPABI executable, the dynamic linking sections do not go in
13865 the loadable read-only segment. The post-linker may wish to
13866 refer to these sections, but they are not part of the final
13867 program image. */
13868 { STRING_COMMA_LEN (".dynamic"), 0, SHT_DYNAMIC, 0 },
13869 { STRING_COMMA_LEN (".dynstr"), 0, SHT_STRTAB, 0 },
13870 { STRING_COMMA_LEN (".dynsym"), 0, SHT_DYNSYM, 0 },
13871 { STRING_COMMA_LEN (".got"), 0, SHT_PROGBITS, 0 },
13872 { STRING_COMMA_LEN (".hash"), 0, SHT_HASH, 0 },
13873 /* These sections do not need to be writable as the SymbianOS
13874 postlinker will arrange things so that no dynamic relocation is
13875 required. */
13876 { STRING_COMMA_LEN (".init_array"), 0, SHT_INIT_ARRAY, SHF_ALLOC },
13877 { STRING_COMMA_LEN (".fini_array"), 0, SHT_FINI_ARRAY, SHF_ALLOC },
13878 { STRING_COMMA_LEN (".preinit_array"), 0, SHT_PREINIT_ARRAY, SHF_ALLOC },
13879 { NULL, 0, 0, 0, 0 }
13880 };
13881
13882 static void
13883 elf32_arm_symbian_begin_write_processing (bfd *abfd,
13884 struct bfd_link_info *link_info)
13885 {
13886 /* BPABI objects are never loaded directly by an OS kernel; they are
13887 processed by a postlinker first, into an OS-specific format. If
13888 the D_PAGED bit is set on the file, BFD will align segments on
13889 page boundaries, so that an OS can directly map the file. With
13890 BPABI objects, that just results in wasted space. In addition,
13891 because we clear the D_PAGED bit, map_sections_to_segments will
13892 recognize that the program headers should not be mapped into any
13893 loadable segment. */
13894 abfd->flags &= ~D_PAGED;
13895 elf32_arm_begin_write_processing (abfd, link_info);
13896 }
13897
13898 static bfd_boolean
13899 elf32_arm_symbian_modify_segment_map (bfd *abfd,
13900 struct bfd_link_info *info)
13901 {
13902 struct elf_segment_map *m;
13903 asection *dynsec;
13904
13905 /* BPABI shared libraries and executables should have a PT_DYNAMIC
13906 segment. However, because the .dynamic section is not marked
13907 with SEC_LOAD, the generic ELF code will not create such a
13908 segment. */
13909 dynsec = bfd_get_section_by_name (abfd, ".dynamic");
13910 if (dynsec)
13911 {
13912 for (m = elf_tdata (abfd)->segment_map; m != NULL; m = m->next)
13913 if (m->p_type == PT_DYNAMIC)
13914 break;
13915
13916 if (m == NULL)
13917 {
13918 m = _bfd_elf_make_dynamic_segment (abfd, dynsec);
13919 m->next = elf_tdata (abfd)->segment_map;
13920 elf_tdata (abfd)->segment_map = m;
13921 }
13922 }
13923
13924 /* Also call the generic arm routine. */
13925 return elf32_arm_modify_segment_map (abfd, info);
13926 }
13927
13928 /* Return the address of the Ith PLT stub in section PLT, for relocation REL,
13929 or (bfd_vma) -1 if it should not be included. */
13930
13931 static bfd_vma
13932 elf32_arm_symbian_plt_sym_val (bfd_vma i, const asection *plt,
13933 const arelent *rel ATTRIBUTE_UNUSED)
13934 {
13935 return plt->vma + 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry) * i;
13936 }
13937
13938
13939 #undef elf32_bed
13940 #define elf32_bed elf32_arm_symbian_bed
13941
13942 /* The dynamic sections are not allocated on SymbianOS; the postlinker
13943 will process them and then discard them. */
13944 #undef ELF_DYNAMIC_SEC_FLAGS
13945 #define ELF_DYNAMIC_SEC_FLAGS \
13946 (SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_LINKER_CREATED)
13947
13948 #undef elf_backend_add_symbol_hook
13949 #undef elf_backend_emit_relocs
13950
13951 #undef bfd_elf32_bfd_link_hash_table_create
13952 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_symbian_link_hash_table_create
13953 #undef elf_backend_special_sections
13954 #define elf_backend_special_sections elf32_arm_symbian_special_sections
13955 #undef elf_backend_begin_write_processing
13956 #define elf_backend_begin_write_processing elf32_arm_symbian_begin_write_processing
13957 #undef elf_backend_final_write_processing
13958 #define elf_backend_final_write_processing elf32_arm_final_write_processing
13959
13960 #undef elf_backend_modify_segment_map
13961 #define elf_backend_modify_segment_map elf32_arm_symbian_modify_segment_map
13962
13963 /* There is no .got section for BPABI objects, and hence no header. */
13964 #undef elf_backend_got_header_size
13965 #define elf_backend_got_header_size 0
13966
13967 /* Similarly, there is no .got.plt section. */
13968 #undef elf_backend_want_got_plt
13969 #define elf_backend_want_got_plt 0
13970
13971 #undef elf_backend_plt_sym_val
13972 #define elf_backend_plt_sym_val elf32_arm_symbian_plt_sym_val
13973
13974 #undef elf_backend_may_use_rel_p
13975 #define elf_backend_may_use_rel_p 1
13976 #undef elf_backend_may_use_rela_p
13977 #define elf_backend_may_use_rela_p 0
13978 #undef elf_backend_default_use_rela_p
13979 #define elf_backend_default_use_rela_p 0
13980 #undef elf_backend_want_plt_sym
13981 #define elf_backend_want_plt_sym 0
13982 #undef ELF_MAXPAGESIZE
13983 #define ELF_MAXPAGESIZE 0x8000
13984
13985 #include "elf32-target.h"