daily update
[binutils-gdb.git] / bfd / elf32-arm.c
1 /* 32-bit ELF support for ARM
2 Copyright 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007,
3 2008, 2009, 2010 Free Software Foundation, Inc.
4
5 This file is part of BFD, the Binary File Descriptor library.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
20 MA 02110-1301, USA. */
21
22 #include "sysdep.h"
23 #include <limits.h>
24
25 #include "bfd.h"
26 #include "libiberty.h"
27 #include "libbfd.h"
28 #include "elf-bfd.h"
29 #include "elf-vxworks.h"
30 #include "elf/arm.h"
31
32 /* Return the relocation section associated with NAME. HTAB is the
33 bfd's elf32_arm_link_hash_entry. */
34 #define RELOC_SECTION(HTAB, NAME) \
35 ((HTAB)->use_rel ? ".rel" NAME : ".rela" NAME)
36
37 /* Return size of a relocation entry. HTAB is the bfd's
38 elf32_arm_link_hash_entry. */
39 #define RELOC_SIZE(HTAB) \
40 ((HTAB)->use_rel \
41 ? sizeof (Elf32_External_Rel) \
42 : sizeof (Elf32_External_Rela))
43
44 /* Return function to swap relocations in. HTAB is the bfd's
45 elf32_arm_link_hash_entry. */
46 #define SWAP_RELOC_IN(HTAB) \
47 ((HTAB)->use_rel \
48 ? bfd_elf32_swap_reloc_in \
49 : bfd_elf32_swap_reloca_in)
50
51 /* Return function to swap relocations out. HTAB is the bfd's
52 elf32_arm_link_hash_entry. */
53 #define SWAP_RELOC_OUT(HTAB) \
54 ((HTAB)->use_rel \
55 ? bfd_elf32_swap_reloc_out \
56 : bfd_elf32_swap_reloca_out)
57
58 #define elf_info_to_howto 0
59 #define elf_info_to_howto_rel elf32_arm_info_to_howto
60
61 #define ARM_ELF_ABI_VERSION 0
62 #define ARM_ELF_OS_ABI_VERSION ELFOSABI_ARM
63
64 static bfd_boolean elf32_arm_write_section (bfd *output_bfd,
65 struct bfd_link_info *link_info,
66 asection *sec,
67 bfd_byte *contents);
68
69 /* Note: code such as elf32_arm_reloc_type_lookup expect to use e.g.
70 R_ARM_PC24 as an index into this, and find the R_ARM_PC24 HOWTO
71 in that slot. */
72
73 static reloc_howto_type elf32_arm_howto_table_1[] =
74 {
75 /* No relocation. */
76 HOWTO (R_ARM_NONE, /* type */
77 0, /* rightshift */
78 0, /* size (0 = byte, 1 = short, 2 = long) */
79 0, /* bitsize */
80 FALSE, /* pc_relative */
81 0, /* bitpos */
82 complain_overflow_dont,/* complain_on_overflow */
83 bfd_elf_generic_reloc, /* special_function */
84 "R_ARM_NONE", /* name */
85 FALSE, /* partial_inplace */
86 0, /* src_mask */
87 0, /* dst_mask */
88 FALSE), /* pcrel_offset */
89
90 HOWTO (R_ARM_PC24, /* type */
91 2, /* rightshift */
92 2, /* size (0 = byte, 1 = short, 2 = long) */
93 24, /* bitsize */
94 TRUE, /* pc_relative */
95 0, /* bitpos */
96 complain_overflow_signed,/* complain_on_overflow */
97 bfd_elf_generic_reloc, /* special_function */
98 "R_ARM_PC24", /* name */
99 FALSE, /* partial_inplace */
100 0x00ffffff, /* src_mask */
101 0x00ffffff, /* dst_mask */
102 TRUE), /* pcrel_offset */
103
104 /* 32 bit absolute */
105 HOWTO (R_ARM_ABS32, /* type */
106 0, /* rightshift */
107 2, /* size (0 = byte, 1 = short, 2 = long) */
108 32, /* bitsize */
109 FALSE, /* pc_relative */
110 0, /* bitpos */
111 complain_overflow_bitfield,/* complain_on_overflow */
112 bfd_elf_generic_reloc, /* special_function */
113 "R_ARM_ABS32", /* name */
114 FALSE, /* partial_inplace */
115 0xffffffff, /* src_mask */
116 0xffffffff, /* dst_mask */
117 FALSE), /* pcrel_offset */
118
119 /* standard 32bit pc-relative reloc */
120 HOWTO (R_ARM_REL32, /* type */
121 0, /* rightshift */
122 2, /* size (0 = byte, 1 = short, 2 = long) */
123 32, /* bitsize */
124 TRUE, /* pc_relative */
125 0, /* bitpos */
126 complain_overflow_bitfield,/* complain_on_overflow */
127 bfd_elf_generic_reloc, /* special_function */
128 "R_ARM_REL32", /* name */
129 FALSE, /* partial_inplace */
130 0xffffffff, /* src_mask */
131 0xffffffff, /* dst_mask */
132 TRUE), /* pcrel_offset */
133
134 /* 8 bit absolute - R_ARM_LDR_PC_G0 in AAELF */
135 HOWTO (R_ARM_LDR_PC_G0, /* type */
136 0, /* rightshift */
137 0, /* size (0 = byte, 1 = short, 2 = long) */
138 32, /* bitsize */
139 TRUE, /* pc_relative */
140 0, /* bitpos */
141 complain_overflow_dont,/* complain_on_overflow */
142 bfd_elf_generic_reloc, /* special_function */
143 "R_ARM_LDR_PC_G0", /* name */
144 FALSE, /* partial_inplace */
145 0xffffffff, /* src_mask */
146 0xffffffff, /* dst_mask */
147 TRUE), /* pcrel_offset */
148
149 /* 16 bit absolute */
150 HOWTO (R_ARM_ABS16, /* type */
151 0, /* rightshift */
152 1, /* size (0 = byte, 1 = short, 2 = long) */
153 16, /* bitsize */
154 FALSE, /* pc_relative */
155 0, /* bitpos */
156 complain_overflow_bitfield,/* complain_on_overflow */
157 bfd_elf_generic_reloc, /* special_function */
158 "R_ARM_ABS16", /* name */
159 FALSE, /* partial_inplace */
160 0x0000ffff, /* src_mask */
161 0x0000ffff, /* dst_mask */
162 FALSE), /* pcrel_offset */
163
164 /* 12 bit absolute */
165 HOWTO (R_ARM_ABS12, /* type */
166 0, /* rightshift */
167 2, /* size (0 = byte, 1 = short, 2 = long) */
168 12, /* bitsize */
169 FALSE, /* pc_relative */
170 0, /* bitpos */
171 complain_overflow_bitfield,/* complain_on_overflow */
172 bfd_elf_generic_reloc, /* special_function */
173 "R_ARM_ABS12", /* name */
174 FALSE, /* partial_inplace */
175 0x00000fff, /* src_mask */
176 0x00000fff, /* dst_mask */
177 FALSE), /* pcrel_offset */
178
179 HOWTO (R_ARM_THM_ABS5, /* type */
180 6, /* rightshift */
181 1, /* size (0 = byte, 1 = short, 2 = long) */
182 5, /* bitsize */
183 FALSE, /* pc_relative */
184 0, /* bitpos */
185 complain_overflow_bitfield,/* complain_on_overflow */
186 bfd_elf_generic_reloc, /* special_function */
187 "R_ARM_THM_ABS5", /* name */
188 FALSE, /* partial_inplace */
189 0x000007e0, /* src_mask */
190 0x000007e0, /* dst_mask */
191 FALSE), /* pcrel_offset */
192
193 /* 8 bit absolute */
194 HOWTO (R_ARM_ABS8, /* type */
195 0, /* rightshift */
196 0, /* size (0 = byte, 1 = short, 2 = long) */
197 8, /* bitsize */
198 FALSE, /* pc_relative */
199 0, /* bitpos */
200 complain_overflow_bitfield,/* complain_on_overflow */
201 bfd_elf_generic_reloc, /* special_function */
202 "R_ARM_ABS8", /* name */
203 FALSE, /* partial_inplace */
204 0x000000ff, /* src_mask */
205 0x000000ff, /* dst_mask */
206 FALSE), /* pcrel_offset */
207
208 HOWTO (R_ARM_SBREL32, /* type */
209 0, /* rightshift */
210 2, /* size (0 = byte, 1 = short, 2 = long) */
211 32, /* bitsize */
212 FALSE, /* pc_relative */
213 0, /* bitpos */
214 complain_overflow_dont,/* complain_on_overflow */
215 bfd_elf_generic_reloc, /* special_function */
216 "R_ARM_SBREL32", /* name */
217 FALSE, /* partial_inplace */
218 0xffffffff, /* src_mask */
219 0xffffffff, /* dst_mask */
220 FALSE), /* pcrel_offset */
221
222 HOWTO (R_ARM_THM_CALL, /* type */
223 1, /* rightshift */
224 2, /* size (0 = byte, 1 = short, 2 = long) */
225 24, /* bitsize */
226 TRUE, /* pc_relative */
227 0, /* bitpos */
228 complain_overflow_signed,/* complain_on_overflow */
229 bfd_elf_generic_reloc, /* special_function */
230 "R_ARM_THM_CALL", /* name */
231 FALSE, /* partial_inplace */
232 0x07ff07ff, /* src_mask */
233 0x07ff07ff, /* dst_mask */
234 TRUE), /* pcrel_offset */
235
236 HOWTO (R_ARM_THM_PC8, /* type */
237 1, /* rightshift */
238 1, /* size (0 = byte, 1 = short, 2 = long) */
239 8, /* bitsize */
240 TRUE, /* pc_relative */
241 0, /* bitpos */
242 complain_overflow_signed,/* complain_on_overflow */
243 bfd_elf_generic_reloc, /* special_function */
244 "R_ARM_THM_PC8", /* name */
245 FALSE, /* partial_inplace */
246 0x000000ff, /* src_mask */
247 0x000000ff, /* dst_mask */
248 TRUE), /* pcrel_offset */
249
250 HOWTO (R_ARM_BREL_ADJ, /* type */
251 1, /* rightshift */
252 1, /* size (0 = byte, 1 = short, 2 = long) */
253 32, /* bitsize */
254 FALSE, /* pc_relative */
255 0, /* bitpos */
256 complain_overflow_signed,/* complain_on_overflow */
257 bfd_elf_generic_reloc, /* special_function */
258 "R_ARM_BREL_ADJ", /* name */
259 FALSE, /* partial_inplace */
260 0xffffffff, /* src_mask */
261 0xffffffff, /* dst_mask */
262 FALSE), /* pcrel_offset */
263
264 HOWTO (R_ARM_SWI24, /* type */
265 0, /* rightshift */
266 0, /* size (0 = byte, 1 = short, 2 = long) */
267 0, /* bitsize */
268 FALSE, /* pc_relative */
269 0, /* bitpos */
270 complain_overflow_signed,/* complain_on_overflow */
271 bfd_elf_generic_reloc, /* special_function */
272 "R_ARM_SWI24", /* name */
273 FALSE, /* partial_inplace */
274 0x00000000, /* src_mask */
275 0x00000000, /* dst_mask */
276 FALSE), /* pcrel_offset */
277
278 HOWTO (R_ARM_THM_SWI8, /* type */
279 0, /* rightshift */
280 0, /* size (0 = byte, 1 = short, 2 = long) */
281 0, /* bitsize */
282 FALSE, /* pc_relative */
283 0, /* bitpos */
284 complain_overflow_signed,/* complain_on_overflow */
285 bfd_elf_generic_reloc, /* special_function */
286 "R_ARM_SWI8", /* name */
287 FALSE, /* partial_inplace */
288 0x00000000, /* src_mask */
289 0x00000000, /* dst_mask */
290 FALSE), /* pcrel_offset */
291
292 /* BLX instruction for the ARM. */
293 HOWTO (R_ARM_XPC25, /* type */
294 2, /* rightshift */
295 2, /* size (0 = byte, 1 = short, 2 = long) */
296 25, /* bitsize */
297 TRUE, /* pc_relative */
298 0, /* bitpos */
299 complain_overflow_signed,/* complain_on_overflow */
300 bfd_elf_generic_reloc, /* special_function */
301 "R_ARM_XPC25", /* name */
302 FALSE, /* partial_inplace */
303 0x00ffffff, /* src_mask */
304 0x00ffffff, /* dst_mask */
305 TRUE), /* pcrel_offset */
306
307 /* BLX instruction for the Thumb. */
308 HOWTO (R_ARM_THM_XPC22, /* type */
309 2, /* rightshift */
310 2, /* size (0 = byte, 1 = short, 2 = long) */
311 22, /* bitsize */
312 TRUE, /* pc_relative */
313 0, /* bitpos */
314 complain_overflow_signed,/* complain_on_overflow */
315 bfd_elf_generic_reloc, /* special_function */
316 "R_ARM_THM_XPC22", /* name */
317 FALSE, /* partial_inplace */
318 0x07ff07ff, /* src_mask */
319 0x07ff07ff, /* dst_mask */
320 TRUE), /* pcrel_offset */
321
322 /* Dynamic TLS relocations. */
323
324 HOWTO (R_ARM_TLS_DTPMOD32, /* type */
325 0, /* rightshift */
326 2, /* size (0 = byte, 1 = short, 2 = long) */
327 32, /* bitsize */
328 FALSE, /* pc_relative */
329 0, /* bitpos */
330 complain_overflow_bitfield,/* complain_on_overflow */
331 bfd_elf_generic_reloc, /* special_function */
332 "R_ARM_TLS_DTPMOD32", /* name */
333 TRUE, /* partial_inplace */
334 0xffffffff, /* src_mask */
335 0xffffffff, /* dst_mask */
336 FALSE), /* pcrel_offset */
337
338 HOWTO (R_ARM_TLS_DTPOFF32, /* type */
339 0, /* rightshift */
340 2, /* size (0 = byte, 1 = short, 2 = long) */
341 32, /* bitsize */
342 FALSE, /* pc_relative */
343 0, /* bitpos */
344 complain_overflow_bitfield,/* complain_on_overflow */
345 bfd_elf_generic_reloc, /* special_function */
346 "R_ARM_TLS_DTPOFF32", /* name */
347 TRUE, /* partial_inplace */
348 0xffffffff, /* src_mask */
349 0xffffffff, /* dst_mask */
350 FALSE), /* pcrel_offset */
351
352 HOWTO (R_ARM_TLS_TPOFF32, /* type */
353 0, /* rightshift */
354 2, /* size (0 = byte, 1 = short, 2 = long) */
355 32, /* bitsize */
356 FALSE, /* pc_relative */
357 0, /* bitpos */
358 complain_overflow_bitfield,/* complain_on_overflow */
359 bfd_elf_generic_reloc, /* special_function */
360 "R_ARM_TLS_TPOFF32", /* name */
361 TRUE, /* partial_inplace */
362 0xffffffff, /* src_mask */
363 0xffffffff, /* dst_mask */
364 FALSE), /* pcrel_offset */
365
366 /* Relocs used in ARM Linux */
367
368 HOWTO (R_ARM_COPY, /* type */
369 0, /* rightshift */
370 2, /* size (0 = byte, 1 = short, 2 = long) */
371 32, /* bitsize */
372 FALSE, /* pc_relative */
373 0, /* bitpos */
374 complain_overflow_bitfield,/* complain_on_overflow */
375 bfd_elf_generic_reloc, /* special_function */
376 "R_ARM_COPY", /* name */
377 TRUE, /* partial_inplace */
378 0xffffffff, /* src_mask */
379 0xffffffff, /* dst_mask */
380 FALSE), /* pcrel_offset */
381
382 HOWTO (R_ARM_GLOB_DAT, /* type */
383 0, /* rightshift */
384 2, /* size (0 = byte, 1 = short, 2 = long) */
385 32, /* bitsize */
386 FALSE, /* pc_relative */
387 0, /* bitpos */
388 complain_overflow_bitfield,/* complain_on_overflow */
389 bfd_elf_generic_reloc, /* special_function */
390 "R_ARM_GLOB_DAT", /* name */
391 TRUE, /* partial_inplace */
392 0xffffffff, /* src_mask */
393 0xffffffff, /* dst_mask */
394 FALSE), /* pcrel_offset */
395
396 HOWTO (R_ARM_JUMP_SLOT, /* type */
397 0, /* rightshift */
398 2, /* size (0 = byte, 1 = short, 2 = long) */
399 32, /* bitsize */
400 FALSE, /* pc_relative */
401 0, /* bitpos */
402 complain_overflow_bitfield,/* complain_on_overflow */
403 bfd_elf_generic_reloc, /* special_function */
404 "R_ARM_JUMP_SLOT", /* name */
405 TRUE, /* partial_inplace */
406 0xffffffff, /* src_mask */
407 0xffffffff, /* dst_mask */
408 FALSE), /* pcrel_offset */
409
410 HOWTO (R_ARM_RELATIVE, /* type */
411 0, /* rightshift */
412 2, /* size (0 = byte, 1 = short, 2 = long) */
413 32, /* bitsize */
414 FALSE, /* pc_relative */
415 0, /* bitpos */
416 complain_overflow_bitfield,/* complain_on_overflow */
417 bfd_elf_generic_reloc, /* special_function */
418 "R_ARM_RELATIVE", /* name */
419 TRUE, /* partial_inplace */
420 0xffffffff, /* src_mask */
421 0xffffffff, /* dst_mask */
422 FALSE), /* pcrel_offset */
423
424 HOWTO (R_ARM_GOTOFF32, /* type */
425 0, /* rightshift */
426 2, /* size (0 = byte, 1 = short, 2 = long) */
427 32, /* bitsize */
428 FALSE, /* pc_relative */
429 0, /* bitpos */
430 complain_overflow_bitfield,/* complain_on_overflow */
431 bfd_elf_generic_reloc, /* special_function */
432 "R_ARM_GOTOFF32", /* name */
433 TRUE, /* partial_inplace */
434 0xffffffff, /* src_mask */
435 0xffffffff, /* dst_mask */
436 FALSE), /* pcrel_offset */
437
438 HOWTO (R_ARM_GOTPC, /* type */
439 0, /* rightshift */
440 2, /* size (0 = byte, 1 = short, 2 = long) */
441 32, /* bitsize */
442 TRUE, /* pc_relative */
443 0, /* bitpos */
444 complain_overflow_bitfield,/* complain_on_overflow */
445 bfd_elf_generic_reloc, /* special_function */
446 "R_ARM_GOTPC", /* name */
447 TRUE, /* partial_inplace */
448 0xffffffff, /* src_mask */
449 0xffffffff, /* dst_mask */
450 TRUE), /* pcrel_offset */
451
452 HOWTO (R_ARM_GOT32, /* type */
453 0, /* rightshift */
454 2, /* size (0 = byte, 1 = short, 2 = long) */
455 32, /* bitsize */
456 FALSE, /* pc_relative */
457 0, /* bitpos */
458 complain_overflow_bitfield,/* complain_on_overflow */
459 bfd_elf_generic_reloc, /* special_function */
460 "R_ARM_GOT32", /* name */
461 TRUE, /* partial_inplace */
462 0xffffffff, /* src_mask */
463 0xffffffff, /* dst_mask */
464 FALSE), /* pcrel_offset */
465
466 HOWTO (R_ARM_PLT32, /* type */
467 2, /* rightshift */
468 2, /* size (0 = byte, 1 = short, 2 = long) */
469 24, /* bitsize */
470 TRUE, /* pc_relative */
471 0, /* bitpos */
472 complain_overflow_bitfield,/* complain_on_overflow */
473 bfd_elf_generic_reloc, /* special_function */
474 "R_ARM_PLT32", /* name */
475 FALSE, /* partial_inplace */
476 0x00ffffff, /* src_mask */
477 0x00ffffff, /* dst_mask */
478 TRUE), /* pcrel_offset */
479
480 HOWTO (R_ARM_CALL, /* type */
481 2, /* rightshift */
482 2, /* size (0 = byte, 1 = short, 2 = long) */
483 24, /* bitsize */
484 TRUE, /* pc_relative */
485 0, /* bitpos */
486 complain_overflow_signed,/* complain_on_overflow */
487 bfd_elf_generic_reloc, /* special_function */
488 "R_ARM_CALL", /* name */
489 FALSE, /* partial_inplace */
490 0x00ffffff, /* src_mask */
491 0x00ffffff, /* dst_mask */
492 TRUE), /* pcrel_offset */
493
494 HOWTO (R_ARM_JUMP24, /* type */
495 2, /* rightshift */
496 2, /* size (0 = byte, 1 = short, 2 = long) */
497 24, /* bitsize */
498 TRUE, /* pc_relative */
499 0, /* bitpos */
500 complain_overflow_signed,/* complain_on_overflow */
501 bfd_elf_generic_reloc, /* special_function */
502 "R_ARM_JUMP24", /* name */
503 FALSE, /* partial_inplace */
504 0x00ffffff, /* src_mask */
505 0x00ffffff, /* dst_mask */
506 TRUE), /* pcrel_offset */
507
508 HOWTO (R_ARM_THM_JUMP24, /* type */
509 1, /* rightshift */
510 2, /* size (0 = byte, 1 = short, 2 = long) */
511 24, /* bitsize */
512 TRUE, /* pc_relative */
513 0, /* bitpos */
514 complain_overflow_signed,/* complain_on_overflow */
515 bfd_elf_generic_reloc, /* special_function */
516 "R_ARM_THM_JUMP24", /* name */
517 FALSE, /* partial_inplace */
518 0x07ff2fff, /* src_mask */
519 0x07ff2fff, /* dst_mask */
520 TRUE), /* pcrel_offset */
521
522 HOWTO (R_ARM_BASE_ABS, /* type */
523 0, /* rightshift */
524 2, /* size (0 = byte, 1 = short, 2 = long) */
525 32, /* bitsize */
526 FALSE, /* pc_relative */
527 0, /* bitpos */
528 complain_overflow_dont,/* complain_on_overflow */
529 bfd_elf_generic_reloc, /* special_function */
530 "R_ARM_BASE_ABS", /* name */
531 FALSE, /* partial_inplace */
532 0xffffffff, /* src_mask */
533 0xffffffff, /* dst_mask */
534 FALSE), /* pcrel_offset */
535
536 HOWTO (R_ARM_ALU_PCREL7_0, /* type */
537 0, /* rightshift */
538 2, /* size (0 = byte, 1 = short, 2 = long) */
539 12, /* bitsize */
540 TRUE, /* pc_relative */
541 0, /* bitpos */
542 complain_overflow_dont,/* complain_on_overflow */
543 bfd_elf_generic_reloc, /* special_function */
544 "R_ARM_ALU_PCREL_7_0", /* name */
545 FALSE, /* partial_inplace */
546 0x00000fff, /* src_mask */
547 0x00000fff, /* dst_mask */
548 TRUE), /* pcrel_offset */
549
550 HOWTO (R_ARM_ALU_PCREL15_8, /* type */
551 0, /* rightshift */
552 2, /* size (0 = byte, 1 = short, 2 = long) */
553 12, /* bitsize */
554 TRUE, /* pc_relative */
555 8, /* bitpos */
556 complain_overflow_dont,/* complain_on_overflow */
557 bfd_elf_generic_reloc, /* special_function */
558 "R_ARM_ALU_PCREL_15_8",/* name */
559 FALSE, /* partial_inplace */
560 0x00000fff, /* src_mask */
561 0x00000fff, /* dst_mask */
562 TRUE), /* pcrel_offset */
563
564 HOWTO (R_ARM_ALU_PCREL23_15, /* type */
565 0, /* rightshift */
566 2, /* size (0 = byte, 1 = short, 2 = long) */
567 12, /* bitsize */
568 TRUE, /* pc_relative */
569 16, /* bitpos */
570 complain_overflow_dont,/* complain_on_overflow */
571 bfd_elf_generic_reloc, /* special_function */
572 "R_ARM_ALU_PCREL_23_15",/* name */
573 FALSE, /* partial_inplace */
574 0x00000fff, /* src_mask */
575 0x00000fff, /* dst_mask */
576 TRUE), /* pcrel_offset */
577
578 HOWTO (R_ARM_LDR_SBREL_11_0, /* type */
579 0, /* rightshift */
580 2, /* size (0 = byte, 1 = short, 2 = long) */
581 12, /* bitsize */
582 FALSE, /* pc_relative */
583 0, /* bitpos */
584 complain_overflow_dont,/* complain_on_overflow */
585 bfd_elf_generic_reloc, /* special_function */
586 "R_ARM_LDR_SBREL_11_0",/* name */
587 FALSE, /* partial_inplace */
588 0x00000fff, /* src_mask */
589 0x00000fff, /* dst_mask */
590 FALSE), /* pcrel_offset */
591
592 HOWTO (R_ARM_ALU_SBREL_19_12, /* type */
593 0, /* rightshift */
594 2, /* size (0 = byte, 1 = short, 2 = long) */
595 8, /* bitsize */
596 FALSE, /* pc_relative */
597 12, /* bitpos */
598 complain_overflow_dont,/* complain_on_overflow */
599 bfd_elf_generic_reloc, /* special_function */
600 "R_ARM_ALU_SBREL_19_12",/* name */
601 FALSE, /* partial_inplace */
602 0x000ff000, /* src_mask */
603 0x000ff000, /* dst_mask */
604 FALSE), /* pcrel_offset */
605
606 HOWTO (R_ARM_ALU_SBREL_27_20, /* type */
607 0, /* rightshift */
608 2, /* size (0 = byte, 1 = short, 2 = long) */
609 8, /* bitsize */
610 FALSE, /* pc_relative */
611 20, /* bitpos */
612 complain_overflow_dont,/* complain_on_overflow */
613 bfd_elf_generic_reloc, /* special_function */
614 "R_ARM_ALU_SBREL_27_20",/* name */
615 FALSE, /* partial_inplace */
616 0x0ff00000, /* src_mask */
617 0x0ff00000, /* dst_mask */
618 FALSE), /* pcrel_offset */
619
620 HOWTO (R_ARM_TARGET1, /* type */
621 0, /* rightshift */
622 2, /* size (0 = byte, 1 = short, 2 = long) */
623 32, /* bitsize */
624 FALSE, /* pc_relative */
625 0, /* bitpos */
626 complain_overflow_dont,/* complain_on_overflow */
627 bfd_elf_generic_reloc, /* special_function */
628 "R_ARM_TARGET1", /* name */
629 FALSE, /* partial_inplace */
630 0xffffffff, /* src_mask */
631 0xffffffff, /* dst_mask */
632 FALSE), /* pcrel_offset */
633
634 HOWTO (R_ARM_ROSEGREL32, /* type */
635 0, /* rightshift */
636 2, /* size (0 = byte, 1 = short, 2 = long) */
637 32, /* bitsize */
638 FALSE, /* pc_relative */
639 0, /* bitpos */
640 complain_overflow_dont,/* complain_on_overflow */
641 bfd_elf_generic_reloc, /* special_function */
642 "R_ARM_ROSEGREL32", /* name */
643 FALSE, /* partial_inplace */
644 0xffffffff, /* src_mask */
645 0xffffffff, /* dst_mask */
646 FALSE), /* pcrel_offset */
647
648 HOWTO (R_ARM_V4BX, /* type */
649 0, /* rightshift */
650 2, /* size (0 = byte, 1 = short, 2 = long) */
651 32, /* bitsize */
652 FALSE, /* pc_relative */
653 0, /* bitpos */
654 complain_overflow_dont,/* complain_on_overflow */
655 bfd_elf_generic_reloc, /* special_function */
656 "R_ARM_V4BX", /* name */
657 FALSE, /* partial_inplace */
658 0xffffffff, /* src_mask */
659 0xffffffff, /* dst_mask */
660 FALSE), /* pcrel_offset */
661
662 HOWTO (R_ARM_TARGET2, /* type */
663 0, /* rightshift */
664 2, /* size (0 = byte, 1 = short, 2 = long) */
665 32, /* bitsize */
666 FALSE, /* pc_relative */
667 0, /* bitpos */
668 complain_overflow_signed,/* complain_on_overflow */
669 bfd_elf_generic_reloc, /* special_function */
670 "R_ARM_TARGET2", /* name */
671 FALSE, /* partial_inplace */
672 0xffffffff, /* src_mask */
673 0xffffffff, /* dst_mask */
674 TRUE), /* pcrel_offset */
675
676 HOWTO (R_ARM_PREL31, /* type */
677 0, /* rightshift */
678 2, /* size (0 = byte, 1 = short, 2 = long) */
679 31, /* bitsize */
680 TRUE, /* pc_relative */
681 0, /* bitpos */
682 complain_overflow_signed,/* complain_on_overflow */
683 bfd_elf_generic_reloc, /* special_function */
684 "R_ARM_PREL31", /* name */
685 FALSE, /* partial_inplace */
686 0x7fffffff, /* src_mask */
687 0x7fffffff, /* dst_mask */
688 TRUE), /* pcrel_offset */
689
690 HOWTO (R_ARM_MOVW_ABS_NC, /* type */
691 0, /* rightshift */
692 2, /* size (0 = byte, 1 = short, 2 = long) */
693 16, /* bitsize */
694 FALSE, /* pc_relative */
695 0, /* bitpos */
696 complain_overflow_dont,/* complain_on_overflow */
697 bfd_elf_generic_reloc, /* special_function */
698 "R_ARM_MOVW_ABS_NC", /* name */
699 FALSE, /* partial_inplace */
700 0x000f0fff, /* src_mask */
701 0x000f0fff, /* dst_mask */
702 FALSE), /* pcrel_offset */
703
704 HOWTO (R_ARM_MOVT_ABS, /* type */
705 0, /* rightshift */
706 2, /* size (0 = byte, 1 = short, 2 = long) */
707 16, /* bitsize */
708 FALSE, /* pc_relative */
709 0, /* bitpos */
710 complain_overflow_bitfield,/* complain_on_overflow */
711 bfd_elf_generic_reloc, /* special_function */
712 "R_ARM_MOVT_ABS", /* name */
713 FALSE, /* partial_inplace */
714 0x000f0fff, /* src_mask */
715 0x000f0fff, /* dst_mask */
716 FALSE), /* pcrel_offset */
717
718 HOWTO (R_ARM_MOVW_PREL_NC, /* type */
719 0, /* rightshift */
720 2, /* size (0 = byte, 1 = short, 2 = long) */
721 16, /* bitsize */
722 TRUE, /* pc_relative */
723 0, /* bitpos */
724 complain_overflow_dont,/* complain_on_overflow */
725 bfd_elf_generic_reloc, /* special_function */
726 "R_ARM_MOVW_PREL_NC", /* name */
727 FALSE, /* partial_inplace */
728 0x000f0fff, /* src_mask */
729 0x000f0fff, /* dst_mask */
730 TRUE), /* pcrel_offset */
731
732 HOWTO (R_ARM_MOVT_PREL, /* type */
733 0, /* rightshift */
734 2, /* size (0 = byte, 1 = short, 2 = long) */
735 16, /* bitsize */
736 TRUE, /* pc_relative */
737 0, /* bitpos */
738 complain_overflow_bitfield,/* complain_on_overflow */
739 bfd_elf_generic_reloc, /* special_function */
740 "R_ARM_MOVT_PREL", /* name */
741 FALSE, /* partial_inplace */
742 0x000f0fff, /* src_mask */
743 0x000f0fff, /* dst_mask */
744 TRUE), /* pcrel_offset */
745
746 HOWTO (R_ARM_THM_MOVW_ABS_NC, /* type */
747 0, /* rightshift */
748 2, /* size (0 = byte, 1 = short, 2 = long) */
749 16, /* bitsize */
750 FALSE, /* pc_relative */
751 0, /* bitpos */
752 complain_overflow_dont,/* complain_on_overflow */
753 bfd_elf_generic_reloc, /* special_function */
754 "R_ARM_THM_MOVW_ABS_NC",/* name */
755 FALSE, /* partial_inplace */
756 0x040f70ff, /* src_mask */
757 0x040f70ff, /* dst_mask */
758 FALSE), /* pcrel_offset */
759
760 HOWTO (R_ARM_THM_MOVT_ABS, /* type */
761 0, /* rightshift */
762 2, /* size (0 = byte, 1 = short, 2 = long) */
763 16, /* bitsize */
764 FALSE, /* pc_relative */
765 0, /* bitpos */
766 complain_overflow_bitfield,/* complain_on_overflow */
767 bfd_elf_generic_reloc, /* special_function */
768 "R_ARM_THM_MOVT_ABS", /* name */
769 FALSE, /* partial_inplace */
770 0x040f70ff, /* src_mask */
771 0x040f70ff, /* dst_mask */
772 FALSE), /* pcrel_offset */
773
774 HOWTO (R_ARM_THM_MOVW_PREL_NC,/* type */
775 0, /* rightshift */
776 2, /* size (0 = byte, 1 = short, 2 = long) */
777 16, /* bitsize */
778 TRUE, /* pc_relative */
779 0, /* bitpos */
780 complain_overflow_dont,/* complain_on_overflow */
781 bfd_elf_generic_reloc, /* special_function */
782 "R_ARM_THM_MOVW_PREL_NC",/* name */
783 FALSE, /* partial_inplace */
784 0x040f70ff, /* src_mask */
785 0x040f70ff, /* dst_mask */
786 TRUE), /* pcrel_offset */
787
788 HOWTO (R_ARM_THM_MOVT_PREL, /* type */
789 0, /* rightshift */
790 2, /* size (0 = byte, 1 = short, 2 = long) */
791 16, /* bitsize */
792 TRUE, /* pc_relative */
793 0, /* bitpos */
794 complain_overflow_bitfield,/* complain_on_overflow */
795 bfd_elf_generic_reloc, /* special_function */
796 "R_ARM_THM_MOVT_PREL", /* name */
797 FALSE, /* partial_inplace */
798 0x040f70ff, /* src_mask */
799 0x040f70ff, /* dst_mask */
800 TRUE), /* pcrel_offset */
801
802 HOWTO (R_ARM_THM_JUMP19, /* type */
803 1, /* rightshift */
804 2, /* size (0 = byte, 1 = short, 2 = long) */
805 19, /* bitsize */
806 TRUE, /* pc_relative */
807 0, /* bitpos */
808 complain_overflow_signed,/* complain_on_overflow */
809 bfd_elf_generic_reloc, /* special_function */
810 "R_ARM_THM_JUMP19", /* name */
811 FALSE, /* partial_inplace */
812 0x043f2fff, /* src_mask */
813 0x043f2fff, /* dst_mask */
814 TRUE), /* pcrel_offset */
815
816 HOWTO (R_ARM_THM_JUMP6, /* type */
817 1, /* rightshift */
818 1, /* size (0 = byte, 1 = short, 2 = long) */
819 6, /* bitsize */
820 TRUE, /* pc_relative */
821 0, /* bitpos */
822 complain_overflow_unsigned,/* complain_on_overflow */
823 bfd_elf_generic_reloc, /* special_function */
824 "R_ARM_THM_JUMP6", /* name */
825 FALSE, /* partial_inplace */
826 0x02f8, /* src_mask */
827 0x02f8, /* dst_mask */
828 TRUE), /* pcrel_offset */
829
830 /* These are declared as 13-bit signed relocations because we can
831 address -4095 .. 4095(base) by altering ADDW to SUBW or vice
832 versa. */
833 HOWTO (R_ARM_THM_ALU_PREL_11_0,/* type */
834 0, /* rightshift */
835 2, /* size (0 = byte, 1 = short, 2 = long) */
836 13, /* bitsize */
837 TRUE, /* pc_relative */
838 0, /* bitpos */
839 complain_overflow_dont,/* complain_on_overflow */
840 bfd_elf_generic_reloc, /* special_function */
841 "R_ARM_THM_ALU_PREL_11_0",/* name */
842 FALSE, /* partial_inplace */
843 0xffffffff, /* src_mask */
844 0xffffffff, /* dst_mask */
845 TRUE), /* pcrel_offset */
846
847 HOWTO (R_ARM_THM_PC12, /* type */
848 0, /* rightshift */
849 2, /* size (0 = byte, 1 = short, 2 = long) */
850 13, /* bitsize */
851 TRUE, /* pc_relative */
852 0, /* bitpos */
853 complain_overflow_dont,/* complain_on_overflow */
854 bfd_elf_generic_reloc, /* special_function */
855 "R_ARM_THM_PC12", /* name */
856 FALSE, /* partial_inplace */
857 0xffffffff, /* src_mask */
858 0xffffffff, /* dst_mask */
859 TRUE), /* pcrel_offset */
860
861 HOWTO (R_ARM_ABS32_NOI, /* type */
862 0, /* rightshift */
863 2, /* size (0 = byte, 1 = short, 2 = long) */
864 32, /* bitsize */
865 FALSE, /* pc_relative */
866 0, /* bitpos */
867 complain_overflow_dont,/* complain_on_overflow */
868 bfd_elf_generic_reloc, /* special_function */
869 "R_ARM_ABS32_NOI", /* name */
870 FALSE, /* partial_inplace */
871 0xffffffff, /* src_mask */
872 0xffffffff, /* dst_mask */
873 FALSE), /* pcrel_offset */
874
875 HOWTO (R_ARM_REL32_NOI, /* type */
876 0, /* rightshift */
877 2, /* size (0 = byte, 1 = short, 2 = long) */
878 32, /* bitsize */
879 TRUE, /* pc_relative */
880 0, /* bitpos */
881 complain_overflow_dont,/* complain_on_overflow */
882 bfd_elf_generic_reloc, /* special_function */
883 "R_ARM_REL32_NOI", /* name */
884 FALSE, /* partial_inplace */
885 0xffffffff, /* src_mask */
886 0xffffffff, /* dst_mask */
887 FALSE), /* pcrel_offset */
888
889 /* Group relocations. */
890
891 HOWTO (R_ARM_ALU_PC_G0_NC, /* type */
892 0, /* rightshift */
893 2, /* size (0 = byte, 1 = short, 2 = long) */
894 32, /* bitsize */
895 TRUE, /* pc_relative */
896 0, /* bitpos */
897 complain_overflow_dont,/* complain_on_overflow */
898 bfd_elf_generic_reloc, /* special_function */
899 "R_ARM_ALU_PC_G0_NC", /* name */
900 FALSE, /* partial_inplace */
901 0xffffffff, /* src_mask */
902 0xffffffff, /* dst_mask */
903 TRUE), /* pcrel_offset */
904
905 HOWTO (R_ARM_ALU_PC_G0, /* type */
906 0, /* rightshift */
907 2, /* size (0 = byte, 1 = short, 2 = long) */
908 32, /* bitsize */
909 TRUE, /* pc_relative */
910 0, /* bitpos */
911 complain_overflow_dont,/* complain_on_overflow */
912 bfd_elf_generic_reloc, /* special_function */
913 "R_ARM_ALU_PC_G0", /* name */
914 FALSE, /* partial_inplace */
915 0xffffffff, /* src_mask */
916 0xffffffff, /* dst_mask */
917 TRUE), /* pcrel_offset */
918
919 HOWTO (R_ARM_ALU_PC_G1_NC, /* type */
920 0, /* rightshift */
921 2, /* size (0 = byte, 1 = short, 2 = long) */
922 32, /* bitsize */
923 TRUE, /* pc_relative */
924 0, /* bitpos */
925 complain_overflow_dont,/* complain_on_overflow */
926 bfd_elf_generic_reloc, /* special_function */
927 "R_ARM_ALU_PC_G1_NC", /* name */
928 FALSE, /* partial_inplace */
929 0xffffffff, /* src_mask */
930 0xffffffff, /* dst_mask */
931 TRUE), /* pcrel_offset */
932
933 HOWTO (R_ARM_ALU_PC_G1, /* type */
934 0, /* rightshift */
935 2, /* size (0 = byte, 1 = short, 2 = long) */
936 32, /* bitsize */
937 TRUE, /* pc_relative */
938 0, /* bitpos */
939 complain_overflow_dont,/* complain_on_overflow */
940 bfd_elf_generic_reloc, /* special_function */
941 "R_ARM_ALU_PC_G1", /* name */
942 FALSE, /* partial_inplace */
943 0xffffffff, /* src_mask */
944 0xffffffff, /* dst_mask */
945 TRUE), /* pcrel_offset */
946
947 HOWTO (R_ARM_ALU_PC_G2, /* type */
948 0, /* rightshift */
949 2, /* size (0 = byte, 1 = short, 2 = long) */
950 32, /* bitsize */
951 TRUE, /* pc_relative */
952 0, /* bitpos */
953 complain_overflow_dont,/* complain_on_overflow */
954 bfd_elf_generic_reloc, /* special_function */
955 "R_ARM_ALU_PC_G2", /* name */
956 FALSE, /* partial_inplace */
957 0xffffffff, /* src_mask */
958 0xffffffff, /* dst_mask */
959 TRUE), /* pcrel_offset */
960
961 HOWTO (R_ARM_LDR_PC_G1, /* type */
962 0, /* rightshift */
963 2, /* size (0 = byte, 1 = short, 2 = long) */
964 32, /* bitsize */
965 TRUE, /* pc_relative */
966 0, /* bitpos */
967 complain_overflow_dont,/* complain_on_overflow */
968 bfd_elf_generic_reloc, /* special_function */
969 "R_ARM_LDR_PC_G1", /* name */
970 FALSE, /* partial_inplace */
971 0xffffffff, /* src_mask */
972 0xffffffff, /* dst_mask */
973 TRUE), /* pcrel_offset */
974
975 HOWTO (R_ARM_LDR_PC_G2, /* type */
976 0, /* rightshift */
977 2, /* size (0 = byte, 1 = short, 2 = long) */
978 32, /* bitsize */
979 TRUE, /* pc_relative */
980 0, /* bitpos */
981 complain_overflow_dont,/* complain_on_overflow */
982 bfd_elf_generic_reloc, /* special_function */
983 "R_ARM_LDR_PC_G2", /* name */
984 FALSE, /* partial_inplace */
985 0xffffffff, /* src_mask */
986 0xffffffff, /* dst_mask */
987 TRUE), /* pcrel_offset */
988
989 HOWTO (R_ARM_LDRS_PC_G0, /* type */
990 0, /* rightshift */
991 2, /* size (0 = byte, 1 = short, 2 = long) */
992 32, /* bitsize */
993 TRUE, /* pc_relative */
994 0, /* bitpos */
995 complain_overflow_dont,/* complain_on_overflow */
996 bfd_elf_generic_reloc, /* special_function */
997 "R_ARM_LDRS_PC_G0", /* name */
998 FALSE, /* partial_inplace */
999 0xffffffff, /* src_mask */
1000 0xffffffff, /* dst_mask */
1001 TRUE), /* pcrel_offset */
1002
1003 HOWTO (R_ARM_LDRS_PC_G1, /* type */
1004 0, /* rightshift */
1005 2, /* size (0 = byte, 1 = short, 2 = long) */
1006 32, /* bitsize */
1007 TRUE, /* pc_relative */
1008 0, /* bitpos */
1009 complain_overflow_dont,/* complain_on_overflow */
1010 bfd_elf_generic_reloc, /* special_function */
1011 "R_ARM_LDRS_PC_G1", /* name */
1012 FALSE, /* partial_inplace */
1013 0xffffffff, /* src_mask */
1014 0xffffffff, /* dst_mask */
1015 TRUE), /* pcrel_offset */
1016
1017 HOWTO (R_ARM_LDRS_PC_G2, /* type */
1018 0, /* rightshift */
1019 2, /* size (0 = byte, 1 = short, 2 = long) */
1020 32, /* bitsize */
1021 TRUE, /* pc_relative */
1022 0, /* bitpos */
1023 complain_overflow_dont,/* complain_on_overflow */
1024 bfd_elf_generic_reloc, /* special_function */
1025 "R_ARM_LDRS_PC_G2", /* name */
1026 FALSE, /* partial_inplace */
1027 0xffffffff, /* src_mask */
1028 0xffffffff, /* dst_mask */
1029 TRUE), /* pcrel_offset */
1030
1031 HOWTO (R_ARM_LDC_PC_G0, /* type */
1032 0, /* rightshift */
1033 2, /* size (0 = byte, 1 = short, 2 = long) */
1034 32, /* bitsize */
1035 TRUE, /* pc_relative */
1036 0, /* bitpos */
1037 complain_overflow_dont,/* complain_on_overflow */
1038 bfd_elf_generic_reloc, /* special_function */
1039 "R_ARM_LDC_PC_G0", /* name */
1040 FALSE, /* partial_inplace */
1041 0xffffffff, /* src_mask */
1042 0xffffffff, /* dst_mask */
1043 TRUE), /* pcrel_offset */
1044
1045 HOWTO (R_ARM_LDC_PC_G1, /* type */
1046 0, /* rightshift */
1047 2, /* size (0 = byte, 1 = short, 2 = long) */
1048 32, /* bitsize */
1049 TRUE, /* pc_relative */
1050 0, /* bitpos */
1051 complain_overflow_dont,/* complain_on_overflow */
1052 bfd_elf_generic_reloc, /* special_function */
1053 "R_ARM_LDC_PC_G1", /* name */
1054 FALSE, /* partial_inplace */
1055 0xffffffff, /* src_mask */
1056 0xffffffff, /* dst_mask */
1057 TRUE), /* pcrel_offset */
1058
1059 HOWTO (R_ARM_LDC_PC_G2, /* type */
1060 0, /* rightshift */
1061 2, /* size (0 = byte, 1 = short, 2 = long) */
1062 32, /* bitsize */
1063 TRUE, /* pc_relative */
1064 0, /* bitpos */
1065 complain_overflow_dont,/* complain_on_overflow */
1066 bfd_elf_generic_reloc, /* special_function */
1067 "R_ARM_LDC_PC_G2", /* name */
1068 FALSE, /* partial_inplace */
1069 0xffffffff, /* src_mask */
1070 0xffffffff, /* dst_mask */
1071 TRUE), /* pcrel_offset */
1072
1073 HOWTO (R_ARM_ALU_SB_G0_NC, /* type */
1074 0, /* rightshift */
1075 2, /* size (0 = byte, 1 = short, 2 = long) */
1076 32, /* bitsize */
1077 TRUE, /* pc_relative */
1078 0, /* bitpos */
1079 complain_overflow_dont,/* complain_on_overflow */
1080 bfd_elf_generic_reloc, /* special_function */
1081 "R_ARM_ALU_SB_G0_NC", /* name */
1082 FALSE, /* partial_inplace */
1083 0xffffffff, /* src_mask */
1084 0xffffffff, /* dst_mask */
1085 TRUE), /* pcrel_offset */
1086
1087 HOWTO (R_ARM_ALU_SB_G0, /* type */
1088 0, /* rightshift */
1089 2, /* size (0 = byte, 1 = short, 2 = long) */
1090 32, /* bitsize */
1091 TRUE, /* pc_relative */
1092 0, /* bitpos */
1093 complain_overflow_dont,/* complain_on_overflow */
1094 bfd_elf_generic_reloc, /* special_function */
1095 "R_ARM_ALU_SB_G0", /* name */
1096 FALSE, /* partial_inplace */
1097 0xffffffff, /* src_mask */
1098 0xffffffff, /* dst_mask */
1099 TRUE), /* pcrel_offset */
1100
1101 HOWTO (R_ARM_ALU_SB_G1_NC, /* type */
1102 0, /* rightshift */
1103 2, /* size (0 = byte, 1 = short, 2 = long) */
1104 32, /* bitsize */
1105 TRUE, /* pc_relative */
1106 0, /* bitpos */
1107 complain_overflow_dont,/* complain_on_overflow */
1108 bfd_elf_generic_reloc, /* special_function */
1109 "R_ARM_ALU_SB_G1_NC", /* name */
1110 FALSE, /* partial_inplace */
1111 0xffffffff, /* src_mask */
1112 0xffffffff, /* dst_mask */
1113 TRUE), /* pcrel_offset */
1114
1115 HOWTO (R_ARM_ALU_SB_G1, /* type */
1116 0, /* rightshift */
1117 2, /* size (0 = byte, 1 = short, 2 = long) */
1118 32, /* bitsize */
1119 TRUE, /* pc_relative */
1120 0, /* bitpos */
1121 complain_overflow_dont,/* complain_on_overflow */
1122 bfd_elf_generic_reloc, /* special_function */
1123 "R_ARM_ALU_SB_G1", /* name */
1124 FALSE, /* partial_inplace */
1125 0xffffffff, /* src_mask */
1126 0xffffffff, /* dst_mask */
1127 TRUE), /* pcrel_offset */
1128
1129 HOWTO (R_ARM_ALU_SB_G2, /* type */
1130 0, /* rightshift */
1131 2, /* size (0 = byte, 1 = short, 2 = long) */
1132 32, /* bitsize */
1133 TRUE, /* pc_relative */
1134 0, /* bitpos */
1135 complain_overflow_dont,/* complain_on_overflow */
1136 bfd_elf_generic_reloc, /* special_function */
1137 "R_ARM_ALU_SB_G2", /* name */
1138 FALSE, /* partial_inplace */
1139 0xffffffff, /* src_mask */
1140 0xffffffff, /* dst_mask */
1141 TRUE), /* pcrel_offset */
1142
1143 HOWTO (R_ARM_LDR_SB_G0, /* type */
1144 0, /* rightshift */
1145 2, /* size (0 = byte, 1 = short, 2 = long) */
1146 32, /* bitsize */
1147 TRUE, /* pc_relative */
1148 0, /* bitpos */
1149 complain_overflow_dont,/* complain_on_overflow */
1150 bfd_elf_generic_reloc, /* special_function */
1151 "R_ARM_LDR_SB_G0", /* name */
1152 FALSE, /* partial_inplace */
1153 0xffffffff, /* src_mask */
1154 0xffffffff, /* dst_mask */
1155 TRUE), /* pcrel_offset */
1156
1157 HOWTO (R_ARM_LDR_SB_G1, /* type */
1158 0, /* rightshift */
1159 2, /* size (0 = byte, 1 = short, 2 = long) */
1160 32, /* bitsize */
1161 TRUE, /* pc_relative */
1162 0, /* bitpos */
1163 complain_overflow_dont,/* complain_on_overflow */
1164 bfd_elf_generic_reloc, /* special_function */
1165 "R_ARM_LDR_SB_G1", /* name */
1166 FALSE, /* partial_inplace */
1167 0xffffffff, /* src_mask */
1168 0xffffffff, /* dst_mask */
1169 TRUE), /* pcrel_offset */
1170
1171 HOWTO (R_ARM_LDR_SB_G2, /* type */
1172 0, /* rightshift */
1173 2, /* size (0 = byte, 1 = short, 2 = long) */
1174 32, /* bitsize */
1175 TRUE, /* pc_relative */
1176 0, /* bitpos */
1177 complain_overflow_dont,/* complain_on_overflow */
1178 bfd_elf_generic_reloc, /* special_function */
1179 "R_ARM_LDR_SB_G2", /* name */
1180 FALSE, /* partial_inplace */
1181 0xffffffff, /* src_mask */
1182 0xffffffff, /* dst_mask */
1183 TRUE), /* pcrel_offset */
1184
1185 HOWTO (R_ARM_LDRS_SB_G0, /* type */
1186 0, /* rightshift */
1187 2, /* size (0 = byte, 1 = short, 2 = long) */
1188 32, /* bitsize */
1189 TRUE, /* pc_relative */
1190 0, /* bitpos */
1191 complain_overflow_dont,/* complain_on_overflow */
1192 bfd_elf_generic_reloc, /* special_function */
1193 "R_ARM_LDRS_SB_G0", /* name */
1194 FALSE, /* partial_inplace */
1195 0xffffffff, /* src_mask */
1196 0xffffffff, /* dst_mask */
1197 TRUE), /* pcrel_offset */
1198
1199 HOWTO (R_ARM_LDRS_SB_G1, /* type */
1200 0, /* rightshift */
1201 2, /* size (0 = byte, 1 = short, 2 = long) */
1202 32, /* bitsize */
1203 TRUE, /* pc_relative */
1204 0, /* bitpos */
1205 complain_overflow_dont,/* complain_on_overflow */
1206 bfd_elf_generic_reloc, /* special_function */
1207 "R_ARM_LDRS_SB_G1", /* name */
1208 FALSE, /* partial_inplace */
1209 0xffffffff, /* src_mask */
1210 0xffffffff, /* dst_mask */
1211 TRUE), /* pcrel_offset */
1212
1213 HOWTO (R_ARM_LDRS_SB_G2, /* type */
1214 0, /* rightshift */
1215 2, /* size (0 = byte, 1 = short, 2 = long) */
1216 32, /* bitsize */
1217 TRUE, /* pc_relative */
1218 0, /* bitpos */
1219 complain_overflow_dont,/* complain_on_overflow */
1220 bfd_elf_generic_reloc, /* special_function */
1221 "R_ARM_LDRS_SB_G2", /* name */
1222 FALSE, /* partial_inplace */
1223 0xffffffff, /* src_mask */
1224 0xffffffff, /* dst_mask */
1225 TRUE), /* pcrel_offset */
1226
1227 HOWTO (R_ARM_LDC_SB_G0, /* type */
1228 0, /* rightshift */
1229 2, /* size (0 = byte, 1 = short, 2 = long) */
1230 32, /* bitsize */
1231 TRUE, /* pc_relative */
1232 0, /* bitpos */
1233 complain_overflow_dont,/* complain_on_overflow */
1234 bfd_elf_generic_reloc, /* special_function */
1235 "R_ARM_LDC_SB_G0", /* name */
1236 FALSE, /* partial_inplace */
1237 0xffffffff, /* src_mask */
1238 0xffffffff, /* dst_mask */
1239 TRUE), /* pcrel_offset */
1240
1241 HOWTO (R_ARM_LDC_SB_G1, /* type */
1242 0, /* rightshift */
1243 2, /* size (0 = byte, 1 = short, 2 = long) */
1244 32, /* bitsize */
1245 TRUE, /* pc_relative */
1246 0, /* bitpos */
1247 complain_overflow_dont,/* complain_on_overflow */
1248 bfd_elf_generic_reloc, /* special_function */
1249 "R_ARM_LDC_SB_G1", /* name */
1250 FALSE, /* partial_inplace */
1251 0xffffffff, /* src_mask */
1252 0xffffffff, /* dst_mask */
1253 TRUE), /* pcrel_offset */
1254
1255 HOWTO (R_ARM_LDC_SB_G2, /* type */
1256 0, /* rightshift */
1257 2, /* size (0 = byte, 1 = short, 2 = long) */
1258 32, /* bitsize */
1259 TRUE, /* pc_relative */
1260 0, /* bitpos */
1261 complain_overflow_dont,/* complain_on_overflow */
1262 bfd_elf_generic_reloc, /* special_function */
1263 "R_ARM_LDC_SB_G2", /* name */
1264 FALSE, /* partial_inplace */
1265 0xffffffff, /* src_mask */
1266 0xffffffff, /* dst_mask */
1267 TRUE), /* pcrel_offset */
1268
1269 /* End of group relocations. */
1270
1271 HOWTO (R_ARM_MOVW_BREL_NC, /* type */
1272 0, /* rightshift */
1273 2, /* size (0 = byte, 1 = short, 2 = long) */
1274 16, /* bitsize */
1275 FALSE, /* pc_relative */
1276 0, /* bitpos */
1277 complain_overflow_dont,/* complain_on_overflow */
1278 bfd_elf_generic_reloc, /* special_function */
1279 "R_ARM_MOVW_BREL_NC", /* name */
1280 FALSE, /* partial_inplace */
1281 0x0000ffff, /* src_mask */
1282 0x0000ffff, /* dst_mask */
1283 FALSE), /* pcrel_offset */
1284
1285 HOWTO (R_ARM_MOVT_BREL, /* type */
1286 0, /* rightshift */
1287 2, /* size (0 = byte, 1 = short, 2 = long) */
1288 16, /* bitsize */
1289 FALSE, /* pc_relative */
1290 0, /* bitpos */
1291 complain_overflow_bitfield,/* complain_on_overflow */
1292 bfd_elf_generic_reloc, /* special_function */
1293 "R_ARM_MOVT_BREL", /* name */
1294 FALSE, /* partial_inplace */
1295 0x0000ffff, /* src_mask */
1296 0x0000ffff, /* dst_mask */
1297 FALSE), /* pcrel_offset */
1298
1299 HOWTO (R_ARM_MOVW_BREL, /* type */
1300 0, /* rightshift */
1301 2, /* size (0 = byte, 1 = short, 2 = long) */
1302 16, /* bitsize */
1303 FALSE, /* pc_relative */
1304 0, /* bitpos */
1305 complain_overflow_dont,/* complain_on_overflow */
1306 bfd_elf_generic_reloc, /* special_function */
1307 "R_ARM_MOVW_BREL", /* name */
1308 FALSE, /* partial_inplace */
1309 0x0000ffff, /* src_mask */
1310 0x0000ffff, /* dst_mask */
1311 FALSE), /* pcrel_offset */
1312
1313 HOWTO (R_ARM_THM_MOVW_BREL_NC,/* type */
1314 0, /* rightshift */
1315 2, /* size (0 = byte, 1 = short, 2 = long) */
1316 16, /* bitsize */
1317 FALSE, /* pc_relative */
1318 0, /* bitpos */
1319 complain_overflow_dont,/* complain_on_overflow */
1320 bfd_elf_generic_reloc, /* special_function */
1321 "R_ARM_THM_MOVW_BREL_NC",/* name */
1322 FALSE, /* partial_inplace */
1323 0x040f70ff, /* src_mask */
1324 0x040f70ff, /* dst_mask */
1325 FALSE), /* pcrel_offset */
1326
1327 HOWTO (R_ARM_THM_MOVT_BREL, /* type */
1328 0, /* rightshift */
1329 2, /* size (0 = byte, 1 = short, 2 = long) */
1330 16, /* bitsize */
1331 FALSE, /* pc_relative */
1332 0, /* bitpos */
1333 complain_overflow_bitfield,/* complain_on_overflow */
1334 bfd_elf_generic_reloc, /* special_function */
1335 "R_ARM_THM_MOVT_BREL", /* name */
1336 FALSE, /* partial_inplace */
1337 0x040f70ff, /* src_mask */
1338 0x040f70ff, /* dst_mask */
1339 FALSE), /* pcrel_offset */
1340
1341 HOWTO (R_ARM_THM_MOVW_BREL, /* type */
1342 0, /* rightshift */
1343 2, /* size (0 = byte, 1 = short, 2 = long) */
1344 16, /* bitsize */
1345 FALSE, /* pc_relative */
1346 0, /* bitpos */
1347 complain_overflow_dont,/* complain_on_overflow */
1348 bfd_elf_generic_reloc, /* special_function */
1349 "R_ARM_THM_MOVW_BREL", /* name */
1350 FALSE, /* partial_inplace */
1351 0x040f70ff, /* src_mask */
1352 0x040f70ff, /* dst_mask */
1353 FALSE), /* pcrel_offset */
1354
1355 EMPTY_HOWTO (90), /* Unallocated. */
1356 EMPTY_HOWTO (91),
1357 EMPTY_HOWTO (92),
1358 EMPTY_HOWTO (93),
1359
1360 HOWTO (R_ARM_PLT32_ABS, /* type */
1361 0, /* rightshift */
1362 2, /* size (0 = byte, 1 = short, 2 = long) */
1363 32, /* bitsize */
1364 FALSE, /* pc_relative */
1365 0, /* bitpos */
1366 complain_overflow_dont,/* complain_on_overflow */
1367 bfd_elf_generic_reloc, /* special_function */
1368 "R_ARM_PLT32_ABS", /* name */
1369 FALSE, /* partial_inplace */
1370 0xffffffff, /* src_mask */
1371 0xffffffff, /* dst_mask */
1372 FALSE), /* pcrel_offset */
1373
1374 HOWTO (R_ARM_GOT_ABS, /* type */
1375 0, /* rightshift */
1376 2, /* size (0 = byte, 1 = short, 2 = long) */
1377 32, /* bitsize */
1378 FALSE, /* pc_relative */
1379 0, /* bitpos */
1380 complain_overflow_dont,/* complain_on_overflow */
1381 bfd_elf_generic_reloc, /* special_function */
1382 "R_ARM_GOT_ABS", /* name */
1383 FALSE, /* partial_inplace */
1384 0xffffffff, /* src_mask */
1385 0xffffffff, /* dst_mask */
1386 FALSE), /* pcrel_offset */
1387
1388 HOWTO (R_ARM_GOT_PREL, /* type */
1389 0, /* rightshift */
1390 2, /* size (0 = byte, 1 = short, 2 = long) */
1391 32, /* bitsize */
1392 TRUE, /* pc_relative */
1393 0, /* bitpos */
1394 complain_overflow_dont, /* complain_on_overflow */
1395 bfd_elf_generic_reloc, /* special_function */
1396 "R_ARM_GOT_PREL", /* name */
1397 FALSE, /* partial_inplace */
1398 0xffffffff, /* src_mask */
1399 0xffffffff, /* dst_mask */
1400 TRUE), /* pcrel_offset */
1401
1402 HOWTO (R_ARM_GOT_BREL12, /* type */
1403 0, /* rightshift */
1404 2, /* size (0 = byte, 1 = short, 2 = long) */
1405 12, /* bitsize */
1406 FALSE, /* pc_relative */
1407 0, /* bitpos */
1408 complain_overflow_bitfield,/* complain_on_overflow */
1409 bfd_elf_generic_reloc, /* special_function */
1410 "R_ARM_GOT_BREL12", /* name */
1411 FALSE, /* partial_inplace */
1412 0x00000fff, /* src_mask */
1413 0x00000fff, /* dst_mask */
1414 FALSE), /* pcrel_offset */
1415
1416 HOWTO (R_ARM_GOTOFF12, /* type */
1417 0, /* rightshift */
1418 2, /* size (0 = byte, 1 = short, 2 = long) */
1419 12, /* bitsize */
1420 FALSE, /* pc_relative */
1421 0, /* bitpos */
1422 complain_overflow_bitfield,/* complain_on_overflow */
1423 bfd_elf_generic_reloc, /* special_function */
1424 "R_ARM_GOTOFF12", /* name */
1425 FALSE, /* partial_inplace */
1426 0x00000fff, /* src_mask */
1427 0x00000fff, /* dst_mask */
1428 FALSE), /* pcrel_offset */
1429
1430 EMPTY_HOWTO (R_ARM_GOTRELAX), /* reserved for future GOT-load optimizations */
1431
1432 /* GNU extension to record C++ vtable member usage */
1433 HOWTO (R_ARM_GNU_VTENTRY, /* type */
1434 0, /* rightshift */
1435 2, /* size (0 = byte, 1 = short, 2 = long) */
1436 0, /* bitsize */
1437 FALSE, /* pc_relative */
1438 0, /* bitpos */
1439 complain_overflow_dont, /* complain_on_overflow */
1440 _bfd_elf_rel_vtable_reloc_fn, /* special_function */
1441 "R_ARM_GNU_VTENTRY", /* name */
1442 FALSE, /* partial_inplace */
1443 0, /* src_mask */
1444 0, /* dst_mask */
1445 FALSE), /* pcrel_offset */
1446
1447 /* GNU extension to record C++ vtable hierarchy */
1448 HOWTO (R_ARM_GNU_VTINHERIT, /* type */
1449 0, /* rightshift */
1450 2, /* size (0 = byte, 1 = short, 2 = long) */
1451 0, /* bitsize */
1452 FALSE, /* pc_relative */
1453 0, /* bitpos */
1454 complain_overflow_dont, /* complain_on_overflow */
1455 NULL, /* special_function */
1456 "R_ARM_GNU_VTINHERIT", /* name */
1457 FALSE, /* partial_inplace */
1458 0, /* src_mask */
1459 0, /* dst_mask */
1460 FALSE), /* pcrel_offset */
1461
1462 HOWTO (R_ARM_THM_JUMP11, /* type */
1463 1, /* rightshift */
1464 1, /* size (0 = byte, 1 = short, 2 = long) */
1465 11, /* bitsize */
1466 TRUE, /* pc_relative */
1467 0, /* bitpos */
1468 complain_overflow_signed, /* complain_on_overflow */
1469 bfd_elf_generic_reloc, /* special_function */
1470 "R_ARM_THM_JUMP11", /* name */
1471 FALSE, /* partial_inplace */
1472 0x000007ff, /* src_mask */
1473 0x000007ff, /* dst_mask */
1474 TRUE), /* pcrel_offset */
1475
1476 HOWTO (R_ARM_THM_JUMP8, /* type */
1477 1, /* rightshift */
1478 1, /* size (0 = byte, 1 = short, 2 = long) */
1479 8, /* bitsize */
1480 TRUE, /* pc_relative */
1481 0, /* bitpos */
1482 complain_overflow_signed, /* complain_on_overflow */
1483 bfd_elf_generic_reloc, /* special_function */
1484 "R_ARM_THM_JUMP8", /* name */
1485 FALSE, /* partial_inplace */
1486 0x000000ff, /* src_mask */
1487 0x000000ff, /* dst_mask */
1488 TRUE), /* pcrel_offset */
1489
1490 /* TLS relocations */
1491 HOWTO (R_ARM_TLS_GD32, /* type */
1492 0, /* rightshift */
1493 2, /* size (0 = byte, 1 = short, 2 = long) */
1494 32, /* bitsize */
1495 FALSE, /* pc_relative */
1496 0, /* bitpos */
1497 complain_overflow_bitfield,/* complain_on_overflow */
1498 NULL, /* special_function */
1499 "R_ARM_TLS_GD32", /* name */
1500 TRUE, /* partial_inplace */
1501 0xffffffff, /* src_mask */
1502 0xffffffff, /* dst_mask */
1503 FALSE), /* pcrel_offset */
1504
1505 HOWTO (R_ARM_TLS_LDM32, /* type */
1506 0, /* rightshift */
1507 2, /* size (0 = byte, 1 = short, 2 = long) */
1508 32, /* bitsize */
1509 FALSE, /* pc_relative */
1510 0, /* bitpos */
1511 complain_overflow_bitfield,/* complain_on_overflow */
1512 bfd_elf_generic_reloc, /* special_function */
1513 "R_ARM_TLS_LDM32", /* name */
1514 TRUE, /* partial_inplace */
1515 0xffffffff, /* src_mask */
1516 0xffffffff, /* dst_mask */
1517 FALSE), /* pcrel_offset */
1518
1519 HOWTO (R_ARM_TLS_LDO32, /* type */
1520 0, /* rightshift */
1521 2, /* size (0 = byte, 1 = short, 2 = long) */
1522 32, /* bitsize */
1523 FALSE, /* pc_relative */
1524 0, /* bitpos */
1525 complain_overflow_bitfield,/* complain_on_overflow */
1526 bfd_elf_generic_reloc, /* special_function */
1527 "R_ARM_TLS_LDO32", /* name */
1528 TRUE, /* partial_inplace */
1529 0xffffffff, /* src_mask */
1530 0xffffffff, /* dst_mask */
1531 FALSE), /* pcrel_offset */
1532
1533 HOWTO (R_ARM_TLS_IE32, /* type */
1534 0, /* rightshift */
1535 2, /* size (0 = byte, 1 = short, 2 = long) */
1536 32, /* bitsize */
1537 FALSE, /* pc_relative */
1538 0, /* bitpos */
1539 complain_overflow_bitfield,/* complain_on_overflow */
1540 NULL, /* special_function */
1541 "R_ARM_TLS_IE32", /* name */
1542 TRUE, /* partial_inplace */
1543 0xffffffff, /* src_mask */
1544 0xffffffff, /* dst_mask */
1545 FALSE), /* pcrel_offset */
1546
1547 HOWTO (R_ARM_TLS_LE32, /* type */
1548 0, /* rightshift */
1549 2, /* size (0 = byte, 1 = short, 2 = long) */
1550 32, /* bitsize */
1551 FALSE, /* pc_relative */
1552 0, /* bitpos */
1553 complain_overflow_bitfield,/* complain_on_overflow */
1554 bfd_elf_generic_reloc, /* special_function */
1555 "R_ARM_TLS_LE32", /* name */
1556 TRUE, /* partial_inplace */
1557 0xffffffff, /* src_mask */
1558 0xffffffff, /* dst_mask */
1559 FALSE), /* pcrel_offset */
1560
1561 HOWTO (R_ARM_TLS_LDO12, /* type */
1562 0, /* rightshift */
1563 2, /* size (0 = byte, 1 = short, 2 = long) */
1564 12, /* bitsize */
1565 FALSE, /* pc_relative */
1566 0, /* bitpos */
1567 complain_overflow_bitfield,/* complain_on_overflow */
1568 bfd_elf_generic_reloc, /* special_function */
1569 "R_ARM_TLS_LDO12", /* name */
1570 FALSE, /* partial_inplace */
1571 0x00000fff, /* src_mask */
1572 0x00000fff, /* dst_mask */
1573 FALSE), /* pcrel_offset */
1574
1575 HOWTO (R_ARM_TLS_LE12, /* type */
1576 0, /* rightshift */
1577 2, /* size (0 = byte, 1 = short, 2 = long) */
1578 12, /* bitsize */
1579 FALSE, /* pc_relative */
1580 0, /* bitpos */
1581 complain_overflow_bitfield,/* complain_on_overflow */
1582 bfd_elf_generic_reloc, /* special_function */
1583 "R_ARM_TLS_LE12", /* name */
1584 FALSE, /* partial_inplace */
1585 0x00000fff, /* src_mask */
1586 0x00000fff, /* dst_mask */
1587 FALSE), /* pcrel_offset */
1588
1589 HOWTO (R_ARM_TLS_IE12GP, /* type */
1590 0, /* rightshift */
1591 2, /* size (0 = byte, 1 = short, 2 = long) */
1592 12, /* bitsize */
1593 FALSE, /* pc_relative */
1594 0, /* bitpos */
1595 complain_overflow_bitfield,/* complain_on_overflow */
1596 bfd_elf_generic_reloc, /* special_function */
1597 "R_ARM_TLS_IE12GP", /* name */
1598 FALSE, /* partial_inplace */
1599 0x00000fff, /* src_mask */
1600 0x00000fff, /* dst_mask */
1601 FALSE), /* pcrel_offset */
1602 };
1603
1604 /* 112-127 private relocations
1605 128 R_ARM_ME_TOO, obsolete
1606 129-255 unallocated in AAELF.
1607
1608 249-255 extended, currently unused, relocations: */
1609
1610 static reloc_howto_type elf32_arm_howto_table_2[4] =
1611 {
1612 HOWTO (R_ARM_RREL32, /* type */
1613 0, /* rightshift */
1614 0, /* size (0 = byte, 1 = short, 2 = long) */
1615 0, /* bitsize */
1616 FALSE, /* pc_relative */
1617 0, /* bitpos */
1618 complain_overflow_dont,/* complain_on_overflow */
1619 bfd_elf_generic_reloc, /* special_function */
1620 "R_ARM_RREL32", /* name */
1621 FALSE, /* partial_inplace */
1622 0, /* src_mask */
1623 0, /* dst_mask */
1624 FALSE), /* pcrel_offset */
1625
1626 HOWTO (R_ARM_RABS32, /* type */
1627 0, /* rightshift */
1628 0, /* size (0 = byte, 1 = short, 2 = long) */
1629 0, /* bitsize */
1630 FALSE, /* pc_relative */
1631 0, /* bitpos */
1632 complain_overflow_dont,/* complain_on_overflow */
1633 bfd_elf_generic_reloc, /* special_function */
1634 "R_ARM_RABS32", /* name */
1635 FALSE, /* partial_inplace */
1636 0, /* src_mask */
1637 0, /* dst_mask */
1638 FALSE), /* pcrel_offset */
1639
1640 HOWTO (R_ARM_RPC24, /* type */
1641 0, /* rightshift */
1642 0, /* size (0 = byte, 1 = short, 2 = long) */
1643 0, /* bitsize */
1644 FALSE, /* pc_relative */
1645 0, /* bitpos */
1646 complain_overflow_dont,/* complain_on_overflow */
1647 bfd_elf_generic_reloc, /* special_function */
1648 "R_ARM_RPC24", /* name */
1649 FALSE, /* partial_inplace */
1650 0, /* src_mask */
1651 0, /* dst_mask */
1652 FALSE), /* pcrel_offset */
1653
1654 HOWTO (R_ARM_RBASE, /* type */
1655 0, /* rightshift */
1656 0, /* size (0 = byte, 1 = short, 2 = long) */
1657 0, /* bitsize */
1658 FALSE, /* pc_relative */
1659 0, /* bitpos */
1660 complain_overflow_dont,/* complain_on_overflow */
1661 bfd_elf_generic_reloc, /* special_function */
1662 "R_ARM_RBASE", /* name */
1663 FALSE, /* partial_inplace */
1664 0, /* src_mask */
1665 0, /* dst_mask */
1666 FALSE) /* pcrel_offset */
1667 };
1668
1669 static reloc_howto_type *
1670 elf32_arm_howto_from_type (unsigned int r_type)
1671 {
1672 if (r_type < ARRAY_SIZE (elf32_arm_howto_table_1))
1673 return &elf32_arm_howto_table_1[r_type];
1674
1675 if (r_type >= R_ARM_RREL32
1676 && r_type < R_ARM_RREL32 + ARRAY_SIZE (elf32_arm_howto_table_2))
1677 return &elf32_arm_howto_table_2[r_type - R_ARM_RREL32];
1678
1679 return NULL;
1680 }
1681
1682 static void
1683 elf32_arm_info_to_howto (bfd * abfd ATTRIBUTE_UNUSED, arelent * bfd_reloc,
1684 Elf_Internal_Rela * elf_reloc)
1685 {
1686 unsigned int r_type;
1687
1688 r_type = ELF32_R_TYPE (elf_reloc->r_info);
1689 bfd_reloc->howto = elf32_arm_howto_from_type (r_type);
1690 }
1691
1692 struct elf32_arm_reloc_map
1693 {
1694 bfd_reloc_code_real_type bfd_reloc_val;
1695 unsigned char elf_reloc_val;
1696 };
1697
1698 /* All entries in this list must also be present in elf32_arm_howto_table. */
1699 static const struct elf32_arm_reloc_map elf32_arm_reloc_map[] =
1700 {
1701 {BFD_RELOC_NONE, R_ARM_NONE},
1702 {BFD_RELOC_ARM_PCREL_BRANCH, R_ARM_PC24},
1703 {BFD_RELOC_ARM_PCREL_CALL, R_ARM_CALL},
1704 {BFD_RELOC_ARM_PCREL_JUMP, R_ARM_JUMP24},
1705 {BFD_RELOC_ARM_PCREL_BLX, R_ARM_XPC25},
1706 {BFD_RELOC_THUMB_PCREL_BLX, R_ARM_THM_XPC22},
1707 {BFD_RELOC_32, R_ARM_ABS32},
1708 {BFD_RELOC_32_PCREL, R_ARM_REL32},
1709 {BFD_RELOC_8, R_ARM_ABS8},
1710 {BFD_RELOC_16, R_ARM_ABS16},
1711 {BFD_RELOC_ARM_OFFSET_IMM, R_ARM_ABS12},
1712 {BFD_RELOC_ARM_THUMB_OFFSET, R_ARM_THM_ABS5},
1713 {BFD_RELOC_THUMB_PCREL_BRANCH25, R_ARM_THM_JUMP24},
1714 {BFD_RELOC_THUMB_PCREL_BRANCH23, R_ARM_THM_CALL},
1715 {BFD_RELOC_THUMB_PCREL_BRANCH12, R_ARM_THM_JUMP11},
1716 {BFD_RELOC_THUMB_PCREL_BRANCH20, R_ARM_THM_JUMP19},
1717 {BFD_RELOC_THUMB_PCREL_BRANCH9, R_ARM_THM_JUMP8},
1718 {BFD_RELOC_THUMB_PCREL_BRANCH7, R_ARM_THM_JUMP6},
1719 {BFD_RELOC_ARM_GLOB_DAT, R_ARM_GLOB_DAT},
1720 {BFD_RELOC_ARM_JUMP_SLOT, R_ARM_JUMP_SLOT},
1721 {BFD_RELOC_ARM_RELATIVE, R_ARM_RELATIVE},
1722 {BFD_RELOC_ARM_GOTOFF, R_ARM_GOTOFF32},
1723 {BFD_RELOC_ARM_GOTPC, R_ARM_GOTPC},
1724 {BFD_RELOC_ARM_GOT32, R_ARM_GOT32},
1725 {BFD_RELOC_ARM_PLT32, R_ARM_PLT32},
1726 {BFD_RELOC_ARM_TARGET1, R_ARM_TARGET1},
1727 {BFD_RELOC_ARM_ROSEGREL32, R_ARM_ROSEGREL32},
1728 {BFD_RELOC_ARM_SBREL32, R_ARM_SBREL32},
1729 {BFD_RELOC_ARM_PREL31, R_ARM_PREL31},
1730 {BFD_RELOC_ARM_TARGET2, R_ARM_TARGET2},
1731 {BFD_RELOC_ARM_PLT32, R_ARM_PLT32},
1732 {BFD_RELOC_ARM_TLS_GD32, R_ARM_TLS_GD32},
1733 {BFD_RELOC_ARM_TLS_LDO32, R_ARM_TLS_LDO32},
1734 {BFD_RELOC_ARM_TLS_LDM32, R_ARM_TLS_LDM32},
1735 {BFD_RELOC_ARM_TLS_DTPMOD32, R_ARM_TLS_DTPMOD32},
1736 {BFD_RELOC_ARM_TLS_DTPOFF32, R_ARM_TLS_DTPOFF32},
1737 {BFD_RELOC_ARM_TLS_TPOFF32, R_ARM_TLS_TPOFF32},
1738 {BFD_RELOC_ARM_TLS_IE32, R_ARM_TLS_IE32},
1739 {BFD_RELOC_ARM_TLS_LE32, R_ARM_TLS_LE32},
1740 {BFD_RELOC_VTABLE_INHERIT, R_ARM_GNU_VTINHERIT},
1741 {BFD_RELOC_VTABLE_ENTRY, R_ARM_GNU_VTENTRY},
1742 {BFD_RELOC_ARM_MOVW, R_ARM_MOVW_ABS_NC},
1743 {BFD_RELOC_ARM_MOVT, R_ARM_MOVT_ABS},
1744 {BFD_RELOC_ARM_MOVW_PCREL, R_ARM_MOVW_PREL_NC},
1745 {BFD_RELOC_ARM_MOVT_PCREL, R_ARM_MOVT_PREL},
1746 {BFD_RELOC_ARM_THUMB_MOVW, R_ARM_THM_MOVW_ABS_NC},
1747 {BFD_RELOC_ARM_THUMB_MOVT, R_ARM_THM_MOVT_ABS},
1748 {BFD_RELOC_ARM_THUMB_MOVW_PCREL, R_ARM_THM_MOVW_PREL_NC},
1749 {BFD_RELOC_ARM_THUMB_MOVT_PCREL, R_ARM_THM_MOVT_PREL},
1750 {BFD_RELOC_ARM_ALU_PC_G0_NC, R_ARM_ALU_PC_G0_NC},
1751 {BFD_RELOC_ARM_ALU_PC_G0, R_ARM_ALU_PC_G0},
1752 {BFD_RELOC_ARM_ALU_PC_G1_NC, R_ARM_ALU_PC_G1_NC},
1753 {BFD_RELOC_ARM_ALU_PC_G1, R_ARM_ALU_PC_G1},
1754 {BFD_RELOC_ARM_ALU_PC_G2, R_ARM_ALU_PC_G2},
1755 {BFD_RELOC_ARM_LDR_PC_G0, R_ARM_LDR_PC_G0},
1756 {BFD_RELOC_ARM_LDR_PC_G1, R_ARM_LDR_PC_G1},
1757 {BFD_RELOC_ARM_LDR_PC_G2, R_ARM_LDR_PC_G2},
1758 {BFD_RELOC_ARM_LDRS_PC_G0, R_ARM_LDRS_PC_G0},
1759 {BFD_RELOC_ARM_LDRS_PC_G1, R_ARM_LDRS_PC_G1},
1760 {BFD_RELOC_ARM_LDRS_PC_G2, R_ARM_LDRS_PC_G2},
1761 {BFD_RELOC_ARM_LDC_PC_G0, R_ARM_LDC_PC_G0},
1762 {BFD_RELOC_ARM_LDC_PC_G1, R_ARM_LDC_PC_G1},
1763 {BFD_RELOC_ARM_LDC_PC_G2, R_ARM_LDC_PC_G2},
1764 {BFD_RELOC_ARM_ALU_SB_G0_NC, R_ARM_ALU_SB_G0_NC},
1765 {BFD_RELOC_ARM_ALU_SB_G0, R_ARM_ALU_SB_G0},
1766 {BFD_RELOC_ARM_ALU_SB_G1_NC, R_ARM_ALU_SB_G1_NC},
1767 {BFD_RELOC_ARM_ALU_SB_G1, R_ARM_ALU_SB_G1},
1768 {BFD_RELOC_ARM_ALU_SB_G2, R_ARM_ALU_SB_G2},
1769 {BFD_RELOC_ARM_LDR_SB_G0, R_ARM_LDR_SB_G0},
1770 {BFD_RELOC_ARM_LDR_SB_G1, R_ARM_LDR_SB_G1},
1771 {BFD_RELOC_ARM_LDR_SB_G2, R_ARM_LDR_SB_G2},
1772 {BFD_RELOC_ARM_LDRS_SB_G0, R_ARM_LDRS_SB_G0},
1773 {BFD_RELOC_ARM_LDRS_SB_G1, R_ARM_LDRS_SB_G1},
1774 {BFD_RELOC_ARM_LDRS_SB_G2, R_ARM_LDRS_SB_G2},
1775 {BFD_RELOC_ARM_LDC_SB_G0, R_ARM_LDC_SB_G0},
1776 {BFD_RELOC_ARM_LDC_SB_G1, R_ARM_LDC_SB_G1},
1777 {BFD_RELOC_ARM_LDC_SB_G2, R_ARM_LDC_SB_G2},
1778 {BFD_RELOC_ARM_V4BX, R_ARM_V4BX}
1779 };
1780
1781 static reloc_howto_type *
1782 elf32_arm_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1783 bfd_reloc_code_real_type code)
1784 {
1785 unsigned int i;
1786
1787 for (i = 0; i < ARRAY_SIZE (elf32_arm_reloc_map); i ++)
1788 if (elf32_arm_reloc_map[i].bfd_reloc_val == code)
1789 return elf32_arm_howto_from_type (elf32_arm_reloc_map[i].elf_reloc_val);
1790
1791 return NULL;
1792 }
1793
1794 static reloc_howto_type *
1795 elf32_arm_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1796 const char *r_name)
1797 {
1798 unsigned int i;
1799
1800 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_1); i++)
1801 if (elf32_arm_howto_table_1[i].name != NULL
1802 && strcasecmp (elf32_arm_howto_table_1[i].name, r_name) == 0)
1803 return &elf32_arm_howto_table_1[i];
1804
1805 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_2); i++)
1806 if (elf32_arm_howto_table_2[i].name != NULL
1807 && strcasecmp (elf32_arm_howto_table_2[i].name, r_name) == 0)
1808 return &elf32_arm_howto_table_2[i];
1809
1810 return NULL;
1811 }
1812
1813 /* Support for core dump NOTE sections. */
1814
1815 static bfd_boolean
1816 elf32_arm_nabi_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
1817 {
1818 int offset;
1819 size_t size;
1820
1821 switch (note->descsz)
1822 {
1823 default:
1824 return FALSE;
1825
1826 case 148: /* Linux/ARM 32-bit. */
1827 /* pr_cursig */
1828 elf_tdata (abfd)->core_signal = bfd_get_16 (abfd, note->descdata + 12);
1829
1830 /* pr_pid */
1831 elf_tdata (abfd)->core_pid = bfd_get_32 (abfd, note->descdata + 24);
1832
1833 /* pr_reg */
1834 offset = 72;
1835 size = 72;
1836
1837 break;
1838 }
1839
1840 /* Make a ".reg/999" section. */
1841 return _bfd_elfcore_make_pseudosection (abfd, ".reg",
1842 size, note->descpos + offset);
1843 }
1844
1845 static bfd_boolean
1846 elf32_arm_nabi_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
1847 {
1848 switch (note->descsz)
1849 {
1850 default:
1851 return FALSE;
1852
1853 case 124: /* Linux/ARM elf_prpsinfo. */
1854 elf_tdata (abfd)->core_program
1855 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
1856 elf_tdata (abfd)->core_command
1857 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
1858 }
1859
1860 /* Note that for some reason, a spurious space is tacked
1861 onto the end of the args in some (at least one anyway)
1862 implementations, so strip it off if it exists. */
1863 {
1864 char *command = elf_tdata (abfd)->core_command;
1865 int n = strlen (command);
1866
1867 if (0 < n && command[n - 1] == ' ')
1868 command[n - 1] = '\0';
1869 }
1870
1871 return TRUE;
1872 }
1873
1874 #define TARGET_LITTLE_SYM bfd_elf32_littlearm_vec
1875 #define TARGET_LITTLE_NAME "elf32-littlearm"
1876 #define TARGET_BIG_SYM bfd_elf32_bigarm_vec
1877 #define TARGET_BIG_NAME "elf32-bigarm"
1878
1879 #define elf_backend_grok_prstatus elf32_arm_nabi_grok_prstatus
1880 #define elf_backend_grok_psinfo elf32_arm_nabi_grok_psinfo
1881
1882 typedef unsigned long int insn32;
1883 typedef unsigned short int insn16;
1884
1885 /* In lieu of proper flags, assume all EABIv4 or later objects are
1886 interworkable. */
1887 #define INTERWORK_FLAG(abfd) \
1888 (EF_ARM_EABI_VERSION (elf_elfheader (abfd)->e_flags) >= EF_ARM_EABI_VER4 \
1889 || (elf_elfheader (abfd)->e_flags & EF_ARM_INTERWORK) \
1890 || ((abfd)->flags & BFD_LINKER_CREATED))
1891
1892 /* The linker script knows the section names for placement.
1893 The entry_names are used to do simple name mangling on the stubs.
1894 Given a function name, and its type, the stub can be found. The
1895 name can be changed. The only requirement is the %s be present. */
1896 #define THUMB2ARM_GLUE_SECTION_NAME ".glue_7t"
1897 #define THUMB2ARM_GLUE_ENTRY_NAME "__%s_from_thumb"
1898
1899 #define ARM2THUMB_GLUE_SECTION_NAME ".glue_7"
1900 #define ARM2THUMB_GLUE_ENTRY_NAME "__%s_from_arm"
1901
1902 #define VFP11_ERRATUM_VENEER_SECTION_NAME ".vfp11_veneer"
1903 #define VFP11_ERRATUM_VENEER_ENTRY_NAME "__vfp11_veneer_%x"
1904
1905 #define ARM_BX_GLUE_SECTION_NAME ".v4_bx"
1906 #define ARM_BX_GLUE_ENTRY_NAME "__bx_r%d"
1907
1908 #define STUB_ENTRY_NAME "__%s_veneer"
1909
1910 /* The name of the dynamic interpreter. This is put in the .interp
1911 section. */
1912 #define ELF_DYNAMIC_INTERPRETER "/usr/lib/ld.so.1"
1913
1914 #ifdef FOUR_WORD_PLT
1915
1916 /* The first entry in a procedure linkage table looks like
1917 this. It is set up so that any shared library function that is
1918 called before the relocation has been set up calls the dynamic
1919 linker first. */
1920 static const bfd_vma elf32_arm_plt0_entry [] =
1921 {
1922 0xe52de004, /* str lr, [sp, #-4]! */
1923 0xe59fe010, /* ldr lr, [pc, #16] */
1924 0xe08fe00e, /* add lr, pc, lr */
1925 0xe5bef008, /* ldr pc, [lr, #8]! */
1926 };
1927
1928 /* Subsequent entries in a procedure linkage table look like
1929 this. */
1930 static const bfd_vma elf32_arm_plt_entry [] =
1931 {
1932 0xe28fc600, /* add ip, pc, #NN */
1933 0xe28cca00, /* add ip, ip, #NN */
1934 0xe5bcf000, /* ldr pc, [ip, #NN]! */
1935 0x00000000, /* unused */
1936 };
1937
1938 #else
1939
1940 /* The first entry in a procedure linkage table looks like
1941 this. It is set up so that any shared library function that is
1942 called before the relocation has been set up calls the dynamic
1943 linker first. */
1944 static const bfd_vma elf32_arm_plt0_entry [] =
1945 {
1946 0xe52de004, /* str lr, [sp, #-4]! */
1947 0xe59fe004, /* ldr lr, [pc, #4] */
1948 0xe08fe00e, /* add lr, pc, lr */
1949 0xe5bef008, /* ldr pc, [lr, #8]! */
1950 0x00000000, /* &GOT[0] - . */
1951 };
1952
1953 /* Subsequent entries in a procedure linkage table look like
1954 this. */
1955 static const bfd_vma elf32_arm_plt_entry [] =
1956 {
1957 0xe28fc600, /* add ip, pc, #0xNN00000 */
1958 0xe28cca00, /* add ip, ip, #0xNN000 */
1959 0xe5bcf000, /* ldr pc, [ip, #0xNNN]! */
1960 };
1961
1962 #endif
1963
1964 /* The format of the first entry in the procedure linkage table
1965 for a VxWorks executable. */
1966 static const bfd_vma elf32_arm_vxworks_exec_plt0_entry[] =
1967 {
1968 0xe52dc008, /* str ip,[sp,#-8]! */
1969 0xe59fc000, /* ldr ip,[pc] */
1970 0xe59cf008, /* ldr pc,[ip,#8] */
1971 0x00000000, /* .long _GLOBAL_OFFSET_TABLE_ */
1972 };
1973
1974 /* The format of subsequent entries in a VxWorks executable. */
1975 static const bfd_vma elf32_arm_vxworks_exec_plt_entry[] =
1976 {
1977 0xe59fc000, /* ldr ip,[pc] */
1978 0xe59cf000, /* ldr pc,[ip] */
1979 0x00000000, /* .long @got */
1980 0xe59fc000, /* ldr ip,[pc] */
1981 0xea000000, /* b _PLT */
1982 0x00000000, /* .long @pltindex*sizeof(Elf32_Rela) */
1983 };
1984
1985 /* The format of entries in a VxWorks shared library. */
1986 static const bfd_vma elf32_arm_vxworks_shared_plt_entry[] =
1987 {
1988 0xe59fc000, /* ldr ip,[pc] */
1989 0xe79cf009, /* ldr pc,[ip,r9] */
1990 0x00000000, /* .long @got */
1991 0xe59fc000, /* ldr ip,[pc] */
1992 0xe599f008, /* ldr pc,[r9,#8] */
1993 0x00000000, /* .long @pltindex*sizeof(Elf32_Rela) */
1994 };
1995
1996 /* An initial stub used if the PLT entry is referenced from Thumb code. */
1997 #define PLT_THUMB_STUB_SIZE 4
1998 static const bfd_vma elf32_arm_plt_thumb_stub [] =
1999 {
2000 0x4778, /* bx pc */
2001 0x46c0 /* nop */
2002 };
2003
2004 /* The entries in a PLT when using a DLL-based target with multiple
2005 address spaces. */
2006 static const bfd_vma elf32_arm_symbian_plt_entry [] =
2007 {
2008 0xe51ff004, /* ldr pc, [pc, #-4] */
2009 0x00000000, /* dcd R_ARM_GLOB_DAT(X) */
2010 };
2011
2012 #define ARM_MAX_FWD_BRANCH_OFFSET ((((1 << 23) - 1) << 2) + 8)
2013 #define ARM_MAX_BWD_BRANCH_OFFSET ((-((1 << 23) << 2)) + 8)
2014 #define THM_MAX_FWD_BRANCH_OFFSET ((1 << 22) -2 + 4)
2015 #define THM_MAX_BWD_BRANCH_OFFSET (-(1 << 22) + 4)
2016 #define THM2_MAX_FWD_BRANCH_OFFSET (((1 << 24) - 2) + 4)
2017 #define THM2_MAX_BWD_BRANCH_OFFSET (-(1 << 24) + 4)
2018
2019 enum stub_insn_type
2020 {
2021 THUMB16_TYPE = 1,
2022 THUMB32_TYPE,
2023 ARM_TYPE,
2024 DATA_TYPE
2025 };
2026
2027 #define THUMB16_INSN(X) {(X), THUMB16_TYPE, R_ARM_NONE, 0}
2028 /* A bit of a hack. A Thumb conditional branch, in which the proper condition
2029 is inserted in arm_build_one_stub(). */
2030 #define THUMB16_BCOND_INSN(X) {(X), THUMB16_TYPE, R_ARM_NONE, 1}
2031 #define THUMB32_INSN(X) {(X), THUMB32_TYPE, R_ARM_NONE, 0}
2032 #define THUMB32_B_INSN(X, Z) {(X), THUMB32_TYPE, R_ARM_THM_JUMP24, (Z)}
2033 #define ARM_INSN(X) {(X), ARM_TYPE, R_ARM_NONE, 0}
2034 #define ARM_REL_INSN(X, Z) {(X), ARM_TYPE, R_ARM_JUMP24, (Z)}
2035 #define DATA_WORD(X,Y,Z) {(X), DATA_TYPE, (Y), (Z)}
2036
2037 typedef struct
2038 {
2039 bfd_vma data;
2040 enum stub_insn_type type;
2041 unsigned int r_type;
2042 int reloc_addend;
2043 } insn_sequence;
2044
2045 /* Arm/Thumb -> Arm/Thumb long branch stub. On V5T and above, use blx
2046 to reach the stub if necessary. */
2047 static const insn_sequence elf32_arm_stub_long_branch_any_any[] =
2048 {
2049 ARM_INSN(0xe51ff004), /* ldr pc, [pc, #-4] */
2050 DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2051 };
2052
2053 /* V4T Arm -> Thumb long branch stub. Used on V4T where blx is not
2054 available. */
2055 static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb[] =
2056 {
2057 ARM_INSN(0xe59fc000), /* ldr ip, [pc, #0] */
2058 ARM_INSN(0xe12fff1c), /* bx ip */
2059 DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2060 };
2061
2062 /* Thumb -> Thumb long branch stub. Used on M-profile architectures. */
2063 static const insn_sequence elf32_arm_stub_long_branch_thumb_only[] =
2064 {
2065 THUMB16_INSN(0xb401), /* push {r0} */
2066 THUMB16_INSN(0x4802), /* ldr r0, [pc, #8] */
2067 THUMB16_INSN(0x4684), /* mov ip, r0 */
2068 THUMB16_INSN(0xbc01), /* pop {r0} */
2069 THUMB16_INSN(0x4760), /* bx ip */
2070 THUMB16_INSN(0xbf00), /* nop */
2071 DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2072 };
2073
2074 /* V4T Thumb -> Thumb long branch stub. Using the stack is not
2075 allowed. */
2076 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb[] =
2077 {
2078 THUMB16_INSN(0x4778), /* bx pc */
2079 THUMB16_INSN(0x46c0), /* nop */
2080 ARM_INSN(0xe59fc000), /* ldr ip, [pc, #0] */
2081 ARM_INSN(0xe12fff1c), /* bx ip */
2082 DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2083 };
2084
2085 /* V4T Thumb -> ARM long branch stub. Used on V4T where blx is not
2086 available. */
2087 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm[] =
2088 {
2089 THUMB16_INSN(0x4778), /* bx pc */
2090 THUMB16_INSN(0x46c0), /* nop */
2091 ARM_INSN(0xe51ff004), /* ldr pc, [pc, #-4] */
2092 DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2093 };
2094
2095 /* V4T Thumb -> ARM short branch stub. Shorter variant of the above
2096 one, when the destination is close enough. */
2097 static const insn_sequence elf32_arm_stub_short_branch_v4t_thumb_arm[] =
2098 {
2099 THUMB16_INSN(0x4778), /* bx pc */
2100 THUMB16_INSN(0x46c0), /* nop */
2101 ARM_REL_INSN(0xea000000, -8), /* b (X-8) */
2102 };
2103
2104 /* ARM/Thumb -> ARM long branch stub, PIC. On V5T and above, use
2105 blx to reach the stub if necessary. */
2106 static const insn_sequence elf32_arm_stub_long_branch_any_arm_pic[] =
2107 {
2108 ARM_INSN(0xe59fc000), /* ldr r12, [pc] */
2109 ARM_INSN(0xe08ff00c), /* add pc, pc, ip */
2110 DATA_WORD(0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */
2111 };
2112
2113 /* ARM/Thumb -> Thumb long branch stub, PIC. On V5T and above, use
2114 blx to reach the stub if necessary. We can not add into pc;
2115 it is not guaranteed to mode switch (different in ARMv6 and
2116 ARMv7). */
2117 static const insn_sequence elf32_arm_stub_long_branch_any_thumb_pic[] =
2118 {
2119 ARM_INSN(0xe59fc004), /* ldr r12, [pc, #4] */
2120 ARM_INSN(0xe08fc00c), /* add ip, pc, ip */
2121 ARM_INSN(0xe12fff1c), /* bx ip */
2122 DATA_WORD(0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2123 };
2124
2125 /* V4T ARM -> ARM long branch stub, PIC. */
2126 static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb_pic[] =
2127 {
2128 ARM_INSN(0xe59fc004), /* ldr ip, [pc, #4] */
2129 ARM_INSN(0xe08fc00c), /* add ip, pc, ip */
2130 ARM_INSN(0xe12fff1c), /* bx ip */
2131 DATA_WORD(0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2132 };
2133
2134 /* V4T Thumb -> ARM long branch stub, PIC. */
2135 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm_pic[] =
2136 {
2137 THUMB16_INSN(0x4778), /* bx pc */
2138 THUMB16_INSN(0x46c0), /* nop */
2139 ARM_INSN(0xe59fc000), /* ldr ip, [pc, #0] */
2140 ARM_INSN(0xe08cf00f), /* add pc, ip, pc */
2141 DATA_WORD(0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X) */
2142 };
2143
2144 /* Thumb -> Thumb long branch stub, PIC. Used on M-profile
2145 architectures. */
2146 static const insn_sequence elf32_arm_stub_long_branch_thumb_only_pic[] =
2147 {
2148 THUMB16_INSN(0xb401), /* push {r0} */
2149 THUMB16_INSN(0x4802), /* ldr r0, [pc, #8] */
2150 THUMB16_INSN(0x46fc), /* mov ip, pc */
2151 THUMB16_INSN(0x4484), /* add ip, r0 */
2152 THUMB16_INSN(0xbc01), /* pop {r0} */
2153 THUMB16_INSN(0x4760), /* bx ip */
2154 DATA_WORD(0, R_ARM_REL32, 4), /* dcd R_ARM_REL32(X) */
2155 };
2156
2157 /* V4T Thumb -> Thumb long branch stub, PIC. Using the stack is not
2158 allowed. */
2159 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb_pic[] =
2160 {
2161 THUMB16_INSN(0x4778), /* bx pc */
2162 THUMB16_INSN(0x46c0), /* nop */
2163 ARM_INSN(0xe59fc004), /* ldr ip, [pc, #4] */
2164 ARM_INSN(0xe08fc00c), /* add ip, pc, ip */
2165 ARM_INSN(0xe12fff1c), /* bx ip */
2166 DATA_WORD(0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2167 };
2168
2169 /* Cortex-A8 erratum-workaround stubs. */
2170
2171 /* Stub used for conditional branches (which may be beyond +/-1MB away, so we
2172 can't use a conditional branch to reach this stub). */
2173
2174 static const insn_sequence elf32_arm_stub_a8_veneer_b_cond[] =
2175 {
2176 THUMB16_BCOND_INSN(0xd001), /* b<cond>.n true. */
2177 THUMB32_B_INSN(0xf000b800, -4), /* b.w insn_after_original_branch. */
2178 THUMB32_B_INSN(0xf000b800, -4) /* true: b.w original_branch_dest. */
2179 };
2180
2181 /* Stub used for b.w and bl.w instructions. */
2182
2183 static const insn_sequence elf32_arm_stub_a8_veneer_b[] =
2184 {
2185 THUMB32_B_INSN(0xf000b800, -4) /* b.w original_branch_dest. */
2186 };
2187
2188 static const insn_sequence elf32_arm_stub_a8_veneer_bl[] =
2189 {
2190 THUMB32_B_INSN(0xf000b800, -4) /* b.w original_branch_dest. */
2191 };
2192
2193 /* Stub used for Thumb-2 blx.w instructions. We modified the original blx.w
2194 instruction (which switches to ARM mode) to point to this stub. Jump to the
2195 real destination using an ARM-mode branch. */
2196
2197 static const insn_sequence elf32_arm_stub_a8_veneer_blx[] =
2198 {
2199 ARM_REL_INSN(0xea000000, -8) /* b original_branch_dest. */
2200 };
2201
2202 /* Section name for stubs is the associated section name plus this
2203 string. */
2204 #define STUB_SUFFIX ".stub"
2205
2206 /* One entry per long/short branch stub defined above. */
2207 #define DEF_STUBS \
2208 DEF_STUB(long_branch_any_any) \
2209 DEF_STUB(long_branch_v4t_arm_thumb) \
2210 DEF_STUB(long_branch_thumb_only) \
2211 DEF_STUB(long_branch_v4t_thumb_thumb) \
2212 DEF_STUB(long_branch_v4t_thumb_arm) \
2213 DEF_STUB(short_branch_v4t_thumb_arm) \
2214 DEF_STUB(long_branch_any_arm_pic) \
2215 DEF_STUB(long_branch_any_thumb_pic) \
2216 DEF_STUB(long_branch_v4t_thumb_thumb_pic) \
2217 DEF_STUB(long_branch_v4t_arm_thumb_pic) \
2218 DEF_STUB(long_branch_v4t_thumb_arm_pic) \
2219 DEF_STUB(long_branch_thumb_only_pic) \
2220 DEF_STUB(a8_veneer_b_cond) \
2221 DEF_STUB(a8_veneer_b) \
2222 DEF_STUB(a8_veneer_bl) \
2223 DEF_STUB(a8_veneer_blx)
2224
2225 #define DEF_STUB(x) arm_stub_##x,
2226 enum elf32_arm_stub_type {
2227 arm_stub_none,
2228 DEF_STUBS
2229 /* Note the first a8_veneer type */
2230 arm_stub_a8_veneer_lwm = arm_stub_a8_veneer_b_cond
2231 };
2232 #undef DEF_STUB
2233
2234 typedef struct
2235 {
2236 const insn_sequence* template_sequence;
2237 int template_size;
2238 } stub_def;
2239
2240 #define DEF_STUB(x) {elf32_arm_stub_##x, ARRAY_SIZE(elf32_arm_stub_##x)},
2241 static const stub_def stub_definitions[] = {
2242 {NULL, 0},
2243 DEF_STUBS
2244 };
2245
2246 struct elf32_arm_stub_hash_entry
2247 {
2248 /* Base hash table entry structure. */
2249 struct bfd_hash_entry root;
2250
2251 /* The stub section. */
2252 asection *stub_sec;
2253
2254 /* Offset within stub_sec of the beginning of this stub. */
2255 bfd_vma stub_offset;
2256
2257 /* Given the symbol's value and its section we can determine its final
2258 value when building the stubs (so the stub knows where to jump). */
2259 bfd_vma target_value;
2260 asection *target_section;
2261
2262 /* Offset to apply to relocation referencing target_value. */
2263 bfd_vma target_addend;
2264
2265 /* The instruction which caused this stub to be generated (only valid for
2266 Cortex-A8 erratum workaround stubs at present). */
2267 unsigned long orig_insn;
2268
2269 /* The stub type. */
2270 enum elf32_arm_stub_type stub_type;
2271 /* Its encoding size in bytes. */
2272 int stub_size;
2273 /* Its template. */
2274 const insn_sequence *stub_template;
2275 /* The size of the template (number of entries). */
2276 int stub_template_size;
2277
2278 /* The symbol table entry, if any, that this was derived from. */
2279 struct elf32_arm_link_hash_entry *h;
2280
2281 /* Destination symbol type (STT_ARM_TFUNC, ...) */
2282 unsigned char st_type;
2283
2284 /* Where this stub is being called from, or, in the case of combined
2285 stub sections, the first input section in the group. */
2286 asection *id_sec;
2287
2288 /* The name for the local symbol at the start of this stub. The
2289 stub name in the hash table has to be unique; this does not, so
2290 it can be friendlier. */
2291 char *output_name;
2292 };
2293
2294 /* Used to build a map of a section. This is required for mixed-endian
2295 code/data. */
2296
2297 typedef struct elf32_elf_section_map
2298 {
2299 bfd_vma vma;
2300 char type;
2301 }
2302 elf32_arm_section_map;
2303
2304 /* Information about a VFP11 erratum veneer, or a branch to such a veneer. */
2305
2306 typedef enum
2307 {
2308 VFP11_ERRATUM_BRANCH_TO_ARM_VENEER,
2309 VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER,
2310 VFP11_ERRATUM_ARM_VENEER,
2311 VFP11_ERRATUM_THUMB_VENEER
2312 }
2313 elf32_vfp11_erratum_type;
2314
2315 typedef struct elf32_vfp11_erratum_list
2316 {
2317 struct elf32_vfp11_erratum_list *next;
2318 bfd_vma vma;
2319 union
2320 {
2321 struct
2322 {
2323 struct elf32_vfp11_erratum_list *veneer;
2324 unsigned int vfp_insn;
2325 } b;
2326 struct
2327 {
2328 struct elf32_vfp11_erratum_list *branch;
2329 unsigned int id;
2330 } v;
2331 } u;
2332 elf32_vfp11_erratum_type type;
2333 }
2334 elf32_vfp11_erratum_list;
2335
2336 typedef enum
2337 {
2338 DELETE_EXIDX_ENTRY,
2339 INSERT_EXIDX_CANTUNWIND_AT_END
2340 }
2341 arm_unwind_edit_type;
2342
2343 /* A (sorted) list of edits to apply to an unwind table. */
2344 typedef struct arm_unwind_table_edit
2345 {
2346 arm_unwind_edit_type type;
2347 /* Note: we sometimes want to insert an unwind entry corresponding to a
2348 section different from the one we're currently writing out, so record the
2349 (text) section this edit relates to here. */
2350 asection *linked_section;
2351 unsigned int index;
2352 struct arm_unwind_table_edit *next;
2353 }
2354 arm_unwind_table_edit;
2355
2356 typedef struct _arm_elf_section_data
2357 {
2358 /* Information about mapping symbols. */
2359 struct bfd_elf_section_data elf;
2360 unsigned int mapcount;
2361 unsigned int mapsize;
2362 elf32_arm_section_map *map;
2363 /* Information about CPU errata. */
2364 unsigned int erratumcount;
2365 elf32_vfp11_erratum_list *erratumlist;
2366 /* Information about unwind tables. */
2367 union
2368 {
2369 /* Unwind info attached to a text section. */
2370 struct
2371 {
2372 asection *arm_exidx_sec;
2373 } text;
2374
2375 /* Unwind info attached to an .ARM.exidx section. */
2376 struct
2377 {
2378 arm_unwind_table_edit *unwind_edit_list;
2379 arm_unwind_table_edit *unwind_edit_tail;
2380 } exidx;
2381 } u;
2382 }
2383 _arm_elf_section_data;
2384
2385 #define elf32_arm_section_data(sec) \
2386 ((_arm_elf_section_data *) elf_section_data (sec))
2387
2388 /* A fix which might be required for Cortex-A8 Thumb-2 branch/TLB erratum.
2389 These fixes are subject to a relaxation procedure (in elf32_arm_size_stubs),
2390 so may be created multiple times: we use an array of these entries whilst
2391 relaxing which we can refresh easily, then create stubs for each potentially
2392 erratum-triggering instruction once we've settled on a solution. */
2393
2394 struct a8_erratum_fix {
2395 bfd *input_bfd;
2396 asection *section;
2397 bfd_vma offset;
2398 bfd_vma addend;
2399 unsigned long orig_insn;
2400 char *stub_name;
2401 enum elf32_arm_stub_type stub_type;
2402 };
2403
2404 /* A table of relocs applied to branches which might trigger Cortex-A8
2405 erratum. */
2406
2407 struct a8_erratum_reloc {
2408 bfd_vma from;
2409 bfd_vma destination;
2410 unsigned int r_type;
2411 unsigned char st_type;
2412 const char *sym_name;
2413 bfd_boolean non_a8_stub;
2414 };
2415
2416 /* The size of the thread control block. */
2417 #define TCB_SIZE 8
2418
2419 struct elf_arm_obj_tdata
2420 {
2421 struct elf_obj_tdata root;
2422
2423 /* tls_type for each local got entry. */
2424 char *local_got_tls_type;
2425
2426 /* Zero to warn when linking objects with incompatible enum sizes. */
2427 int no_enum_size_warning;
2428
2429 /* Zero to warn when linking objects with incompatible wchar_t sizes. */
2430 int no_wchar_size_warning;
2431 };
2432
2433 #define elf_arm_tdata(bfd) \
2434 ((struct elf_arm_obj_tdata *) (bfd)->tdata.any)
2435
2436 #define elf32_arm_local_got_tls_type(bfd) \
2437 (elf_arm_tdata (bfd)->local_got_tls_type)
2438
2439 #define is_arm_elf(bfd) \
2440 (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
2441 && elf_tdata (bfd) != NULL \
2442 && elf_object_id (bfd) == ARM_ELF_DATA)
2443
2444 static bfd_boolean
2445 elf32_arm_mkobject (bfd *abfd)
2446 {
2447 return bfd_elf_allocate_object (abfd, sizeof (struct elf_arm_obj_tdata),
2448 ARM_ELF_DATA);
2449 }
2450
2451 /* The ARM linker needs to keep track of the number of relocs that it
2452 decides to copy in check_relocs for each symbol. This is so that
2453 it can discard PC relative relocs if it doesn't need them when
2454 linking with -Bsymbolic. We store the information in a field
2455 extending the regular ELF linker hash table. */
2456
2457 /* This structure keeps track of the number of relocs we have copied
2458 for a given symbol. */
2459 struct elf32_arm_relocs_copied
2460 {
2461 /* Next section. */
2462 struct elf32_arm_relocs_copied * next;
2463 /* A section in dynobj. */
2464 asection * section;
2465 /* Number of relocs copied in this section. */
2466 bfd_size_type count;
2467 /* Number of PC-relative relocs copied in this section. */
2468 bfd_size_type pc_count;
2469 };
2470
2471 #define elf32_arm_hash_entry(ent) ((struct elf32_arm_link_hash_entry *)(ent))
2472
2473 /* Arm ELF linker hash entry. */
2474 struct elf32_arm_link_hash_entry
2475 {
2476 struct elf_link_hash_entry root;
2477
2478 /* Number of PC relative relocs copied for this symbol. */
2479 struct elf32_arm_relocs_copied * relocs_copied;
2480
2481 /* We reference count Thumb references to a PLT entry separately,
2482 so that we can emit the Thumb trampoline only if needed. */
2483 bfd_signed_vma plt_thumb_refcount;
2484
2485 /* Some references from Thumb code may be eliminated by BL->BLX
2486 conversion, so record them separately. */
2487 bfd_signed_vma plt_maybe_thumb_refcount;
2488
2489 /* Since PLT entries have variable size if the Thumb prologue is
2490 used, we need to record the index into .got.plt instead of
2491 recomputing it from the PLT offset. */
2492 bfd_signed_vma plt_got_offset;
2493
2494 #define GOT_UNKNOWN 0
2495 #define GOT_NORMAL 1
2496 #define GOT_TLS_GD 2
2497 #define GOT_TLS_IE 4
2498 unsigned char tls_type;
2499
2500 /* The symbol marking the real symbol location for exported thumb
2501 symbols with Arm stubs. */
2502 struct elf_link_hash_entry *export_glue;
2503
2504 /* A pointer to the most recently used stub hash entry against this
2505 symbol. */
2506 struct elf32_arm_stub_hash_entry *stub_cache;
2507 };
2508
2509 /* Traverse an arm ELF linker hash table. */
2510 #define elf32_arm_link_hash_traverse(table, func, info) \
2511 (elf_link_hash_traverse \
2512 (&(table)->root, \
2513 (bfd_boolean (*) (struct elf_link_hash_entry *, void *)) (func), \
2514 (info)))
2515
2516 /* Get the ARM elf linker hash table from a link_info structure. */
2517 #define elf32_arm_hash_table(info) \
2518 (elf_hash_table_id ((struct elf_link_hash_table *) ((info)->hash)) \
2519 == ARM_ELF_DATA ? ((struct elf32_arm_link_hash_table *) ((info)->hash)) : NULL)
2520
2521 #define arm_stub_hash_lookup(table, string, create, copy) \
2522 ((struct elf32_arm_stub_hash_entry *) \
2523 bfd_hash_lookup ((table), (string), (create), (copy)))
2524
2525 /* Array to keep track of which stub sections have been created, and
2526 information on stub grouping. */
2527 struct map_stub
2528 {
2529 /* This is the section to which stubs in the group will be
2530 attached. */
2531 asection *link_sec;
2532 /* The stub section. */
2533 asection *stub_sec;
2534 };
2535
2536 /* ARM ELF linker hash table. */
2537 struct elf32_arm_link_hash_table
2538 {
2539 /* The main hash table. */
2540 struct elf_link_hash_table root;
2541
2542 /* The size in bytes of the section containing the Thumb-to-ARM glue. */
2543 bfd_size_type thumb_glue_size;
2544
2545 /* The size in bytes of the section containing the ARM-to-Thumb glue. */
2546 bfd_size_type arm_glue_size;
2547
2548 /* The size in bytes of section containing the ARMv4 BX veneers. */
2549 bfd_size_type bx_glue_size;
2550
2551 /* Offsets of ARMv4 BX veneers. Bit1 set if present, and Bit0 set when
2552 veneer has been populated. */
2553 bfd_vma bx_glue_offset[15];
2554
2555 /* The size in bytes of the section containing glue for VFP11 erratum
2556 veneers. */
2557 bfd_size_type vfp11_erratum_glue_size;
2558
2559 /* A table of fix locations for Cortex-A8 Thumb-2 branch/TLB erratum. This
2560 holds Cortex-A8 erratum fix locations between elf32_arm_size_stubs() and
2561 elf32_arm_write_section(). */
2562 struct a8_erratum_fix *a8_erratum_fixes;
2563 unsigned int num_a8_erratum_fixes;
2564
2565 /* An arbitrary input BFD chosen to hold the glue sections. */
2566 bfd * bfd_of_glue_owner;
2567
2568 /* Nonzero to output a BE8 image. */
2569 int byteswap_code;
2570
2571 /* Zero if R_ARM_TARGET1 means R_ARM_ABS32.
2572 Nonzero if R_ARM_TARGET1 means R_ARM_REL32. */
2573 int target1_is_rel;
2574
2575 /* The relocation to use for R_ARM_TARGET2 relocations. */
2576 int target2_reloc;
2577
2578 /* 0 = Ignore R_ARM_V4BX.
2579 1 = Convert BX to MOV PC.
2580 2 = Generate v4 interworing stubs. */
2581 int fix_v4bx;
2582
2583 /* Whether we should fix the Cortex-A8 Thumb-2 branch/TLB erratum. */
2584 int fix_cortex_a8;
2585
2586 /* Nonzero if the ARM/Thumb BLX instructions are available for use. */
2587 int use_blx;
2588
2589 /* What sort of code sequences we should look for which may trigger the
2590 VFP11 denorm erratum. */
2591 bfd_arm_vfp11_fix vfp11_fix;
2592
2593 /* Global counter for the number of fixes we have emitted. */
2594 int num_vfp11_fixes;
2595
2596 /* Nonzero to force PIC branch veneers. */
2597 int pic_veneer;
2598
2599 /* The number of bytes in the initial entry in the PLT. */
2600 bfd_size_type plt_header_size;
2601
2602 /* The number of bytes in the subsequent PLT etries. */
2603 bfd_size_type plt_entry_size;
2604
2605 /* True if the target system is VxWorks. */
2606 int vxworks_p;
2607
2608 /* True if the target system is Symbian OS. */
2609 int symbian_p;
2610
2611 /* True if the target uses REL relocations. */
2612 int use_rel;
2613
2614 /* Short-cuts to get to dynamic linker sections. */
2615 asection *sgot;
2616 asection *sgotplt;
2617 asection *srelgot;
2618 asection *splt;
2619 asection *srelplt;
2620 asection *sdynbss;
2621 asection *srelbss;
2622
2623 /* The (unloaded but important) VxWorks .rela.plt.unloaded section. */
2624 asection *srelplt2;
2625
2626 /* Data for R_ARM_TLS_LDM32 relocations. */
2627 union
2628 {
2629 bfd_signed_vma refcount;
2630 bfd_vma offset;
2631 } tls_ldm_got;
2632
2633 /* Small local sym cache. */
2634 struct sym_cache sym_cache;
2635
2636 /* For convenience in allocate_dynrelocs. */
2637 bfd * obfd;
2638
2639 /* The stub hash table. */
2640 struct bfd_hash_table stub_hash_table;
2641
2642 /* Linker stub bfd. */
2643 bfd *stub_bfd;
2644
2645 /* Linker call-backs. */
2646 asection * (*add_stub_section) (const char *, asection *);
2647 void (*layout_sections_again) (void);
2648
2649 /* Array to keep track of which stub sections have been created, and
2650 information on stub grouping. */
2651 struct map_stub *stub_group;
2652
2653 /* Assorted information used by elf32_arm_size_stubs. */
2654 unsigned int bfd_count;
2655 int top_index;
2656 asection **input_list;
2657 };
2658
2659 /* Create an entry in an ARM ELF linker hash table. */
2660
2661 static struct bfd_hash_entry *
2662 elf32_arm_link_hash_newfunc (struct bfd_hash_entry * entry,
2663 struct bfd_hash_table * table,
2664 const char * string)
2665 {
2666 struct elf32_arm_link_hash_entry * ret =
2667 (struct elf32_arm_link_hash_entry *) entry;
2668
2669 /* Allocate the structure if it has not already been allocated by a
2670 subclass. */
2671 if (ret == NULL)
2672 ret = (struct elf32_arm_link_hash_entry *)
2673 bfd_hash_allocate (table, sizeof (struct elf32_arm_link_hash_entry));
2674 if (ret == NULL)
2675 return (struct bfd_hash_entry *) ret;
2676
2677 /* Call the allocation method of the superclass. */
2678 ret = ((struct elf32_arm_link_hash_entry *)
2679 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret,
2680 table, string));
2681 if (ret != NULL)
2682 {
2683 ret->relocs_copied = NULL;
2684 ret->tls_type = GOT_UNKNOWN;
2685 ret->plt_thumb_refcount = 0;
2686 ret->plt_maybe_thumb_refcount = 0;
2687 ret->plt_got_offset = -1;
2688 ret->export_glue = NULL;
2689
2690 ret->stub_cache = NULL;
2691 }
2692
2693 return (struct bfd_hash_entry *) ret;
2694 }
2695
2696 /* Initialize an entry in the stub hash table. */
2697
2698 static struct bfd_hash_entry *
2699 stub_hash_newfunc (struct bfd_hash_entry *entry,
2700 struct bfd_hash_table *table,
2701 const char *string)
2702 {
2703 /* Allocate the structure if it has not already been allocated by a
2704 subclass. */
2705 if (entry == NULL)
2706 {
2707 entry = (struct bfd_hash_entry *)
2708 bfd_hash_allocate (table, sizeof (struct elf32_arm_stub_hash_entry));
2709 if (entry == NULL)
2710 return entry;
2711 }
2712
2713 /* Call the allocation method of the superclass. */
2714 entry = bfd_hash_newfunc (entry, table, string);
2715 if (entry != NULL)
2716 {
2717 struct elf32_arm_stub_hash_entry *eh;
2718
2719 /* Initialize the local fields. */
2720 eh = (struct elf32_arm_stub_hash_entry *) entry;
2721 eh->stub_sec = NULL;
2722 eh->stub_offset = 0;
2723 eh->target_value = 0;
2724 eh->target_section = NULL;
2725 eh->target_addend = 0;
2726 eh->orig_insn = 0;
2727 eh->stub_type = arm_stub_none;
2728 eh->stub_size = 0;
2729 eh->stub_template = NULL;
2730 eh->stub_template_size = 0;
2731 eh->h = NULL;
2732 eh->id_sec = NULL;
2733 eh->output_name = NULL;
2734 }
2735
2736 return entry;
2737 }
2738
2739 /* Create .got, .gotplt, and .rel(a).got sections in DYNOBJ, and set up
2740 shortcuts to them in our hash table. */
2741
2742 static bfd_boolean
2743 create_got_section (bfd *dynobj, struct bfd_link_info *info)
2744 {
2745 struct elf32_arm_link_hash_table *htab;
2746
2747 htab = elf32_arm_hash_table (info);
2748 if (htab == NULL)
2749 return FALSE;
2750
2751 /* BPABI objects never have a GOT, or associated sections. */
2752 if (htab->symbian_p)
2753 return TRUE;
2754
2755 if (! _bfd_elf_create_got_section (dynobj, info))
2756 return FALSE;
2757
2758 htab->sgot = bfd_get_section_by_name (dynobj, ".got");
2759 htab->sgotplt = bfd_get_section_by_name (dynobj, ".got.plt");
2760 if (!htab->sgot || !htab->sgotplt)
2761 abort ();
2762
2763 htab->srelgot = bfd_get_section_by_name (dynobj,
2764 RELOC_SECTION (htab, ".got"));
2765 if (htab->srelgot == NULL)
2766 return FALSE;
2767 return TRUE;
2768 }
2769
2770 /* Create .plt, .rel(a).plt, .got, .got.plt, .rel(a).got, .dynbss, and
2771 .rel(a).bss sections in DYNOBJ, and set up shortcuts to them in our
2772 hash table. */
2773
2774 static bfd_boolean
2775 elf32_arm_create_dynamic_sections (bfd *dynobj, struct bfd_link_info *info)
2776 {
2777 struct elf32_arm_link_hash_table *htab;
2778
2779 htab = elf32_arm_hash_table (info);
2780 if (htab == NULL)
2781 return FALSE;
2782
2783 if (!htab->sgot && !create_got_section (dynobj, info))
2784 return FALSE;
2785
2786 if (!_bfd_elf_create_dynamic_sections (dynobj, info))
2787 return FALSE;
2788
2789 htab->splt = bfd_get_section_by_name (dynobj, ".plt");
2790 htab->srelplt = bfd_get_section_by_name (dynobj,
2791 RELOC_SECTION (htab, ".plt"));
2792 htab->sdynbss = bfd_get_section_by_name (dynobj, ".dynbss");
2793 if (!info->shared)
2794 htab->srelbss = bfd_get_section_by_name (dynobj,
2795 RELOC_SECTION (htab, ".bss"));
2796
2797 if (htab->vxworks_p)
2798 {
2799 if (!elf_vxworks_create_dynamic_sections (dynobj, info, &htab->srelplt2))
2800 return FALSE;
2801
2802 if (info->shared)
2803 {
2804 htab->plt_header_size = 0;
2805 htab->plt_entry_size
2806 = 4 * ARRAY_SIZE (elf32_arm_vxworks_shared_plt_entry);
2807 }
2808 else
2809 {
2810 htab->plt_header_size
2811 = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt0_entry);
2812 htab->plt_entry_size
2813 = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt_entry);
2814 }
2815 }
2816
2817 if (!htab->splt
2818 || !htab->srelplt
2819 || !htab->sdynbss
2820 || (!info->shared && !htab->srelbss))
2821 abort ();
2822
2823 return TRUE;
2824 }
2825
2826 /* Copy the extra info we tack onto an elf_link_hash_entry. */
2827
2828 static void
2829 elf32_arm_copy_indirect_symbol (struct bfd_link_info *info,
2830 struct elf_link_hash_entry *dir,
2831 struct elf_link_hash_entry *ind)
2832 {
2833 struct elf32_arm_link_hash_entry *edir, *eind;
2834
2835 edir = (struct elf32_arm_link_hash_entry *) dir;
2836 eind = (struct elf32_arm_link_hash_entry *) ind;
2837
2838 if (eind->relocs_copied != NULL)
2839 {
2840 if (edir->relocs_copied != NULL)
2841 {
2842 struct elf32_arm_relocs_copied **pp;
2843 struct elf32_arm_relocs_copied *p;
2844
2845 /* Add reloc counts against the indirect sym to the direct sym
2846 list. Merge any entries against the same section. */
2847 for (pp = &eind->relocs_copied; (p = *pp) != NULL; )
2848 {
2849 struct elf32_arm_relocs_copied *q;
2850
2851 for (q = edir->relocs_copied; q != NULL; q = q->next)
2852 if (q->section == p->section)
2853 {
2854 q->pc_count += p->pc_count;
2855 q->count += p->count;
2856 *pp = p->next;
2857 break;
2858 }
2859 if (q == NULL)
2860 pp = &p->next;
2861 }
2862 *pp = edir->relocs_copied;
2863 }
2864
2865 edir->relocs_copied = eind->relocs_copied;
2866 eind->relocs_copied = NULL;
2867 }
2868
2869 if (ind->root.type == bfd_link_hash_indirect)
2870 {
2871 /* Copy over PLT info. */
2872 edir->plt_thumb_refcount += eind->plt_thumb_refcount;
2873 eind->plt_thumb_refcount = 0;
2874 edir->plt_maybe_thumb_refcount += eind->plt_maybe_thumb_refcount;
2875 eind->plt_maybe_thumb_refcount = 0;
2876
2877 if (dir->got.refcount <= 0)
2878 {
2879 edir->tls_type = eind->tls_type;
2880 eind->tls_type = GOT_UNKNOWN;
2881 }
2882 }
2883
2884 _bfd_elf_link_hash_copy_indirect (info, dir, ind);
2885 }
2886
2887 /* Create an ARM elf linker hash table. */
2888
2889 static struct bfd_link_hash_table *
2890 elf32_arm_link_hash_table_create (bfd *abfd)
2891 {
2892 struct elf32_arm_link_hash_table *ret;
2893 bfd_size_type amt = sizeof (struct elf32_arm_link_hash_table);
2894
2895 ret = (struct elf32_arm_link_hash_table *) bfd_malloc (amt);
2896 if (ret == NULL)
2897 return NULL;
2898
2899 if (!_bfd_elf_link_hash_table_init (& ret->root, abfd,
2900 elf32_arm_link_hash_newfunc,
2901 sizeof (struct elf32_arm_link_hash_entry),
2902 ARM_ELF_DATA))
2903 {
2904 free (ret);
2905 return NULL;
2906 }
2907
2908 ret->sgot = NULL;
2909 ret->sgotplt = NULL;
2910 ret->srelgot = NULL;
2911 ret->splt = NULL;
2912 ret->srelplt = NULL;
2913 ret->sdynbss = NULL;
2914 ret->srelbss = NULL;
2915 ret->srelplt2 = NULL;
2916 ret->thumb_glue_size = 0;
2917 ret->arm_glue_size = 0;
2918 ret->bx_glue_size = 0;
2919 memset (ret->bx_glue_offset, 0, sizeof (ret->bx_glue_offset));
2920 ret->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
2921 ret->vfp11_erratum_glue_size = 0;
2922 ret->num_vfp11_fixes = 0;
2923 ret->fix_cortex_a8 = 0;
2924 ret->bfd_of_glue_owner = NULL;
2925 ret->byteswap_code = 0;
2926 ret->target1_is_rel = 0;
2927 ret->target2_reloc = R_ARM_NONE;
2928 #ifdef FOUR_WORD_PLT
2929 ret->plt_header_size = 16;
2930 ret->plt_entry_size = 16;
2931 #else
2932 ret->plt_header_size = 20;
2933 ret->plt_entry_size = 12;
2934 #endif
2935 ret->fix_v4bx = 0;
2936 ret->use_blx = 0;
2937 ret->vxworks_p = 0;
2938 ret->symbian_p = 0;
2939 ret->use_rel = 1;
2940 ret->sym_cache.abfd = NULL;
2941 ret->obfd = abfd;
2942 ret->tls_ldm_got.refcount = 0;
2943 ret->stub_bfd = NULL;
2944 ret->add_stub_section = NULL;
2945 ret->layout_sections_again = NULL;
2946 ret->stub_group = NULL;
2947 ret->bfd_count = 0;
2948 ret->top_index = 0;
2949 ret->input_list = NULL;
2950
2951 if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc,
2952 sizeof (struct elf32_arm_stub_hash_entry)))
2953 {
2954 free (ret);
2955 return NULL;
2956 }
2957
2958 return &ret->root.root;
2959 }
2960
2961 /* Free the derived linker hash table. */
2962
2963 static void
2964 elf32_arm_hash_table_free (struct bfd_link_hash_table *hash)
2965 {
2966 struct elf32_arm_link_hash_table *ret
2967 = (struct elf32_arm_link_hash_table *) hash;
2968
2969 bfd_hash_table_free (&ret->stub_hash_table);
2970 _bfd_generic_link_hash_table_free (hash);
2971 }
2972
2973 /* Determine if we're dealing with a Thumb only architecture. */
2974
2975 static bfd_boolean
2976 using_thumb_only (struct elf32_arm_link_hash_table *globals)
2977 {
2978 int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
2979 Tag_CPU_arch);
2980 int profile;
2981
2982 if (arch != TAG_CPU_ARCH_V7 && arch != TAG_CPU_ARCH_V7E_M)
2983 return FALSE;
2984
2985 profile = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
2986 Tag_CPU_arch_profile);
2987
2988 return profile == 'M';
2989 }
2990
2991 /* Determine if we're dealing with a Thumb-2 object. */
2992
2993 static bfd_boolean
2994 using_thumb2 (struct elf32_arm_link_hash_table *globals)
2995 {
2996 int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
2997 Tag_CPU_arch);
2998 return arch == TAG_CPU_ARCH_V6T2 || arch >= TAG_CPU_ARCH_V7;
2999 }
3000
3001 /* Determine what kind of NOPs are available. */
3002
3003 static bfd_boolean
3004 arch_has_arm_nop (struct elf32_arm_link_hash_table *globals)
3005 {
3006 const int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3007 Tag_CPU_arch);
3008 return arch == TAG_CPU_ARCH_V6T2
3009 || arch == TAG_CPU_ARCH_V6K
3010 || arch == TAG_CPU_ARCH_V7
3011 || arch == TAG_CPU_ARCH_V7E_M;
3012 }
3013
3014 static bfd_boolean
3015 arch_has_thumb2_nop (struct elf32_arm_link_hash_table *globals)
3016 {
3017 const int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3018 Tag_CPU_arch);
3019 return (arch == TAG_CPU_ARCH_V6T2 || arch == TAG_CPU_ARCH_V7
3020 || arch == TAG_CPU_ARCH_V7E_M);
3021 }
3022
3023 static bfd_boolean
3024 arm_stub_is_thumb (enum elf32_arm_stub_type stub_type)
3025 {
3026 switch (stub_type)
3027 {
3028 case arm_stub_long_branch_thumb_only:
3029 case arm_stub_long_branch_v4t_thumb_arm:
3030 case arm_stub_short_branch_v4t_thumb_arm:
3031 case arm_stub_long_branch_v4t_thumb_arm_pic:
3032 case arm_stub_long_branch_thumb_only_pic:
3033 return TRUE;
3034 case arm_stub_none:
3035 BFD_FAIL ();
3036 return FALSE;
3037 break;
3038 default:
3039 return FALSE;
3040 }
3041 }
3042
3043 /* Determine the type of stub needed, if any, for a call. */
3044
3045 static enum elf32_arm_stub_type
3046 arm_type_of_stub (struct bfd_link_info *info,
3047 asection *input_sec,
3048 const Elf_Internal_Rela *rel,
3049 unsigned char st_type,
3050 struct elf32_arm_link_hash_entry *hash,
3051 bfd_vma destination,
3052 asection *sym_sec,
3053 bfd *input_bfd,
3054 const char *name)
3055 {
3056 bfd_vma location;
3057 bfd_signed_vma branch_offset;
3058 unsigned int r_type;
3059 struct elf32_arm_link_hash_table * globals;
3060 int thumb2;
3061 int thumb_only;
3062 enum elf32_arm_stub_type stub_type = arm_stub_none;
3063 int use_plt = 0;
3064
3065 /* We don't know the actual type of destination in case it is of
3066 type STT_SECTION: give up. */
3067 if (st_type == STT_SECTION)
3068 return stub_type;
3069
3070 globals = elf32_arm_hash_table (info);
3071 if (globals == NULL)
3072 return stub_type;
3073
3074 thumb_only = using_thumb_only (globals);
3075
3076 thumb2 = using_thumb2 (globals);
3077
3078 /* Determine where the call point is. */
3079 location = (input_sec->output_offset
3080 + input_sec->output_section->vma
3081 + rel->r_offset);
3082
3083 branch_offset = (bfd_signed_vma)(destination - location);
3084
3085 r_type = ELF32_R_TYPE (rel->r_info);
3086
3087 /* Keep a simpler condition, for the sake of clarity. */
3088 if (globals->splt != NULL && hash != NULL && hash->root.plt.offset != (bfd_vma) -1)
3089 {
3090 use_plt = 1;
3091 /* Note when dealing with PLT entries: the main PLT stub is in
3092 ARM mode, so if the branch is in Thumb mode, another
3093 Thumb->ARM stub will be inserted later just before the ARM
3094 PLT stub. We don't take this extra distance into account
3095 here, because if a long branch stub is needed, we'll add a
3096 Thumb->Arm one and branch directly to the ARM PLT entry
3097 because it avoids spreading offset corrections in several
3098 places. */
3099 }
3100
3101 if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24)
3102 {
3103 /* Handle cases where:
3104 - this call goes too far (different Thumb/Thumb2 max
3105 distance)
3106 - it's a Thumb->Arm call and blx is not available, or it's a
3107 Thumb->Arm branch (not bl). A stub is needed in this case,
3108 but only if this call is not through a PLT entry. Indeed,
3109 PLT stubs handle mode switching already.
3110 */
3111 if ((!thumb2
3112 && (branch_offset > THM_MAX_FWD_BRANCH_OFFSET
3113 || (branch_offset < THM_MAX_BWD_BRANCH_OFFSET)))
3114 || (thumb2
3115 && (branch_offset > THM2_MAX_FWD_BRANCH_OFFSET
3116 || (branch_offset < THM2_MAX_BWD_BRANCH_OFFSET)))
3117 || ((st_type != STT_ARM_TFUNC)
3118 && (((r_type == R_ARM_THM_CALL) && !globals->use_blx)
3119 || (r_type == R_ARM_THM_JUMP24))
3120 && !use_plt))
3121 {
3122 if (st_type == STT_ARM_TFUNC)
3123 {
3124 /* Thumb to thumb. */
3125 if (!thumb_only)
3126 {
3127 stub_type = (info->shared | globals->pic_veneer)
3128 /* PIC stubs. */
3129 ? ((globals->use_blx
3130 && (r_type ==R_ARM_THM_CALL))
3131 /* V5T and above. Stub starts with ARM code, so
3132 we must be able to switch mode before
3133 reaching it, which is only possible for 'bl'
3134 (ie R_ARM_THM_CALL relocation). */
3135 ? arm_stub_long_branch_any_thumb_pic
3136 /* On V4T, use Thumb code only. */
3137 : arm_stub_long_branch_v4t_thumb_thumb_pic)
3138
3139 /* non-PIC stubs. */
3140 : ((globals->use_blx
3141 && (r_type ==R_ARM_THM_CALL))
3142 /* V5T and above. */
3143 ? arm_stub_long_branch_any_any
3144 /* V4T. */
3145 : arm_stub_long_branch_v4t_thumb_thumb);
3146 }
3147 else
3148 {
3149 stub_type = (info->shared | globals->pic_veneer)
3150 /* PIC stub. */
3151 ? arm_stub_long_branch_thumb_only_pic
3152 /* non-PIC stub. */
3153 : arm_stub_long_branch_thumb_only;
3154 }
3155 }
3156 else
3157 {
3158 /* Thumb to arm. */
3159 if (sym_sec != NULL
3160 && sym_sec->owner != NULL
3161 && !INTERWORK_FLAG (sym_sec->owner))
3162 {
3163 (*_bfd_error_handler)
3164 (_("%B(%s): warning: interworking not enabled.\n"
3165 " first occurrence: %B: Thumb call to ARM"),
3166 sym_sec->owner, input_bfd, name);
3167 }
3168
3169 stub_type = (info->shared | globals->pic_veneer)
3170 /* PIC stubs. */
3171 ? ((globals->use_blx
3172 && (r_type ==R_ARM_THM_CALL))
3173 /* V5T and above. */
3174 ? arm_stub_long_branch_any_arm_pic
3175 /* V4T PIC stub. */
3176 : arm_stub_long_branch_v4t_thumb_arm_pic)
3177
3178 /* non-PIC stubs. */
3179 : ((globals->use_blx
3180 && (r_type ==R_ARM_THM_CALL))
3181 /* V5T and above. */
3182 ? arm_stub_long_branch_any_any
3183 /* V4T. */
3184 : arm_stub_long_branch_v4t_thumb_arm);
3185
3186 /* Handle v4t short branches. */
3187 if ((stub_type == arm_stub_long_branch_v4t_thumb_arm)
3188 && (branch_offset <= THM_MAX_FWD_BRANCH_OFFSET)
3189 && (branch_offset >= THM_MAX_BWD_BRANCH_OFFSET))
3190 stub_type = arm_stub_short_branch_v4t_thumb_arm;
3191 }
3192 }
3193 }
3194 else if (r_type == R_ARM_CALL || r_type == R_ARM_JUMP24 || r_type == R_ARM_PLT32)
3195 {
3196 if (st_type == STT_ARM_TFUNC)
3197 {
3198 /* Arm to thumb. */
3199
3200 if (sym_sec != NULL
3201 && sym_sec->owner != NULL
3202 && !INTERWORK_FLAG (sym_sec->owner))
3203 {
3204 (*_bfd_error_handler)
3205 (_("%B(%s): warning: interworking not enabled.\n"
3206 " first occurrence: %B: ARM call to Thumb"),
3207 sym_sec->owner, input_bfd, name);
3208 }
3209
3210 /* We have an extra 2-bytes reach because of
3211 the mode change (bit 24 (H) of BLX encoding). */
3212 if (branch_offset > (ARM_MAX_FWD_BRANCH_OFFSET + 2)
3213 || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET)
3214 || ((r_type == R_ARM_CALL) && !globals->use_blx)
3215 || (r_type == R_ARM_JUMP24)
3216 || (r_type == R_ARM_PLT32))
3217 {
3218 stub_type = (info->shared | globals->pic_veneer)
3219 /* PIC stubs. */
3220 ? ((globals->use_blx)
3221 /* V5T and above. */
3222 ? arm_stub_long_branch_any_thumb_pic
3223 /* V4T stub. */
3224 : arm_stub_long_branch_v4t_arm_thumb_pic)
3225
3226 /* non-PIC stubs. */
3227 : ((globals->use_blx)
3228 /* V5T and above. */
3229 ? arm_stub_long_branch_any_any
3230 /* V4T. */
3231 : arm_stub_long_branch_v4t_arm_thumb);
3232 }
3233 }
3234 else
3235 {
3236 /* Arm to arm. */
3237 if (branch_offset > ARM_MAX_FWD_BRANCH_OFFSET
3238 || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET))
3239 {
3240 stub_type = (info->shared | globals->pic_veneer)
3241 /* PIC stubs. */
3242 ? arm_stub_long_branch_any_arm_pic
3243 /* non-PIC stubs. */
3244 : arm_stub_long_branch_any_any;
3245 }
3246 }
3247 }
3248
3249 return stub_type;
3250 }
3251
3252 /* Build a name for an entry in the stub hash table. */
3253
3254 static char *
3255 elf32_arm_stub_name (const asection *input_section,
3256 const asection *sym_sec,
3257 const struct elf32_arm_link_hash_entry *hash,
3258 const Elf_Internal_Rela *rel)
3259 {
3260 char *stub_name;
3261 bfd_size_type len;
3262
3263 if (hash)
3264 {
3265 len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 8 + 1;
3266 stub_name = (char *) bfd_malloc (len);
3267 if (stub_name != NULL)
3268 sprintf (stub_name, "%08x_%s+%x",
3269 input_section->id & 0xffffffff,
3270 hash->root.root.root.string,
3271 (int) rel->r_addend & 0xffffffff);
3272 }
3273 else
3274 {
3275 len = 8 + 1 + 8 + 1 + 8 + 1 + 8 + 1;
3276 stub_name = (char *) bfd_malloc (len);
3277 if (stub_name != NULL)
3278 sprintf (stub_name, "%08x_%x:%x+%x",
3279 input_section->id & 0xffffffff,
3280 sym_sec->id & 0xffffffff,
3281 (int) ELF32_R_SYM (rel->r_info) & 0xffffffff,
3282 (int) rel->r_addend & 0xffffffff);
3283 }
3284
3285 return stub_name;
3286 }
3287
3288 /* Look up an entry in the stub hash. Stub entries are cached because
3289 creating the stub name takes a bit of time. */
3290
3291 static struct elf32_arm_stub_hash_entry *
3292 elf32_arm_get_stub_entry (const asection *input_section,
3293 const asection *sym_sec,
3294 struct elf_link_hash_entry *hash,
3295 const Elf_Internal_Rela *rel,
3296 struct elf32_arm_link_hash_table *htab)
3297 {
3298 struct elf32_arm_stub_hash_entry *stub_entry;
3299 struct elf32_arm_link_hash_entry *h = (struct elf32_arm_link_hash_entry *) hash;
3300 const asection *id_sec;
3301
3302 if ((input_section->flags & SEC_CODE) == 0)
3303 return NULL;
3304
3305 /* If this input section is part of a group of sections sharing one
3306 stub section, then use the id of the first section in the group.
3307 Stub names need to include a section id, as there may well be
3308 more than one stub used to reach say, printf, and we need to
3309 distinguish between them. */
3310 id_sec = htab->stub_group[input_section->id].link_sec;
3311
3312 if (h != NULL && h->stub_cache != NULL
3313 && h->stub_cache->h == h
3314 && h->stub_cache->id_sec == id_sec)
3315 {
3316 stub_entry = h->stub_cache;
3317 }
3318 else
3319 {
3320 char *stub_name;
3321
3322 stub_name = elf32_arm_stub_name (id_sec, sym_sec, h, rel);
3323 if (stub_name == NULL)
3324 return NULL;
3325
3326 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table,
3327 stub_name, FALSE, FALSE);
3328 if (h != NULL)
3329 h->stub_cache = stub_entry;
3330
3331 free (stub_name);
3332 }
3333
3334 return stub_entry;
3335 }
3336
3337 /* Find or create a stub section. Returns a pointer to the stub section, and
3338 the section to which the stub section will be attached (in *LINK_SEC_P).
3339 LINK_SEC_P may be NULL. */
3340
3341 static asection *
3342 elf32_arm_create_or_find_stub_sec (asection **link_sec_p, asection *section,
3343 struct elf32_arm_link_hash_table *htab)
3344 {
3345 asection *link_sec;
3346 asection *stub_sec;
3347
3348 link_sec = htab->stub_group[section->id].link_sec;
3349 stub_sec = htab->stub_group[section->id].stub_sec;
3350 if (stub_sec == NULL)
3351 {
3352 stub_sec = htab->stub_group[link_sec->id].stub_sec;
3353 if (stub_sec == NULL)
3354 {
3355 size_t namelen;
3356 bfd_size_type len;
3357 char *s_name;
3358
3359 namelen = strlen (link_sec->name);
3360 len = namelen + sizeof (STUB_SUFFIX);
3361 s_name = (char *) bfd_alloc (htab->stub_bfd, len);
3362 if (s_name == NULL)
3363 return NULL;
3364
3365 memcpy (s_name, link_sec->name, namelen);
3366 memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
3367 stub_sec = (*htab->add_stub_section) (s_name, link_sec);
3368 if (stub_sec == NULL)
3369 return NULL;
3370 htab->stub_group[link_sec->id].stub_sec = stub_sec;
3371 }
3372 htab->stub_group[section->id].stub_sec = stub_sec;
3373 }
3374
3375 if (link_sec_p)
3376 *link_sec_p = link_sec;
3377
3378 return stub_sec;
3379 }
3380
3381 /* Add a new stub entry to the stub hash. Not all fields of the new
3382 stub entry are initialised. */
3383
3384 static struct elf32_arm_stub_hash_entry *
3385 elf32_arm_add_stub (const char *stub_name,
3386 asection *section,
3387 struct elf32_arm_link_hash_table *htab)
3388 {
3389 asection *link_sec;
3390 asection *stub_sec;
3391 struct elf32_arm_stub_hash_entry *stub_entry;
3392
3393 stub_sec = elf32_arm_create_or_find_stub_sec (&link_sec, section, htab);
3394 if (stub_sec == NULL)
3395 return NULL;
3396
3397 /* Enter this entry into the linker stub hash table. */
3398 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
3399 TRUE, FALSE);
3400 if (stub_entry == NULL)
3401 {
3402 (*_bfd_error_handler) (_("%s: cannot create stub entry %s"),
3403 section->owner,
3404 stub_name);
3405 return NULL;
3406 }
3407
3408 stub_entry->stub_sec = stub_sec;
3409 stub_entry->stub_offset = 0;
3410 stub_entry->id_sec = link_sec;
3411
3412 return stub_entry;
3413 }
3414
3415 /* Store an Arm insn into an output section not processed by
3416 elf32_arm_write_section. */
3417
3418 static void
3419 put_arm_insn (struct elf32_arm_link_hash_table * htab,
3420 bfd * output_bfd, bfd_vma val, void * ptr)
3421 {
3422 if (htab->byteswap_code != bfd_little_endian (output_bfd))
3423 bfd_putl32 (val, ptr);
3424 else
3425 bfd_putb32 (val, ptr);
3426 }
3427
3428 /* Store a 16-bit Thumb insn into an output section not processed by
3429 elf32_arm_write_section. */
3430
3431 static void
3432 put_thumb_insn (struct elf32_arm_link_hash_table * htab,
3433 bfd * output_bfd, bfd_vma val, void * ptr)
3434 {
3435 if (htab->byteswap_code != bfd_little_endian (output_bfd))
3436 bfd_putl16 (val, ptr);
3437 else
3438 bfd_putb16 (val, ptr);
3439 }
3440
3441 static bfd_reloc_status_type elf32_arm_final_link_relocate
3442 (reloc_howto_type *, bfd *, bfd *, asection *, bfd_byte *,
3443 Elf_Internal_Rela *, bfd_vma, struct bfd_link_info *, asection *,
3444 const char *, int, struct elf_link_hash_entry *, bfd_boolean *, char **);
3445
3446 static bfd_boolean
3447 arm_build_one_stub (struct bfd_hash_entry *gen_entry,
3448 void * in_arg)
3449 {
3450 #define MAXRELOCS 2
3451 struct elf32_arm_stub_hash_entry *stub_entry;
3452 struct elf32_arm_link_hash_table *globals;
3453 struct bfd_link_info *info;
3454 asection *stub_sec;
3455 bfd *stub_bfd;
3456 bfd_vma stub_addr;
3457 bfd_byte *loc;
3458 bfd_vma sym_value;
3459 int template_size;
3460 int size;
3461 const insn_sequence *template_sequence;
3462 int i;
3463 int stub_reloc_idx[MAXRELOCS] = {-1, -1};
3464 int stub_reloc_offset[MAXRELOCS] = {0, 0};
3465 int nrelocs = 0;
3466
3467 /* Massage our args to the form they really have. */
3468 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
3469 info = (struct bfd_link_info *) in_arg;
3470
3471 globals = elf32_arm_hash_table (info);
3472 if (globals == NULL)
3473 return FALSE;
3474
3475 stub_sec = stub_entry->stub_sec;
3476
3477 if ((globals->fix_cortex_a8 < 0)
3478 != (stub_entry->stub_type >= arm_stub_a8_veneer_lwm))
3479 /* We have to do the a8 fixes last, as they are less aligned than
3480 the other veneers. */
3481 return TRUE;
3482
3483 /* Make a note of the offset within the stubs for this entry. */
3484 stub_entry->stub_offset = stub_sec->size;
3485 loc = stub_sec->contents + stub_entry->stub_offset;
3486
3487 stub_bfd = stub_sec->owner;
3488
3489 /* This is the address of the start of the stub. */
3490 stub_addr = stub_sec->output_section->vma + stub_sec->output_offset
3491 + stub_entry->stub_offset;
3492
3493 /* This is the address of the stub destination. */
3494 sym_value = (stub_entry->target_value
3495 + stub_entry->target_section->output_offset
3496 + stub_entry->target_section->output_section->vma);
3497
3498 template_sequence = stub_entry->stub_template;
3499 template_size = stub_entry->stub_template_size;
3500
3501 size = 0;
3502 for (i = 0; i < template_size; i++)
3503 {
3504 switch (template_sequence[i].type)
3505 {
3506 case THUMB16_TYPE:
3507 {
3508 bfd_vma data = (bfd_vma) template_sequence[i].data;
3509 if (template_sequence[i].reloc_addend != 0)
3510 {
3511 /* We've borrowed the reloc_addend field to mean we should
3512 insert a condition code into this (Thumb-1 branch)
3513 instruction. See THUMB16_BCOND_INSN. */
3514 BFD_ASSERT ((data & 0xff00) == 0xd000);
3515 data |= ((stub_entry->orig_insn >> 22) & 0xf) << 8;
3516 }
3517 put_thumb_insn (globals, stub_bfd, data, loc + size);
3518 size += 2;
3519 }
3520 break;
3521
3522 case THUMB32_TYPE:
3523 put_thumb_insn (globals, stub_bfd,
3524 (template_sequence[i].data >> 16) & 0xffff,
3525 loc + size);
3526 put_thumb_insn (globals, stub_bfd, template_sequence[i].data & 0xffff,
3527 loc + size + 2);
3528 if (template_sequence[i].r_type != R_ARM_NONE)
3529 {
3530 stub_reloc_idx[nrelocs] = i;
3531 stub_reloc_offset[nrelocs++] = size;
3532 }
3533 size += 4;
3534 break;
3535
3536 case ARM_TYPE:
3537 put_arm_insn (globals, stub_bfd, template_sequence[i].data,
3538 loc + size);
3539 /* Handle cases where the target is encoded within the
3540 instruction. */
3541 if (template_sequence[i].r_type == R_ARM_JUMP24)
3542 {
3543 stub_reloc_idx[nrelocs] = i;
3544 stub_reloc_offset[nrelocs++] = size;
3545 }
3546 size += 4;
3547 break;
3548
3549 case DATA_TYPE:
3550 bfd_put_32 (stub_bfd, template_sequence[i].data, loc + size);
3551 stub_reloc_idx[nrelocs] = i;
3552 stub_reloc_offset[nrelocs++] = size;
3553 size += 4;
3554 break;
3555
3556 default:
3557 BFD_FAIL ();
3558 return FALSE;
3559 }
3560 }
3561
3562 stub_sec->size += size;
3563
3564 /* Stub size has already been computed in arm_size_one_stub. Check
3565 consistency. */
3566 BFD_ASSERT (size == stub_entry->stub_size);
3567
3568 /* Destination is Thumb. Force bit 0 to 1 to reflect this. */
3569 if (stub_entry->st_type == STT_ARM_TFUNC)
3570 sym_value |= 1;
3571
3572 /* Assume there is at least one and at most MAXRELOCS entries to relocate
3573 in each stub. */
3574 BFD_ASSERT (nrelocs != 0 && nrelocs <= MAXRELOCS);
3575
3576 for (i = 0; i < nrelocs; i++)
3577 if (template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_JUMP24
3578 || template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_JUMP19
3579 || template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_CALL
3580 || template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_XPC22)
3581 {
3582 Elf_Internal_Rela rel;
3583 bfd_boolean unresolved_reloc;
3584 char *error_message;
3585 int sym_flags
3586 = (template_sequence[stub_reloc_idx[i]].r_type != R_ARM_THM_XPC22)
3587 ? STT_ARM_TFUNC : 0;
3588 bfd_vma points_to = sym_value + stub_entry->target_addend;
3589
3590 rel.r_offset = stub_entry->stub_offset + stub_reloc_offset[i];
3591 rel.r_info = ELF32_R_INFO (0,
3592 template_sequence[stub_reloc_idx[i]].r_type);
3593 rel.r_addend = template_sequence[stub_reloc_idx[i]].reloc_addend;
3594
3595 if (stub_entry->stub_type == arm_stub_a8_veneer_b_cond && i == 0)
3596 /* The first relocation in the elf32_arm_stub_a8_veneer_b_cond[]
3597 template should refer back to the instruction after the original
3598 branch. */
3599 points_to = sym_value;
3600
3601 /* There may be unintended consequences if this is not true. */
3602 BFD_ASSERT (stub_entry->h == NULL);
3603
3604 /* Note: _bfd_final_link_relocate doesn't handle these relocations
3605 properly. We should probably use this function unconditionally,
3606 rather than only for certain relocations listed in the enclosing
3607 conditional, for the sake of consistency. */
3608 elf32_arm_final_link_relocate (elf32_arm_howto_from_type
3609 (template_sequence[stub_reloc_idx[i]].r_type),
3610 stub_bfd, info->output_bfd, stub_sec, stub_sec->contents, &rel,
3611 points_to, info, stub_entry->target_section, "", sym_flags,
3612 (struct elf_link_hash_entry *) stub_entry->h, &unresolved_reloc,
3613 &error_message);
3614 }
3615 else
3616 {
3617 _bfd_final_link_relocate (elf32_arm_howto_from_type
3618 (template_sequence[stub_reloc_idx[i]].r_type), stub_bfd, stub_sec,
3619 stub_sec->contents, stub_entry->stub_offset + stub_reloc_offset[i],
3620 sym_value + stub_entry->target_addend,
3621 template_sequence[stub_reloc_idx[i]].reloc_addend);
3622 }
3623
3624 return TRUE;
3625 #undef MAXRELOCS
3626 }
3627
3628 /* Calculate the template, template size and instruction size for a stub.
3629 Return value is the instruction size. */
3630
3631 static unsigned int
3632 find_stub_size_and_template (enum elf32_arm_stub_type stub_type,
3633 const insn_sequence **stub_template,
3634 int *stub_template_size)
3635 {
3636 const insn_sequence *template_sequence = NULL;
3637 int template_size = 0, i;
3638 unsigned int size;
3639
3640 template_sequence = stub_definitions[stub_type].template_sequence;
3641 template_size = stub_definitions[stub_type].template_size;
3642
3643 size = 0;
3644 for (i = 0; i < template_size; i++)
3645 {
3646 switch (template_sequence[i].type)
3647 {
3648 case THUMB16_TYPE:
3649 size += 2;
3650 break;
3651
3652 case ARM_TYPE:
3653 case THUMB32_TYPE:
3654 case DATA_TYPE:
3655 size += 4;
3656 break;
3657
3658 default:
3659 BFD_FAIL ();
3660 return FALSE;
3661 }
3662 }
3663
3664 if (stub_template)
3665 *stub_template = template_sequence;
3666
3667 if (stub_template_size)
3668 *stub_template_size = template_size;
3669
3670 return size;
3671 }
3672
3673 /* As above, but don't actually build the stub. Just bump offset so
3674 we know stub section sizes. */
3675
3676 static bfd_boolean
3677 arm_size_one_stub (struct bfd_hash_entry *gen_entry,
3678 void * in_arg)
3679 {
3680 struct elf32_arm_stub_hash_entry *stub_entry;
3681 struct elf32_arm_link_hash_table *htab;
3682 const insn_sequence *template_sequence;
3683 int template_size, size;
3684
3685 /* Massage our args to the form they really have. */
3686 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
3687 htab = (struct elf32_arm_link_hash_table *) in_arg;
3688
3689 BFD_ASSERT((stub_entry->stub_type > arm_stub_none)
3690 && stub_entry->stub_type < ARRAY_SIZE(stub_definitions));
3691
3692 size = find_stub_size_and_template (stub_entry->stub_type, &template_sequence,
3693 &template_size);
3694
3695 stub_entry->stub_size = size;
3696 stub_entry->stub_template = template_sequence;
3697 stub_entry->stub_template_size = template_size;
3698
3699 size = (size + 7) & ~7;
3700 stub_entry->stub_sec->size += size;
3701
3702 return TRUE;
3703 }
3704
3705 /* External entry points for sizing and building linker stubs. */
3706
3707 /* Set up various things so that we can make a list of input sections
3708 for each output section included in the link. Returns -1 on error,
3709 0 when no stubs will be needed, and 1 on success. */
3710
3711 int
3712 elf32_arm_setup_section_lists (bfd *output_bfd,
3713 struct bfd_link_info *info)
3714 {
3715 bfd *input_bfd;
3716 unsigned int bfd_count;
3717 int top_id, top_index;
3718 asection *section;
3719 asection **input_list, **list;
3720 bfd_size_type amt;
3721 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
3722
3723 if (htab == NULL)
3724 return 0;
3725 if (! is_elf_hash_table (htab))
3726 return 0;
3727
3728 /* Count the number of input BFDs and find the top input section id. */
3729 for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0;
3730 input_bfd != NULL;
3731 input_bfd = input_bfd->link_next)
3732 {
3733 bfd_count += 1;
3734 for (section = input_bfd->sections;
3735 section != NULL;
3736 section = section->next)
3737 {
3738 if (top_id < section->id)
3739 top_id = section->id;
3740 }
3741 }
3742 htab->bfd_count = bfd_count;
3743
3744 amt = sizeof (struct map_stub) * (top_id + 1);
3745 htab->stub_group = (struct map_stub *) bfd_zmalloc (amt);
3746 if (htab->stub_group == NULL)
3747 return -1;
3748
3749 /* We can't use output_bfd->section_count here to find the top output
3750 section index as some sections may have been removed, and
3751 _bfd_strip_section_from_output doesn't renumber the indices. */
3752 for (section = output_bfd->sections, top_index = 0;
3753 section != NULL;
3754 section = section->next)
3755 {
3756 if (top_index < section->index)
3757 top_index = section->index;
3758 }
3759
3760 htab->top_index = top_index;
3761 amt = sizeof (asection *) * (top_index + 1);
3762 input_list = (asection **) bfd_malloc (amt);
3763 htab->input_list = input_list;
3764 if (input_list == NULL)
3765 return -1;
3766
3767 /* For sections we aren't interested in, mark their entries with a
3768 value we can check later. */
3769 list = input_list + top_index;
3770 do
3771 *list = bfd_abs_section_ptr;
3772 while (list-- != input_list);
3773
3774 for (section = output_bfd->sections;
3775 section != NULL;
3776 section = section->next)
3777 {
3778 if ((section->flags & SEC_CODE) != 0)
3779 input_list[section->index] = NULL;
3780 }
3781
3782 return 1;
3783 }
3784
3785 /* The linker repeatedly calls this function for each input section,
3786 in the order that input sections are linked into output sections.
3787 Build lists of input sections to determine groupings between which
3788 we may insert linker stubs. */
3789
3790 void
3791 elf32_arm_next_input_section (struct bfd_link_info *info,
3792 asection *isec)
3793 {
3794 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
3795
3796 if (htab == NULL)
3797 return;
3798
3799 if (isec->output_section->index <= htab->top_index)
3800 {
3801 asection **list = htab->input_list + isec->output_section->index;
3802
3803 if (*list != bfd_abs_section_ptr && (isec->flags & SEC_CODE) != 0)
3804 {
3805 /* Steal the link_sec pointer for our list. */
3806 #define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
3807 /* This happens to make the list in reverse order,
3808 which we reverse later. */
3809 PREV_SEC (isec) = *list;
3810 *list = isec;
3811 }
3812 }
3813 }
3814
3815 /* See whether we can group stub sections together. Grouping stub
3816 sections may result in fewer stubs. More importantly, we need to
3817 put all .init* and .fini* stubs at the end of the .init or
3818 .fini output sections respectively, because glibc splits the
3819 _init and _fini functions into multiple parts. Putting a stub in
3820 the middle of a function is not a good idea. */
3821
3822 static void
3823 group_sections (struct elf32_arm_link_hash_table *htab,
3824 bfd_size_type stub_group_size,
3825 bfd_boolean stubs_always_after_branch)
3826 {
3827 asection **list = htab->input_list;
3828
3829 do
3830 {
3831 asection *tail = *list;
3832 asection *head;
3833
3834 if (tail == bfd_abs_section_ptr)
3835 continue;
3836
3837 /* Reverse the list: we must avoid placing stubs at the
3838 beginning of the section because the beginning of the text
3839 section may be required for an interrupt vector in bare metal
3840 code. */
3841 #define NEXT_SEC PREV_SEC
3842 head = NULL;
3843 while (tail != NULL)
3844 {
3845 /* Pop from tail. */
3846 asection *item = tail;
3847 tail = PREV_SEC (item);
3848
3849 /* Push on head. */
3850 NEXT_SEC (item) = head;
3851 head = item;
3852 }
3853
3854 while (head != NULL)
3855 {
3856 asection *curr;
3857 asection *next;
3858 bfd_vma stub_group_start = head->output_offset;
3859 bfd_vma end_of_next;
3860
3861 curr = head;
3862 while (NEXT_SEC (curr) != NULL)
3863 {
3864 next = NEXT_SEC (curr);
3865 end_of_next = next->output_offset + next->size;
3866 if (end_of_next - stub_group_start >= stub_group_size)
3867 /* End of NEXT is too far from start, so stop. */
3868 break;
3869 /* Add NEXT to the group. */
3870 curr = next;
3871 }
3872
3873 /* OK, the size from the start to the start of CURR is less
3874 than stub_group_size and thus can be handled by one stub
3875 section. (Or the head section is itself larger than
3876 stub_group_size, in which case we may be toast.)
3877 We should really be keeping track of the total size of
3878 stubs added here, as stubs contribute to the final output
3879 section size. */
3880 do
3881 {
3882 next = NEXT_SEC (head);
3883 /* Set up this stub group. */
3884 htab->stub_group[head->id].link_sec = curr;
3885 }
3886 while (head != curr && (head = next) != NULL);
3887
3888 /* But wait, there's more! Input sections up to stub_group_size
3889 bytes after the stub section can be handled by it too. */
3890 if (!stubs_always_after_branch)
3891 {
3892 stub_group_start = curr->output_offset + curr->size;
3893
3894 while (next != NULL)
3895 {
3896 end_of_next = next->output_offset + next->size;
3897 if (end_of_next - stub_group_start >= stub_group_size)
3898 /* End of NEXT is too far from stubs, so stop. */
3899 break;
3900 /* Add NEXT to the stub group. */
3901 head = next;
3902 next = NEXT_SEC (head);
3903 htab->stub_group[head->id].link_sec = curr;
3904 }
3905 }
3906 head = next;
3907 }
3908 }
3909 while (list++ != htab->input_list + htab->top_index);
3910
3911 free (htab->input_list);
3912 #undef PREV_SEC
3913 #undef NEXT_SEC
3914 }
3915
3916 /* Comparison function for sorting/searching relocations relating to Cortex-A8
3917 erratum fix. */
3918
3919 static int
3920 a8_reloc_compare (const void *a, const void *b)
3921 {
3922 const struct a8_erratum_reloc *ra = (const struct a8_erratum_reloc *) a;
3923 const struct a8_erratum_reloc *rb = (const struct a8_erratum_reloc *) b;
3924
3925 if (ra->from < rb->from)
3926 return -1;
3927 else if (ra->from > rb->from)
3928 return 1;
3929 else
3930 return 0;
3931 }
3932
3933 static struct elf_link_hash_entry *find_thumb_glue (struct bfd_link_info *,
3934 const char *, char **);
3935
3936 /* Helper function to scan code for sequences which might trigger the Cortex-A8
3937 branch/TLB erratum. Fill in the table described by A8_FIXES_P,
3938 NUM_A8_FIXES_P, A8_FIX_TABLE_SIZE_P. Returns true if an error occurs, false
3939 otherwise. */
3940
3941 static bfd_boolean
3942 cortex_a8_erratum_scan (bfd *input_bfd,
3943 struct bfd_link_info *info,
3944 struct a8_erratum_fix **a8_fixes_p,
3945 unsigned int *num_a8_fixes_p,
3946 unsigned int *a8_fix_table_size_p,
3947 struct a8_erratum_reloc *a8_relocs,
3948 unsigned int num_a8_relocs,
3949 unsigned prev_num_a8_fixes,
3950 bfd_boolean *stub_changed_p)
3951 {
3952 asection *section;
3953 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
3954 struct a8_erratum_fix *a8_fixes = *a8_fixes_p;
3955 unsigned int num_a8_fixes = *num_a8_fixes_p;
3956 unsigned int a8_fix_table_size = *a8_fix_table_size_p;
3957
3958 if (htab == NULL)
3959 return FALSE;
3960
3961 for (section = input_bfd->sections;
3962 section != NULL;
3963 section = section->next)
3964 {
3965 bfd_byte *contents = NULL;
3966 struct _arm_elf_section_data *sec_data;
3967 unsigned int span;
3968 bfd_vma base_vma;
3969
3970 if (elf_section_type (section) != SHT_PROGBITS
3971 || (elf_section_flags (section) & SHF_EXECINSTR) == 0
3972 || (section->flags & SEC_EXCLUDE) != 0
3973 || (section->sec_info_type == ELF_INFO_TYPE_JUST_SYMS)
3974 || (section->output_section == bfd_abs_section_ptr))
3975 continue;
3976
3977 base_vma = section->output_section->vma + section->output_offset;
3978
3979 if (elf_section_data (section)->this_hdr.contents != NULL)
3980 contents = elf_section_data (section)->this_hdr.contents;
3981 else if (! bfd_malloc_and_get_section (input_bfd, section, &contents))
3982 return TRUE;
3983
3984 sec_data = elf32_arm_section_data (section);
3985
3986 for (span = 0; span < sec_data->mapcount; span++)
3987 {
3988 unsigned int span_start = sec_data->map[span].vma;
3989 unsigned int span_end = (span == sec_data->mapcount - 1)
3990 ? section->size : sec_data->map[span + 1].vma;
3991 unsigned int i;
3992 char span_type = sec_data->map[span].type;
3993 bfd_boolean last_was_32bit = FALSE, last_was_branch = FALSE;
3994
3995 if (span_type != 't')
3996 continue;
3997
3998 /* Span is entirely within a single 4KB region: skip scanning. */
3999 if (((base_vma + span_start) & ~0xfff)
4000 == ((base_vma + span_end) & ~0xfff))
4001 continue;
4002
4003 /* Scan for 32-bit Thumb-2 branches which span two 4K regions, where:
4004
4005 * The opcode is BLX.W, BL.W, B.W, Bcc.W
4006 * The branch target is in the same 4KB region as the
4007 first half of the branch.
4008 * The instruction before the branch is a 32-bit
4009 length non-branch instruction. */
4010 for (i = span_start; i < span_end;)
4011 {
4012 unsigned int insn = bfd_getl16 (&contents[i]);
4013 bfd_boolean insn_32bit = FALSE, is_blx = FALSE, is_b = FALSE;
4014 bfd_boolean is_bl = FALSE, is_bcc = FALSE, is_32bit_branch;
4015
4016 if ((insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000)
4017 insn_32bit = TRUE;
4018
4019 if (insn_32bit)
4020 {
4021 /* Load the rest of the insn (in manual-friendly order). */
4022 insn = (insn << 16) | bfd_getl16 (&contents[i + 2]);
4023
4024 /* Encoding T4: B<c>.W. */
4025 is_b = (insn & 0xf800d000) == 0xf0009000;
4026 /* Encoding T1: BL<c>.W. */
4027 is_bl = (insn & 0xf800d000) == 0xf000d000;
4028 /* Encoding T2: BLX<c>.W. */
4029 is_blx = (insn & 0xf800d000) == 0xf000c000;
4030 /* Encoding T3: B<c>.W (not permitted in IT block). */
4031 is_bcc = (insn & 0xf800d000) == 0xf0008000
4032 && (insn & 0x07f00000) != 0x03800000;
4033 }
4034
4035 is_32bit_branch = is_b || is_bl || is_blx || is_bcc;
4036
4037 if (((base_vma + i) & 0xfff) == 0xffe
4038 && insn_32bit
4039 && is_32bit_branch
4040 && last_was_32bit
4041 && ! last_was_branch)
4042 {
4043 bfd_signed_vma offset;
4044 bfd_boolean force_target_arm = FALSE;
4045 bfd_boolean force_target_thumb = FALSE;
4046 bfd_vma target;
4047 enum elf32_arm_stub_type stub_type = arm_stub_none;
4048 struct a8_erratum_reloc key, *found;
4049
4050 key.from = base_vma + i;
4051 found = (struct a8_erratum_reloc *)
4052 bsearch (&key, a8_relocs, num_a8_relocs,
4053 sizeof (struct a8_erratum_reloc),
4054 &a8_reloc_compare);
4055
4056 if (found)
4057 {
4058 char *error_message = NULL;
4059 struct elf_link_hash_entry *entry;
4060
4061 /* We don't care about the error returned from this
4062 function, only if there is glue or not. */
4063 entry = find_thumb_glue (info, found->sym_name,
4064 &error_message);
4065
4066 if (entry)
4067 found->non_a8_stub = TRUE;
4068
4069 if (found->r_type == R_ARM_THM_CALL
4070 && found->st_type != STT_ARM_TFUNC)
4071 force_target_arm = TRUE;
4072 else if (found->r_type == R_ARM_THM_CALL
4073 && found->st_type == STT_ARM_TFUNC)
4074 force_target_thumb = TRUE;
4075 }
4076
4077 /* Check if we have an offending branch instruction. */
4078
4079 if (found && found->non_a8_stub)
4080 /* We've already made a stub for this instruction, e.g.
4081 it's a long branch or a Thumb->ARM stub. Assume that
4082 stub will suffice to work around the A8 erratum (see
4083 setting of always_after_branch above). */
4084 ;
4085 else if (is_bcc)
4086 {
4087 offset = (insn & 0x7ff) << 1;
4088 offset |= (insn & 0x3f0000) >> 4;
4089 offset |= (insn & 0x2000) ? 0x40000 : 0;
4090 offset |= (insn & 0x800) ? 0x80000 : 0;
4091 offset |= (insn & 0x4000000) ? 0x100000 : 0;
4092 if (offset & 0x100000)
4093 offset |= ~ ((bfd_signed_vma) 0xfffff);
4094 stub_type = arm_stub_a8_veneer_b_cond;
4095 }
4096 else if (is_b || is_bl || is_blx)
4097 {
4098 int s = (insn & 0x4000000) != 0;
4099 int j1 = (insn & 0x2000) != 0;
4100 int j2 = (insn & 0x800) != 0;
4101 int i1 = !(j1 ^ s);
4102 int i2 = !(j2 ^ s);
4103
4104 offset = (insn & 0x7ff) << 1;
4105 offset |= (insn & 0x3ff0000) >> 4;
4106 offset |= i2 << 22;
4107 offset |= i1 << 23;
4108 offset |= s << 24;
4109 if (offset & 0x1000000)
4110 offset |= ~ ((bfd_signed_vma) 0xffffff);
4111
4112 if (is_blx)
4113 offset &= ~ ((bfd_signed_vma) 3);
4114
4115 stub_type = is_blx ? arm_stub_a8_veneer_blx :
4116 is_bl ? arm_stub_a8_veneer_bl : arm_stub_a8_veneer_b;
4117 }
4118
4119 if (stub_type != arm_stub_none)
4120 {
4121 bfd_vma pc_for_insn = base_vma + i + 4;
4122
4123 /* The original instruction is a BL, but the target is
4124 an ARM instruction. If we were not making a stub,
4125 the BL would have been converted to a BLX. Use the
4126 BLX stub instead in that case. */
4127 if (htab->use_blx && force_target_arm
4128 && stub_type == arm_stub_a8_veneer_bl)
4129 {
4130 stub_type = arm_stub_a8_veneer_blx;
4131 is_blx = TRUE;
4132 is_bl = FALSE;
4133 }
4134 /* Conversely, if the original instruction was
4135 BLX but the target is Thumb mode, use the BL
4136 stub. */
4137 else if (force_target_thumb
4138 && stub_type == arm_stub_a8_veneer_blx)
4139 {
4140 stub_type = arm_stub_a8_veneer_bl;
4141 is_blx = FALSE;
4142 is_bl = TRUE;
4143 }
4144
4145 if (is_blx)
4146 pc_for_insn &= ~ ((bfd_vma) 3);
4147
4148 /* If we found a relocation, use the proper destination,
4149 not the offset in the (unrelocated) instruction.
4150 Note this is always done if we switched the stub type
4151 above. */
4152 if (found)
4153 offset =
4154 (bfd_signed_vma) (found->destination - pc_for_insn);
4155
4156 target = pc_for_insn + offset;
4157
4158 /* The BLX stub is ARM-mode code. Adjust the offset to
4159 take the different PC value (+8 instead of +4) into
4160 account. */
4161 if (stub_type == arm_stub_a8_veneer_blx)
4162 offset += 4;
4163
4164 if (((base_vma + i) & ~0xfff) == (target & ~0xfff))
4165 {
4166 char *stub_name = NULL;
4167
4168 if (num_a8_fixes == a8_fix_table_size)
4169 {
4170 a8_fix_table_size *= 2;
4171 a8_fixes = (struct a8_erratum_fix *)
4172 bfd_realloc (a8_fixes,
4173 sizeof (struct a8_erratum_fix)
4174 * a8_fix_table_size);
4175 }
4176
4177 if (num_a8_fixes < prev_num_a8_fixes)
4178 {
4179 /* If we're doing a subsequent scan,
4180 check if we've found the same fix as
4181 before, and try and reuse the stub
4182 name. */
4183 stub_name = a8_fixes[num_a8_fixes].stub_name;
4184 if ((a8_fixes[num_a8_fixes].section != section)
4185 || (a8_fixes[num_a8_fixes].offset != i))
4186 {
4187 free (stub_name);
4188 stub_name = NULL;
4189 *stub_changed_p = TRUE;
4190 }
4191 }
4192
4193 if (!stub_name)
4194 {
4195 stub_name = (char *) bfd_malloc (8 + 1 + 8 + 1);
4196 if (stub_name != NULL)
4197 sprintf (stub_name, "%x:%x", section->id, i);
4198 }
4199
4200 a8_fixes[num_a8_fixes].input_bfd = input_bfd;
4201 a8_fixes[num_a8_fixes].section = section;
4202 a8_fixes[num_a8_fixes].offset = i;
4203 a8_fixes[num_a8_fixes].addend = offset;
4204 a8_fixes[num_a8_fixes].orig_insn = insn;
4205 a8_fixes[num_a8_fixes].stub_name = stub_name;
4206 a8_fixes[num_a8_fixes].stub_type = stub_type;
4207
4208 num_a8_fixes++;
4209 }
4210 }
4211 }
4212
4213 i += insn_32bit ? 4 : 2;
4214 last_was_32bit = insn_32bit;
4215 last_was_branch = is_32bit_branch;
4216 }
4217 }
4218
4219 if (elf_section_data (section)->this_hdr.contents == NULL)
4220 free (contents);
4221 }
4222
4223 *a8_fixes_p = a8_fixes;
4224 *num_a8_fixes_p = num_a8_fixes;
4225 *a8_fix_table_size_p = a8_fix_table_size;
4226
4227 return FALSE;
4228 }
4229
4230 /* Determine and set the size of the stub section for a final link.
4231
4232 The basic idea here is to examine all the relocations looking for
4233 PC-relative calls to a target that is unreachable with a "bl"
4234 instruction. */
4235
4236 bfd_boolean
4237 elf32_arm_size_stubs (bfd *output_bfd,
4238 bfd *stub_bfd,
4239 struct bfd_link_info *info,
4240 bfd_signed_vma group_size,
4241 asection * (*add_stub_section) (const char *, asection *),
4242 void (*layout_sections_again) (void))
4243 {
4244 bfd_size_type stub_group_size;
4245 bfd_boolean stubs_always_after_branch;
4246 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
4247 struct a8_erratum_fix *a8_fixes = NULL;
4248 unsigned int num_a8_fixes = 0, a8_fix_table_size = 10;
4249 struct a8_erratum_reloc *a8_relocs = NULL;
4250 unsigned int num_a8_relocs = 0, a8_reloc_table_size = 10, i;
4251
4252 if (htab == NULL)
4253 return FALSE;
4254
4255 if (htab->fix_cortex_a8)
4256 {
4257 a8_fixes = (struct a8_erratum_fix *)
4258 bfd_zmalloc (sizeof (struct a8_erratum_fix) * a8_fix_table_size);
4259 a8_relocs = (struct a8_erratum_reloc *)
4260 bfd_zmalloc (sizeof (struct a8_erratum_reloc) * a8_reloc_table_size);
4261 }
4262
4263 /* Propagate mach to stub bfd, because it may not have been
4264 finalized when we created stub_bfd. */
4265 bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
4266 bfd_get_mach (output_bfd));
4267
4268 /* Stash our params away. */
4269 htab->stub_bfd = stub_bfd;
4270 htab->add_stub_section = add_stub_section;
4271 htab->layout_sections_again = layout_sections_again;
4272 stubs_always_after_branch = group_size < 0;
4273
4274 /* The Cortex-A8 erratum fix depends on stubs not being in the same 4K page
4275 as the first half of a 32-bit branch straddling two 4K pages. This is a
4276 crude way of enforcing that. */
4277 if (htab->fix_cortex_a8)
4278 stubs_always_after_branch = 1;
4279
4280 if (group_size < 0)
4281 stub_group_size = -group_size;
4282 else
4283 stub_group_size = group_size;
4284
4285 if (stub_group_size == 1)
4286 {
4287 /* Default values. */
4288 /* Thumb branch range is +-4MB has to be used as the default
4289 maximum size (a given section can contain both ARM and Thumb
4290 code, so the worst case has to be taken into account).
4291
4292 This value is 24K less than that, which allows for 2025
4293 12-byte stubs. If we exceed that, then we will fail to link.
4294 The user will have to relink with an explicit group size
4295 option. */
4296 stub_group_size = 4170000;
4297 }
4298
4299 group_sections (htab, stub_group_size, stubs_always_after_branch);
4300
4301 /* If we're applying the cortex A8 fix, we need to determine the
4302 program header size now, because we cannot change it later --
4303 that could alter section placements. Notice the A8 erratum fix
4304 ends up requiring the section addresses to remain unchanged
4305 modulo the page size. That's something we cannot represent
4306 inside BFD, and we don't want to force the section alignment to
4307 be the page size. */
4308 if (htab->fix_cortex_a8)
4309 (*htab->layout_sections_again) ();
4310
4311 while (1)
4312 {
4313 bfd *input_bfd;
4314 unsigned int bfd_indx;
4315 asection *stub_sec;
4316 bfd_boolean stub_changed = FALSE;
4317 unsigned prev_num_a8_fixes = num_a8_fixes;
4318
4319 num_a8_fixes = 0;
4320 for (input_bfd = info->input_bfds, bfd_indx = 0;
4321 input_bfd != NULL;
4322 input_bfd = input_bfd->link_next, bfd_indx++)
4323 {
4324 Elf_Internal_Shdr *symtab_hdr;
4325 asection *section;
4326 Elf_Internal_Sym *local_syms = NULL;
4327
4328 num_a8_relocs = 0;
4329
4330 /* We'll need the symbol table in a second. */
4331 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
4332 if (symtab_hdr->sh_info == 0)
4333 continue;
4334
4335 /* Walk over each section attached to the input bfd. */
4336 for (section = input_bfd->sections;
4337 section != NULL;
4338 section = section->next)
4339 {
4340 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
4341
4342 /* If there aren't any relocs, then there's nothing more
4343 to do. */
4344 if ((section->flags & SEC_RELOC) == 0
4345 || section->reloc_count == 0
4346 || (section->flags & SEC_CODE) == 0)
4347 continue;
4348
4349 /* If this section is a link-once section that will be
4350 discarded, then don't create any stubs. */
4351 if (section->output_section == NULL
4352 || section->output_section->owner != output_bfd)
4353 continue;
4354
4355 /* Get the relocs. */
4356 internal_relocs
4357 = _bfd_elf_link_read_relocs (input_bfd, section, NULL,
4358 NULL, info->keep_memory);
4359 if (internal_relocs == NULL)
4360 goto error_ret_free_local;
4361
4362 /* Now examine each relocation. */
4363 irela = internal_relocs;
4364 irelaend = irela + section->reloc_count;
4365 for (; irela < irelaend; irela++)
4366 {
4367 unsigned int r_type, r_indx;
4368 enum elf32_arm_stub_type stub_type;
4369 struct elf32_arm_stub_hash_entry *stub_entry;
4370 asection *sym_sec;
4371 bfd_vma sym_value;
4372 bfd_vma destination;
4373 struct elf32_arm_link_hash_entry *hash;
4374 const char *sym_name;
4375 char *stub_name;
4376 const asection *id_sec;
4377 unsigned char st_type;
4378 bfd_boolean created_stub = FALSE;
4379
4380 r_type = ELF32_R_TYPE (irela->r_info);
4381 r_indx = ELF32_R_SYM (irela->r_info);
4382
4383 if (r_type >= (unsigned int) R_ARM_max)
4384 {
4385 bfd_set_error (bfd_error_bad_value);
4386 error_ret_free_internal:
4387 if (elf_section_data (section)->relocs == NULL)
4388 free (internal_relocs);
4389 goto error_ret_free_local;
4390 }
4391
4392 /* Only look for stubs on branch instructions. */
4393 if ((r_type != (unsigned int) R_ARM_CALL)
4394 && (r_type != (unsigned int) R_ARM_THM_CALL)
4395 && (r_type != (unsigned int) R_ARM_JUMP24)
4396 && (r_type != (unsigned int) R_ARM_THM_JUMP19)
4397 && (r_type != (unsigned int) R_ARM_THM_XPC22)
4398 && (r_type != (unsigned int) R_ARM_THM_JUMP24)
4399 && (r_type != (unsigned int) R_ARM_PLT32))
4400 continue;
4401
4402 /* Now determine the call target, its name, value,
4403 section. */
4404 sym_sec = NULL;
4405 sym_value = 0;
4406 destination = 0;
4407 hash = NULL;
4408 sym_name = NULL;
4409 if (r_indx < symtab_hdr->sh_info)
4410 {
4411 /* It's a local symbol. */
4412 Elf_Internal_Sym *sym;
4413 Elf_Internal_Shdr *hdr;
4414
4415 if (local_syms == NULL)
4416 {
4417 local_syms
4418 = (Elf_Internal_Sym *) symtab_hdr->contents;
4419 if (local_syms == NULL)
4420 local_syms
4421 = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
4422 symtab_hdr->sh_info, 0,
4423 NULL, NULL, NULL);
4424 if (local_syms == NULL)
4425 goto error_ret_free_internal;
4426 }
4427
4428 sym = local_syms + r_indx;
4429 hdr = elf_elfsections (input_bfd)[sym->st_shndx];
4430 sym_sec = hdr->bfd_section;
4431 if (!sym_sec)
4432 /* This is an undefined symbol. It can never
4433 be resolved. */
4434 continue;
4435
4436 if (ELF_ST_TYPE (sym->st_info) != STT_SECTION)
4437 sym_value = sym->st_value;
4438 destination = (sym_value + irela->r_addend
4439 + sym_sec->output_offset
4440 + sym_sec->output_section->vma);
4441 st_type = ELF_ST_TYPE (sym->st_info);
4442 sym_name
4443 = bfd_elf_string_from_elf_section (input_bfd,
4444 symtab_hdr->sh_link,
4445 sym->st_name);
4446 }
4447 else
4448 {
4449 /* It's an external symbol. */
4450 int e_indx;
4451
4452 e_indx = r_indx - symtab_hdr->sh_info;
4453 hash = ((struct elf32_arm_link_hash_entry *)
4454 elf_sym_hashes (input_bfd)[e_indx]);
4455
4456 while (hash->root.root.type == bfd_link_hash_indirect
4457 || hash->root.root.type == bfd_link_hash_warning)
4458 hash = ((struct elf32_arm_link_hash_entry *)
4459 hash->root.root.u.i.link);
4460
4461 if (hash->root.root.type == bfd_link_hash_defined
4462 || hash->root.root.type == bfd_link_hash_defweak)
4463 {
4464 sym_sec = hash->root.root.u.def.section;
4465 sym_value = hash->root.root.u.def.value;
4466
4467 struct elf32_arm_link_hash_table *globals =
4468 elf32_arm_hash_table (info);
4469
4470 /* For a destination in a shared library,
4471 use the PLT stub as target address to
4472 decide whether a branch stub is
4473 needed. */
4474 if (globals != NULL
4475 && globals->splt != NULL
4476 && hash != NULL
4477 && hash->root.plt.offset != (bfd_vma) -1)
4478 {
4479 sym_sec = globals->splt;
4480 sym_value = hash->root.plt.offset;
4481 if (sym_sec->output_section != NULL)
4482 destination = (sym_value
4483 + sym_sec->output_offset
4484 + sym_sec->output_section->vma);
4485 }
4486 else if (sym_sec->output_section != NULL)
4487 destination = (sym_value + irela->r_addend
4488 + sym_sec->output_offset
4489 + sym_sec->output_section->vma);
4490 }
4491 else if ((hash->root.root.type == bfd_link_hash_undefined)
4492 || (hash->root.root.type == bfd_link_hash_undefweak))
4493 {
4494 /* For a shared library, use the PLT stub as
4495 target address to decide whether a long
4496 branch stub is needed.
4497 For absolute code, they cannot be handled. */
4498 struct elf32_arm_link_hash_table *globals =
4499 elf32_arm_hash_table (info);
4500
4501 if (globals != NULL
4502 && globals->splt != NULL
4503 && hash != NULL
4504 && hash->root.plt.offset != (bfd_vma) -1)
4505 {
4506 sym_sec = globals->splt;
4507 sym_value = hash->root.plt.offset;
4508 if (sym_sec->output_section != NULL)
4509 destination = (sym_value
4510 + sym_sec->output_offset
4511 + sym_sec->output_section->vma);
4512 }
4513 else
4514 continue;
4515 }
4516 else
4517 {
4518 bfd_set_error (bfd_error_bad_value);
4519 goto error_ret_free_internal;
4520 }
4521 st_type = ELF_ST_TYPE (hash->root.type);
4522 sym_name = hash->root.root.root.string;
4523 }
4524
4525 do
4526 {
4527 /* Determine what (if any) linker stub is needed. */
4528 stub_type = arm_type_of_stub (info, section, irela,
4529 st_type, hash,
4530 destination, sym_sec,
4531 input_bfd, sym_name);
4532 if (stub_type == arm_stub_none)
4533 break;
4534
4535 /* Support for grouping stub sections. */
4536 id_sec = htab->stub_group[section->id].link_sec;
4537
4538 /* Get the name of this stub. */
4539 stub_name = elf32_arm_stub_name (id_sec, sym_sec, hash,
4540 irela);
4541 if (!stub_name)
4542 goto error_ret_free_internal;
4543
4544 /* We've either created a stub for this reloc already,
4545 or we are about to. */
4546 created_stub = TRUE;
4547
4548 stub_entry = arm_stub_hash_lookup
4549 (&htab->stub_hash_table, stub_name,
4550 FALSE, FALSE);
4551 if (stub_entry != NULL)
4552 {
4553 /* The proper stub has already been created. */
4554 free (stub_name);
4555 stub_entry->target_value = sym_value;
4556 break;
4557 }
4558
4559 stub_entry = elf32_arm_add_stub (stub_name, section,
4560 htab);
4561 if (stub_entry == NULL)
4562 {
4563 free (stub_name);
4564 goto error_ret_free_internal;
4565 }
4566
4567 stub_entry->target_value = sym_value;
4568 stub_entry->target_section = sym_sec;
4569 stub_entry->stub_type = stub_type;
4570 stub_entry->h = hash;
4571 stub_entry->st_type = st_type;
4572
4573 if (sym_name == NULL)
4574 sym_name = "unnamed";
4575 stub_entry->output_name = (char *)
4576 bfd_alloc (htab->stub_bfd,
4577 sizeof (THUMB2ARM_GLUE_ENTRY_NAME)
4578 + strlen (sym_name));
4579 if (stub_entry->output_name == NULL)
4580 {
4581 free (stub_name);
4582 goto error_ret_free_internal;
4583 }
4584
4585 /* For historical reasons, use the existing names for
4586 ARM-to-Thumb and Thumb-to-ARM stubs. */
4587 if ( ((r_type == (unsigned int) R_ARM_THM_CALL)
4588 || (r_type == (unsigned int) R_ARM_THM_JUMP24))
4589 && st_type != STT_ARM_TFUNC)
4590 sprintf (stub_entry->output_name,
4591 THUMB2ARM_GLUE_ENTRY_NAME, sym_name);
4592 else if ( ((r_type == (unsigned int) R_ARM_CALL)
4593 || (r_type == (unsigned int) R_ARM_JUMP24))
4594 && st_type == STT_ARM_TFUNC)
4595 sprintf (stub_entry->output_name,
4596 ARM2THUMB_GLUE_ENTRY_NAME, sym_name);
4597 else
4598 sprintf (stub_entry->output_name, STUB_ENTRY_NAME,
4599 sym_name);
4600
4601 stub_changed = TRUE;
4602 }
4603 while (0);
4604
4605 /* Look for relocations which might trigger Cortex-A8
4606 erratum. */
4607 if (htab->fix_cortex_a8
4608 && (r_type == (unsigned int) R_ARM_THM_JUMP24
4609 || r_type == (unsigned int) R_ARM_THM_JUMP19
4610 || r_type == (unsigned int) R_ARM_THM_CALL
4611 || r_type == (unsigned int) R_ARM_THM_XPC22))
4612 {
4613 bfd_vma from = section->output_section->vma
4614 + section->output_offset
4615 + irela->r_offset;
4616
4617 if ((from & 0xfff) == 0xffe)
4618 {
4619 /* Found a candidate. Note we haven't checked the
4620 destination is within 4K here: if we do so (and
4621 don't create an entry in a8_relocs) we can't tell
4622 that a branch should have been relocated when
4623 scanning later. */
4624 if (num_a8_relocs == a8_reloc_table_size)
4625 {
4626 a8_reloc_table_size *= 2;
4627 a8_relocs = (struct a8_erratum_reloc *)
4628 bfd_realloc (a8_relocs,
4629 sizeof (struct a8_erratum_reloc)
4630 * a8_reloc_table_size);
4631 }
4632
4633 a8_relocs[num_a8_relocs].from = from;
4634 a8_relocs[num_a8_relocs].destination = destination;
4635 a8_relocs[num_a8_relocs].r_type = r_type;
4636 a8_relocs[num_a8_relocs].st_type = st_type;
4637 a8_relocs[num_a8_relocs].sym_name = sym_name;
4638 a8_relocs[num_a8_relocs].non_a8_stub = created_stub;
4639
4640 num_a8_relocs++;
4641 }
4642 }
4643 }
4644
4645 /* We're done with the internal relocs, free them. */
4646 if (elf_section_data (section)->relocs == NULL)
4647 free (internal_relocs);
4648 }
4649
4650 if (htab->fix_cortex_a8)
4651 {
4652 /* Sort relocs which might apply to Cortex-A8 erratum. */
4653 qsort (a8_relocs, num_a8_relocs,
4654 sizeof (struct a8_erratum_reloc),
4655 &a8_reloc_compare);
4656
4657 /* Scan for branches which might trigger Cortex-A8 erratum. */
4658 if (cortex_a8_erratum_scan (input_bfd, info, &a8_fixes,
4659 &num_a8_fixes, &a8_fix_table_size,
4660 a8_relocs, num_a8_relocs,
4661 prev_num_a8_fixes, &stub_changed)
4662 != 0)
4663 goto error_ret_free_local;
4664 }
4665 }
4666
4667 if (prev_num_a8_fixes != num_a8_fixes)
4668 stub_changed = TRUE;
4669
4670 if (!stub_changed)
4671 break;
4672
4673 /* OK, we've added some stubs. Find out the new size of the
4674 stub sections. */
4675 for (stub_sec = htab->stub_bfd->sections;
4676 stub_sec != NULL;
4677 stub_sec = stub_sec->next)
4678 {
4679 /* Ignore non-stub sections. */
4680 if (!strstr (stub_sec->name, STUB_SUFFIX))
4681 continue;
4682
4683 stub_sec->size = 0;
4684 }
4685
4686 bfd_hash_traverse (&htab->stub_hash_table, arm_size_one_stub, htab);
4687
4688 /* Add Cortex-A8 erratum veneers to stub section sizes too. */
4689 if (htab->fix_cortex_a8)
4690 for (i = 0; i < num_a8_fixes; i++)
4691 {
4692 stub_sec = elf32_arm_create_or_find_stub_sec (NULL,
4693 a8_fixes[i].section, htab);
4694
4695 if (stub_sec == NULL)
4696 goto error_ret_free_local;
4697
4698 stub_sec->size
4699 += find_stub_size_and_template (a8_fixes[i].stub_type, NULL,
4700 NULL);
4701 }
4702
4703
4704 /* Ask the linker to do its stuff. */
4705 (*htab->layout_sections_again) ();
4706 }
4707
4708 /* Add stubs for Cortex-A8 erratum fixes now. */
4709 if (htab->fix_cortex_a8)
4710 {
4711 for (i = 0; i < num_a8_fixes; i++)
4712 {
4713 struct elf32_arm_stub_hash_entry *stub_entry;
4714 char *stub_name = a8_fixes[i].stub_name;
4715 asection *section = a8_fixes[i].section;
4716 unsigned int section_id = a8_fixes[i].section->id;
4717 asection *link_sec = htab->stub_group[section_id].link_sec;
4718 asection *stub_sec = htab->stub_group[section_id].stub_sec;
4719 const insn_sequence *template_sequence;
4720 int template_size, size = 0;
4721
4722 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
4723 TRUE, FALSE);
4724 if (stub_entry == NULL)
4725 {
4726 (*_bfd_error_handler) (_("%s: cannot create stub entry %s"),
4727 section->owner,
4728 stub_name);
4729 return FALSE;
4730 }
4731
4732 stub_entry->stub_sec = stub_sec;
4733 stub_entry->stub_offset = 0;
4734 stub_entry->id_sec = link_sec;
4735 stub_entry->stub_type = a8_fixes[i].stub_type;
4736 stub_entry->target_section = a8_fixes[i].section;
4737 stub_entry->target_value = a8_fixes[i].offset;
4738 stub_entry->target_addend = a8_fixes[i].addend;
4739 stub_entry->orig_insn = a8_fixes[i].orig_insn;
4740 stub_entry->st_type = STT_ARM_TFUNC;
4741
4742 size = find_stub_size_and_template (a8_fixes[i].stub_type,
4743 &template_sequence,
4744 &template_size);
4745
4746 stub_entry->stub_size = size;
4747 stub_entry->stub_template = template_sequence;
4748 stub_entry->stub_template_size = template_size;
4749 }
4750
4751 /* Stash the Cortex-A8 erratum fix array for use later in
4752 elf32_arm_write_section(). */
4753 htab->a8_erratum_fixes = a8_fixes;
4754 htab->num_a8_erratum_fixes = num_a8_fixes;
4755 }
4756 else
4757 {
4758 htab->a8_erratum_fixes = NULL;
4759 htab->num_a8_erratum_fixes = 0;
4760 }
4761 return TRUE;
4762
4763 error_ret_free_local:
4764 return FALSE;
4765 }
4766
4767 /* Build all the stubs associated with the current output file. The
4768 stubs are kept in a hash table attached to the main linker hash
4769 table. We also set up the .plt entries for statically linked PIC
4770 functions here. This function is called via arm_elf_finish in the
4771 linker. */
4772
4773 bfd_boolean
4774 elf32_arm_build_stubs (struct bfd_link_info *info)
4775 {
4776 asection *stub_sec;
4777 struct bfd_hash_table *table;
4778 struct elf32_arm_link_hash_table *htab;
4779
4780 htab = elf32_arm_hash_table (info);
4781 if (htab == NULL)
4782 return FALSE;
4783
4784 for (stub_sec = htab->stub_bfd->sections;
4785 stub_sec != NULL;
4786 stub_sec = stub_sec->next)
4787 {
4788 bfd_size_type size;
4789
4790 /* Ignore non-stub sections. */
4791 if (!strstr (stub_sec->name, STUB_SUFFIX))
4792 continue;
4793
4794 /* Allocate memory to hold the linker stubs. */
4795 size = stub_sec->size;
4796 stub_sec->contents = (unsigned char *) bfd_zalloc (htab->stub_bfd, size);
4797 if (stub_sec->contents == NULL && size != 0)
4798 return FALSE;
4799 stub_sec->size = 0;
4800 }
4801
4802 /* Build the stubs as directed by the stub hash table. */
4803 table = &htab->stub_hash_table;
4804 bfd_hash_traverse (table, arm_build_one_stub, info);
4805 if (htab->fix_cortex_a8)
4806 {
4807 /* Place the cortex a8 stubs last. */
4808 htab->fix_cortex_a8 = -1;
4809 bfd_hash_traverse (table, arm_build_one_stub, info);
4810 }
4811
4812 return TRUE;
4813 }
4814
4815 /* Locate the Thumb encoded calling stub for NAME. */
4816
4817 static struct elf_link_hash_entry *
4818 find_thumb_glue (struct bfd_link_info *link_info,
4819 const char *name,
4820 char **error_message)
4821 {
4822 char *tmp_name;
4823 struct elf_link_hash_entry *hash;
4824 struct elf32_arm_link_hash_table *hash_table;
4825
4826 /* We need a pointer to the armelf specific hash table. */
4827 hash_table = elf32_arm_hash_table (link_info);
4828 if (hash_table == NULL)
4829 return NULL;
4830
4831 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
4832 + strlen (THUMB2ARM_GLUE_ENTRY_NAME) + 1);
4833
4834 BFD_ASSERT (tmp_name);
4835
4836 sprintf (tmp_name, THUMB2ARM_GLUE_ENTRY_NAME, name);
4837
4838 hash = elf_link_hash_lookup
4839 (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
4840
4841 if (hash == NULL
4842 && asprintf (error_message, _("unable to find THUMB glue '%s' for '%s'"),
4843 tmp_name, name) == -1)
4844 *error_message = (char *) bfd_errmsg (bfd_error_system_call);
4845
4846 free (tmp_name);
4847
4848 return hash;
4849 }
4850
4851 /* Locate the ARM encoded calling stub for NAME. */
4852
4853 static struct elf_link_hash_entry *
4854 find_arm_glue (struct bfd_link_info *link_info,
4855 const char *name,
4856 char **error_message)
4857 {
4858 char *tmp_name;
4859 struct elf_link_hash_entry *myh;
4860 struct elf32_arm_link_hash_table *hash_table;
4861
4862 /* We need a pointer to the elfarm specific hash table. */
4863 hash_table = elf32_arm_hash_table (link_info);
4864 if (hash_table == NULL)
4865 return NULL;
4866
4867 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
4868 + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
4869
4870 BFD_ASSERT (tmp_name);
4871
4872 sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);
4873
4874 myh = elf_link_hash_lookup
4875 (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
4876
4877 if (myh == NULL
4878 && asprintf (error_message, _("unable to find ARM glue '%s' for '%s'"),
4879 tmp_name, name) == -1)
4880 *error_message = (char *) bfd_errmsg (bfd_error_system_call);
4881
4882 free (tmp_name);
4883
4884 return myh;
4885 }
4886
4887 /* ARM->Thumb glue (static images):
4888
4889 .arm
4890 __func_from_arm:
4891 ldr r12, __func_addr
4892 bx r12
4893 __func_addr:
4894 .word func @ behave as if you saw a ARM_32 reloc.
4895
4896 (v5t static images)
4897 .arm
4898 __func_from_arm:
4899 ldr pc, __func_addr
4900 __func_addr:
4901 .word func @ behave as if you saw a ARM_32 reloc.
4902
4903 (relocatable images)
4904 .arm
4905 __func_from_arm:
4906 ldr r12, __func_offset
4907 add r12, r12, pc
4908 bx r12
4909 __func_offset:
4910 .word func - . */
4911
4912 #define ARM2THUMB_STATIC_GLUE_SIZE 12
4913 static const insn32 a2t1_ldr_insn = 0xe59fc000;
4914 static const insn32 a2t2_bx_r12_insn = 0xe12fff1c;
4915 static const insn32 a2t3_func_addr_insn = 0x00000001;
4916
4917 #define ARM2THUMB_V5_STATIC_GLUE_SIZE 8
4918 static const insn32 a2t1v5_ldr_insn = 0xe51ff004;
4919 static const insn32 a2t2v5_func_addr_insn = 0x00000001;
4920
4921 #define ARM2THUMB_PIC_GLUE_SIZE 16
4922 static const insn32 a2t1p_ldr_insn = 0xe59fc004;
4923 static const insn32 a2t2p_add_pc_insn = 0xe08cc00f;
4924 static const insn32 a2t3p_bx_r12_insn = 0xe12fff1c;
4925
4926 /* Thumb->ARM: Thumb->(non-interworking aware) ARM
4927
4928 .thumb .thumb
4929 .align 2 .align 2
4930 __func_from_thumb: __func_from_thumb:
4931 bx pc push {r6, lr}
4932 nop ldr r6, __func_addr
4933 .arm mov lr, pc
4934 b func bx r6
4935 .arm
4936 ;; back_to_thumb
4937 ldmia r13! {r6, lr}
4938 bx lr
4939 __func_addr:
4940 .word func */
4941
4942 #define THUMB2ARM_GLUE_SIZE 8
4943 static const insn16 t2a1_bx_pc_insn = 0x4778;
4944 static const insn16 t2a2_noop_insn = 0x46c0;
4945 static const insn32 t2a3_b_insn = 0xea000000;
4946
4947 #define VFP11_ERRATUM_VENEER_SIZE 8
4948
4949 #define ARM_BX_VENEER_SIZE 12
4950 static const insn32 armbx1_tst_insn = 0xe3100001;
4951 static const insn32 armbx2_moveq_insn = 0x01a0f000;
4952 static const insn32 armbx3_bx_insn = 0xe12fff10;
4953
4954 #ifndef ELFARM_NABI_C_INCLUDED
4955 static void
4956 arm_allocate_glue_section_space (bfd * abfd, bfd_size_type size, const char * name)
4957 {
4958 asection * s;
4959 bfd_byte * contents;
4960
4961 if (size == 0)
4962 {
4963 /* Do not include empty glue sections in the output. */
4964 if (abfd != NULL)
4965 {
4966 s = bfd_get_section_by_name (abfd, name);
4967 if (s != NULL)
4968 s->flags |= SEC_EXCLUDE;
4969 }
4970 return;
4971 }
4972
4973 BFD_ASSERT (abfd != NULL);
4974
4975 s = bfd_get_section_by_name (abfd, name);
4976 BFD_ASSERT (s != NULL);
4977
4978 contents = (bfd_byte *) bfd_alloc (abfd, size);
4979
4980 BFD_ASSERT (s->size == size);
4981 s->contents = contents;
4982 }
4983
4984 bfd_boolean
4985 bfd_elf32_arm_allocate_interworking_sections (struct bfd_link_info * info)
4986 {
4987 struct elf32_arm_link_hash_table * globals;
4988
4989 globals = elf32_arm_hash_table (info);
4990 BFD_ASSERT (globals != NULL);
4991
4992 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
4993 globals->arm_glue_size,
4994 ARM2THUMB_GLUE_SECTION_NAME);
4995
4996 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
4997 globals->thumb_glue_size,
4998 THUMB2ARM_GLUE_SECTION_NAME);
4999
5000 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
5001 globals->vfp11_erratum_glue_size,
5002 VFP11_ERRATUM_VENEER_SECTION_NAME);
5003
5004 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
5005 globals->bx_glue_size,
5006 ARM_BX_GLUE_SECTION_NAME);
5007
5008 return TRUE;
5009 }
5010
5011 /* Allocate space and symbols for calling a Thumb function from Arm mode.
5012 returns the symbol identifying the stub. */
5013
5014 static struct elf_link_hash_entry *
5015 record_arm_to_thumb_glue (struct bfd_link_info * link_info,
5016 struct elf_link_hash_entry * h)
5017 {
5018 const char * name = h->root.root.string;
5019 asection * s;
5020 char * tmp_name;
5021 struct elf_link_hash_entry * myh;
5022 struct bfd_link_hash_entry * bh;
5023 struct elf32_arm_link_hash_table * globals;
5024 bfd_vma val;
5025 bfd_size_type size;
5026
5027 globals = elf32_arm_hash_table (link_info);
5028 BFD_ASSERT (globals != NULL);
5029 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
5030
5031 s = bfd_get_section_by_name
5032 (globals->bfd_of_glue_owner, ARM2THUMB_GLUE_SECTION_NAME);
5033
5034 BFD_ASSERT (s != NULL);
5035
5036 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
5037 + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
5038
5039 BFD_ASSERT (tmp_name);
5040
5041 sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);
5042
5043 myh = elf_link_hash_lookup
5044 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
5045
5046 if (myh != NULL)
5047 {
5048 /* We've already seen this guy. */
5049 free (tmp_name);
5050 return myh;
5051 }
5052
5053 /* The only trick here is using hash_table->arm_glue_size as the value.
5054 Even though the section isn't allocated yet, this is where we will be
5055 putting it. The +1 on the value marks that the stub has not been
5056 output yet - not that it is a Thumb function. */
5057 bh = NULL;
5058 val = globals->arm_glue_size + 1;
5059 _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
5060 tmp_name, BSF_GLOBAL, s, val,
5061 NULL, TRUE, FALSE, &bh);
5062
5063 myh = (struct elf_link_hash_entry *) bh;
5064 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5065 myh->forced_local = 1;
5066
5067 free (tmp_name);
5068
5069 if (link_info->shared || globals->root.is_relocatable_executable
5070 || globals->pic_veneer)
5071 size = ARM2THUMB_PIC_GLUE_SIZE;
5072 else if (globals->use_blx)
5073 size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
5074 else
5075 size = ARM2THUMB_STATIC_GLUE_SIZE;
5076
5077 s->size += size;
5078 globals->arm_glue_size += size;
5079
5080 return myh;
5081 }
5082
5083 /* Allocate space for ARMv4 BX veneers. */
5084
5085 static void
5086 record_arm_bx_glue (struct bfd_link_info * link_info, int reg)
5087 {
5088 asection * s;
5089 struct elf32_arm_link_hash_table *globals;
5090 char *tmp_name;
5091 struct elf_link_hash_entry *myh;
5092 struct bfd_link_hash_entry *bh;
5093 bfd_vma val;
5094
5095 /* BX PC does not need a veneer. */
5096 if (reg == 15)
5097 return;
5098
5099 globals = elf32_arm_hash_table (link_info);
5100 BFD_ASSERT (globals != NULL);
5101 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
5102
5103 /* Check if this veneer has already been allocated. */
5104 if (globals->bx_glue_offset[reg])
5105 return;
5106
5107 s = bfd_get_section_by_name
5108 (globals->bfd_of_glue_owner, ARM_BX_GLUE_SECTION_NAME);
5109
5110 BFD_ASSERT (s != NULL);
5111
5112 /* Add symbol for veneer. */
5113 tmp_name = (char *)
5114 bfd_malloc ((bfd_size_type) strlen (ARM_BX_GLUE_ENTRY_NAME) + 1);
5115
5116 BFD_ASSERT (tmp_name);
5117
5118 sprintf (tmp_name, ARM_BX_GLUE_ENTRY_NAME, reg);
5119
5120 myh = elf_link_hash_lookup
5121 (&(globals)->root, tmp_name, FALSE, FALSE, FALSE);
5122
5123 BFD_ASSERT (myh == NULL);
5124
5125 bh = NULL;
5126 val = globals->bx_glue_size;
5127 _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
5128 tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
5129 NULL, TRUE, FALSE, &bh);
5130
5131 myh = (struct elf_link_hash_entry *) bh;
5132 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5133 myh->forced_local = 1;
5134
5135 s->size += ARM_BX_VENEER_SIZE;
5136 globals->bx_glue_offset[reg] = globals->bx_glue_size | 2;
5137 globals->bx_glue_size += ARM_BX_VENEER_SIZE;
5138 }
5139
5140
5141 /* Add an entry to the code/data map for section SEC. */
5142
5143 static void
5144 elf32_arm_section_map_add (asection *sec, char type, bfd_vma vma)
5145 {
5146 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
5147 unsigned int newidx;
5148
5149 if (sec_data->map == NULL)
5150 {
5151 sec_data->map = (elf32_arm_section_map *)
5152 bfd_malloc (sizeof (elf32_arm_section_map));
5153 sec_data->mapcount = 0;
5154 sec_data->mapsize = 1;
5155 }
5156
5157 newidx = sec_data->mapcount++;
5158
5159 if (sec_data->mapcount > sec_data->mapsize)
5160 {
5161 sec_data->mapsize *= 2;
5162 sec_data->map = (elf32_arm_section_map *)
5163 bfd_realloc_or_free (sec_data->map, sec_data->mapsize
5164 * sizeof (elf32_arm_section_map));
5165 }
5166
5167 if (sec_data->map)
5168 {
5169 sec_data->map[newidx].vma = vma;
5170 sec_data->map[newidx].type = type;
5171 }
5172 }
5173
5174
5175 /* Record information about a VFP11 denorm-erratum veneer. Only ARM-mode
5176 veneers are handled for now. */
5177
5178 static bfd_vma
5179 record_vfp11_erratum_veneer (struct bfd_link_info *link_info,
5180 elf32_vfp11_erratum_list *branch,
5181 bfd *branch_bfd,
5182 asection *branch_sec,
5183 unsigned int offset)
5184 {
5185 asection *s;
5186 struct elf32_arm_link_hash_table *hash_table;
5187 char *tmp_name;
5188 struct elf_link_hash_entry *myh;
5189 struct bfd_link_hash_entry *bh;
5190 bfd_vma val;
5191 struct _arm_elf_section_data *sec_data;
5192 int errcount;
5193 elf32_vfp11_erratum_list *newerr;
5194
5195 hash_table = elf32_arm_hash_table (link_info);
5196 BFD_ASSERT (hash_table != NULL);
5197 BFD_ASSERT (hash_table->bfd_of_glue_owner != NULL);
5198
5199 s = bfd_get_section_by_name
5200 (hash_table->bfd_of_glue_owner, VFP11_ERRATUM_VENEER_SECTION_NAME);
5201
5202 sec_data = elf32_arm_section_data (s);
5203
5204 BFD_ASSERT (s != NULL);
5205
5206 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
5207 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
5208
5209 BFD_ASSERT (tmp_name);
5210
5211 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
5212 hash_table->num_vfp11_fixes);
5213
5214 myh = elf_link_hash_lookup
5215 (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
5216
5217 BFD_ASSERT (myh == NULL);
5218
5219 bh = NULL;
5220 val = hash_table->vfp11_erratum_glue_size;
5221 _bfd_generic_link_add_one_symbol (link_info, hash_table->bfd_of_glue_owner,
5222 tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
5223 NULL, TRUE, FALSE, &bh);
5224
5225 myh = (struct elf_link_hash_entry *) bh;
5226 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5227 myh->forced_local = 1;
5228
5229 /* Link veneer back to calling location. */
5230 errcount = ++(sec_data->erratumcount);
5231 newerr = (elf32_vfp11_erratum_list *)
5232 bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
5233
5234 newerr->type = VFP11_ERRATUM_ARM_VENEER;
5235 newerr->vma = -1;
5236 newerr->u.v.branch = branch;
5237 newerr->u.v.id = hash_table->num_vfp11_fixes;
5238 branch->u.b.veneer = newerr;
5239
5240 newerr->next = sec_data->erratumlist;
5241 sec_data->erratumlist = newerr;
5242
5243 /* A symbol for the return from the veneer. */
5244 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
5245 hash_table->num_vfp11_fixes);
5246
5247 myh = elf_link_hash_lookup
5248 (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
5249
5250 if (myh != NULL)
5251 abort ();
5252
5253 bh = NULL;
5254 val = offset + 4;
5255 _bfd_generic_link_add_one_symbol (link_info, branch_bfd, tmp_name, BSF_LOCAL,
5256 branch_sec, val, NULL, TRUE, FALSE, &bh);
5257
5258 myh = (struct elf_link_hash_entry *) bh;
5259 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5260 myh->forced_local = 1;
5261
5262 free (tmp_name);
5263
5264 /* Generate a mapping symbol for the veneer section, and explicitly add an
5265 entry for that symbol to the code/data map for the section. */
5266 if (hash_table->vfp11_erratum_glue_size == 0)
5267 {
5268 bh = NULL;
5269 /* FIXME: Creates an ARM symbol. Thumb mode will need attention if it
5270 ever requires this erratum fix. */
5271 _bfd_generic_link_add_one_symbol (link_info,
5272 hash_table->bfd_of_glue_owner, "$a",
5273 BSF_LOCAL, s, 0, NULL,
5274 TRUE, FALSE, &bh);
5275
5276 myh = (struct elf_link_hash_entry *) bh;
5277 myh->type = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
5278 myh->forced_local = 1;
5279
5280 /* The elf32_arm_init_maps function only cares about symbols from input
5281 BFDs. We must make a note of this generated mapping symbol
5282 ourselves so that code byteswapping works properly in
5283 elf32_arm_write_section. */
5284 elf32_arm_section_map_add (s, 'a', 0);
5285 }
5286
5287 s->size += VFP11_ERRATUM_VENEER_SIZE;
5288 hash_table->vfp11_erratum_glue_size += VFP11_ERRATUM_VENEER_SIZE;
5289 hash_table->num_vfp11_fixes++;
5290
5291 /* The offset of the veneer. */
5292 return val;
5293 }
5294
5295 #define ARM_GLUE_SECTION_FLAGS \
5296 (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_CODE \
5297 | SEC_READONLY | SEC_LINKER_CREATED)
5298
5299 /* Create a fake section for use by the ARM backend of the linker. */
5300
5301 static bfd_boolean
5302 arm_make_glue_section (bfd * abfd, const char * name)
5303 {
5304 asection * sec;
5305
5306 sec = bfd_get_section_by_name (abfd, name);
5307 if (sec != NULL)
5308 /* Already made. */
5309 return TRUE;
5310
5311 sec = bfd_make_section_with_flags (abfd, name, ARM_GLUE_SECTION_FLAGS);
5312
5313 if (sec == NULL
5314 || !bfd_set_section_alignment (abfd, sec, 2))
5315 return FALSE;
5316
5317 /* Set the gc mark to prevent the section from being removed by garbage
5318 collection, despite the fact that no relocs refer to this section. */
5319 sec->gc_mark = 1;
5320
5321 return TRUE;
5322 }
5323
5324 /* Add the glue sections to ABFD. This function is called from the
5325 linker scripts in ld/emultempl/{armelf}.em. */
5326
5327 bfd_boolean
5328 bfd_elf32_arm_add_glue_sections_to_bfd (bfd *abfd,
5329 struct bfd_link_info *info)
5330 {
5331 /* If we are only performing a partial
5332 link do not bother adding the glue. */
5333 if (info->relocatable)
5334 return TRUE;
5335
5336 return arm_make_glue_section (abfd, ARM2THUMB_GLUE_SECTION_NAME)
5337 && arm_make_glue_section (abfd, THUMB2ARM_GLUE_SECTION_NAME)
5338 && arm_make_glue_section (abfd, VFP11_ERRATUM_VENEER_SECTION_NAME)
5339 && arm_make_glue_section (abfd, ARM_BX_GLUE_SECTION_NAME);
5340 }
5341
5342 /* Select a BFD to be used to hold the sections used by the glue code.
5343 This function is called from the linker scripts in ld/emultempl/
5344 {armelf/pe}.em. */
5345
5346 bfd_boolean
5347 bfd_elf32_arm_get_bfd_for_interworking (bfd *abfd, struct bfd_link_info *info)
5348 {
5349 struct elf32_arm_link_hash_table *globals;
5350
5351 /* If we are only performing a partial link
5352 do not bother getting a bfd to hold the glue. */
5353 if (info->relocatable)
5354 return TRUE;
5355
5356 /* Make sure we don't attach the glue sections to a dynamic object. */
5357 BFD_ASSERT (!(abfd->flags & DYNAMIC));
5358
5359 globals = elf32_arm_hash_table (info);
5360 BFD_ASSERT (globals != NULL);
5361
5362 if (globals->bfd_of_glue_owner != NULL)
5363 return TRUE;
5364
5365 /* Save the bfd for later use. */
5366 globals->bfd_of_glue_owner = abfd;
5367
5368 return TRUE;
5369 }
5370
5371 static void
5372 check_use_blx (struct elf32_arm_link_hash_table *globals)
5373 {
5374 if (bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
5375 Tag_CPU_arch) > 2)
5376 globals->use_blx = 1;
5377 }
5378
5379 bfd_boolean
5380 bfd_elf32_arm_process_before_allocation (bfd *abfd,
5381 struct bfd_link_info *link_info)
5382 {
5383 Elf_Internal_Shdr *symtab_hdr;
5384 Elf_Internal_Rela *internal_relocs = NULL;
5385 Elf_Internal_Rela *irel, *irelend;
5386 bfd_byte *contents = NULL;
5387
5388 asection *sec;
5389 struct elf32_arm_link_hash_table *globals;
5390
5391 /* If we are only performing a partial link do not bother
5392 to construct any glue. */
5393 if (link_info->relocatable)
5394 return TRUE;
5395
5396 /* Here we have a bfd that is to be included on the link. We have a
5397 hook to do reloc rummaging, before section sizes are nailed down. */
5398 globals = elf32_arm_hash_table (link_info);
5399 BFD_ASSERT (globals != NULL);
5400
5401 check_use_blx (globals);
5402
5403 if (globals->byteswap_code && !bfd_big_endian (abfd))
5404 {
5405 _bfd_error_handler (_("%B: BE8 images only valid in big-endian mode."),
5406 abfd);
5407 return FALSE;
5408 }
5409
5410 /* PR 5398: If we have not decided to include any loadable sections in
5411 the output then we will not have a glue owner bfd. This is OK, it
5412 just means that there is nothing else for us to do here. */
5413 if (globals->bfd_of_glue_owner == NULL)
5414 return TRUE;
5415
5416 /* Rummage around all the relocs and map the glue vectors. */
5417 sec = abfd->sections;
5418
5419 if (sec == NULL)
5420 return TRUE;
5421
5422 for (; sec != NULL; sec = sec->next)
5423 {
5424 if (sec->reloc_count == 0)
5425 continue;
5426
5427 if ((sec->flags & SEC_EXCLUDE) != 0)
5428 continue;
5429
5430 symtab_hdr = & elf_symtab_hdr (abfd);
5431
5432 /* Load the relocs. */
5433 internal_relocs
5434 = _bfd_elf_link_read_relocs (abfd, sec, NULL, NULL, FALSE);
5435
5436 if (internal_relocs == NULL)
5437 goto error_return;
5438
5439 irelend = internal_relocs + sec->reloc_count;
5440 for (irel = internal_relocs; irel < irelend; irel++)
5441 {
5442 long r_type;
5443 unsigned long r_index;
5444
5445 struct elf_link_hash_entry *h;
5446
5447 r_type = ELF32_R_TYPE (irel->r_info);
5448 r_index = ELF32_R_SYM (irel->r_info);
5449
5450 /* These are the only relocation types we care about. */
5451 if ( r_type != R_ARM_PC24
5452 && (r_type != R_ARM_V4BX || globals->fix_v4bx < 2))
5453 continue;
5454
5455 /* Get the section contents if we haven't done so already. */
5456 if (contents == NULL)
5457 {
5458 /* Get cached copy if it exists. */
5459 if (elf_section_data (sec)->this_hdr.contents != NULL)
5460 contents = elf_section_data (sec)->this_hdr.contents;
5461 else
5462 {
5463 /* Go get them off disk. */
5464 if (! bfd_malloc_and_get_section (abfd, sec, &contents))
5465 goto error_return;
5466 }
5467 }
5468
5469 if (r_type == R_ARM_V4BX)
5470 {
5471 int reg;
5472
5473 reg = bfd_get_32 (abfd, contents + irel->r_offset) & 0xf;
5474 record_arm_bx_glue (link_info, reg);
5475 continue;
5476 }
5477
5478 /* If the relocation is not against a symbol it cannot concern us. */
5479 h = NULL;
5480
5481 /* We don't care about local symbols. */
5482 if (r_index < symtab_hdr->sh_info)
5483 continue;
5484
5485 /* This is an external symbol. */
5486 r_index -= symtab_hdr->sh_info;
5487 h = (struct elf_link_hash_entry *)
5488 elf_sym_hashes (abfd)[r_index];
5489
5490 /* If the relocation is against a static symbol it must be within
5491 the current section and so cannot be a cross ARM/Thumb relocation. */
5492 if (h == NULL)
5493 continue;
5494
5495 /* If the call will go through a PLT entry then we do not need
5496 glue. */
5497 if (globals->splt != NULL && h->plt.offset != (bfd_vma) -1)
5498 continue;
5499
5500 switch (r_type)
5501 {
5502 case R_ARM_PC24:
5503 /* This one is a call from arm code. We need to look up
5504 the target of the call. If it is a thumb target, we
5505 insert glue. */
5506 if (ELF_ST_TYPE (h->type) == STT_ARM_TFUNC)
5507 record_arm_to_thumb_glue (link_info, h);
5508 break;
5509
5510 default:
5511 abort ();
5512 }
5513 }
5514
5515 if (contents != NULL
5516 && elf_section_data (sec)->this_hdr.contents != contents)
5517 free (contents);
5518 contents = NULL;
5519
5520 if (internal_relocs != NULL
5521 && elf_section_data (sec)->relocs != internal_relocs)
5522 free (internal_relocs);
5523 internal_relocs = NULL;
5524 }
5525
5526 return TRUE;
5527
5528 error_return:
5529 if (contents != NULL
5530 && elf_section_data (sec)->this_hdr.contents != contents)
5531 free (contents);
5532 if (internal_relocs != NULL
5533 && elf_section_data (sec)->relocs != internal_relocs)
5534 free (internal_relocs);
5535
5536 return FALSE;
5537 }
5538 #endif
5539
5540
5541 /* Initialise maps of ARM/Thumb/data for input BFDs. */
5542
5543 void
5544 bfd_elf32_arm_init_maps (bfd *abfd)
5545 {
5546 Elf_Internal_Sym *isymbuf;
5547 Elf_Internal_Shdr *hdr;
5548 unsigned int i, localsyms;
5549
5550 /* PR 7093: Make sure that we are dealing with an arm elf binary. */
5551 if (! is_arm_elf (abfd))
5552 return;
5553
5554 if ((abfd->flags & DYNAMIC) != 0)
5555 return;
5556
5557 hdr = & elf_symtab_hdr (abfd);
5558 localsyms = hdr->sh_info;
5559
5560 /* Obtain a buffer full of symbols for this BFD. The hdr->sh_info field
5561 should contain the number of local symbols, which should come before any
5562 global symbols. Mapping symbols are always local. */
5563 isymbuf = bfd_elf_get_elf_syms (abfd, hdr, localsyms, 0, NULL, NULL,
5564 NULL);
5565
5566 /* No internal symbols read? Skip this BFD. */
5567 if (isymbuf == NULL)
5568 return;
5569
5570 for (i = 0; i < localsyms; i++)
5571 {
5572 Elf_Internal_Sym *isym = &isymbuf[i];
5573 asection *sec = bfd_section_from_elf_index (abfd, isym->st_shndx);
5574 const char *name;
5575
5576 if (sec != NULL
5577 && ELF_ST_BIND (isym->st_info) == STB_LOCAL)
5578 {
5579 name = bfd_elf_string_from_elf_section (abfd,
5580 hdr->sh_link, isym->st_name);
5581
5582 if (bfd_is_arm_special_symbol_name (name,
5583 BFD_ARM_SPECIAL_SYM_TYPE_MAP))
5584 elf32_arm_section_map_add (sec, name[1], isym->st_value);
5585 }
5586 }
5587 }
5588
5589
5590 /* Auto-select enabling of Cortex-A8 erratum fix if the user didn't explicitly
5591 say what they wanted. */
5592
5593 void
5594 bfd_elf32_arm_set_cortex_a8_fix (bfd *obfd, struct bfd_link_info *link_info)
5595 {
5596 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
5597 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
5598
5599 if (globals == NULL)
5600 return;
5601
5602 if (globals->fix_cortex_a8 == -1)
5603 {
5604 /* Turn on Cortex-A8 erratum workaround for ARMv7-A. */
5605 if (out_attr[Tag_CPU_arch].i == TAG_CPU_ARCH_V7
5606 && (out_attr[Tag_CPU_arch_profile].i == 'A'
5607 || out_attr[Tag_CPU_arch_profile].i == 0))
5608 globals->fix_cortex_a8 = 1;
5609 else
5610 globals->fix_cortex_a8 = 0;
5611 }
5612 }
5613
5614
5615 void
5616 bfd_elf32_arm_set_vfp11_fix (bfd *obfd, struct bfd_link_info *link_info)
5617 {
5618 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
5619 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
5620
5621 if (globals == NULL)
5622 return;
5623 /* We assume that ARMv7+ does not need the VFP11 denorm erratum fix. */
5624 if (out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V7)
5625 {
5626 switch (globals->vfp11_fix)
5627 {
5628 case BFD_ARM_VFP11_FIX_DEFAULT:
5629 case BFD_ARM_VFP11_FIX_NONE:
5630 globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
5631 break;
5632
5633 default:
5634 /* Give a warning, but do as the user requests anyway. */
5635 (*_bfd_error_handler) (_("%B: warning: selected VFP11 erratum "
5636 "workaround is not necessary for target architecture"), obfd);
5637 }
5638 }
5639 else if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_DEFAULT)
5640 /* For earlier architectures, we might need the workaround, but do not
5641 enable it by default. If users is running with broken hardware, they
5642 must enable the erratum fix explicitly. */
5643 globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
5644 }
5645
5646
5647 enum bfd_arm_vfp11_pipe
5648 {
5649 VFP11_FMAC,
5650 VFP11_LS,
5651 VFP11_DS,
5652 VFP11_BAD
5653 };
5654
5655 /* Return a VFP register number. This is encoded as RX:X for single-precision
5656 registers, or X:RX for double-precision registers, where RX is the group of
5657 four bits in the instruction encoding and X is the single extension bit.
5658 RX and X fields are specified using their lowest (starting) bit. The return
5659 value is:
5660
5661 0...31: single-precision registers s0...s31
5662 32...63: double-precision registers d0...d31.
5663
5664 Although X should be zero for VFP11 (encoding d0...d15 only), we might
5665 encounter VFP3 instructions, so we allow the full range for DP registers. */
5666
5667 static unsigned int
5668 bfd_arm_vfp11_regno (unsigned int insn, bfd_boolean is_double, unsigned int rx,
5669 unsigned int x)
5670 {
5671 if (is_double)
5672 return (((insn >> rx) & 0xf) | (((insn >> x) & 1) << 4)) + 32;
5673 else
5674 return (((insn >> rx) & 0xf) << 1) | ((insn >> x) & 1);
5675 }
5676
5677 /* Set bits in *WMASK according to a register number REG as encoded by
5678 bfd_arm_vfp11_regno(). Ignore d16-d31. */
5679
5680 static void
5681 bfd_arm_vfp11_write_mask (unsigned int *wmask, unsigned int reg)
5682 {
5683 if (reg < 32)
5684 *wmask |= 1 << reg;
5685 else if (reg < 48)
5686 *wmask |= 3 << ((reg - 32) * 2);
5687 }
5688
5689 /* Return TRUE if WMASK overwrites anything in REGS. */
5690
5691 static bfd_boolean
5692 bfd_arm_vfp11_antidependency (unsigned int wmask, int *regs, int numregs)
5693 {
5694 int i;
5695
5696 for (i = 0; i < numregs; i++)
5697 {
5698 unsigned int reg = regs[i];
5699
5700 if (reg < 32 && (wmask & (1 << reg)) != 0)
5701 return TRUE;
5702
5703 reg -= 32;
5704
5705 if (reg >= 16)
5706 continue;
5707
5708 if ((wmask & (3 << (reg * 2))) != 0)
5709 return TRUE;
5710 }
5711
5712 return FALSE;
5713 }
5714
5715 /* In this function, we're interested in two things: finding input registers
5716 for VFP data-processing instructions, and finding the set of registers which
5717 arbitrary VFP instructions may write to. We use a 32-bit unsigned int to
5718 hold the written set, so FLDM etc. are easy to deal with (we're only
5719 interested in 32 SP registers or 16 dp registers, due to the VFP version
5720 implemented by the chip in question). DP registers are marked by setting
5721 both SP registers in the write mask). */
5722
5723 static enum bfd_arm_vfp11_pipe
5724 bfd_arm_vfp11_insn_decode (unsigned int insn, unsigned int *destmask, int *regs,
5725 int *numregs)
5726 {
5727 enum bfd_arm_vfp11_pipe vpipe = VFP11_BAD;
5728 bfd_boolean is_double = ((insn & 0xf00) == 0xb00) ? 1 : 0;
5729
5730 if ((insn & 0x0f000e10) == 0x0e000a00) /* A data-processing insn. */
5731 {
5732 unsigned int pqrs;
5733 unsigned int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
5734 unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);
5735
5736 pqrs = ((insn & 0x00800000) >> 20)
5737 | ((insn & 0x00300000) >> 19)
5738 | ((insn & 0x00000040) >> 6);
5739
5740 switch (pqrs)
5741 {
5742 case 0: /* fmac[sd]. */
5743 case 1: /* fnmac[sd]. */
5744 case 2: /* fmsc[sd]. */
5745 case 3: /* fnmsc[sd]. */
5746 vpipe = VFP11_FMAC;
5747 bfd_arm_vfp11_write_mask (destmask, fd);
5748 regs[0] = fd;
5749 regs[1] = bfd_arm_vfp11_regno (insn, is_double, 16, 7); /* Fn. */
5750 regs[2] = fm;
5751 *numregs = 3;
5752 break;
5753
5754 case 4: /* fmul[sd]. */
5755 case 5: /* fnmul[sd]. */
5756 case 6: /* fadd[sd]. */
5757 case 7: /* fsub[sd]. */
5758 vpipe = VFP11_FMAC;
5759 goto vfp_binop;
5760
5761 case 8: /* fdiv[sd]. */
5762 vpipe = VFP11_DS;
5763 vfp_binop:
5764 bfd_arm_vfp11_write_mask (destmask, fd);
5765 regs[0] = bfd_arm_vfp11_regno (insn, is_double, 16, 7); /* Fn. */
5766 regs[1] = fm;
5767 *numregs = 2;
5768 break;
5769
5770 case 15: /* extended opcode. */
5771 {
5772 unsigned int extn = ((insn >> 15) & 0x1e)
5773 | ((insn >> 7) & 1);
5774
5775 switch (extn)
5776 {
5777 case 0: /* fcpy[sd]. */
5778 case 1: /* fabs[sd]. */
5779 case 2: /* fneg[sd]. */
5780 case 8: /* fcmp[sd]. */
5781 case 9: /* fcmpe[sd]. */
5782 case 10: /* fcmpz[sd]. */
5783 case 11: /* fcmpez[sd]. */
5784 case 16: /* fuito[sd]. */
5785 case 17: /* fsito[sd]. */
5786 case 24: /* ftoui[sd]. */
5787 case 25: /* ftouiz[sd]. */
5788 case 26: /* ftosi[sd]. */
5789 case 27: /* ftosiz[sd]. */
5790 /* These instructions will not bounce due to underflow. */
5791 *numregs = 0;
5792 vpipe = VFP11_FMAC;
5793 break;
5794
5795 case 3: /* fsqrt[sd]. */
5796 /* fsqrt cannot underflow, but it can (perhaps) overwrite
5797 registers to cause the erratum in previous instructions. */
5798 bfd_arm_vfp11_write_mask (destmask, fd);
5799 vpipe = VFP11_DS;
5800 break;
5801
5802 case 15: /* fcvt{ds,sd}. */
5803 {
5804 int rnum = 0;
5805
5806 bfd_arm_vfp11_write_mask (destmask, fd);
5807
5808 /* Only FCVTSD can underflow. */
5809 if ((insn & 0x100) != 0)
5810 regs[rnum++] = fm;
5811
5812 *numregs = rnum;
5813
5814 vpipe = VFP11_FMAC;
5815 }
5816 break;
5817
5818 default:
5819 return VFP11_BAD;
5820 }
5821 }
5822 break;
5823
5824 default:
5825 return VFP11_BAD;
5826 }
5827 }
5828 /* Two-register transfer. */
5829 else if ((insn & 0x0fe00ed0) == 0x0c400a10)
5830 {
5831 unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);
5832
5833 if ((insn & 0x100000) == 0)
5834 {
5835 if (is_double)
5836 bfd_arm_vfp11_write_mask (destmask, fm);
5837 else
5838 {
5839 bfd_arm_vfp11_write_mask (destmask, fm);
5840 bfd_arm_vfp11_write_mask (destmask, fm + 1);
5841 }
5842 }
5843
5844 vpipe = VFP11_LS;
5845 }
5846 else if ((insn & 0x0e100e00) == 0x0c100a00) /* A load insn. */
5847 {
5848 int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
5849 unsigned int puw = ((insn >> 21) & 0x1) | (((insn >> 23) & 3) << 1);
5850
5851 switch (puw)
5852 {
5853 case 0: /* Two-reg transfer. We should catch these above. */
5854 abort ();
5855
5856 case 2: /* fldm[sdx]. */
5857 case 3:
5858 case 5:
5859 {
5860 unsigned int i, offset = insn & 0xff;
5861
5862 if (is_double)
5863 offset >>= 1;
5864
5865 for (i = fd; i < fd + offset; i++)
5866 bfd_arm_vfp11_write_mask (destmask, i);
5867 }
5868 break;
5869
5870 case 4: /* fld[sd]. */
5871 case 6:
5872 bfd_arm_vfp11_write_mask (destmask, fd);
5873 break;
5874
5875 default:
5876 return VFP11_BAD;
5877 }
5878
5879 vpipe = VFP11_LS;
5880 }
5881 /* Single-register transfer. Note L==0. */
5882 else if ((insn & 0x0f100e10) == 0x0e000a10)
5883 {
5884 unsigned int opcode = (insn >> 21) & 7;
5885 unsigned int fn = bfd_arm_vfp11_regno (insn, is_double, 16, 7);
5886
5887 switch (opcode)
5888 {
5889 case 0: /* fmsr/fmdlr. */
5890 case 1: /* fmdhr. */
5891 /* Mark fmdhr and fmdlr as writing to the whole of the DP
5892 destination register. I don't know if this is exactly right,
5893 but it is the conservative choice. */
5894 bfd_arm_vfp11_write_mask (destmask, fn);
5895 break;
5896
5897 case 7: /* fmxr. */
5898 break;
5899 }
5900
5901 vpipe = VFP11_LS;
5902 }
5903
5904 return vpipe;
5905 }
5906
5907
5908 static int elf32_arm_compare_mapping (const void * a, const void * b);
5909
5910
5911 /* Look for potentially-troublesome code sequences which might trigger the
5912 VFP11 denormal/antidependency erratum. See, e.g., the ARM1136 errata sheet
5913 (available from ARM) for details of the erratum. A short version is
5914 described in ld.texinfo. */
5915
5916 bfd_boolean
5917 bfd_elf32_arm_vfp11_erratum_scan (bfd *abfd, struct bfd_link_info *link_info)
5918 {
5919 asection *sec;
5920 bfd_byte *contents = NULL;
5921 int state = 0;
5922 int regs[3], numregs = 0;
5923 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
5924 int use_vector = (globals->vfp11_fix == BFD_ARM_VFP11_FIX_VECTOR);
5925
5926 if (globals == NULL)
5927 return FALSE;
5928
5929 /* We use a simple FSM to match troublesome VFP11 instruction sequences.
5930 The states transition as follows:
5931
5932 0 -> 1 (vector) or 0 -> 2 (scalar)
5933 A VFP FMAC-pipeline instruction has been seen. Fill
5934 regs[0]..regs[numregs-1] with its input operands. Remember this
5935 instruction in 'first_fmac'.
5936
5937 1 -> 2
5938 Any instruction, except for a VFP instruction which overwrites
5939 regs[*].
5940
5941 1 -> 3 [ -> 0 ] or
5942 2 -> 3 [ -> 0 ]
5943 A VFP instruction has been seen which overwrites any of regs[*].
5944 We must make a veneer! Reset state to 0 before examining next
5945 instruction.
5946
5947 2 -> 0
5948 If we fail to match anything in state 2, reset to state 0 and reset
5949 the instruction pointer to the instruction after 'first_fmac'.
5950
5951 If the VFP11 vector mode is in use, there must be at least two unrelated
5952 instructions between anti-dependent VFP11 instructions to properly avoid
5953 triggering the erratum, hence the use of the extra state 1. */
5954
5955 /* If we are only performing a partial link do not bother
5956 to construct any glue. */
5957 if (link_info->relocatable)
5958 return TRUE;
5959
5960 /* Skip if this bfd does not correspond to an ELF image. */
5961 if (! is_arm_elf (abfd))
5962 return TRUE;
5963
5964 /* We should have chosen a fix type by the time we get here. */
5965 BFD_ASSERT (globals->vfp11_fix != BFD_ARM_VFP11_FIX_DEFAULT);
5966
5967 if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_NONE)
5968 return TRUE;
5969
5970 /* Skip this BFD if it corresponds to an executable or dynamic object. */
5971 if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
5972 return TRUE;
5973
5974 for (sec = abfd->sections; sec != NULL; sec = sec->next)
5975 {
5976 unsigned int i, span, first_fmac = 0, veneer_of_insn = 0;
5977 struct _arm_elf_section_data *sec_data;
5978
5979 /* If we don't have executable progbits, we're not interested in this
5980 section. Also skip if section is to be excluded. */
5981 if (elf_section_type (sec) != SHT_PROGBITS
5982 || (elf_section_flags (sec) & SHF_EXECINSTR) == 0
5983 || (sec->flags & SEC_EXCLUDE) != 0
5984 || sec->sec_info_type == ELF_INFO_TYPE_JUST_SYMS
5985 || sec->output_section == bfd_abs_section_ptr
5986 || strcmp (sec->name, VFP11_ERRATUM_VENEER_SECTION_NAME) == 0)
5987 continue;
5988
5989 sec_data = elf32_arm_section_data (sec);
5990
5991 if (sec_data->mapcount == 0)
5992 continue;
5993
5994 if (elf_section_data (sec)->this_hdr.contents != NULL)
5995 contents = elf_section_data (sec)->this_hdr.contents;
5996 else if (! bfd_malloc_and_get_section (abfd, sec, &contents))
5997 goto error_return;
5998
5999 qsort (sec_data->map, sec_data->mapcount, sizeof (elf32_arm_section_map),
6000 elf32_arm_compare_mapping);
6001
6002 for (span = 0; span < sec_data->mapcount; span++)
6003 {
6004 unsigned int span_start = sec_data->map[span].vma;
6005 unsigned int span_end = (span == sec_data->mapcount - 1)
6006 ? sec->size : sec_data->map[span + 1].vma;
6007 char span_type = sec_data->map[span].type;
6008
6009 /* FIXME: Only ARM mode is supported at present. We may need to
6010 support Thumb-2 mode also at some point. */
6011 if (span_type != 'a')
6012 continue;
6013
6014 for (i = span_start; i < span_end;)
6015 {
6016 unsigned int next_i = i + 4;
6017 unsigned int insn = bfd_big_endian (abfd)
6018 ? (contents[i] << 24)
6019 | (contents[i + 1] << 16)
6020 | (contents[i + 2] << 8)
6021 | contents[i + 3]
6022 : (contents[i + 3] << 24)
6023 | (contents[i + 2] << 16)
6024 | (contents[i + 1] << 8)
6025 | contents[i];
6026 unsigned int writemask = 0;
6027 enum bfd_arm_vfp11_pipe vpipe;
6028
6029 switch (state)
6030 {
6031 case 0:
6032 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask, regs,
6033 &numregs);
6034 /* I'm assuming the VFP11 erratum can trigger with denorm
6035 operands on either the FMAC or the DS pipeline. This might
6036 lead to slightly overenthusiastic veneer insertion. */
6037 if (vpipe == VFP11_FMAC || vpipe == VFP11_DS)
6038 {
6039 state = use_vector ? 1 : 2;
6040 first_fmac = i;
6041 veneer_of_insn = insn;
6042 }
6043 break;
6044
6045 case 1:
6046 {
6047 int other_regs[3], other_numregs;
6048 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
6049 other_regs,
6050 &other_numregs);
6051 if (vpipe != VFP11_BAD
6052 && bfd_arm_vfp11_antidependency (writemask, regs,
6053 numregs))
6054 state = 3;
6055 else
6056 state = 2;
6057 }
6058 break;
6059
6060 case 2:
6061 {
6062 int other_regs[3], other_numregs;
6063 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
6064 other_regs,
6065 &other_numregs);
6066 if (vpipe != VFP11_BAD
6067 && bfd_arm_vfp11_antidependency (writemask, regs,
6068 numregs))
6069 state = 3;
6070 else
6071 {
6072 state = 0;
6073 next_i = first_fmac + 4;
6074 }
6075 }
6076 break;
6077
6078 case 3:
6079 abort (); /* Should be unreachable. */
6080 }
6081
6082 if (state == 3)
6083 {
6084 elf32_vfp11_erratum_list *newerr =(elf32_vfp11_erratum_list *)
6085 bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
6086 int errcount;
6087
6088 errcount = ++(elf32_arm_section_data (sec)->erratumcount);
6089
6090 newerr->u.b.vfp_insn = veneer_of_insn;
6091
6092 switch (span_type)
6093 {
6094 case 'a':
6095 newerr->type = VFP11_ERRATUM_BRANCH_TO_ARM_VENEER;
6096 break;
6097
6098 default:
6099 abort ();
6100 }
6101
6102 record_vfp11_erratum_veneer (link_info, newerr, abfd, sec,
6103 first_fmac);
6104
6105 newerr->vma = -1;
6106
6107 newerr->next = sec_data->erratumlist;
6108 sec_data->erratumlist = newerr;
6109
6110 state = 0;
6111 }
6112
6113 i = next_i;
6114 }
6115 }
6116
6117 if (contents != NULL
6118 && elf_section_data (sec)->this_hdr.contents != contents)
6119 free (contents);
6120 contents = NULL;
6121 }
6122
6123 return TRUE;
6124
6125 error_return:
6126 if (contents != NULL
6127 && elf_section_data (sec)->this_hdr.contents != contents)
6128 free (contents);
6129
6130 return FALSE;
6131 }
6132
6133 /* Find virtual-memory addresses for VFP11 erratum veneers and return locations
6134 after sections have been laid out, using specially-named symbols. */
6135
6136 void
6137 bfd_elf32_arm_vfp11_fix_veneer_locations (bfd *abfd,
6138 struct bfd_link_info *link_info)
6139 {
6140 asection *sec;
6141 struct elf32_arm_link_hash_table *globals;
6142 char *tmp_name;
6143
6144 if (link_info->relocatable)
6145 return;
6146
6147 /* Skip if this bfd does not correspond to an ELF image. */
6148 if (! is_arm_elf (abfd))
6149 return;
6150
6151 globals = elf32_arm_hash_table (link_info);
6152 if (globals == NULL)
6153 return;
6154
6155 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
6156 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
6157
6158 for (sec = abfd->sections; sec != NULL; sec = sec->next)
6159 {
6160 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
6161 elf32_vfp11_erratum_list *errnode = sec_data->erratumlist;
6162
6163 for (; errnode != NULL; errnode = errnode->next)
6164 {
6165 struct elf_link_hash_entry *myh;
6166 bfd_vma vma;
6167
6168 switch (errnode->type)
6169 {
6170 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
6171 case VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER:
6172 /* Find veneer symbol. */
6173 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
6174 errnode->u.b.veneer->u.v.id);
6175
6176 myh = elf_link_hash_lookup
6177 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
6178
6179 if (myh == NULL)
6180 (*_bfd_error_handler) (_("%B: unable to find VFP11 veneer "
6181 "`%s'"), abfd, tmp_name);
6182
6183 vma = myh->root.u.def.section->output_section->vma
6184 + myh->root.u.def.section->output_offset
6185 + myh->root.u.def.value;
6186
6187 errnode->u.b.veneer->vma = vma;
6188 break;
6189
6190 case VFP11_ERRATUM_ARM_VENEER:
6191 case VFP11_ERRATUM_THUMB_VENEER:
6192 /* Find return location. */
6193 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
6194 errnode->u.v.id);
6195
6196 myh = elf_link_hash_lookup
6197 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
6198
6199 if (myh == NULL)
6200 (*_bfd_error_handler) (_("%B: unable to find VFP11 veneer "
6201 "`%s'"), abfd, tmp_name);
6202
6203 vma = myh->root.u.def.section->output_section->vma
6204 + myh->root.u.def.section->output_offset
6205 + myh->root.u.def.value;
6206
6207 errnode->u.v.branch->vma = vma;
6208 break;
6209
6210 default:
6211 abort ();
6212 }
6213 }
6214 }
6215
6216 free (tmp_name);
6217 }
6218
6219
6220 /* Set target relocation values needed during linking. */
6221
6222 void
6223 bfd_elf32_arm_set_target_relocs (struct bfd *output_bfd,
6224 struct bfd_link_info *link_info,
6225 int target1_is_rel,
6226 char * target2_type,
6227 int fix_v4bx,
6228 int use_blx,
6229 bfd_arm_vfp11_fix vfp11_fix,
6230 int no_enum_warn, int no_wchar_warn,
6231 int pic_veneer, int fix_cortex_a8)
6232 {
6233 struct elf32_arm_link_hash_table *globals;
6234
6235 globals = elf32_arm_hash_table (link_info);
6236 if (globals == NULL)
6237 return;
6238
6239 globals->target1_is_rel = target1_is_rel;
6240 if (strcmp (target2_type, "rel") == 0)
6241 globals->target2_reloc = R_ARM_REL32;
6242 else if (strcmp (target2_type, "abs") == 0)
6243 globals->target2_reloc = R_ARM_ABS32;
6244 else if (strcmp (target2_type, "got-rel") == 0)
6245 globals->target2_reloc = R_ARM_GOT_PREL;
6246 else
6247 {
6248 _bfd_error_handler (_("Invalid TARGET2 relocation type '%s'."),
6249 target2_type);
6250 }
6251 globals->fix_v4bx = fix_v4bx;
6252 globals->use_blx |= use_blx;
6253 globals->vfp11_fix = vfp11_fix;
6254 globals->pic_veneer = pic_veneer;
6255 globals->fix_cortex_a8 = fix_cortex_a8;
6256
6257 BFD_ASSERT (is_arm_elf (output_bfd));
6258 elf_arm_tdata (output_bfd)->no_enum_size_warning = no_enum_warn;
6259 elf_arm_tdata (output_bfd)->no_wchar_size_warning = no_wchar_warn;
6260 }
6261
6262 /* Replace the target offset of a Thumb bl or b.w instruction. */
6263
6264 static void
6265 insert_thumb_branch (bfd *abfd, long int offset, bfd_byte *insn)
6266 {
6267 bfd_vma upper;
6268 bfd_vma lower;
6269 int reloc_sign;
6270
6271 BFD_ASSERT ((offset & 1) == 0);
6272
6273 upper = bfd_get_16 (abfd, insn);
6274 lower = bfd_get_16 (abfd, insn + 2);
6275 reloc_sign = (offset < 0) ? 1 : 0;
6276 upper = (upper & ~(bfd_vma) 0x7ff)
6277 | ((offset >> 12) & 0x3ff)
6278 | (reloc_sign << 10);
6279 lower = (lower & ~(bfd_vma) 0x2fff)
6280 | (((!((offset >> 23) & 1)) ^ reloc_sign) << 13)
6281 | (((!((offset >> 22) & 1)) ^ reloc_sign) << 11)
6282 | ((offset >> 1) & 0x7ff);
6283 bfd_put_16 (abfd, upper, insn);
6284 bfd_put_16 (abfd, lower, insn + 2);
6285 }
6286
6287 /* Thumb code calling an ARM function. */
6288
6289 static int
6290 elf32_thumb_to_arm_stub (struct bfd_link_info * info,
6291 const char * name,
6292 bfd * input_bfd,
6293 bfd * output_bfd,
6294 asection * input_section,
6295 bfd_byte * hit_data,
6296 asection * sym_sec,
6297 bfd_vma offset,
6298 bfd_signed_vma addend,
6299 bfd_vma val,
6300 char **error_message)
6301 {
6302 asection * s = 0;
6303 bfd_vma my_offset;
6304 long int ret_offset;
6305 struct elf_link_hash_entry * myh;
6306 struct elf32_arm_link_hash_table * globals;
6307
6308 myh = find_thumb_glue (info, name, error_message);
6309 if (myh == NULL)
6310 return FALSE;
6311
6312 globals = elf32_arm_hash_table (info);
6313 BFD_ASSERT (globals != NULL);
6314 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6315
6316 my_offset = myh->root.u.def.value;
6317
6318 s = bfd_get_section_by_name (globals->bfd_of_glue_owner,
6319 THUMB2ARM_GLUE_SECTION_NAME);
6320
6321 BFD_ASSERT (s != NULL);
6322 BFD_ASSERT (s->contents != NULL);
6323 BFD_ASSERT (s->output_section != NULL);
6324
6325 if ((my_offset & 0x01) == 0x01)
6326 {
6327 if (sym_sec != NULL
6328 && sym_sec->owner != NULL
6329 && !INTERWORK_FLAG (sym_sec->owner))
6330 {
6331 (*_bfd_error_handler)
6332 (_("%B(%s): warning: interworking not enabled.\n"
6333 " first occurrence: %B: thumb call to arm"),
6334 sym_sec->owner, input_bfd, name);
6335
6336 return FALSE;
6337 }
6338
6339 --my_offset;
6340 myh->root.u.def.value = my_offset;
6341
6342 put_thumb_insn (globals, output_bfd, (bfd_vma) t2a1_bx_pc_insn,
6343 s->contents + my_offset);
6344
6345 put_thumb_insn (globals, output_bfd, (bfd_vma) t2a2_noop_insn,
6346 s->contents + my_offset + 2);
6347
6348 ret_offset =
6349 /* Address of destination of the stub. */
6350 ((bfd_signed_vma) val)
6351 - ((bfd_signed_vma)
6352 /* Offset from the start of the current section
6353 to the start of the stubs. */
6354 (s->output_offset
6355 /* Offset of the start of this stub from the start of the stubs. */
6356 + my_offset
6357 /* Address of the start of the current section. */
6358 + s->output_section->vma)
6359 /* The branch instruction is 4 bytes into the stub. */
6360 + 4
6361 /* ARM branches work from the pc of the instruction + 8. */
6362 + 8);
6363
6364 put_arm_insn (globals, output_bfd,
6365 (bfd_vma) t2a3_b_insn | ((ret_offset >> 2) & 0x00FFFFFF),
6366 s->contents + my_offset + 4);
6367 }
6368
6369 BFD_ASSERT (my_offset <= globals->thumb_glue_size);
6370
6371 /* Now go back and fix up the original BL insn to point to here. */
6372 ret_offset =
6373 /* Address of where the stub is located. */
6374 (s->output_section->vma + s->output_offset + my_offset)
6375 /* Address of where the BL is located. */
6376 - (input_section->output_section->vma + input_section->output_offset
6377 + offset)
6378 /* Addend in the relocation. */
6379 - addend
6380 /* Biassing for PC-relative addressing. */
6381 - 8;
6382
6383 insert_thumb_branch (input_bfd, ret_offset, hit_data - input_section->vma);
6384
6385 return TRUE;
6386 }
6387
6388 /* Populate an Arm to Thumb stub. Returns the stub symbol. */
6389
6390 static struct elf_link_hash_entry *
6391 elf32_arm_create_thumb_stub (struct bfd_link_info * info,
6392 const char * name,
6393 bfd * input_bfd,
6394 bfd * output_bfd,
6395 asection * sym_sec,
6396 bfd_vma val,
6397 asection * s,
6398 char ** error_message)
6399 {
6400 bfd_vma my_offset;
6401 long int ret_offset;
6402 struct elf_link_hash_entry * myh;
6403 struct elf32_arm_link_hash_table * globals;
6404
6405 myh = find_arm_glue (info, name, error_message);
6406 if (myh == NULL)
6407 return NULL;
6408
6409 globals = elf32_arm_hash_table (info);
6410 BFD_ASSERT (globals != NULL);
6411 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6412
6413 my_offset = myh->root.u.def.value;
6414
6415 if ((my_offset & 0x01) == 0x01)
6416 {
6417 if (sym_sec != NULL
6418 && sym_sec->owner != NULL
6419 && !INTERWORK_FLAG (sym_sec->owner))
6420 {
6421 (*_bfd_error_handler)
6422 (_("%B(%s): warning: interworking not enabled.\n"
6423 " first occurrence: %B: arm call to thumb"),
6424 sym_sec->owner, input_bfd, name);
6425 }
6426
6427 --my_offset;
6428 myh->root.u.def.value = my_offset;
6429
6430 if (info->shared || globals->root.is_relocatable_executable
6431 || globals->pic_veneer)
6432 {
6433 /* For relocatable objects we can't use absolute addresses,
6434 so construct the address from a relative offset. */
6435 /* TODO: If the offset is small it's probably worth
6436 constructing the address with adds. */
6437 put_arm_insn (globals, output_bfd, (bfd_vma) a2t1p_ldr_insn,
6438 s->contents + my_offset);
6439 put_arm_insn (globals, output_bfd, (bfd_vma) a2t2p_add_pc_insn,
6440 s->contents + my_offset + 4);
6441 put_arm_insn (globals, output_bfd, (bfd_vma) a2t3p_bx_r12_insn,
6442 s->contents + my_offset + 8);
6443 /* Adjust the offset by 4 for the position of the add,
6444 and 8 for the pipeline offset. */
6445 ret_offset = (val - (s->output_offset
6446 + s->output_section->vma
6447 + my_offset + 12))
6448 | 1;
6449 bfd_put_32 (output_bfd, ret_offset,
6450 s->contents + my_offset + 12);
6451 }
6452 else if (globals->use_blx)
6453 {
6454 put_arm_insn (globals, output_bfd, (bfd_vma) a2t1v5_ldr_insn,
6455 s->contents + my_offset);
6456
6457 /* It's a thumb address. Add the low order bit. */
6458 bfd_put_32 (output_bfd, val | a2t2v5_func_addr_insn,
6459 s->contents + my_offset + 4);
6460 }
6461 else
6462 {
6463 put_arm_insn (globals, output_bfd, (bfd_vma) a2t1_ldr_insn,
6464 s->contents + my_offset);
6465
6466 put_arm_insn (globals, output_bfd, (bfd_vma) a2t2_bx_r12_insn,
6467 s->contents + my_offset + 4);
6468
6469 /* It's a thumb address. Add the low order bit. */
6470 bfd_put_32 (output_bfd, val | a2t3_func_addr_insn,
6471 s->contents + my_offset + 8);
6472
6473 my_offset += 12;
6474 }
6475 }
6476
6477 BFD_ASSERT (my_offset <= globals->arm_glue_size);
6478
6479 return myh;
6480 }
6481
6482 /* Arm code calling a Thumb function. */
6483
6484 static int
6485 elf32_arm_to_thumb_stub (struct bfd_link_info * info,
6486 const char * name,
6487 bfd * input_bfd,
6488 bfd * output_bfd,
6489 asection * input_section,
6490 bfd_byte * hit_data,
6491 asection * sym_sec,
6492 bfd_vma offset,
6493 bfd_signed_vma addend,
6494 bfd_vma val,
6495 char **error_message)
6496 {
6497 unsigned long int tmp;
6498 bfd_vma my_offset;
6499 asection * s;
6500 long int ret_offset;
6501 struct elf_link_hash_entry * myh;
6502 struct elf32_arm_link_hash_table * globals;
6503
6504 globals = elf32_arm_hash_table (info);
6505 BFD_ASSERT (globals != NULL);
6506 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6507
6508 s = bfd_get_section_by_name (globals->bfd_of_glue_owner,
6509 ARM2THUMB_GLUE_SECTION_NAME);
6510 BFD_ASSERT (s != NULL);
6511 BFD_ASSERT (s->contents != NULL);
6512 BFD_ASSERT (s->output_section != NULL);
6513
6514 myh = elf32_arm_create_thumb_stub (info, name, input_bfd, output_bfd,
6515 sym_sec, val, s, error_message);
6516 if (!myh)
6517 return FALSE;
6518
6519 my_offset = myh->root.u.def.value;
6520 tmp = bfd_get_32 (input_bfd, hit_data);
6521 tmp = tmp & 0xFF000000;
6522
6523 /* Somehow these are both 4 too far, so subtract 8. */
6524 ret_offset = (s->output_offset
6525 + my_offset
6526 + s->output_section->vma
6527 - (input_section->output_offset
6528 + input_section->output_section->vma
6529 + offset + addend)
6530 - 8);
6531
6532 tmp = tmp | ((ret_offset >> 2) & 0x00FFFFFF);
6533
6534 bfd_put_32 (output_bfd, (bfd_vma) tmp, hit_data - input_section->vma);
6535
6536 return TRUE;
6537 }
6538
6539 /* Populate Arm stub for an exported Thumb function. */
6540
6541 static bfd_boolean
6542 elf32_arm_to_thumb_export_stub (struct elf_link_hash_entry *h, void * inf)
6543 {
6544 struct bfd_link_info * info = (struct bfd_link_info *) inf;
6545 asection * s;
6546 struct elf_link_hash_entry * myh;
6547 struct elf32_arm_link_hash_entry *eh;
6548 struct elf32_arm_link_hash_table * globals;
6549 asection *sec;
6550 bfd_vma val;
6551 char *error_message;
6552
6553 eh = elf32_arm_hash_entry (h);
6554 /* Allocate stubs for exported Thumb functions on v4t. */
6555 if (eh->export_glue == NULL)
6556 return TRUE;
6557
6558 globals = elf32_arm_hash_table (info);
6559 BFD_ASSERT (globals != NULL);
6560 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6561
6562 s = bfd_get_section_by_name (globals->bfd_of_glue_owner,
6563 ARM2THUMB_GLUE_SECTION_NAME);
6564 BFD_ASSERT (s != NULL);
6565 BFD_ASSERT (s->contents != NULL);
6566 BFD_ASSERT (s->output_section != NULL);
6567
6568 sec = eh->export_glue->root.u.def.section;
6569
6570 BFD_ASSERT (sec->output_section != NULL);
6571
6572 val = eh->export_glue->root.u.def.value + sec->output_offset
6573 + sec->output_section->vma;
6574
6575 myh = elf32_arm_create_thumb_stub (info, h->root.root.string,
6576 h->root.u.def.section->owner,
6577 globals->obfd, sec, val, s,
6578 &error_message);
6579 BFD_ASSERT (myh);
6580 return TRUE;
6581 }
6582
6583 /* Populate ARMv4 BX veneers. Returns the absolute adress of the veneer. */
6584
6585 static bfd_vma
6586 elf32_arm_bx_glue (struct bfd_link_info * info, int reg)
6587 {
6588 bfd_byte *p;
6589 bfd_vma glue_addr;
6590 asection *s;
6591 struct elf32_arm_link_hash_table *globals;
6592
6593 globals = elf32_arm_hash_table (info);
6594 BFD_ASSERT (globals != NULL);
6595 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6596
6597 s = bfd_get_section_by_name (globals->bfd_of_glue_owner,
6598 ARM_BX_GLUE_SECTION_NAME);
6599 BFD_ASSERT (s != NULL);
6600 BFD_ASSERT (s->contents != NULL);
6601 BFD_ASSERT (s->output_section != NULL);
6602
6603 BFD_ASSERT (globals->bx_glue_offset[reg] & 2);
6604
6605 glue_addr = globals->bx_glue_offset[reg] & ~(bfd_vma)3;
6606
6607 if ((globals->bx_glue_offset[reg] & 1) == 0)
6608 {
6609 p = s->contents + glue_addr;
6610 bfd_put_32 (globals->obfd, armbx1_tst_insn + (reg << 16), p);
6611 bfd_put_32 (globals->obfd, armbx2_moveq_insn + reg, p + 4);
6612 bfd_put_32 (globals->obfd, armbx3_bx_insn + reg, p + 8);
6613 globals->bx_glue_offset[reg] |= 1;
6614 }
6615
6616 return glue_addr + s->output_section->vma + s->output_offset;
6617 }
6618
6619 /* Generate Arm stubs for exported Thumb symbols. */
6620 static void
6621 elf32_arm_begin_write_processing (bfd *abfd ATTRIBUTE_UNUSED,
6622 struct bfd_link_info *link_info)
6623 {
6624 struct elf32_arm_link_hash_table * globals;
6625
6626 if (link_info == NULL)
6627 /* Ignore this if we are not called by the ELF backend linker. */
6628 return;
6629
6630 globals = elf32_arm_hash_table (link_info);
6631 if (globals == NULL)
6632 return;
6633
6634 /* If blx is available then exported Thumb symbols are OK and there is
6635 nothing to do. */
6636 if (globals->use_blx)
6637 return;
6638
6639 elf_link_hash_traverse (&globals->root, elf32_arm_to_thumb_export_stub,
6640 link_info);
6641 }
6642
6643 /* Some relocations map to different relocations depending on the
6644 target. Return the real relocation. */
6645
6646 static int
6647 arm_real_reloc_type (struct elf32_arm_link_hash_table * globals,
6648 int r_type)
6649 {
6650 switch (r_type)
6651 {
6652 case R_ARM_TARGET1:
6653 if (globals->target1_is_rel)
6654 return R_ARM_REL32;
6655 else
6656 return R_ARM_ABS32;
6657
6658 case R_ARM_TARGET2:
6659 return globals->target2_reloc;
6660
6661 default:
6662 return r_type;
6663 }
6664 }
6665
6666 /* Return the base VMA address which should be subtracted from real addresses
6667 when resolving @dtpoff relocation.
6668 This is PT_TLS segment p_vaddr. */
6669
6670 static bfd_vma
6671 dtpoff_base (struct bfd_link_info *info)
6672 {
6673 /* If tls_sec is NULL, we should have signalled an error already. */
6674 if (elf_hash_table (info)->tls_sec == NULL)
6675 return 0;
6676 return elf_hash_table (info)->tls_sec->vma;
6677 }
6678
6679 /* Return the relocation value for @tpoff relocation
6680 if STT_TLS virtual address is ADDRESS. */
6681
6682 static bfd_vma
6683 tpoff (struct bfd_link_info *info, bfd_vma address)
6684 {
6685 struct elf_link_hash_table *htab = elf_hash_table (info);
6686 bfd_vma base;
6687
6688 /* If tls_sec is NULL, we should have signalled an error already. */
6689 if (htab->tls_sec == NULL)
6690 return 0;
6691 base = align_power ((bfd_vma) TCB_SIZE, htab->tls_sec->alignment_power);
6692 return address - htab->tls_sec->vma + base;
6693 }
6694
6695 /* Perform an R_ARM_ABS12 relocation on the field pointed to by DATA.
6696 VALUE is the relocation value. */
6697
6698 static bfd_reloc_status_type
6699 elf32_arm_abs12_reloc (bfd *abfd, void *data, bfd_vma value)
6700 {
6701 if (value > 0xfff)
6702 return bfd_reloc_overflow;
6703
6704 value |= bfd_get_32 (abfd, data) & 0xfffff000;
6705 bfd_put_32 (abfd, value, data);
6706 return bfd_reloc_ok;
6707 }
6708
6709 /* For a given value of n, calculate the value of G_n as required to
6710 deal with group relocations. We return it in the form of an
6711 encoded constant-and-rotation, together with the final residual. If n is
6712 specified as less than zero, then final_residual is filled with the
6713 input value and no further action is performed. */
6714
6715 static bfd_vma
6716 calculate_group_reloc_mask (bfd_vma value, int n, bfd_vma *final_residual)
6717 {
6718 int current_n;
6719 bfd_vma g_n;
6720 bfd_vma encoded_g_n = 0;
6721 bfd_vma residual = value; /* Also known as Y_n. */
6722
6723 for (current_n = 0; current_n <= n; current_n++)
6724 {
6725 int shift;
6726
6727 /* Calculate which part of the value to mask. */
6728 if (residual == 0)
6729 shift = 0;
6730 else
6731 {
6732 int msb;
6733
6734 /* Determine the most significant bit in the residual and
6735 align the resulting value to a 2-bit boundary. */
6736 for (msb = 30; msb >= 0; msb -= 2)
6737 if (residual & (3 << msb))
6738 break;
6739
6740 /* The desired shift is now (msb - 6), or zero, whichever
6741 is the greater. */
6742 shift = msb - 6;
6743 if (shift < 0)
6744 shift = 0;
6745 }
6746
6747 /* Calculate g_n in 32-bit as well as encoded constant+rotation form. */
6748 g_n = residual & (0xff << shift);
6749 encoded_g_n = (g_n >> shift)
6750 | ((g_n <= 0xff ? 0 : (32 - shift) / 2) << 8);
6751
6752 /* Calculate the residual for the next time around. */
6753 residual &= ~g_n;
6754 }
6755
6756 *final_residual = residual;
6757
6758 return encoded_g_n;
6759 }
6760
6761 /* Given an ARM instruction, determine whether it is an ADD or a SUB.
6762 Returns 1 if it is an ADD, -1 if it is a SUB, and 0 otherwise. */
6763
6764 static int
6765 identify_add_or_sub (bfd_vma insn)
6766 {
6767 int opcode = insn & 0x1e00000;
6768
6769 if (opcode == 1 << 23) /* ADD */
6770 return 1;
6771
6772 if (opcode == 1 << 22) /* SUB */
6773 return -1;
6774
6775 return 0;
6776 }
6777
6778 /* Perform a relocation as part of a final link. */
6779
6780 static bfd_reloc_status_type
6781 elf32_arm_final_link_relocate (reloc_howto_type * howto,
6782 bfd * input_bfd,
6783 bfd * output_bfd,
6784 asection * input_section,
6785 bfd_byte * contents,
6786 Elf_Internal_Rela * rel,
6787 bfd_vma value,
6788 struct bfd_link_info * info,
6789 asection * sym_sec,
6790 const char * sym_name,
6791 int sym_flags,
6792 struct elf_link_hash_entry * h,
6793 bfd_boolean * unresolved_reloc_p,
6794 char ** error_message)
6795 {
6796 unsigned long r_type = howto->type;
6797 unsigned long r_symndx;
6798 bfd_byte * hit_data = contents + rel->r_offset;
6799 bfd * dynobj = NULL;
6800 Elf_Internal_Shdr * symtab_hdr;
6801 struct elf_link_hash_entry ** sym_hashes;
6802 bfd_vma * local_got_offsets;
6803 asection * sgot = NULL;
6804 asection * splt = NULL;
6805 asection * sreloc = NULL;
6806 bfd_vma addend;
6807 bfd_signed_vma signed_addend;
6808 struct elf32_arm_link_hash_table * globals;
6809
6810 globals = elf32_arm_hash_table (info);
6811 if (globals == NULL)
6812 return bfd_reloc_notsupported;
6813
6814 BFD_ASSERT (is_arm_elf (input_bfd));
6815
6816 /* Some relocation types map to different relocations depending on the
6817 target. We pick the right one here. */
6818 r_type = arm_real_reloc_type (globals, r_type);
6819 if (r_type != howto->type)
6820 howto = elf32_arm_howto_from_type (r_type);
6821
6822 /* If the start address has been set, then set the EF_ARM_HASENTRY
6823 flag. Setting this more than once is redundant, but the cost is
6824 not too high, and it keeps the code simple.
6825
6826 The test is done here, rather than somewhere else, because the
6827 start address is only set just before the final link commences.
6828
6829 Note - if the user deliberately sets a start address of 0, the
6830 flag will not be set. */
6831 if (bfd_get_start_address (output_bfd) != 0)
6832 elf_elfheader (output_bfd)->e_flags |= EF_ARM_HASENTRY;
6833
6834 dynobj = elf_hash_table (info)->dynobj;
6835 if (dynobj)
6836 {
6837 sgot = bfd_get_section_by_name (dynobj, ".got");
6838 splt = bfd_get_section_by_name (dynobj, ".plt");
6839 }
6840 symtab_hdr = & elf_symtab_hdr (input_bfd);
6841 sym_hashes = elf_sym_hashes (input_bfd);
6842 local_got_offsets = elf_local_got_offsets (input_bfd);
6843 r_symndx = ELF32_R_SYM (rel->r_info);
6844
6845 if (globals->use_rel)
6846 {
6847 addend = bfd_get_32 (input_bfd, hit_data) & howto->src_mask;
6848
6849 if (addend & ((howto->src_mask + 1) >> 1))
6850 {
6851 signed_addend = -1;
6852 signed_addend &= ~ howto->src_mask;
6853 signed_addend |= addend;
6854 }
6855 else
6856 signed_addend = addend;
6857 }
6858 else
6859 addend = signed_addend = rel->r_addend;
6860
6861 switch (r_type)
6862 {
6863 case R_ARM_NONE:
6864 /* We don't need to find a value for this symbol. It's just a
6865 marker. */
6866 *unresolved_reloc_p = FALSE;
6867 return bfd_reloc_ok;
6868
6869 case R_ARM_ABS12:
6870 if (!globals->vxworks_p)
6871 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
6872
6873 case R_ARM_PC24:
6874 case R_ARM_ABS32:
6875 case R_ARM_ABS32_NOI:
6876 case R_ARM_REL32:
6877 case R_ARM_REL32_NOI:
6878 case R_ARM_CALL:
6879 case R_ARM_JUMP24:
6880 case R_ARM_XPC25:
6881 case R_ARM_PREL31:
6882 case R_ARM_PLT32:
6883 /* Handle relocations which should use the PLT entry. ABS32/REL32
6884 will use the symbol's value, which may point to a PLT entry, but we
6885 don't need to handle that here. If we created a PLT entry, all
6886 branches in this object should go to it, except if the PLT is too
6887 far away, in which case a long branch stub should be inserted. */
6888 if ((r_type != R_ARM_ABS32 && r_type != R_ARM_REL32
6889 && r_type != R_ARM_ABS32_NOI && r_type != R_ARM_REL32_NOI
6890 && r_type != R_ARM_CALL
6891 && r_type != R_ARM_JUMP24
6892 && r_type != R_ARM_PLT32)
6893 && h != NULL
6894 && splt != NULL
6895 && h->plt.offset != (bfd_vma) -1)
6896 {
6897 /* If we've created a .plt section, and assigned a PLT entry to
6898 this function, it should not be known to bind locally. If
6899 it were, we would have cleared the PLT entry. */
6900 BFD_ASSERT (!SYMBOL_CALLS_LOCAL (info, h));
6901
6902 value = (splt->output_section->vma
6903 + splt->output_offset
6904 + h->plt.offset);
6905 *unresolved_reloc_p = FALSE;
6906 return _bfd_final_link_relocate (howto, input_bfd, input_section,
6907 contents, rel->r_offset, value,
6908 rel->r_addend);
6909 }
6910
6911 /* When generating a shared object or relocatable executable, these
6912 relocations are copied into the output file to be resolved at
6913 run time. */
6914 if ((info->shared || globals->root.is_relocatable_executable)
6915 && (input_section->flags & SEC_ALLOC)
6916 && !(globals->vxworks_p
6917 && strcmp (input_section->output_section->name,
6918 ".tls_vars") == 0)
6919 && ((r_type != R_ARM_REL32 && r_type != R_ARM_REL32_NOI)
6920 || !SYMBOL_CALLS_LOCAL (info, h))
6921 && (h == NULL
6922 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
6923 || h->root.type != bfd_link_hash_undefweak)
6924 && r_type != R_ARM_PC24
6925 && r_type != R_ARM_CALL
6926 && r_type != R_ARM_JUMP24
6927 && r_type != R_ARM_PREL31
6928 && r_type != R_ARM_PLT32)
6929 {
6930 Elf_Internal_Rela outrel;
6931 bfd_byte *loc;
6932 bfd_boolean skip, relocate;
6933
6934 *unresolved_reloc_p = FALSE;
6935
6936 if (sreloc == NULL)
6937 {
6938 sreloc = _bfd_elf_get_dynamic_reloc_section (input_bfd, input_section,
6939 ! globals->use_rel);
6940
6941 if (sreloc == NULL)
6942 return bfd_reloc_notsupported;
6943 }
6944
6945 skip = FALSE;
6946 relocate = FALSE;
6947
6948 outrel.r_addend = addend;
6949 outrel.r_offset =
6950 _bfd_elf_section_offset (output_bfd, info, input_section,
6951 rel->r_offset);
6952 if (outrel.r_offset == (bfd_vma) -1)
6953 skip = TRUE;
6954 else if (outrel.r_offset == (bfd_vma) -2)
6955 skip = TRUE, relocate = TRUE;
6956 outrel.r_offset += (input_section->output_section->vma
6957 + input_section->output_offset);
6958
6959 if (skip)
6960 memset (&outrel, 0, sizeof outrel);
6961 else if (h != NULL
6962 && h->dynindx != -1
6963 && (!info->shared
6964 || !info->symbolic
6965 || !h->def_regular))
6966 outrel.r_info = ELF32_R_INFO (h->dynindx, r_type);
6967 else
6968 {
6969 int symbol;
6970
6971 /* This symbol is local, or marked to become local. */
6972 if (sym_flags == STT_ARM_TFUNC)
6973 value |= 1;
6974 if (globals->symbian_p)
6975 {
6976 asection *osec;
6977
6978 /* On Symbian OS, the data segment and text segement
6979 can be relocated independently. Therefore, we
6980 must indicate the segment to which this
6981 relocation is relative. The BPABI allows us to
6982 use any symbol in the right segment; we just use
6983 the section symbol as it is convenient. (We
6984 cannot use the symbol given by "h" directly as it
6985 will not appear in the dynamic symbol table.)
6986
6987 Note that the dynamic linker ignores the section
6988 symbol value, so we don't subtract osec->vma
6989 from the emitted reloc addend. */
6990 if (sym_sec)
6991 osec = sym_sec->output_section;
6992 else
6993 osec = input_section->output_section;
6994 symbol = elf_section_data (osec)->dynindx;
6995 if (symbol == 0)
6996 {
6997 struct elf_link_hash_table *htab = elf_hash_table (info);
6998
6999 if ((osec->flags & SEC_READONLY) == 0
7000 && htab->data_index_section != NULL)
7001 osec = htab->data_index_section;
7002 else
7003 osec = htab->text_index_section;
7004 symbol = elf_section_data (osec)->dynindx;
7005 }
7006 BFD_ASSERT (symbol != 0);
7007 }
7008 else
7009 /* On SVR4-ish systems, the dynamic loader cannot
7010 relocate the text and data segments independently,
7011 so the symbol does not matter. */
7012 symbol = 0;
7013 outrel.r_info = ELF32_R_INFO (symbol, R_ARM_RELATIVE);
7014 if (globals->use_rel)
7015 relocate = TRUE;
7016 else
7017 outrel.r_addend += value;
7018 }
7019
7020 loc = sreloc->contents;
7021 loc += sreloc->reloc_count++ * RELOC_SIZE (globals);
7022 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
7023
7024 /* If this reloc is against an external symbol, we do not want to
7025 fiddle with the addend. Otherwise, we need to include the symbol
7026 value so that it becomes an addend for the dynamic reloc. */
7027 if (! relocate)
7028 return bfd_reloc_ok;
7029
7030 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7031 contents, rel->r_offset, value,
7032 (bfd_vma) 0);
7033 }
7034 else switch (r_type)
7035 {
7036 case R_ARM_ABS12:
7037 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
7038
7039 case R_ARM_XPC25: /* Arm BLX instruction. */
7040 case R_ARM_CALL:
7041 case R_ARM_JUMP24:
7042 case R_ARM_PC24: /* Arm B/BL instruction. */
7043 case R_ARM_PLT32:
7044 {
7045 bfd_signed_vma branch_offset;
7046 struct elf32_arm_stub_hash_entry *stub_entry = NULL;
7047
7048 if (r_type == R_ARM_XPC25)
7049 {
7050 /* Check for Arm calling Arm function. */
7051 /* FIXME: Should we translate the instruction into a BL
7052 instruction instead ? */
7053 if (sym_flags != STT_ARM_TFUNC)
7054 (*_bfd_error_handler)
7055 (_("\%B: Warning: Arm BLX instruction targets Arm function '%s'."),
7056 input_bfd,
7057 h ? h->root.root.string : "(local)");
7058 }
7059 else if (r_type == R_ARM_PC24)
7060 {
7061 /* Check for Arm calling Thumb function. */
7062 if (sym_flags == STT_ARM_TFUNC)
7063 {
7064 if (elf32_arm_to_thumb_stub (info, sym_name, input_bfd,
7065 output_bfd, input_section,
7066 hit_data, sym_sec, rel->r_offset,
7067 signed_addend, value,
7068 error_message))
7069 return bfd_reloc_ok;
7070 else
7071 return bfd_reloc_dangerous;
7072 }
7073 }
7074
7075 /* Check if a stub has to be inserted because the
7076 destination is too far or we are changing mode. */
7077 if ( r_type == R_ARM_CALL
7078 || r_type == R_ARM_JUMP24
7079 || r_type == R_ARM_PLT32)
7080 {
7081 bfd_vma from;
7082
7083 /* If the call goes through a PLT entry, make sure to
7084 check distance to the right destination address. */
7085 if (h != NULL && splt != NULL && h->plt.offset != (bfd_vma) -1)
7086 {
7087 value = (splt->output_section->vma
7088 + splt->output_offset
7089 + h->plt.offset);
7090 *unresolved_reloc_p = FALSE;
7091 /* The PLT entry is in ARM mode, regardless of the
7092 target function. */
7093 sym_flags = STT_FUNC;
7094 }
7095
7096 from = (input_section->output_section->vma
7097 + input_section->output_offset
7098 + rel->r_offset);
7099 branch_offset = (bfd_signed_vma)(value - from);
7100
7101 if (branch_offset > ARM_MAX_FWD_BRANCH_OFFSET
7102 || branch_offset < ARM_MAX_BWD_BRANCH_OFFSET
7103 || ((sym_flags == STT_ARM_TFUNC)
7104 && (((r_type == R_ARM_CALL) && !globals->use_blx)
7105 || (r_type == R_ARM_JUMP24)
7106 || (r_type == R_ARM_PLT32) ))
7107 )
7108 {
7109 /* The target is out of reach, so redirect the
7110 branch to the local stub for this function. */
7111
7112 stub_entry = elf32_arm_get_stub_entry (input_section,
7113 sym_sec, h,
7114 rel, globals);
7115 if (stub_entry != NULL)
7116 value = (stub_entry->stub_offset
7117 + stub_entry->stub_sec->output_offset
7118 + stub_entry->stub_sec->output_section->vma);
7119 }
7120 }
7121
7122 /* The ARM ELF ABI says that this reloc is computed as: S - P + A
7123 where:
7124 S is the address of the symbol in the relocation.
7125 P is address of the instruction being relocated.
7126 A is the addend (extracted from the instruction) in bytes.
7127
7128 S is held in 'value'.
7129 P is the base address of the section containing the
7130 instruction plus the offset of the reloc into that
7131 section, ie:
7132 (input_section->output_section->vma +
7133 input_section->output_offset +
7134 rel->r_offset).
7135 A is the addend, converted into bytes, ie:
7136 (signed_addend * 4)
7137
7138 Note: None of these operations have knowledge of the pipeline
7139 size of the processor, thus it is up to the assembler to
7140 encode this information into the addend. */
7141 value -= (input_section->output_section->vma
7142 + input_section->output_offset);
7143 value -= rel->r_offset;
7144 if (globals->use_rel)
7145 value += (signed_addend << howto->size);
7146 else
7147 /* RELA addends do not have to be adjusted by howto->size. */
7148 value += signed_addend;
7149
7150 signed_addend = value;
7151 signed_addend >>= howto->rightshift;
7152
7153 /* A branch to an undefined weak symbol is turned into a jump to
7154 the next instruction unless a PLT entry will be created.
7155 Do the same for local undefined symbols.
7156 The jump to the next instruction is optimized as a NOP depending
7157 on the architecture. */
7158 if (h ? (h->root.type == bfd_link_hash_undefweak
7159 && !(splt != NULL && h->plt.offset != (bfd_vma) -1))
7160 : bfd_is_und_section (sym_sec))
7161 {
7162 value = (bfd_get_32 (input_bfd, hit_data) & 0xf0000000);
7163
7164 if (arch_has_arm_nop (globals))
7165 value |= 0x0320f000;
7166 else
7167 value |= 0x01a00000; /* Using pre-UAL nop: mov r0, r0. */
7168 }
7169 else
7170 {
7171 /* Perform a signed range check. */
7172 if ( signed_addend > ((bfd_signed_vma) (howto->dst_mask >> 1))
7173 || signed_addend < - ((bfd_signed_vma) ((howto->dst_mask + 1) >> 1)))
7174 return bfd_reloc_overflow;
7175
7176 addend = (value & 2);
7177
7178 value = (signed_addend & howto->dst_mask)
7179 | (bfd_get_32 (input_bfd, hit_data) & (~ howto->dst_mask));
7180
7181 if (r_type == R_ARM_CALL)
7182 {
7183 /* Set the H bit in the BLX instruction. */
7184 if (sym_flags == STT_ARM_TFUNC)
7185 {
7186 if (addend)
7187 value |= (1 << 24);
7188 else
7189 value &= ~(bfd_vma)(1 << 24);
7190 }
7191
7192 /* Select the correct instruction (BL or BLX). */
7193 /* Only if we are not handling a BL to a stub. In this
7194 case, mode switching is performed by the stub. */
7195 if (sym_flags == STT_ARM_TFUNC && !stub_entry)
7196 value |= (1 << 28);
7197 else
7198 {
7199 value &= ~(bfd_vma)(1 << 28);
7200 value |= (1 << 24);
7201 }
7202 }
7203 }
7204 }
7205 break;
7206
7207 case R_ARM_ABS32:
7208 value += addend;
7209 if (sym_flags == STT_ARM_TFUNC)
7210 value |= 1;
7211 break;
7212
7213 case R_ARM_ABS32_NOI:
7214 value += addend;
7215 break;
7216
7217 case R_ARM_REL32:
7218 value += addend;
7219 if (sym_flags == STT_ARM_TFUNC)
7220 value |= 1;
7221 value -= (input_section->output_section->vma
7222 + input_section->output_offset + rel->r_offset);
7223 break;
7224
7225 case R_ARM_REL32_NOI:
7226 value += addend;
7227 value -= (input_section->output_section->vma
7228 + input_section->output_offset + rel->r_offset);
7229 break;
7230
7231 case R_ARM_PREL31:
7232 value -= (input_section->output_section->vma
7233 + input_section->output_offset + rel->r_offset);
7234 value += signed_addend;
7235 if (! h || h->root.type != bfd_link_hash_undefweak)
7236 {
7237 /* Check for overflow. */
7238 if ((value ^ (value >> 1)) & (1 << 30))
7239 return bfd_reloc_overflow;
7240 }
7241 value &= 0x7fffffff;
7242 value |= (bfd_get_32 (input_bfd, hit_data) & 0x80000000);
7243 if (sym_flags == STT_ARM_TFUNC)
7244 value |= 1;
7245 break;
7246 }
7247
7248 bfd_put_32 (input_bfd, value, hit_data);
7249 return bfd_reloc_ok;
7250
7251 case R_ARM_ABS8:
7252 value += addend;
7253
7254 /* There is no way to tell whether the user intended to use a signed or
7255 unsigned addend. When checking for overflow we accept either,
7256 as specified by the AAELF. */
7257 if ((long) value > 0xff || (long) value < -0x80)
7258 return bfd_reloc_overflow;
7259
7260 bfd_put_8 (input_bfd, value, hit_data);
7261 return bfd_reloc_ok;
7262
7263 case R_ARM_ABS16:
7264 value += addend;
7265
7266 /* See comment for R_ARM_ABS8. */
7267 if ((long) value > 0xffff || (long) value < -0x8000)
7268 return bfd_reloc_overflow;
7269
7270 bfd_put_16 (input_bfd, value, hit_data);
7271 return bfd_reloc_ok;
7272
7273 case R_ARM_THM_ABS5:
7274 /* Support ldr and str instructions for the thumb. */
7275 if (globals->use_rel)
7276 {
7277 /* Need to refetch addend. */
7278 addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
7279 /* ??? Need to determine shift amount from operand size. */
7280 addend >>= howto->rightshift;
7281 }
7282 value += addend;
7283
7284 /* ??? Isn't value unsigned? */
7285 if ((long) value > 0x1f || (long) value < -0x10)
7286 return bfd_reloc_overflow;
7287
7288 /* ??? Value needs to be properly shifted into place first. */
7289 value |= bfd_get_16 (input_bfd, hit_data) & 0xf83f;
7290 bfd_put_16 (input_bfd, value, hit_data);
7291 return bfd_reloc_ok;
7292
7293 case R_ARM_THM_ALU_PREL_11_0:
7294 /* Corresponds to: addw.w reg, pc, #offset (and similarly for subw). */
7295 {
7296 bfd_vma insn;
7297 bfd_signed_vma relocation;
7298
7299 insn = (bfd_get_16 (input_bfd, hit_data) << 16)
7300 | bfd_get_16 (input_bfd, hit_data + 2);
7301
7302 if (globals->use_rel)
7303 {
7304 signed_addend = (insn & 0xff) | ((insn & 0x7000) >> 4)
7305 | ((insn & (1 << 26)) >> 15);
7306 if (insn & 0xf00000)
7307 signed_addend = -signed_addend;
7308 }
7309
7310 relocation = value + signed_addend;
7311 relocation -= (input_section->output_section->vma
7312 + input_section->output_offset
7313 + rel->r_offset);
7314
7315 value = abs (relocation);
7316
7317 if (value >= 0x1000)
7318 return bfd_reloc_overflow;
7319
7320 insn = (insn & 0xfb0f8f00) | (value & 0xff)
7321 | ((value & 0x700) << 4)
7322 | ((value & 0x800) << 15);
7323 if (relocation < 0)
7324 insn |= 0xa00000;
7325
7326 bfd_put_16 (input_bfd, insn >> 16, hit_data);
7327 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
7328
7329 return bfd_reloc_ok;
7330 }
7331
7332 case R_ARM_THM_PC8:
7333 /* PR 10073: This reloc is not generated by the GNU toolchain,
7334 but it is supported for compatibility with third party libraries
7335 generated by other compilers, specifically the ARM/IAR. */
7336 {
7337 bfd_vma insn;
7338 bfd_signed_vma relocation;
7339
7340 insn = bfd_get_16 (input_bfd, hit_data);
7341
7342 if (globals->use_rel)
7343 addend = (insn & 0x00ff) << 2;
7344
7345 relocation = value + addend;
7346 relocation -= (input_section->output_section->vma
7347 + input_section->output_offset
7348 + rel->r_offset);
7349
7350 value = abs (relocation);
7351
7352 /* We do not check for overflow of this reloc. Although strictly
7353 speaking this is incorrect, it appears to be necessary in order
7354 to work with IAR generated relocs. Since GCC and GAS do not
7355 generate R_ARM_THM_PC8 relocs, the lack of a check should not be
7356 a problem for them. */
7357 value &= 0x3fc;
7358
7359 insn = (insn & 0xff00) | (value >> 2);
7360
7361 bfd_put_16 (input_bfd, insn, hit_data);
7362
7363 return bfd_reloc_ok;
7364 }
7365
7366 case R_ARM_THM_PC12:
7367 /* Corresponds to: ldr.w reg, [pc, #offset]. */
7368 {
7369 bfd_vma insn;
7370 bfd_signed_vma relocation;
7371
7372 insn = (bfd_get_16 (input_bfd, hit_data) << 16)
7373 | bfd_get_16 (input_bfd, hit_data + 2);
7374
7375 if (globals->use_rel)
7376 {
7377 signed_addend = insn & 0xfff;
7378 if (!(insn & (1 << 23)))
7379 signed_addend = -signed_addend;
7380 }
7381
7382 relocation = value + signed_addend;
7383 relocation -= (input_section->output_section->vma
7384 + input_section->output_offset
7385 + rel->r_offset);
7386
7387 value = abs (relocation);
7388
7389 if (value >= 0x1000)
7390 return bfd_reloc_overflow;
7391
7392 insn = (insn & 0xff7ff000) | value;
7393 if (relocation >= 0)
7394 insn |= (1 << 23);
7395
7396 bfd_put_16 (input_bfd, insn >> 16, hit_data);
7397 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
7398
7399 return bfd_reloc_ok;
7400 }
7401
7402 case R_ARM_THM_XPC22:
7403 case R_ARM_THM_CALL:
7404 case R_ARM_THM_JUMP24:
7405 /* Thumb BL (branch long instruction). */
7406 {
7407 bfd_vma relocation;
7408 bfd_vma reloc_sign;
7409 bfd_boolean overflow = FALSE;
7410 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
7411 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
7412 bfd_signed_vma reloc_signed_max;
7413 bfd_signed_vma reloc_signed_min;
7414 bfd_vma check;
7415 bfd_signed_vma signed_check;
7416 int bitsize;
7417 const int thumb2 = using_thumb2 (globals);
7418
7419 /* A branch to an undefined weak symbol is turned into a jump to
7420 the next instruction unless a PLT entry will be created.
7421 The jump to the next instruction is optimized as a NOP.W for
7422 Thumb-2 enabled architectures. */
7423 if (h && h->root.type == bfd_link_hash_undefweak
7424 && !(splt != NULL && h->plt.offset != (bfd_vma) -1))
7425 {
7426 if (arch_has_thumb2_nop (globals))
7427 {
7428 bfd_put_16 (input_bfd, 0xf3af, hit_data);
7429 bfd_put_16 (input_bfd, 0x8000, hit_data + 2);
7430 }
7431 else
7432 {
7433 bfd_put_16 (input_bfd, 0xe000, hit_data);
7434 bfd_put_16 (input_bfd, 0xbf00, hit_data + 2);
7435 }
7436 return bfd_reloc_ok;
7437 }
7438
7439 /* Fetch the addend. We use the Thumb-2 encoding (backwards compatible
7440 with Thumb-1) involving the J1 and J2 bits. */
7441 if (globals->use_rel)
7442 {
7443 bfd_vma s = (upper_insn & (1 << 10)) >> 10;
7444 bfd_vma upper = upper_insn & 0x3ff;
7445 bfd_vma lower = lower_insn & 0x7ff;
7446 bfd_vma j1 = (lower_insn & (1 << 13)) >> 13;
7447 bfd_vma j2 = (lower_insn & (1 << 11)) >> 11;
7448 bfd_vma i1 = j1 ^ s ? 0 : 1;
7449 bfd_vma i2 = j2 ^ s ? 0 : 1;
7450
7451 addend = (i1 << 23) | (i2 << 22) | (upper << 12) | (lower << 1);
7452 /* Sign extend. */
7453 addend = (addend | ((s ? 0 : 1) << 24)) - (1 << 24);
7454
7455 signed_addend = addend;
7456 }
7457
7458 if (r_type == R_ARM_THM_XPC22)
7459 {
7460 /* Check for Thumb to Thumb call. */
7461 /* FIXME: Should we translate the instruction into a BL
7462 instruction instead ? */
7463 if (sym_flags == STT_ARM_TFUNC)
7464 (*_bfd_error_handler)
7465 (_("%B: Warning: Thumb BLX instruction targets thumb function '%s'."),
7466 input_bfd,
7467 h ? h->root.root.string : "(local)");
7468 }
7469 else
7470 {
7471 /* If it is not a call to Thumb, assume call to Arm.
7472 If it is a call relative to a section name, then it is not a
7473 function call at all, but rather a long jump. Calls through
7474 the PLT do not require stubs. */
7475 if (sym_flags != STT_ARM_TFUNC && sym_flags != STT_SECTION
7476 && (h == NULL || splt == NULL
7477 || h->plt.offset == (bfd_vma) -1))
7478 {
7479 if (globals->use_blx && r_type == R_ARM_THM_CALL)
7480 {
7481 /* Convert BL to BLX. */
7482 lower_insn = (lower_insn & ~0x1000) | 0x0800;
7483 }
7484 else if (( r_type != R_ARM_THM_CALL)
7485 && (r_type != R_ARM_THM_JUMP24))
7486 {
7487 if (elf32_thumb_to_arm_stub
7488 (info, sym_name, input_bfd, output_bfd, input_section,
7489 hit_data, sym_sec, rel->r_offset, signed_addend, value,
7490 error_message))
7491 return bfd_reloc_ok;
7492 else
7493 return bfd_reloc_dangerous;
7494 }
7495 }
7496 else if (sym_flags == STT_ARM_TFUNC && globals->use_blx
7497 && r_type == R_ARM_THM_CALL)
7498 {
7499 /* Make sure this is a BL. */
7500 lower_insn |= 0x1800;
7501 }
7502 }
7503
7504 /* Handle calls via the PLT. */
7505 if (h != NULL && splt != NULL && h->plt.offset != (bfd_vma) -1)
7506 {
7507 value = (splt->output_section->vma
7508 + splt->output_offset
7509 + h->plt.offset);
7510 if (globals->use_blx && r_type == R_ARM_THM_CALL)
7511 {
7512 /* If the Thumb BLX instruction is available, convert the
7513 BL to a BLX instruction to call the ARM-mode PLT entry. */
7514 lower_insn = (lower_insn & ~0x1000) | 0x0800;
7515 sym_flags = STT_FUNC;
7516 }
7517 else
7518 {
7519 /* Target the Thumb stub before the ARM PLT entry. */
7520 value -= PLT_THUMB_STUB_SIZE;
7521 sym_flags = STT_ARM_TFUNC;
7522 }
7523 *unresolved_reloc_p = FALSE;
7524 }
7525
7526 if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24)
7527 {
7528 /* Check if a stub has to be inserted because the destination
7529 is too far. */
7530 bfd_vma from;
7531 bfd_signed_vma branch_offset;
7532 struct elf32_arm_stub_hash_entry *stub_entry = NULL;
7533
7534 from = (input_section->output_section->vma
7535 + input_section->output_offset
7536 + rel->r_offset);
7537 branch_offset = (bfd_signed_vma)(value - from);
7538
7539 if ((!thumb2
7540 && (branch_offset > THM_MAX_FWD_BRANCH_OFFSET
7541 || (branch_offset < THM_MAX_BWD_BRANCH_OFFSET)))
7542 ||
7543 (thumb2
7544 && (branch_offset > THM2_MAX_FWD_BRANCH_OFFSET
7545 || (branch_offset < THM2_MAX_BWD_BRANCH_OFFSET)))
7546 || ((sym_flags != STT_ARM_TFUNC)
7547 && (((r_type == R_ARM_THM_CALL) && !globals->use_blx)
7548 || r_type == R_ARM_THM_JUMP24)))
7549 {
7550 /* The target is out of reach or we are changing modes, so
7551 redirect the branch to the local stub for this
7552 function. */
7553 stub_entry = elf32_arm_get_stub_entry (input_section,
7554 sym_sec, h,
7555 rel, globals);
7556 if (stub_entry != NULL)
7557 value = (stub_entry->stub_offset
7558 + stub_entry->stub_sec->output_offset
7559 + stub_entry->stub_sec->output_section->vma);
7560
7561 /* If this call becomes a call to Arm, force BLX. */
7562 if (globals->use_blx && (r_type == R_ARM_THM_CALL))
7563 {
7564 if ((stub_entry
7565 && !arm_stub_is_thumb (stub_entry->stub_type))
7566 || (sym_flags != STT_ARM_TFUNC))
7567 lower_insn = (lower_insn & ~0x1000) | 0x0800;
7568 }
7569 }
7570 }
7571
7572 relocation = value + signed_addend;
7573
7574 relocation -= (input_section->output_section->vma
7575 + input_section->output_offset
7576 + rel->r_offset);
7577
7578 check = relocation >> howto->rightshift;
7579
7580 /* If this is a signed value, the rightshift just dropped
7581 leading 1 bits (assuming twos complement). */
7582 if ((bfd_signed_vma) relocation >= 0)
7583 signed_check = check;
7584 else
7585 signed_check = check | ~((bfd_vma) -1 >> howto->rightshift);
7586
7587 /* Calculate the permissable maximum and minimum values for
7588 this relocation according to whether we're relocating for
7589 Thumb-2 or not. */
7590 bitsize = howto->bitsize;
7591 if (!thumb2)
7592 bitsize -= 2;
7593 reloc_signed_max = (1 << (bitsize - 1)) - 1;
7594 reloc_signed_min = ~reloc_signed_max;
7595
7596 /* Assumes two's complement. */
7597 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
7598 overflow = TRUE;
7599
7600 if ((lower_insn & 0x5000) == 0x4000)
7601 /* For a BLX instruction, make sure that the relocation is rounded up
7602 to a word boundary. This follows the semantics of the instruction
7603 which specifies that bit 1 of the target address will come from bit
7604 1 of the base address. */
7605 relocation = (relocation + 2) & ~ 3;
7606
7607 /* Put RELOCATION back into the insn. Assumes two's complement.
7608 We use the Thumb-2 encoding, which is safe even if dealing with
7609 a Thumb-1 instruction by virtue of our overflow check above. */
7610 reloc_sign = (signed_check < 0) ? 1 : 0;
7611 upper_insn = (upper_insn & ~(bfd_vma) 0x7ff)
7612 | ((relocation >> 12) & 0x3ff)
7613 | (reloc_sign << 10);
7614 lower_insn = (lower_insn & ~(bfd_vma) 0x2fff)
7615 | (((!((relocation >> 23) & 1)) ^ reloc_sign) << 13)
7616 | (((!((relocation >> 22) & 1)) ^ reloc_sign) << 11)
7617 | ((relocation >> 1) & 0x7ff);
7618
7619 /* Put the relocated value back in the object file: */
7620 bfd_put_16 (input_bfd, upper_insn, hit_data);
7621 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
7622
7623 return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
7624 }
7625 break;
7626
7627 case R_ARM_THM_JUMP19:
7628 /* Thumb32 conditional branch instruction. */
7629 {
7630 bfd_vma relocation;
7631 bfd_boolean overflow = FALSE;
7632 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
7633 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
7634 bfd_signed_vma reloc_signed_max = 0xffffe;
7635 bfd_signed_vma reloc_signed_min = -0x100000;
7636 bfd_signed_vma signed_check;
7637
7638 /* Need to refetch the addend, reconstruct the top three bits,
7639 and squish the two 11 bit pieces together. */
7640 if (globals->use_rel)
7641 {
7642 bfd_vma S = (upper_insn & 0x0400) >> 10;
7643 bfd_vma upper = (upper_insn & 0x003f);
7644 bfd_vma J1 = (lower_insn & 0x2000) >> 13;
7645 bfd_vma J2 = (lower_insn & 0x0800) >> 11;
7646 bfd_vma lower = (lower_insn & 0x07ff);
7647
7648 upper |= J1 << 6;
7649 upper |= J2 << 7;
7650 upper |= (!S) << 8;
7651 upper -= 0x0100; /* Sign extend. */
7652
7653 addend = (upper << 12) | (lower << 1);
7654 signed_addend = addend;
7655 }
7656
7657 /* Handle calls via the PLT. */
7658 if (h != NULL && splt != NULL && h->plt.offset != (bfd_vma) -1)
7659 {
7660 value = (splt->output_section->vma
7661 + splt->output_offset
7662 + h->plt.offset);
7663 /* Target the Thumb stub before the ARM PLT entry. */
7664 value -= PLT_THUMB_STUB_SIZE;
7665 *unresolved_reloc_p = FALSE;
7666 }
7667
7668 /* ??? Should handle interworking? GCC might someday try to
7669 use this for tail calls. */
7670
7671 relocation = value + signed_addend;
7672 relocation -= (input_section->output_section->vma
7673 + input_section->output_offset
7674 + rel->r_offset);
7675 signed_check = (bfd_signed_vma) relocation;
7676
7677 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
7678 overflow = TRUE;
7679
7680 /* Put RELOCATION back into the insn. */
7681 {
7682 bfd_vma S = (relocation & 0x00100000) >> 20;
7683 bfd_vma J2 = (relocation & 0x00080000) >> 19;
7684 bfd_vma J1 = (relocation & 0x00040000) >> 18;
7685 bfd_vma hi = (relocation & 0x0003f000) >> 12;
7686 bfd_vma lo = (relocation & 0x00000ffe) >> 1;
7687
7688 upper_insn = (upper_insn & 0xfbc0) | (S << 10) | hi;
7689 lower_insn = (lower_insn & 0xd000) | (J1 << 13) | (J2 << 11) | lo;
7690 }
7691
7692 /* Put the relocated value back in the object file: */
7693 bfd_put_16 (input_bfd, upper_insn, hit_data);
7694 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
7695
7696 return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
7697 }
7698
7699 case R_ARM_THM_JUMP11:
7700 case R_ARM_THM_JUMP8:
7701 case R_ARM_THM_JUMP6:
7702 /* Thumb B (branch) instruction). */
7703 {
7704 bfd_signed_vma relocation;
7705 bfd_signed_vma reloc_signed_max = (1 << (howto->bitsize - 1)) - 1;
7706 bfd_signed_vma reloc_signed_min = ~ reloc_signed_max;
7707 bfd_signed_vma signed_check;
7708
7709 /* CZB cannot jump backward. */
7710 if (r_type == R_ARM_THM_JUMP6)
7711 reloc_signed_min = 0;
7712
7713 if (globals->use_rel)
7714 {
7715 /* Need to refetch addend. */
7716 addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
7717 if (addend & ((howto->src_mask + 1) >> 1))
7718 {
7719 signed_addend = -1;
7720 signed_addend &= ~ howto->src_mask;
7721 signed_addend |= addend;
7722 }
7723 else
7724 signed_addend = addend;
7725 /* The value in the insn has been right shifted. We need to
7726 undo this, so that we can perform the address calculation
7727 in terms of bytes. */
7728 signed_addend <<= howto->rightshift;
7729 }
7730 relocation = value + signed_addend;
7731
7732 relocation -= (input_section->output_section->vma
7733 + input_section->output_offset
7734 + rel->r_offset);
7735
7736 relocation >>= howto->rightshift;
7737 signed_check = relocation;
7738
7739 if (r_type == R_ARM_THM_JUMP6)
7740 relocation = ((relocation & 0x0020) << 4) | ((relocation & 0x001f) << 3);
7741 else
7742 relocation &= howto->dst_mask;
7743 relocation |= (bfd_get_16 (input_bfd, hit_data) & (~ howto->dst_mask));
7744
7745 bfd_put_16 (input_bfd, relocation, hit_data);
7746
7747 /* Assumes two's complement. */
7748 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
7749 return bfd_reloc_overflow;
7750
7751 return bfd_reloc_ok;
7752 }
7753
7754 case R_ARM_ALU_PCREL7_0:
7755 case R_ARM_ALU_PCREL15_8:
7756 case R_ARM_ALU_PCREL23_15:
7757 {
7758 bfd_vma insn;
7759 bfd_vma relocation;
7760
7761 insn = bfd_get_32 (input_bfd, hit_data);
7762 if (globals->use_rel)
7763 {
7764 /* Extract the addend. */
7765 addend = (insn & 0xff) << ((insn & 0xf00) >> 7);
7766 signed_addend = addend;
7767 }
7768 relocation = value + signed_addend;
7769
7770 relocation -= (input_section->output_section->vma
7771 + input_section->output_offset
7772 + rel->r_offset);
7773 insn = (insn & ~0xfff)
7774 | ((howto->bitpos << 7) & 0xf00)
7775 | ((relocation >> howto->bitpos) & 0xff);
7776 bfd_put_32 (input_bfd, value, hit_data);
7777 }
7778 return bfd_reloc_ok;
7779
7780 case R_ARM_GNU_VTINHERIT:
7781 case R_ARM_GNU_VTENTRY:
7782 return bfd_reloc_ok;
7783
7784 case R_ARM_GOTOFF32:
7785 /* Relocation is relative to the start of the
7786 global offset table. */
7787
7788 BFD_ASSERT (sgot != NULL);
7789 if (sgot == NULL)
7790 return bfd_reloc_notsupported;
7791
7792 /* If we are addressing a Thumb function, we need to adjust the
7793 address by one, so that attempts to call the function pointer will
7794 correctly interpret it as Thumb code. */
7795 if (sym_flags == STT_ARM_TFUNC)
7796 value += 1;
7797
7798 /* Note that sgot->output_offset is not involved in this
7799 calculation. We always want the start of .got. If we
7800 define _GLOBAL_OFFSET_TABLE in a different way, as is
7801 permitted by the ABI, we might have to change this
7802 calculation. */
7803 value -= sgot->output_section->vma;
7804 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7805 contents, rel->r_offset, value,
7806 rel->r_addend);
7807
7808 case R_ARM_GOTPC:
7809 /* Use global offset table as symbol value. */
7810 BFD_ASSERT (sgot != NULL);
7811
7812 if (sgot == NULL)
7813 return bfd_reloc_notsupported;
7814
7815 *unresolved_reloc_p = FALSE;
7816 value = sgot->output_section->vma;
7817 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7818 contents, rel->r_offset, value,
7819 rel->r_addend);
7820
7821 case R_ARM_GOT32:
7822 case R_ARM_GOT_PREL:
7823 /* Relocation is to the entry for this symbol in the
7824 global offset table. */
7825 if (sgot == NULL)
7826 return bfd_reloc_notsupported;
7827
7828 if (h != NULL)
7829 {
7830 bfd_vma off;
7831 bfd_boolean dyn;
7832
7833 off = h->got.offset;
7834 BFD_ASSERT (off != (bfd_vma) -1);
7835 dyn = globals->root.dynamic_sections_created;
7836
7837 if (! WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
7838 || (info->shared
7839 && SYMBOL_REFERENCES_LOCAL (info, h))
7840 || (ELF_ST_VISIBILITY (h->other)
7841 && h->root.type == bfd_link_hash_undefweak))
7842 {
7843 /* This is actually a static link, or it is a -Bsymbolic link
7844 and the symbol is defined locally. We must initialize this
7845 entry in the global offset table. Since the offset must
7846 always be a multiple of 4, we use the least significant bit
7847 to record whether we have initialized it already.
7848
7849 When doing a dynamic link, we create a .rel(a).got relocation
7850 entry to initialize the value. This is done in the
7851 finish_dynamic_symbol routine. */
7852 if ((off & 1) != 0)
7853 off &= ~1;
7854 else
7855 {
7856 /* If we are addressing a Thumb function, we need to
7857 adjust the address by one, so that attempts to
7858 call the function pointer will correctly
7859 interpret it as Thumb code. */
7860 if (sym_flags == STT_ARM_TFUNC)
7861 value |= 1;
7862
7863 bfd_put_32 (output_bfd, value, sgot->contents + off);
7864 h->got.offset |= 1;
7865 }
7866 }
7867 else
7868 *unresolved_reloc_p = FALSE;
7869
7870 value = sgot->output_offset + off;
7871 }
7872 else
7873 {
7874 bfd_vma off;
7875
7876 BFD_ASSERT (local_got_offsets != NULL &&
7877 local_got_offsets[r_symndx] != (bfd_vma) -1);
7878
7879 off = local_got_offsets[r_symndx];
7880
7881 /* The offset must always be a multiple of 4. We use the
7882 least significant bit to record whether we have already
7883 generated the necessary reloc. */
7884 if ((off & 1) != 0)
7885 off &= ~1;
7886 else
7887 {
7888 /* If we are addressing a Thumb function, we need to
7889 adjust the address by one, so that attempts to
7890 call the function pointer will correctly
7891 interpret it as Thumb code. */
7892 if (sym_flags == STT_ARM_TFUNC)
7893 value |= 1;
7894
7895 if (globals->use_rel)
7896 bfd_put_32 (output_bfd, value, sgot->contents + off);
7897
7898 if (info->shared)
7899 {
7900 asection * srelgot;
7901 Elf_Internal_Rela outrel;
7902 bfd_byte *loc;
7903
7904 srelgot = (bfd_get_section_by_name
7905 (dynobj, RELOC_SECTION (globals, ".got")));
7906 BFD_ASSERT (srelgot != NULL);
7907
7908 outrel.r_addend = addend + value;
7909 outrel.r_offset = (sgot->output_section->vma
7910 + sgot->output_offset
7911 + off);
7912 outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
7913 loc = srelgot->contents;
7914 loc += srelgot->reloc_count++ * RELOC_SIZE (globals);
7915 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
7916 }
7917
7918 local_got_offsets[r_symndx] |= 1;
7919 }
7920
7921 value = sgot->output_offset + off;
7922 }
7923 if (r_type != R_ARM_GOT32)
7924 value += sgot->output_section->vma;
7925
7926 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7927 contents, rel->r_offset, value,
7928 rel->r_addend);
7929
7930 case R_ARM_TLS_LDO32:
7931 value = value - dtpoff_base (info);
7932
7933 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7934 contents, rel->r_offset, value,
7935 rel->r_addend);
7936
7937 case R_ARM_TLS_LDM32:
7938 {
7939 bfd_vma off;
7940
7941 if (globals->sgot == NULL)
7942 abort ();
7943
7944 off = globals->tls_ldm_got.offset;
7945
7946 if ((off & 1) != 0)
7947 off &= ~1;
7948 else
7949 {
7950 /* If we don't know the module number, create a relocation
7951 for it. */
7952 if (info->shared)
7953 {
7954 Elf_Internal_Rela outrel;
7955 bfd_byte *loc;
7956
7957 if (globals->srelgot == NULL)
7958 abort ();
7959
7960 outrel.r_addend = 0;
7961 outrel.r_offset = (globals->sgot->output_section->vma
7962 + globals->sgot->output_offset + off);
7963 outrel.r_info = ELF32_R_INFO (0, R_ARM_TLS_DTPMOD32);
7964
7965 if (globals->use_rel)
7966 bfd_put_32 (output_bfd, outrel.r_addend,
7967 globals->sgot->contents + off);
7968
7969 loc = globals->srelgot->contents;
7970 loc += globals->srelgot->reloc_count++ * RELOC_SIZE (globals);
7971 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
7972 }
7973 else
7974 bfd_put_32 (output_bfd, 1, globals->sgot->contents + off);
7975
7976 globals->tls_ldm_got.offset |= 1;
7977 }
7978
7979 value = globals->sgot->output_section->vma + globals->sgot->output_offset + off
7980 - (input_section->output_section->vma + input_section->output_offset + rel->r_offset);
7981
7982 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7983 contents, rel->r_offset, value,
7984 rel->r_addend);
7985 }
7986
7987 case R_ARM_TLS_GD32:
7988 case R_ARM_TLS_IE32:
7989 {
7990 bfd_vma off;
7991 int indx;
7992 char tls_type;
7993
7994 if (globals->sgot == NULL)
7995 abort ();
7996
7997 indx = 0;
7998 if (h != NULL)
7999 {
8000 bfd_boolean dyn;
8001 dyn = globals->root.dynamic_sections_created;
8002 if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
8003 && (!info->shared
8004 || !SYMBOL_REFERENCES_LOCAL (info, h)))
8005 {
8006 *unresolved_reloc_p = FALSE;
8007 indx = h->dynindx;
8008 }
8009 off = h->got.offset;
8010 tls_type = ((struct elf32_arm_link_hash_entry *) h)->tls_type;
8011 }
8012 else
8013 {
8014 if (local_got_offsets == NULL)
8015 abort ();
8016 off = local_got_offsets[r_symndx];
8017 tls_type = elf32_arm_local_got_tls_type (input_bfd)[r_symndx];
8018 }
8019
8020 if (tls_type == GOT_UNKNOWN)
8021 abort ();
8022
8023 if ((off & 1) != 0)
8024 off &= ~1;
8025 else
8026 {
8027 bfd_boolean need_relocs = FALSE;
8028 Elf_Internal_Rela outrel;
8029 bfd_byte *loc = NULL;
8030 int cur_off = off;
8031
8032 /* The GOT entries have not been initialized yet. Do it
8033 now, and emit any relocations. If both an IE GOT and a
8034 GD GOT are necessary, we emit the GD first. */
8035
8036 if ((info->shared || indx != 0)
8037 && (h == NULL
8038 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
8039 || h->root.type != bfd_link_hash_undefweak))
8040 {
8041 need_relocs = TRUE;
8042 if (globals->srelgot == NULL)
8043 abort ();
8044 loc = globals->srelgot->contents;
8045 loc += globals->srelgot->reloc_count * RELOC_SIZE (globals);
8046 }
8047
8048 if (tls_type & GOT_TLS_GD)
8049 {
8050 if (need_relocs)
8051 {
8052 outrel.r_addend = 0;
8053 outrel.r_offset = (globals->sgot->output_section->vma
8054 + globals->sgot->output_offset
8055 + cur_off);
8056 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_DTPMOD32);
8057
8058 if (globals->use_rel)
8059 bfd_put_32 (output_bfd, outrel.r_addend,
8060 globals->sgot->contents + cur_off);
8061
8062 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
8063 globals->srelgot->reloc_count++;
8064 loc += RELOC_SIZE (globals);
8065
8066 if (indx == 0)
8067 bfd_put_32 (output_bfd, value - dtpoff_base (info),
8068 globals->sgot->contents + cur_off + 4);
8069 else
8070 {
8071 outrel.r_addend = 0;
8072 outrel.r_info = ELF32_R_INFO (indx,
8073 R_ARM_TLS_DTPOFF32);
8074 outrel.r_offset += 4;
8075
8076 if (globals->use_rel)
8077 bfd_put_32 (output_bfd, outrel.r_addend,
8078 globals->sgot->contents + cur_off + 4);
8079
8080
8081 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
8082 globals->srelgot->reloc_count++;
8083 loc += RELOC_SIZE (globals);
8084 }
8085 }
8086 else
8087 {
8088 /* If we are not emitting relocations for a
8089 general dynamic reference, then we must be in a
8090 static link or an executable link with the
8091 symbol binding locally. Mark it as belonging
8092 to module 1, the executable. */
8093 bfd_put_32 (output_bfd, 1,
8094 globals->sgot->contents + cur_off);
8095 bfd_put_32 (output_bfd, value - dtpoff_base (info),
8096 globals->sgot->contents + cur_off + 4);
8097 }
8098
8099 cur_off += 8;
8100 }
8101
8102 if (tls_type & GOT_TLS_IE)
8103 {
8104 if (need_relocs)
8105 {
8106 if (indx == 0)
8107 outrel.r_addend = value - dtpoff_base (info);
8108 else
8109 outrel.r_addend = 0;
8110 outrel.r_offset = (globals->sgot->output_section->vma
8111 + globals->sgot->output_offset
8112 + cur_off);
8113 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_TPOFF32);
8114
8115 if (globals->use_rel)
8116 bfd_put_32 (output_bfd, outrel.r_addend,
8117 globals->sgot->contents + cur_off);
8118
8119 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
8120 globals->srelgot->reloc_count++;
8121 loc += RELOC_SIZE (globals);
8122 }
8123 else
8124 bfd_put_32 (output_bfd, tpoff (info, value),
8125 globals->sgot->contents + cur_off);
8126 cur_off += 4;
8127 }
8128
8129 if (h != NULL)
8130 h->got.offset |= 1;
8131 else
8132 local_got_offsets[r_symndx] |= 1;
8133 }
8134
8135 if ((tls_type & GOT_TLS_GD) && r_type != R_ARM_TLS_GD32)
8136 off += 8;
8137 value = globals->sgot->output_section->vma + globals->sgot->output_offset + off
8138 - (input_section->output_section->vma + input_section->output_offset + rel->r_offset);
8139
8140 return _bfd_final_link_relocate (howto, input_bfd, input_section,
8141 contents, rel->r_offset, value,
8142 rel->r_addend);
8143 }
8144
8145 case R_ARM_TLS_LE32:
8146 if (info->shared)
8147 {
8148 (*_bfd_error_handler)
8149 (_("%B(%A+0x%lx): R_ARM_TLS_LE32 relocation not permitted in shared object"),
8150 input_bfd, input_section,
8151 (long) rel->r_offset, howto->name);
8152 return (bfd_reloc_status_type) FALSE;
8153 }
8154 else
8155 value = tpoff (info, value);
8156
8157 return _bfd_final_link_relocate (howto, input_bfd, input_section,
8158 contents, rel->r_offset, value,
8159 rel->r_addend);
8160
8161 case R_ARM_V4BX:
8162 if (globals->fix_v4bx)
8163 {
8164 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8165
8166 /* Ensure that we have a BX instruction. */
8167 BFD_ASSERT ((insn & 0x0ffffff0) == 0x012fff10);
8168
8169 if (globals->fix_v4bx == 2 && (insn & 0xf) != 0xf)
8170 {
8171 /* Branch to veneer. */
8172 bfd_vma glue_addr;
8173 glue_addr = elf32_arm_bx_glue (info, insn & 0xf);
8174 glue_addr -= input_section->output_section->vma
8175 + input_section->output_offset
8176 + rel->r_offset + 8;
8177 insn = (insn & 0xf0000000) | 0x0a000000
8178 | ((glue_addr >> 2) & 0x00ffffff);
8179 }
8180 else
8181 {
8182 /* Preserve Rm (lowest four bits) and the condition code
8183 (highest four bits). Other bits encode MOV PC,Rm. */
8184 insn = (insn & 0xf000000f) | 0x01a0f000;
8185 }
8186
8187 bfd_put_32 (input_bfd, insn, hit_data);
8188 }
8189 return bfd_reloc_ok;
8190
8191 case R_ARM_MOVW_ABS_NC:
8192 case R_ARM_MOVT_ABS:
8193 case R_ARM_MOVW_PREL_NC:
8194 case R_ARM_MOVT_PREL:
8195 /* Until we properly support segment-base-relative addressing then
8196 we assume the segment base to be zero, as for the group relocations.
8197 Thus R_ARM_MOVW_BREL_NC has the same semantics as R_ARM_MOVW_ABS_NC
8198 and R_ARM_MOVT_BREL has the same semantics as R_ARM_MOVT_ABS. */
8199 case R_ARM_MOVW_BREL_NC:
8200 case R_ARM_MOVW_BREL:
8201 case R_ARM_MOVT_BREL:
8202 {
8203 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8204
8205 if (globals->use_rel)
8206 {
8207 addend = ((insn >> 4) & 0xf000) | (insn & 0xfff);
8208 signed_addend = (addend ^ 0x8000) - 0x8000;
8209 }
8210
8211 value += signed_addend;
8212
8213 if (r_type == R_ARM_MOVW_PREL_NC || r_type == R_ARM_MOVT_PREL)
8214 value -= (input_section->output_section->vma
8215 + input_section->output_offset + rel->r_offset);
8216
8217 if (r_type == R_ARM_MOVW_BREL && value >= 0x10000)
8218 return bfd_reloc_overflow;
8219
8220 if (sym_flags == STT_ARM_TFUNC)
8221 value |= 1;
8222
8223 if (r_type == R_ARM_MOVT_ABS || r_type == R_ARM_MOVT_PREL
8224 || r_type == R_ARM_MOVT_BREL)
8225 value >>= 16;
8226
8227 insn &= 0xfff0f000;
8228 insn |= value & 0xfff;
8229 insn |= (value & 0xf000) << 4;
8230 bfd_put_32 (input_bfd, insn, hit_data);
8231 }
8232 return bfd_reloc_ok;
8233
8234 case R_ARM_THM_MOVW_ABS_NC:
8235 case R_ARM_THM_MOVT_ABS:
8236 case R_ARM_THM_MOVW_PREL_NC:
8237 case R_ARM_THM_MOVT_PREL:
8238 /* Until we properly support segment-base-relative addressing then
8239 we assume the segment base to be zero, as for the above relocations.
8240 Thus R_ARM_THM_MOVW_BREL_NC has the same semantics as
8241 R_ARM_THM_MOVW_ABS_NC and R_ARM_THM_MOVT_BREL has the same semantics
8242 as R_ARM_THM_MOVT_ABS. */
8243 case R_ARM_THM_MOVW_BREL_NC:
8244 case R_ARM_THM_MOVW_BREL:
8245 case R_ARM_THM_MOVT_BREL:
8246 {
8247 bfd_vma insn;
8248
8249 insn = bfd_get_16 (input_bfd, hit_data) << 16;
8250 insn |= bfd_get_16 (input_bfd, hit_data + 2);
8251
8252 if (globals->use_rel)
8253 {
8254 addend = ((insn >> 4) & 0xf000)
8255 | ((insn >> 15) & 0x0800)
8256 | ((insn >> 4) & 0x0700)
8257 | (insn & 0x00ff);
8258 signed_addend = (addend ^ 0x8000) - 0x8000;
8259 }
8260
8261 value += signed_addend;
8262
8263 if (r_type == R_ARM_THM_MOVW_PREL_NC || r_type == R_ARM_THM_MOVT_PREL)
8264 value -= (input_section->output_section->vma
8265 + input_section->output_offset + rel->r_offset);
8266
8267 if (r_type == R_ARM_THM_MOVW_BREL && value >= 0x10000)
8268 return bfd_reloc_overflow;
8269
8270 if (sym_flags == STT_ARM_TFUNC)
8271 value |= 1;
8272
8273 if (r_type == R_ARM_THM_MOVT_ABS || r_type == R_ARM_THM_MOVT_PREL
8274 || r_type == R_ARM_THM_MOVT_BREL)
8275 value >>= 16;
8276
8277 insn &= 0xfbf08f00;
8278 insn |= (value & 0xf000) << 4;
8279 insn |= (value & 0x0800) << 15;
8280 insn |= (value & 0x0700) << 4;
8281 insn |= (value & 0x00ff);
8282
8283 bfd_put_16 (input_bfd, insn >> 16, hit_data);
8284 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
8285 }
8286 return bfd_reloc_ok;
8287
8288 case R_ARM_ALU_PC_G0_NC:
8289 case R_ARM_ALU_PC_G1_NC:
8290 case R_ARM_ALU_PC_G0:
8291 case R_ARM_ALU_PC_G1:
8292 case R_ARM_ALU_PC_G2:
8293 case R_ARM_ALU_SB_G0_NC:
8294 case R_ARM_ALU_SB_G1_NC:
8295 case R_ARM_ALU_SB_G0:
8296 case R_ARM_ALU_SB_G1:
8297 case R_ARM_ALU_SB_G2:
8298 {
8299 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8300 bfd_vma pc = input_section->output_section->vma
8301 + input_section->output_offset + rel->r_offset;
8302 /* sb should be the origin of the *segment* containing the symbol.
8303 It is not clear how to obtain this OS-dependent value, so we
8304 make an arbitrary choice of zero. */
8305 bfd_vma sb = 0;
8306 bfd_vma residual;
8307 bfd_vma g_n;
8308 bfd_signed_vma signed_value;
8309 int group = 0;
8310
8311 /* Determine which group of bits to select. */
8312 switch (r_type)
8313 {
8314 case R_ARM_ALU_PC_G0_NC:
8315 case R_ARM_ALU_PC_G0:
8316 case R_ARM_ALU_SB_G0_NC:
8317 case R_ARM_ALU_SB_G0:
8318 group = 0;
8319 break;
8320
8321 case R_ARM_ALU_PC_G1_NC:
8322 case R_ARM_ALU_PC_G1:
8323 case R_ARM_ALU_SB_G1_NC:
8324 case R_ARM_ALU_SB_G1:
8325 group = 1;
8326 break;
8327
8328 case R_ARM_ALU_PC_G2:
8329 case R_ARM_ALU_SB_G2:
8330 group = 2;
8331 break;
8332
8333 default:
8334 abort ();
8335 }
8336
8337 /* If REL, extract the addend from the insn. If RELA, it will
8338 have already been fetched for us. */
8339 if (globals->use_rel)
8340 {
8341 int negative;
8342 bfd_vma constant = insn & 0xff;
8343 bfd_vma rotation = (insn & 0xf00) >> 8;
8344
8345 if (rotation == 0)
8346 signed_addend = constant;
8347 else
8348 {
8349 /* Compensate for the fact that in the instruction, the
8350 rotation is stored in multiples of 2 bits. */
8351 rotation *= 2;
8352
8353 /* Rotate "constant" right by "rotation" bits. */
8354 signed_addend = (constant >> rotation) |
8355 (constant << (8 * sizeof (bfd_vma) - rotation));
8356 }
8357
8358 /* Determine if the instruction is an ADD or a SUB.
8359 (For REL, this determines the sign of the addend.) */
8360 negative = identify_add_or_sub (insn);
8361 if (negative == 0)
8362 {
8363 (*_bfd_error_handler)
8364 (_("%B(%A+0x%lx): Only ADD or SUB instructions are allowed for ALU group relocations"),
8365 input_bfd, input_section,
8366 (long) rel->r_offset, howto->name);
8367 return bfd_reloc_overflow;
8368 }
8369
8370 signed_addend *= negative;
8371 }
8372
8373 /* Compute the value (X) to go in the place. */
8374 if (r_type == R_ARM_ALU_PC_G0_NC
8375 || r_type == R_ARM_ALU_PC_G1_NC
8376 || r_type == R_ARM_ALU_PC_G0
8377 || r_type == R_ARM_ALU_PC_G1
8378 || r_type == R_ARM_ALU_PC_G2)
8379 /* PC relative. */
8380 signed_value = value - pc + signed_addend;
8381 else
8382 /* Section base relative. */
8383 signed_value = value - sb + signed_addend;
8384
8385 /* If the target symbol is a Thumb function, then set the
8386 Thumb bit in the address. */
8387 if (sym_flags == STT_ARM_TFUNC)
8388 signed_value |= 1;
8389
8390 /* Calculate the value of the relevant G_n, in encoded
8391 constant-with-rotation format. */
8392 g_n = calculate_group_reloc_mask (abs (signed_value), group,
8393 &residual);
8394
8395 /* Check for overflow if required. */
8396 if ((r_type == R_ARM_ALU_PC_G0
8397 || r_type == R_ARM_ALU_PC_G1
8398 || r_type == R_ARM_ALU_PC_G2
8399 || r_type == R_ARM_ALU_SB_G0
8400 || r_type == R_ARM_ALU_SB_G1
8401 || r_type == R_ARM_ALU_SB_G2) && residual != 0)
8402 {
8403 (*_bfd_error_handler)
8404 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
8405 input_bfd, input_section,
8406 (long) rel->r_offset, abs (signed_value), howto->name);
8407 return bfd_reloc_overflow;
8408 }
8409
8410 /* Mask out the value and the ADD/SUB part of the opcode; take care
8411 not to destroy the S bit. */
8412 insn &= 0xff1ff000;
8413
8414 /* Set the opcode according to whether the value to go in the
8415 place is negative. */
8416 if (signed_value < 0)
8417 insn |= 1 << 22;
8418 else
8419 insn |= 1 << 23;
8420
8421 /* Encode the offset. */
8422 insn |= g_n;
8423
8424 bfd_put_32 (input_bfd, insn, hit_data);
8425 }
8426 return bfd_reloc_ok;
8427
8428 case R_ARM_LDR_PC_G0:
8429 case R_ARM_LDR_PC_G1:
8430 case R_ARM_LDR_PC_G2:
8431 case R_ARM_LDR_SB_G0:
8432 case R_ARM_LDR_SB_G1:
8433 case R_ARM_LDR_SB_G2:
8434 {
8435 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8436 bfd_vma pc = input_section->output_section->vma
8437 + input_section->output_offset + rel->r_offset;
8438 bfd_vma sb = 0; /* See note above. */
8439 bfd_vma residual;
8440 bfd_signed_vma signed_value;
8441 int group = 0;
8442
8443 /* Determine which groups of bits to calculate. */
8444 switch (r_type)
8445 {
8446 case R_ARM_LDR_PC_G0:
8447 case R_ARM_LDR_SB_G0:
8448 group = 0;
8449 break;
8450
8451 case R_ARM_LDR_PC_G1:
8452 case R_ARM_LDR_SB_G1:
8453 group = 1;
8454 break;
8455
8456 case R_ARM_LDR_PC_G2:
8457 case R_ARM_LDR_SB_G2:
8458 group = 2;
8459 break;
8460
8461 default:
8462 abort ();
8463 }
8464
8465 /* If REL, extract the addend from the insn. If RELA, it will
8466 have already been fetched for us. */
8467 if (globals->use_rel)
8468 {
8469 int negative = (insn & (1 << 23)) ? 1 : -1;
8470 signed_addend = negative * (insn & 0xfff);
8471 }
8472
8473 /* Compute the value (X) to go in the place. */
8474 if (r_type == R_ARM_LDR_PC_G0
8475 || r_type == R_ARM_LDR_PC_G1
8476 || r_type == R_ARM_LDR_PC_G2)
8477 /* PC relative. */
8478 signed_value = value - pc + signed_addend;
8479 else
8480 /* Section base relative. */
8481 signed_value = value - sb + signed_addend;
8482
8483 /* Calculate the value of the relevant G_{n-1} to obtain
8484 the residual at that stage. */
8485 calculate_group_reloc_mask (abs (signed_value), group - 1, &residual);
8486
8487 /* Check for overflow. */
8488 if (residual >= 0x1000)
8489 {
8490 (*_bfd_error_handler)
8491 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
8492 input_bfd, input_section,
8493 (long) rel->r_offset, abs (signed_value), howto->name);
8494 return bfd_reloc_overflow;
8495 }
8496
8497 /* Mask out the value and U bit. */
8498 insn &= 0xff7ff000;
8499
8500 /* Set the U bit if the value to go in the place is non-negative. */
8501 if (signed_value >= 0)
8502 insn |= 1 << 23;
8503
8504 /* Encode the offset. */
8505 insn |= residual;
8506
8507 bfd_put_32 (input_bfd, insn, hit_data);
8508 }
8509 return bfd_reloc_ok;
8510
8511 case R_ARM_LDRS_PC_G0:
8512 case R_ARM_LDRS_PC_G1:
8513 case R_ARM_LDRS_PC_G2:
8514 case R_ARM_LDRS_SB_G0:
8515 case R_ARM_LDRS_SB_G1:
8516 case R_ARM_LDRS_SB_G2:
8517 {
8518 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8519 bfd_vma pc = input_section->output_section->vma
8520 + input_section->output_offset + rel->r_offset;
8521 bfd_vma sb = 0; /* See note above. */
8522 bfd_vma residual;
8523 bfd_signed_vma signed_value;
8524 int group = 0;
8525
8526 /* Determine which groups of bits to calculate. */
8527 switch (r_type)
8528 {
8529 case R_ARM_LDRS_PC_G0:
8530 case R_ARM_LDRS_SB_G0:
8531 group = 0;
8532 break;
8533
8534 case R_ARM_LDRS_PC_G1:
8535 case R_ARM_LDRS_SB_G1:
8536 group = 1;
8537 break;
8538
8539 case R_ARM_LDRS_PC_G2:
8540 case R_ARM_LDRS_SB_G2:
8541 group = 2;
8542 break;
8543
8544 default:
8545 abort ();
8546 }
8547
8548 /* If REL, extract the addend from the insn. If RELA, it will
8549 have already been fetched for us. */
8550 if (globals->use_rel)
8551 {
8552 int negative = (insn & (1 << 23)) ? 1 : -1;
8553 signed_addend = negative * (((insn & 0xf00) >> 4) + (insn & 0xf));
8554 }
8555
8556 /* Compute the value (X) to go in the place. */
8557 if (r_type == R_ARM_LDRS_PC_G0
8558 || r_type == R_ARM_LDRS_PC_G1
8559 || r_type == R_ARM_LDRS_PC_G2)
8560 /* PC relative. */
8561 signed_value = value - pc + signed_addend;
8562 else
8563 /* Section base relative. */
8564 signed_value = value - sb + signed_addend;
8565
8566 /* Calculate the value of the relevant G_{n-1} to obtain
8567 the residual at that stage. */
8568 calculate_group_reloc_mask (abs (signed_value), group - 1, &residual);
8569
8570 /* Check for overflow. */
8571 if (residual >= 0x100)
8572 {
8573 (*_bfd_error_handler)
8574 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
8575 input_bfd, input_section,
8576 (long) rel->r_offset, abs (signed_value), howto->name);
8577 return bfd_reloc_overflow;
8578 }
8579
8580 /* Mask out the value and U bit. */
8581 insn &= 0xff7ff0f0;
8582
8583 /* Set the U bit if the value to go in the place is non-negative. */
8584 if (signed_value >= 0)
8585 insn |= 1 << 23;
8586
8587 /* Encode the offset. */
8588 insn |= ((residual & 0xf0) << 4) | (residual & 0xf);
8589
8590 bfd_put_32 (input_bfd, insn, hit_data);
8591 }
8592 return bfd_reloc_ok;
8593
8594 case R_ARM_LDC_PC_G0:
8595 case R_ARM_LDC_PC_G1:
8596 case R_ARM_LDC_PC_G2:
8597 case R_ARM_LDC_SB_G0:
8598 case R_ARM_LDC_SB_G1:
8599 case R_ARM_LDC_SB_G2:
8600 {
8601 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8602 bfd_vma pc = input_section->output_section->vma
8603 + input_section->output_offset + rel->r_offset;
8604 bfd_vma sb = 0; /* See note above. */
8605 bfd_vma residual;
8606 bfd_signed_vma signed_value;
8607 int group = 0;
8608
8609 /* Determine which groups of bits to calculate. */
8610 switch (r_type)
8611 {
8612 case R_ARM_LDC_PC_G0:
8613 case R_ARM_LDC_SB_G0:
8614 group = 0;
8615 break;
8616
8617 case R_ARM_LDC_PC_G1:
8618 case R_ARM_LDC_SB_G1:
8619 group = 1;
8620 break;
8621
8622 case R_ARM_LDC_PC_G2:
8623 case R_ARM_LDC_SB_G2:
8624 group = 2;
8625 break;
8626
8627 default:
8628 abort ();
8629 }
8630
8631 /* If REL, extract the addend from the insn. If RELA, it will
8632 have already been fetched for us. */
8633 if (globals->use_rel)
8634 {
8635 int negative = (insn & (1 << 23)) ? 1 : -1;
8636 signed_addend = negative * ((insn & 0xff) << 2);
8637 }
8638
8639 /* Compute the value (X) to go in the place. */
8640 if (r_type == R_ARM_LDC_PC_G0
8641 || r_type == R_ARM_LDC_PC_G1
8642 || r_type == R_ARM_LDC_PC_G2)
8643 /* PC relative. */
8644 signed_value = value - pc + signed_addend;
8645 else
8646 /* Section base relative. */
8647 signed_value = value - sb + signed_addend;
8648
8649 /* Calculate the value of the relevant G_{n-1} to obtain
8650 the residual at that stage. */
8651 calculate_group_reloc_mask (abs (signed_value), group - 1, &residual);
8652
8653 /* Check for overflow. (The absolute value to go in the place must be
8654 divisible by four and, after having been divided by four, must
8655 fit in eight bits.) */
8656 if ((residual & 0x3) != 0 || residual >= 0x400)
8657 {
8658 (*_bfd_error_handler)
8659 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
8660 input_bfd, input_section,
8661 (long) rel->r_offset, abs (signed_value), howto->name);
8662 return bfd_reloc_overflow;
8663 }
8664
8665 /* Mask out the value and U bit. */
8666 insn &= 0xff7fff00;
8667
8668 /* Set the U bit if the value to go in the place is non-negative. */
8669 if (signed_value >= 0)
8670 insn |= 1 << 23;
8671
8672 /* Encode the offset. */
8673 insn |= residual >> 2;
8674
8675 bfd_put_32 (input_bfd, insn, hit_data);
8676 }
8677 return bfd_reloc_ok;
8678
8679 default:
8680 return bfd_reloc_notsupported;
8681 }
8682 }
8683
8684 /* Add INCREMENT to the reloc (of type HOWTO) at ADDRESS. */
8685 static void
8686 arm_add_to_rel (bfd * abfd,
8687 bfd_byte * address,
8688 reloc_howto_type * howto,
8689 bfd_signed_vma increment)
8690 {
8691 bfd_signed_vma addend;
8692
8693 if (howto->type == R_ARM_THM_CALL
8694 || howto->type == R_ARM_THM_JUMP24)
8695 {
8696 int upper_insn, lower_insn;
8697 int upper, lower;
8698
8699 upper_insn = bfd_get_16 (abfd, address);
8700 lower_insn = bfd_get_16 (abfd, address + 2);
8701 upper = upper_insn & 0x7ff;
8702 lower = lower_insn & 0x7ff;
8703
8704 addend = (upper << 12) | (lower << 1);
8705 addend += increment;
8706 addend >>= 1;
8707
8708 upper_insn = (upper_insn & 0xf800) | ((addend >> 11) & 0x7ff);
8709 lower_insn = (lower_insn & 0xf800) | (addend & 0x7ff);
8710
8711 bfd_put_16 (abfd, (bfd_vma) upper_insn, address);
8712 bfd_put_16 (abfd, (bfd_vma) lower_insn, address + 2);
8713 }
8714 else
8715 {
8716 bfd_vma contents;
8717
8718 contents = bfd_get_32 (abfd, address);
8719
8720 /* Get the (signed) value from the instruction. */
8721 addend = contents & howto->src_mask;
8722 if (addend & ((howto->src_mask + 1) >> 1))
8723 {
8724 bfd_signed_vma mask;
8725
8726 mask = -1;
8727 mask &= ~ howto->src_mask;
8728 addend |= mask;
8729 }
8730
8731 /* Add in the increment, (which is a byte value). */
8732 switch (howto->type)
8733 {
8734 default:
8735 addend += increment;
8736 break;
8737
8738 case R_ARM_PC24:
8739 case R_ARM_PLT32:
8740 case R_ARM_CALL:
8741 case R_ARM_JUMP24:
8742 addend <<= howto->size;
8743 addend += increment;
8744
8745 /* Should we check for overflow here ? */
8746
8747 /* Drop any undesired bits. */
8748 addend >>= howto->rightshift;
8749 break;
8750 }
8751
8752 contents = (contents & ~ howto->dst_mask) | (addend & howto->dst_mask);
8753
8754 bfd_put_32 (abfd, contents, address);
8755 }
8756 }
8757
8758 #define IS_ARM_TLS_RELOC(R_TYPE) \
8759 ((R_TYPE) == R_ARM_TLS_GD32 \
8760 || (R_TYPE) == R_ARM_TLS_LDO32 \
8761 || (R_TYPE) == R_ARM_TLS_LDM32 \
8762 || (R_TYPE) == R_ARM_TLS_DTPOFF32 \
8763 || (R_TYPE) == R_ARM_TLS_DTPMOD32 \
8764 || (R_TYPE) == R_ARM_TLS_TPOFF32 \
8765 || (R_TYPE) == R_ARM_TLS_LE32 \
8766 || (R_TYPE) == R_ARM_TLS_IE32)
8767
8768 /* Relocate an ARM ELF section. */
8769
8770 static bfd_boolean
8771 elf32_arm_relocate_section (bfd * output_bfd,
8772 struct bfd_link_info * info,
8773 bfd * input_bfd,
8774 asection * input_section,
8775 bfd_byte * contents,
8776 Elf_Internal_Rela * relocs,
8777 Elf_Internal_Sym * local_syms,
8778 asection ** local_sections)
8779 {
8780 Elf_Internal_Shdr *symtab_hdr;
8781 struct elf_link_hash_entry **sym_hashes;
8782 Elf_Internal_Rela *rel;
8783 Elf_Internal_Rela *relend;
8784 const char *name;
8785 struct elf32_arm_link_hash_table * globals;
8786
8787 globals = elf32_arm_hash_table (info);
8788 if (globals == NULL)
8789 return FALSE;
8790
8791 symtab_hdr = & elf_symtab_hdr (input_bfd);
8792 sym_hashes = elf_sym_hashes (input_bfd);
8793
8794 rel = relocs;
8795 relend = relocs + input_section->reloc_count;
8796 for (; rel < relend; rel++)
8797 {
8798 int r_type;
8799 reloc_howto_type * howto;
8800 unsigned long r_symndx;
8801 Elf_Internal_Sym * sym;
8802 asection * sec;
8803 struct elf_link_hash_entry * h;
8804 bfd_vma relocation;
8805 bfd_reloc_status_type r;
8806 arelent bfd_reloc;
8807 char sym_type;
8808 bfd_boolean unresolved_reloc = FALSE;
8809 char *error_message = NULL;
8810
8811 r_symndx = ELF32_R_SYM (rel->r_info);
8812 r_type = ELF32_R_TYPE (rel->r_info);
8813 r_type = arm_real_reloc_type (globals, r_type);
8814
8815 if ( r_type == R_ARM_GNU_VTENTRY
8816 || r_type == R_ARM_GNU_VTINHERIT)
8817 continue;
8818
8819 bfd_reloc.howto = elf32_arm_howto_from_type (r_type);
8820 howto = bfd_reloc.howto;
8821
8822 h = NULL;
8823 sym = NULL;
8824 sec = NULL;
8825
8826 if (r_symndx < symtab_hdr->sh_info)
8827 {
8828 sym = local_syms + r_symndx;
8829 sym_type = ELF32_ST_TYPE (sym->st_info);
8830 sec = local_sections[r_symndx];
8831
8832 /* An object file might have a reference to a local
8833 undefined symbol. This is a daft object file, but we
8834 should at least do something about it. V4BX & NONE
8835 relocations do not use the symbol and are explicitly
8836 allowed to use the undefined symbol, so allow those. */
8837 if (r_type != R_ARM_V4BX
8838 && r_type != R_ARM_NONE
8839 && bfd_is_und_section (sec)
8840 && ELF_ST_BIND (sym->st_info) != STB_WEAK)
8841 {
8842 if (!info->callbacks->undefined_symbol
8843 (info, bfd_elf_string_from_elf_section
8844 (input_bfd, symtab_hdr->sh_link, sym->st_name),
8845 input_bfd, input_section,
8846 rel->r_offset, TRUE))
8847 return FALSE;
8848 }
8849
8850 if (globals->use_rel)
8851 {
8852 relocation = (sec->output_section->vma
8853 + sec->output_offset
8854 + sym->st_value);
8855 if (!info->relocatable
8856 && (sec->flags & SEC_MERGE)
8857 && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
8858 {
8859 asection *msec;
8860 bfd_vma addend, value;
8861
8862 switch (r_type)
8863 {
8864 case R_ARM_MOVW_ABS_NC:
8865 case R_ARM_MOVT_ABS:
8866 value = bfd_get_32 (input_bfd, contents + rel->r_offset);
8867 addend = ((value & 0xf0000) >> 4) | (value & 0xfff);
8868 addend = (addend ^ 0x8000) - 0x8000;
8869 break;
8870
8871 case R_ARM_THM_MOVW_ABS_NC:
8872 case R_ARM_THM_MOVT_ABS:
8873 value = bfd_get_16 (input_bfd, contents + rel->r_offset)
8874 << 16;
8875 value |= bfd_get_16 (input_bfd,
8876 contents + rel->r_offset + 2);
8877 addend = ((value & 0xf7000) >> 4) | (value & 0xff)
8878 | ((value & 0x04000000) >> 15);
8879 addend = (addend ^ 0x8000) - 0x8000;
8880 break;
8881
8882 default:
8883 if (howto->rightshift
8884 || (howto->src_mask & (howto->src_mask + 1)))
8885 {
8886 (*_bfd_error_handler)
8887 (_("%B(%A+0x%lx): %s relocation against SEC_MERGE section"),
8888 input_bfd, input_section,
8889 (long) rel->r_offset, howto->name);
8890 return FALSE;
8891 }
8892
8893 value = bfd_get_32 (input_bfd, contents + rel->r_offset);
8894
8895 /* Get the (signed) value from the instruction. */
8896 addend = value & howto->src_mask;
8897 if (addend & ((howto->src_mask + 1) >> 1))
8898 {
8899 bfd_signed_vma mask;
8900
8901 mask = -1;
8902 mask &= ~ howto->src_mask;
8903 addend |= mask;
8904 }
8905 break;
8906 }
8907
8908 msec = sec;
8909 addend =
8910 _bfd_elf_rel_local_sym (output_bfd, sym, &msec, addend)
8911 - relocation;
8912 addend += msec->output_section->vma + msec->output_offset;
8913
8914 /* Cases here must match those in the preceeding
8915 switch statement. */
8916 switch (r_type)
8917 {
8918 case R_ARM_MOVW_ABS_NC:
8919 case R_ARM_MOVT_ABS:
8920 value = (value & 0xfff0f000) | ((addend & 0xf000) << 4)
8921 | (addend & 0xfff);
8922 bfd_put_32 (input_bfd, value, contents + rel->r_offset);
8923 break;
8924
8925 case R_ARM_THM_MOVW_ABS_NC:
8926 case R_ARM_THM_MOVT_ABS:
8927 value = (value & 0xfbf08f00) | ((addend & 0xf700) << 4)
8928 | (addend & 0xff) | ((addend & 0x0800) << 15);
8929 bfd_put_16 (input_bfd, value >> 16,
8930 contents + rel->r_offset);
8931 bfd_put_16 (input_bfd, value,
8932 contents + rel->r_offset + 2);
8933 break;
8934
8935 default:
8936 value = (value & ~ howto->dst_mask)
8937 | (addend & howto->dst_mask);
8938 bfd_put_32 (input_bfd, value, contents + rel->r_offset);
8939 break;
8940 }
8941 }
8942 }
8943 else
8944 relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
8945 }
8946 else
8947 {
8948 bfd_boolean warned;
8949
8950 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
8951 r_symndx, symtab_hdr, sym_hashes,
8952 h, sec, relocation,
8953 unresolved_reloc, warned);
8954
8955 sym_type = h->type;
8956 }
8957
8958 if (sec != NULL && elf_discarded_section (sec))
8959 {
8960 /* For relocs against symbols from removed linkonce sections,
8961 or sections discarded by a linker script, we just want the
8962 section contents zeroed. Avoid any special processing. */
8963 _bfd_clear_contents (howto, input_bfd, contents + rel->r_offset);
8964 rel->r_info = 0;
8965 rel->r_addend = 0;
8966 continue;
8967 }
8968
8969 if (info->relocatable)
8970 {
8971 /* This is a relocatable link. We don't have to change
8972 anything, unless the reloc is against a section symbol,
8973 in which case we have to adjust according to where the
8974 section symbol winds up in the output section. */
8975 if (sym != NULL && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
8976 {
8977 if (globals->use_rel)
8978 arm_add_to_rel (input_bfd, contents + rel->r_offset,
8979 howto, (bfd_signed_vma) sec->output_offset);
8980 else
8981 rel->r_addend += sec->output_offset;
8982 }
8983 continue;
8984 }
8985
8986 if (h != NULL)
8987 name = h->root.root.string;
8988 else
8989 {
8990 name = (bfd_elf_string_from_elf_section
8991 (input_bfd, symtab_hdr->sh_link, sym->st_name));
8992 if (name == NULL || *name == '\0')
8993 name = bfd_section_name (input_bfd, sec);
8994 }
8995
8996 if (r_symndx != 0
8997 && r_type != R_ARM_NONE
8998 && (h == NULL
8999 || h->root.type == bfd_link_hash_defined
9000 || h->root.type == bfd_link_hash_defweak)
9001 && IS_ARM_TLS_RELOC (r_type) != (sym_type == STT_TLS))
9002 {
9003 (*_bfd_error_handler)
9004 ((sym_type == STT_TLS
9005 ? _("%B(%A+0x%lx): %s used with TLS symbol %s")
9006 : _("%B(%A+0x%lx): %s used with non-TLS symbol %s")),
9007 input_bfd,
9008 input_section,
9009 (long) rel->r_offset,
9010 howto->name,
9011 name);
9012 }
9013
9014 r = elf32_arm_final_link_relocate (howto, input_bfd, output_bfd,
9015 input_section, contents, rel,
9016 relocation, info, sec, name,
9017 (h ? ELF_ST_TYPE (h->type) :
9018 ELF_ST_TYPE (sym->st_info)), h,
9019 &unresolved_reloc, &error_message);
9020
9021 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
9022 because such sections are not SEC_ALLOC and thus ld.so will
9023 not process them. */
9024 if (unresolved_reloc
9025 && !((input_section->flags & SEC_DEBUGGING) != 0
9026 && h->def_dynamic))
9027 {
9028 (*_bfd_error_handler)
9029 (_("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"),
9030 input_bfd,
9031 input_section,
9032 (long) rel->r_offset,
9033 howto->name,
9034 h->root.root.string);
9035 return FALSE;
9036 }
9037
9038 if (r != bfd_reloc_ok)
9039 {
9040 switch (r)
9041 {
9042 case bfd_reloc_overflow:
9043 /* If the overflowing reloc was to an undefined symbol,
9044 we have already printed one error message and there
9045 is no point complaining again. */
9046 if ((! h ||
9047 h->root.type != bfd_link_hash_undefined)
9048 && (!((*info->callbacks->reloc_overflow)
9049 (info, (h ? &h->root : NULL), name, howto->name,
9050 (bfd_vma) 0, input_bfd, input_section,
9051 rel->r_offset))))
9052 return FALSE;
9053 break;
9054
9055 case bfd_reloc_undefined:
9056 if (!((*info->callbacks->undefined_symbol)
9057 (info, name, input_bfd, input_section,
9058 rel->r_offset, TRUE)))
9059 return FALSE;
9060 break;
9061
9062 case bfd_reloc_outofrange:
9063 error_message = _("out of range");
9064 goto common_error;
9065
9066 case bfd_reloc_notsupported:
9067 error_message = _("unsupported relocation");
9068 goto common_error;
9069
9070 case bfd_reloc_dangerous:
9071 /* error_message should already be set. */
9072 goto common_error;
9073
9074 default:
9075 error_message = _("unknown error");
9076 /* Fall through. */
9077
9078 common_error:
9079 BFD_ASSERT (error_message != NULL);
9080 if (!((*info->callbacks->reloc_dangerous)
9081 (info, error_message, input_bfd, input_section,
9082 rel->r_offset)))
9083 return FALSE;
9084 break;
9085 }
9086 }
9087 }
9088
9089 return TRUE;
9090 }
9091
9092 /* Add a new unwind edit to the list described by HEAD, TAIL. If TINDEX is zero,
9093 adds the edit to the start of the list. (The list must be built in order of
9094 ascending TINDEX: the function's callers are primarily responsible for
9095 maintaining that condition). */
9096
9097 static void
9098 add_unwind_table_edit (arm_unwind_table_edit **head,
9099 arm_unwind_table_edit **tail,
9100 arm_unwind_edit_type type,
9101 asection *linked_section,
9102 unsigned int tindex)
9103 {
9104 arm_unwind_table_edit *new_edit = (arm_unwind_table_edit *)
9105 xmalloc (sizeof (arm_unwind_table_edit));
9106
9107 new_edit->type = type;
9108 new_edit->linked_section = linked_section;
9109 new_edit->index = tindex;
9110
9111 if (tindex > 0)
9112 {
9113 new_edit->next = NULL;
9114
9115 if (*tail)
9116 (*tail)->next = new_edit;
9117
9118 (*tail) = new_edit;
9119
9120 if (!*head)
9121 (*head) = new_edit;
9122 }
9123 else
9124 {
9125 new_edit->next = *head;
9126
9127 if (!*tail)
9128 *tail = new_edit;
9129
9130 *head = new_edit;
9131 }
9132 }
9133
9134 static _arm_elf_section_data *get_arm_elf_section_data (asection *);
9135
9136 /* Increase the size of EXIDX_SEC by ADJUST bytes. ADJUST mau be negative. */
9137 static void
9138 adjust_exidx_size(asection *exidx_sec, int adjust)
9139 {
9140 asection *out_sec;
9141
9142 if (!exidx_sec->rawsize)
9143 exidx_sec->rawsize = exidx_sec->size;
9144
9145 bfd_set_section_size (exidx_sec->owner, exidx_sec, exidx_sec->size + adjust);
9146 out_sec = exidx_sec->output_section;
9147 /* Adjust size of output section. */
9148 bfd_set_section_size (out_sec->owner, out_sec, out_sec->size +adjust);
9149 }
9150
9151 /* Insert an EXIDX_CANTUNWIND marker at the end of a section. */
9152 static void
9153 insert_cantunwind_after(asection *text_sec, asection *exidx_sec)
9154 {
9155 struct _arm_elf_section_data *exidx_arm_data;
9156
9157 exidx_arm_data = get_arm_elf_section_data (exidx_sec);
9158 add_unwind_table_edit (
9159 &exidx_arm_data->u.exidx.unwind_edit_list,
9160 &exidx_arm_data->u.exidx.unwind_edit_tail,
9161 INSERT_EXIDX_CANTUNWIND_AT_END, text_sec, UINT_MAX);
9162
9163 adjust_exidx_size(exidx_sec, 8);
9164 }
9165
9166 /* Scan .ARM.exidx tables, and create a list describing edits which should be
9167 made to those tables, such that:
9168
9169 1. Regions without unwind data are marked with EXIDX_CANTUNWIND entries.
9170 2. Duplicate entries are merged together (EXIDX_CANTUNWIND, or unwind
9171 codes which have been inlined into the index).
9172
9173 The edits are applied when the tables are written
9174 (in elf32_arm_write_section).
9175 */
9176
9177 bfd_boolean
9178 elf32_arm_fix_exidx_coverage (asection **text_section_order,
9179 unsigned int num_text_sections,
9180 struct bfd_link_info *info)
9181 {
9182 bfd *inp;
9183 unsigned int last_second_word = 0, i;
9184 asection *last_exidx_sec = NULL;
9185 asection *last_text_sec = NULL;
9186 int last_unwind_type = -1;
9187
9188 /* Walk over all EXIDX sections, and create backlinks from the corrsponding
9189 text sections. */
9190 for (inp = info->input_bfds; inp != NULL; inp = inp->link_next)
9191 {
9192 asection *sec;
9193
9194 for (sec = inp->sections; sec != NULL; sec = sec->next)
9195 {
9196 struct bfd_elf_section_data *elf_sec = elf_section_data (sec);
9197 Elf_Internal_Shdr *hdr = &elf_sec->this_hdr;
9198
9199 if (!hdr || hdr->sh_type != SHT_ARM_EXIDX)
9200 continue;
9201
9202 if (elf_sec->linked_to)
9203 {
9204 Elf_Internal_Shdr *linked_hdr
9205 = &elf_section_data (elf_sec->linked_to)->this_hdr;
9206 struct _arm_elf_section_data *linked_sec_arm_data
9207 = get_arm_elf_section_data (linked_hdr->bfd_section);
9208
9209 if (linked_sec_arm_data == NULL)
9210 continue;
9211
9212 /* Link this .ARM.exidx section back from the text section it
9213 describes. */
9214 linked_sec_arm_data->u.text.arm_exidx_sec = sec;
9215 }
9216 }
9217 }
9218
9219 /* Walk all text sections in order of increasing VMA. Eilminate duplicate
9220 index table entries (EXIDX_CANTUNWIND and inlined unwind opcodes),
9221 and add EXIDX_CANTUNWIND entries for sections with no unwind table data. */
9222
9223 for (i = 0; i < num_text_sections; i++)
9224 {
9225 asection *sec = text_section_order[i];
9226 asection *exidx_sec;
9227 struct _arm_elf_section_data *arm_data = get_arm_elf_section_data (sec);
9228 struct _arm_elf_section_data *exidx_arm_data;
9229 bfd_byte *contents = NULL;
9230 int deleted_exidx_bytes = 0;
9231 bfd_vma j;
9232 arm_unwind_table_edit *unwind_edit_head = NULL;
9233 arm_unwind_table_edit *unwind_edit_tail = NULL;
9234 Elf_Internal_Shdr *hdr;
9235 bfd *ibfd;
9236
9237 if (arm_data == NULL)
9238 continue;
9239
9240 exidx_sec = arm_data->u.text.arm_exidx_sec;
9241 if (exidx_sec == NULL)
9242 {
9243 /* Section has no unwind data. */
9244 if (last_unwind_type == 0 || !last_exidx_sec)
9245 continue;
9246
9247 /* Ignore zero sized sections. */
9248 if (sec->size == 0)
9249 continue;
9250
9251 insert_cantunwind_after(last_text_sec, last_exidx_sec);
9252 last_unwind_type = 0;
9253 continue;
9254 }
9255
9256 /* Skip /DISCARD/ sections. */
9257 if (bfd_is_abs_section (exidx_sec->output_section))
9258 continue;
9259
9260 hdr = &elf_section_data (exidx_sec)->this_hdr;
9261 if (hdr->sh_type != SHT_ARM_EXIDX)
9262 continue;
9263
9264 exidx_arm_data = get_arm_elf_section_data (exidx_sec);
9265 if (exidx_arm_data == NULL)
9266 continue;
9267
9268 ibfd = exidx_sec->owner;
9269
9270 if (hdr->contents != NULL)
9271 contents = hdr->contents;
9272 else if (! bfd_malloc_and_get_section (ibfd, exidx_sec, &contents))
9273 /* An error? */
9274 continue;
9275
9276 for (j = 0; j < hdr->sh_size; j += 8)
9277 {
9278 unsigned int second_word = bfd_get_32 (ibfd, contents + j + 4);
9279 int unwind_type;
9280 int elide = 0;
9281
9282 /* An EXIDX_CANTUNWIND entry. */
9283 if (second_word == 1)
9284 {
9285 if (last_unwind_type == 0)
9286 elide = 1;
9287 unwind_type = 0;
9288 }
9289 /* Inlined unwinding data. Merge if equal to previous. */
9290 else if ((second_word & 0x80000000) != 0)
9291 {
9292 if (last_second_word == second_word && last_unwind_type == 1)
9293 elide = 1;
9294 unwind_type = 1;
9295 last_second_word = second_word;
9296 }
9297 /* Normal table entry. In theory we could merge these too,
9298 but duplicate entries are likely to be much less common. */
9299 else
9300 unwind_type = 2;
9301
9302 if (elide)
9303 {
9304 add_unwind_table_edit (&unwind_edit_head, &unwind_edit_tail,
9305 DELETE_EXIDX_ENTRY, NULL, j / 8);
9306
9307 deleted_exidx_bytes += 8;
9308 }
9309
9310 last_unwind_type = unwind_type;
9311 }
9312
9313 /* Free contents if we allocated it ourselves. */
9314 if (contents != hdr->contents)
9315 free (contents);
9316
9317 /* Record edits to be applied later (in elf32_arm_write_section). */
9318 exidx_arm_data->u.exidx.unwind_edit_list = unwind_edit_head;
9319 exidx_arm_data->u.exidx.unwind_edit_tail = unwind_edit_tail;
9320
9321 if (deleted_exidx_bytes > 0)
9322 adjust_exidx_size(exidx_sec, -deleted_exidx_bytes);
9323
9324 last_exidx_sec = exidx_sec;
9325 last_text_sec = sec;
9326 }
9327
9328 /* Add terminating CANTUNWIND entry. */
9329 if (last_exidx_sec && last_unwind_type != 0)
9330 insert_cantunwind_after(last_text_sec, last_exidx_sec);
9331
9332 return TRUE;
9333 }
9334
9335 static bfd_boolean
9336 elf32_arm_output_glue_section (struct bfd_link_info *info, bfd *obfd,
9337 bfd *ibfd, const char *name)
9338 {
9339 asection *sec, *osec;
9340
9341 sec = bfd_get_section_by_name (ibfd, name);
9342 if (sec == NULL || (sec->flags & SEC_EXCLUDE) != 0)
9343 return TRUE;
9344
9345 osec = sec->output_section;
9346 if (elf32_arm_write_section (obfd, info, sec, sec->contents))
9347 return TRUE;
9348
9349 if (! bfd_set_section_contents (obfd, osec, sec->contents,
9350 sec->output_offset, sec->size))
9351 return FALSE;
9352
9353 return TRUE;
9354 }
9355
9356 static bfd_boolean
9357 elf32_arm_final_link (bfd *abfd, struct bfd_link_info *info)
9358 {
9359 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
9360
9361 if (globals == NULL)
9362 return FALSE;
9363
9364 /* Invoke the regular ELF backend linker to do all the work. */
9365 if (!bfd_elf_final_link (abfd, info))
9366 return FALSE;
9367
9368 /* Write out any glue sections now that we have created all the
9369 stubs. */
9370 if (globals->bfd_of_glue_owner != NULL)
9371 {
9372 if (! elf32_arm_output_glue_section (info, abfd,
9373 globals->bfd_of_glue_owner,
9374 ARM2THUMB_GLUE_SECTION_NAME))
9375 return FALSE;
9376
9377 if (! elf32_arm_output_glue_section (info, abfd,
9378 globals->bfd_of_glue_owner,
9379 THUMB2ARM_GLUE_SECTION_NAME))
9380 return FALSE;
9381
9382 if (! elf32_arm_output_glue_section (info, abfd,
9383 globals->bfd_of_glue_owner,
9384 VFP11_ERRATUM_VENEER_SECTION_NAME))
9385 return FALSE;
9386
9387 if (! elf32_arm_output_glue_section (info, abfd,
9388 globals->bfd_of_glue_owner,
9389 ARM_BX_GLUE_SECTION_NAME))
9390 return FALSE;
9391 }
9392
9393 return TRUE;
9394 }
9395
9396 /* Set the right machine number. */
9397
9398 static bfd_boolean
9399 elf32_arm_object_p (bfd *abfd)
9400 {
9401 unsigned int mach;
9402
9403 mach = bfd_arm_get_mach_from_notes (abfd, ARM_NOTE_SECTION);
9404
9405 if (mach != bfd_mach_arm_unknown)
9406 bfd_default_set_arch_mach (abfd, bfd_arch_arm, mach);
9407
9408 else if (elf_elfheader (abfd)->e_flags & EF_ARM_MAVERICK_FLOAT)
9409 bfd_default_set_arch_mach (abfd, bfd_arch_arm, bfd_mach_arm_ep9312);
9410
9411 else
9412 bfd_default_set_arch_mach (abfd, bfd_arch_arm, mach);
9413
9414 return TRUE;
9415 }
9416
9417 /* Function to keep ARM specific flags in the ELF header. */
9418
9419 static bfd_boolean
9420 elf32_arm_set_private_flags (bfd *abfd, flagword flags)
9421 {
9422 if (elf_flags_init (abfd)
9423 && elf_elfheader (abfd)->e_flags != flags)
9424 {
9425 if (EF_ARM_EABI_VERSION (flags) == EF_ARM_EABI_UNKNOWN)
9426 {
9427 if (flags & EF_ARM_INTERWORK)
9428 (*_bfd_error_handler)
9429 (_("Warning: Not setting interworking flag of %B since it has already been specified as non-interworking"),
9430 abfd);
9431 else
9432 _bfd_error_handler
9433 (_("Warning: Clearing the interworking flag of %B due to outside request"),
9434 abfd);
9435 }
9436 }
9437 else
9438 {
9439 elf_elfheader (abfd)->e_flags = flags;
9440 elf_flags_init (abfd) = TRUE;
9441 }
9442
9443 return TRUE;
9444 }
9445
9446 /* Copy backend specific data from one object module to another. */
9447
9448 static bfd_boolean
9449 elf32_arm_copy_private_bfd_data (bfd *ibfd, bfd *obfd)
9450 {
9451 flagword in_flags;
9452 flagword out_flags;
9453
9454 if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
9455 return TRUE;
9456
9457 in_flags = elf_elfheader (ibfd)->e_flags;
9458 out_flags = elf_elfheader (obfd)->e_flags;
9459
9460 if (elf_flags_init (obfd)
9461 && EF_ARM_EABI_VERSION (out_flags) == EF_ARM_EABI_UNKNOWN
9462 && in_flags != out_flags)
9463 {
9464 /* Cannot mix APCS26 and APCS32 code. */
9465 if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
9466 return FALSE;
9467
9468 /* Cannot mix float APCS and non-float APCS code. */
9469 if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
9470 return FALSE;
9471
9472 /* If the src and dest have different interworking flags
9473 then turn off the interworking bit. */
9474 if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
9475 {
9476 if (out_flags & EF_ARM_INTERWORK)
9477 _bfd_error_handler
9478 (_("Warning: Clearing the interworking flag of %B because non-interworking code in %B has been linked with it"),
9479 obfd, ibfd);
9480
9481 in_flags &= ~EF_ARM_INTERWORK;
9482 }
9483
9484 /* Likewise for PIC, though don't warn for this case. */
9485 if ((in_flags & EF_ARM_PIC) != (out_flags & EF_ARM_PIC))
9486 in_flags &= ~EF_ARM_PIC;
9487 }
9488
9489 elf_elfheader (obfd)->e_flags = in_flags;
9490 elf_flags_init (obfd) = TRUE;
9491
9492 /* Also copy the EI_OSABI field. */
9493 elf_elfheader (obfd)->e_ident[EI_OSABI] =
9494 elf_elfheader (ibfd)->e_ident[EI_OSABI];
9495
9496 /* Copy object attributes. */
9497 _bfd_elf_copy_obj_attributes (ibfd, obfd);
9498
9499 return TRUE;
9500 }
9501
9502 /* Values for Tag_ABI_PCS_R9_use. */
9503 enum
9504 {
9505 AEABI_R9_V6,
9506 AEABI_R9_SB,
9507 AEABI_R9_TLS,
9508 AEABI_R9_unused
9509 };
9510
9511 /* Values for Tag_ABI_PCS_RW_data. */
9512 enum
9513 {
9514 AEABI_PCS_RW_data_absolute,
9515 AEABI_PCS_RW_data_PCrel,
9516 AEABI_PCS_RW_data_SBrel,
9517 AEABI_PCS_RW_data_unused
9518 };
9519
9520 /* Values for Tag_ABI_enum_size. */
9521 enum
9522 {
9523 AEABI_enum_unused,
9524 AEABI_enum_short,
9525 AEABI_enum_wide,
9526 AEABI_enum_forced_wide
9527 };
9528
9529 /* Determine whether an object attribute tag takes an integer, a
9530 string or both. */
9531
9532 static int
9533 elf32_arm_obj_attrs_arg_type (int tag)
9534 {
9535 if (tag == Tag_compatibility)
9536 return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_STR_VAL;
9537 else if (tag == Tag_nodefaults)
9538 return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_NO_DEFAULT;
9539 else if (tag == Tag_CPU_raw_name || tag == Tag_CPU_name)
9540 return ATTR_TYPE_FLAG_STR_VAL;
9541 else if (tag < 32)
9542 return ATTR_TYPE_FLAG_INT_VAL;
9543 else
9544 return (tag & 1) != 0 ? ATTR_TYPE_FLAG_STR_VAL : ATTR_TYPE_FLAG_INT_VAL;
9545 }
9546
9547 /* The ABI defines that Tag_conformance should be emitted first, and that
9548 Tag_nodefaults should be second (if either is defined). This sets those
9549 two positions, and bumps up the position of all the remaining tags to
9550 compensate. */
9551 static int
9552 elf32_arm_obj_attrs_order (int num)
9553 {
9554 if (num == 4)
9555 return Tag_conformance;
9556 if (num == 5)
9557 return Tag_nodefaults;
9558 if ((num - 2) < Tag_nodefaults)
9559 return num - 2;
9560 if ((num - 1) < Tag_conformance)
9561 return num - 1;
9562 return num;
9563 }
9564
9565 /* Read the architecture from the Tag_also_compatible_with attribute, if any.
9566 Returns -1 if no architecture could be read. */
9567
9568 static int
9569 get_secondary_compatible_arch (bfd *abfd)
9570 {
9571 obj_attribute *attr =
9572 &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
9573
9574 /* Note: the tag and its argument below are uleb128 values, though
9575 currently-defined values fit in one byte for each. */
9576 if (attr->s
9577 && attr->s[0] == Tag_CPU_arch
9578 && (attr->s[1] & 128) != 128
9579 && attr->s[2] == 0)
9580 return attr->s[1];
9581
9582 /* This tag is "safely ignorable", so don't complain if it looks funny. */
9583 return -1;
9584 }
9585
9586 /* Set, or unset, the architecture of the Tag_also_compatible_with attribute.
9587 The tag is removed if ARCH is -1. */
9588
9589 static void
9590 set_secondary_compatible_arch (bfd *abfd, int arch)
9591 {
9592 obj_attribute *attr =
9593 &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
9594
9595 if (arch == -1)
9596 {
9597 attr->s = NULL;
9598 return;
9599 }
9600
9601 /* Note: the tag and its argument below are uleb128 values, though
9602 currently-defined values fit in one byte for each. */
9603 if (!attr->s)
9604 attr->s = (char *) bfd_alloc (abfd, 3);
9605 attr->s[0] = Tag_CPU_arch;
9606 attr->s[1] = arch;
9607 attr->s[2] = '\0';
9608 }
9609
9610 /* Combine two values for Tag_CPU_arch, taking secondary compatibility tags
9611 into account. */
9612
9613 static int
9614 tag_cpu_arch_combine (bfd *ibfd, int oldtag, int *secondary_compat_out,
9615 int newtag, int secondary_compat)
9616 {
9617 #define T(X) TAG_CPU_ARCH_##X
9618 int tagl, tagh, result;
9619 const int v6t2[] =
9620 {
9621 T(V6T2), /* PRE_V4. */
9622 T(V6T2), /* V4. */
9623 T(V6T2), /* V4T. */
9624 T(V6T2), /* V5T. */
9625 T(V6T2), /* V5TE. */
9626 T(V6T2), /* V5TEJ. */
9627 T(V6T2), /* V6. */
9628 T(V7), /* V6KZ. */
9629 T(V6T2) /* V6T2. */
9630 };
9631 const int v6k[] =
9632 {
9633 T(V6K), /* PRE_V4. */
9634 T(V6K), /* V4. */
9635 T(V6K), /* V4T. */
9636 T(V6K), /* V5T. */
9637 T(V6K), /* V5TE. */
9638 T(V6K), /* V5TEJ. */
9639 T(V6K), /* V6. */
9640 T(V6KZ), /* V6KZ. */
9641 T(V7), /* V6T2. */
9642 T(V6K) /* V6K. */
9643 };
9644 const int v7[] =
9645 {
9646 T(V7), /* PRE_V4. */
9647 T(V7), /* V4. */
9648 T(V7), /* V4T. */
9649 T(V7), /* V5T. */
9650 T(V7), /* V5TE. */
9651 T(V7), /* V5TEJ. */
9652 T(V7), /* V6. */
9653 T(V7), /* V6KZ. */
9654 T(V7), /* V6T2. */
9655 T(V7), /* V6K. */
9656 T(V7) /* V7. */
9657 };
9658 const int v6_m[] =
9659 {
9660 -1, /* PRE_V4. */
9661 -1, /* V4. */
9662 T(V6K), /* V4T. */
9663 T(V6K), /* V5T. */
9664 T(V6K), /* V5TE. */
9665 T(V6K), /* V5TEJ. */
9666 T(V6K), /* V6. */
9667 T(V6KZ), /* V6KZ. */
9668 T(V7), /* V6T2. */
9669 T(V6K), /* V6K. */
9670 T(V7), /* V7. */
9671 T(V6_M) /* V6_M. */
9672 };
9673 const int v6s_m[] =
9674 {
9675 -1, /* PRE_V4. */
9676 -1, /* V4. */
9677 T(V6K), /* V4T. */
9678 T(V6K), /* V5T. */
9679 T(V6K), /* V5TE. */
9680 T(V6K), /* V5TEJ. */
9681 T(V6K), /* V6. */
9682 T(V6KZ), /* V6KZ. */
9683 T(V7), /* V6T2. */
9684 T(V6K), /* V6K. */
9685 T(V7), /* V7. */
9686 T(V6S_M), /* V6_M. */
9687 T(V6S_M) /* V6S_M. */
9688 };
9689 const int v7e_m[] =
9690 {
9691 -1, /* PRE_V4. */
9692 -1, /* V4. */
9693 T(V7E_M), /* V4T. */
9694 T(V7E_M), /* V5T. */
9695 T(V7E_M), /* V5TE. */
9696 T(V7E_M), /* V5TEJ. */
9697 T(V7E_M), /* V6. */
9698 T(V7E_M), /* V6KZ. */
9699 T(V7E_M), /* V6T2. */
9700 T(V7E_M), /* V6K. */
9701 T(V7E_M), /* V7. */
9702 T(V7E_M), /* V6_M. */
9703 T(V7E_M), /* V6S_M. */
9704 T(V7E_M) /* V7E_M. */
9705 };
9706 const int v4t_plus_v6_m[] =
9707 {
9708 -1, /* PRE_V4. */
9709 -1, /* V4. */
9710 T(V4T), /* V4T. */
9711 T(V5T), /* V5T. */
9712 T(V5TE), /* V5TE. */
9713 T(V5TEJ), /* V5TEJ. */
9714 T(V6), /* V6. */
9715 T(V6KZ), /* V6KZ. */
9716 T(V6T2), /* V6T2. */
9717 T(V6K), /* V6K. */
9718 T(V7), /* V7. */
9719 T(V6_M), /* V6_M. */
9720 T(V6S_M), /* V6S_M. */
9721 T(V7E_M), /* V7E_M. */
9722 T(V4T_PLUS_V6_M) /* V4T plus V6_M. */
9723 };
9724 const int *comb[] =
9725 {
9726 v6t2,
9727 v6k,
9728 v7,
9729 v6_m,
9730 v6s_m,
9731 v7e_m,
9732 /* Pseudo-architecture. */
9733 v4t_plus_v6_m
9734 };
9735
9736 /* Check we've not got a higher architecture than we know about. */
9737
9738 if (oldtag > MAX_TAG_CPU_ARCH || newtag > MAX_TAG_CPU_ARCH)
9739 {
9740 _bfd_error_handler (_("error: %B: Unknown CPU architecture"), ibfd);
9741 return -1;
9742 }
9743
9744 /* Override old tag if we have a Tag_also_compatible_with on the output. */
9745
9746 if ((oldtag == T(V6_M) && *secondary_compat_out == T(V4T))
9747 || (oldtag == T(V4T) && *secondary_compat_out == T(V6_M)))
9748 oldtag = T(V4T_PLUS_V6_M);
9749
9750 /* And override the new tag if we have a Tag_also_compatible_with on the
9751 input. */
9752
9753 if ((newtag == T(V6_M) && secondary_compat == T(V4T))
9754 || (newtag == T(V4T) && secondary_compat == T(V6_M)))
9755 newtag = T(V4T_PLUS_V6_M);
9756
9757 tagl = (oldtag < newtag) ? oldtag : newtag;
9758 result = tagh = (oldtag > newtag) ? oldtag : newtag;
9759
9760 /* Architectures before V6KZ add features monotonically. */
9761 if (tagh <= TAG_CPU_ARCH_V6KZ)
9762 return result;
9763
9764 result = comb[tagh - T(V6T2)][tagl];
9765
9766 /* Use Tag_CPU_arch == V4T and Tag_also_compatible_with (Tag_CPU_arch V6_M)
9767 as the canonical version. */
9768 if (result == T(V4T_PLUS_V6_M))
9769 {
9770 result = T(V4T);
9771 *secondary_compat_out = T(V6_M);
9772 }
9773 else
9774 *secondary_compat_out = -1;
9775
9776 if (result == -1)
9777 {
9778 _bfd_error_handler (_("error: %B: Conflicting CPU architectures %d/%d"),
9779 ibfd, oldtag, newtag);
9780 return -1;
9781 }
9782
9783 return result;
9784 #undef T
9785 }
9786
9787 /* Merge EABI object attributes from IBFD into OBFD. Raise an error if there
9788 are conflicting attributes. */
9789
9790 static bfd_boolean
9791 elf32_arm_merge_eabi_attributes (bfd *ibfd, bfd *obfd)
9792 {
9793 obj_attribute *in_attr;
9794 obj_attribute *out_attr;
9795 obj_attribute_list *in_list;
9796 obj_attribute_list *out_list;
9797 obj_attribute_list **out_listp;
9798 /* Some tags have 0 = don't care, 1 = strong requirement,
9799 2 = weak requirement. */
9800 static const int order_021[3] = {0, 2, 1};
9801 int i;
9802 bfd_boolean result = TRUE;
9803
9804 /* Skip the linker stubs file. This preserves previous behavior
9805 of accepting unknown attributes in the first input file - but
9806 is that a bug? */
9807 if (ibfd->flags & BFD_LINKER_CREATED)
9808 return TRUE;
9809
9810 if (!elf_known_obj_attributes_proc (obfd)[0].i)
9811 {
9812 /* This is the first object. Copy the attributes. */
9813 _bfd_elf_copy_obj_attributes (ibfd, obfd);
9814
9815 out_attr = elf_known_obj_attributes_proc (obfd);
9816
9817 /* Use the Tag_null value to indicate the attributes have been
9818 initialized. */
9819 out_attr[0].i = 1;
9820
9821 /* We do not output objects with Tag_MPextension_use_legacy - we move
9822 the attribute's value to Tag_MPextension_use. */
9823 if (out_attr[Tag_MPextension_use_legacy].i != 0)
9824 {
9825 if (out_attr[Tag_MPextension_use].i != 0
9826 && out_attr[Tag_MPextension_use_legacy].i
9827 != out_attr[Tag_MPextension_use].i)
9828 {
9829 _bfd_error_handler
9830 (_("Error: %B has both the current and legacy "
9831 "Tag_MPextension_use attributes"), ibfd);
9832 result = FALSE;
9833 }
9834
9835 out_attr[Tag_MPextension_use] =
9836 out_attr[Tag_MPextension_use_legacy];
9837 out_attr[Tag_MPextension_use_legacy].type = 0;
9838 out_attr[Tag_MPextension_use_legacy].i = 0;
9839 }
9840
9841 return result;
9842 }
9843
9844 in_attr = elf_known_obj_attributes_proc (ibfd);
9845 out_attr = elf_known_obj_attributes_proc (obfd);
9846 /* This needs to happen before Tag_ABI_FP_number_model is merged. */
9847 if (in_attr[Tag_ABI_VFP_args].i != out_attr[Tag_ABI_VFP_args].i)
9848 {
9849 /* Ignore mismatches if the object doesn't use floating point. */
9850 if (out_attr[Tag_ABI_FP_number_model].i == 0)
9851 out_attr[Tag_ABI_VFP_args].i = in_attr[Tag_ABI_VFP_args].i;
9852 else if (in_attr[Tag_ABI_FP_number_model].i != 0)
9853 {
9854 _bfd_error_handler
9855 (_("error: %B uses VFP register arguments, %B does not"),
9856 in_attr[Tag_ABI_VFP_args].i ? ibfd : obfd,
9857 in_attr[Tag_ABI_VFP_args].i ? obfd : ibfd);
9858 result = FALSE;
9859 }
9860 }
9861
9862 for (i = 4; i < NUM_KNOWN_OBJ_ATTRIBUTES; i++)
9863 {
9864 /* Merge this attribute with existing attributes. */
9865 switch (i)
9866 {
9867 case Tag_CPU_raw_name:
9868 case Tag_CPU_name:
9869 /* These are merged after Tag_CPU_arch. */
9870 break;
9871
9872 case Tag_ABI_optimization_goals:
9873 case Tag_ABI_FP_optimization_goals:
9874 /* Use the first value seen. */
9875 break;
9876
9877 case Tag_CPU_arch:
9878 {
9879 int secondary_compat = -1, secondary_compat_out = -1;
9880 unsigned int saved_out_attr = out_attr[i].i;
9881 static const char *name_table[] = {
9882 /* These aren't real CPU names, but we can't guess
9883 that from the architecture version alone. */
9884 "Pre v4",
9885 "ARM v4",
9886 "ARM v4T",
9887 "ARM v5T",
9888 "ARM v5TE",
9889 "ARM v5TEJ",
9890 "ARM v6",
9891 "ARM v6KZ",
9892 "ARM v6T2",
9893 "ARM v6K",
9894 "ARM v7",
9895 "ARM v6-M",
9896 "ARM v6S-M"
9897 };
9898
9899 /* Merge Tag_CPU_arch and Tag_also_compatible_with. */
9900 secondary_compat = get_secondary_compatible_arch (ibfd);
9901 secondary_compat_out = get_secondary_compatible_arch (obfd);
9902 out_attr[i].i = tag_cpu_arch_combine (ibfd, out_attr[i].i,
9903 &secondary_compat_out,
9904 in_attr[i].i,
9905 secondary_compat);
9906 set_secondary_compatible_arch (obfd, secondary_compat_out);
9907
9908 /* Merge Tag_CPU_name and Tag_CPU_raw_name. */
9909 if (out_attr[i].i == saved_out_attr)
9910 ; /* Leave the names alone. */
9911 else if (out_attr[i].i == in_attr[i].i)
9912 {
9913 /* The output architecture has been changed to match the
9914 input architecture. Use the input names. */
9915 out_attr[Tag_CPU_name].s = in_attr[Tag_CPU_name].s
9916 ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_name].s)
9917 : NULL;
9918 out_attr[Tag_CPU_raw_name].s = in_attr[Tag_CPU_raw_name].s
9919 ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_raw_name].s)
9920 : NULL;
9921 }
9922 else
9923 {
9924 out_attr[Tag_CPU_name].s = NULL;
9925 out_attr[Tag_CPU_raw_name].s = NULL;
9926 }
9927
9928 /* If we still don't have a value for Tag_CPU_name,
9929 make one up now. Tag_CPU_raw_name remains blank. */
9930 if (out_attr[Tag_CPU_name].s == NULL
9931 && out_attr[i].i < ARRAY_SIZE (name_table))
9932 out_attr[Tag_CPU_name].s =
9933 _bfd_elf_attr_strdup (obfd, name_table[out_attr[i].i]);
9934 }
9935 break;
9936
9937 case Tag_ARM_ISA_use:
9938 case Tag_THUMB_ISA_use:
9939 case Tag_WMMX_arch:
9940 case Tag_Advanced_SIMD_arch:
9941 /* ??? Do Advanced_SIMD (NEON) and WMMX conflict? */
9942 case Tag_ABI_FP_rounding:
9943 case Tag_ABI_FP_exceptions:
9944 case Tag_ABI_FP_user_exceptions:
9945 case Tag_ABI_FP_number_model:
9946 case Tag_VFP_HP_extension:
9947 case Tag_CPU_unaligned_access:
9948 case Tag_T2EE_use:
9949 case Tag_Virtualization_use:
9950 case Tag_MPextension_use:
9951 /* Use the largest value specified. */
9952 if (in_attr[i].i > out_attr[i].i)
9953 out_attr[i].i = in_attr[i].i;
9954 break;
9955
9956 case Tag_ABI_align8_preserved:
9957 case Tag_ABI_PCS_RO_data:
9958 /* Use the smallest value specified. */
9959 if (in_attr[i].i < out_attr[i].i)
9960 out_attr[i].i = in_attr[i].i;
9961 break;
9962
9963 case Tag_ABI_align8_needed:
9964 if ((in_attr[i].i > 0 || out_attr[i].i > 0)
9965 && (in_attr[Tag_ABI_align8_preserved].i == 0
9966 || out_attr[Tag_ABI_align8_preserved].i == 0))
9967 {
9968 /* This error message should be enabled once all non-conformant
9969 binaries in the toolchain have had the attributes set
9970 properly.
9971 _bfd_error_handler
9972 (_("error: %B: 8-byte data alignment conflicts with %B"),
9973 obfd, ibfd);
9974 result = FALSE; */
9975 }
9976 /* Fall through. */
9977 case Tag_ABI_FP_denormal:
9978 case Tag_ABI_PCS_GOT_use:
9979 /* Use the "greatest" from the sequence 0, 2, 1, or the largest
9980 value if greater than 2 (for future-proofing). */
9981 if ((in_attr[i].i > 2 && in_attr[i].i > out_attr[i].i)
9982 || (in_attr[i].i <= 2 && out_attr[i].i <= 2
9983 && order_021[in_attr[i].i] > order_021[out_attr[i].i]))
9984 out_attr[i].i = in_attr[i].i;
9985 break;
9986
9987
9988 case Tag_CPU_arch_profile:
9989 if (out_attr[i].i != in_attr[i].i)
9990 {
9991 /* 0 will merge with anything.
9992 'A' and 'S' merge to 'A'.
9993 'R' and 'S' merge to 'R'.
9994 'M' and 'A|R|S' is an error. */
9995 if (out_attr[i].i == 0
9996 || (out_attr[i].i == 'S'
9997 && (in_attr[i].i == 'A' || in_attr[i].i == 'R')))
9998 out_attr[i].i = in_attr[i].i;
9999 else if (in_attr[i].i == 0
10000 || (in_attr[i].i == 'S'
10001 && (out_attr[i].i == 'A' || out_attr[i].i == 'R')))
10002 ; /* Do nothing. */
10003 else
10004 {
10005 _bfd_error_handler
10006 (_("error: %B: Conflicting architecture profiles %c/%c"),
10007 ibfd,
10008 in_attr[i].i ? in_attr[i].i : '0',
10009 out_attr[i].i ? out_attr[i].i : '0');
10010 result = FALSE;
10011 }
10012 }
10013 break;
10014 case Tag_VFP_arch:
10015 {
10016 static const struct
10017 {
10018 int ver;
10019 int regs;
10020 } vfp_versions[7] =
10021 {
10022 {0, 0},
10023 {1, 16},
10024 {2, 16},
10025 {3, 32},
10026 {3, 16},
10027 {4, 32},
10028 {4, 16}
10029 };
10030 int ver;
10031 int regs;
10032 int newval;
10033
10034 /* Values greater than 6 aren't defined, so just pick the
10035 biggest */
10036 if (in_attr[i].i > 6 && in_attr[i].i > out_attr[i].i)
10037 {
10038 out_attr[i] = in_attr[i];
10039 break;
10040 }
10041 /* The output uses the superset of input features
10042 (ISA version) and registers. */
10043 ver = vfp_versions[in_attr[i].i].ver;
10044 if (ver < vfp_versions[out_attr[i].i].ver)
10045 ver = vfp_versions[out_attr[i].i].ver;
10046 regs = vfp_versions[in_attr[i].i].regs;
10047 if (regs < vfp_versions[out_attr[i].i].regs)
10048 regs = vfp_versions[out_attr[i].i].regs;
10049 /* This assumes all possible supersets are also a valid
10050 options. */
10051 for (newval = 6; newval > 0; newval--)
10052 {
10053 if (regs == vfp_versions[newval].regs
10054 && ver == vfp_versions[newval].ver)
10055 break;
10056 }
10057 out_attr[i].i = newval;
10058 }
10059 break;
10060 case Tag_PCS_config:
10061 if (out_attr[i].i == 0)
10062 out_attr[i].i = in_attr[i].i;
10063 else if (in_attr[i].i != 0 && out_attr[i].i != 0)
10064 {
10065 /* It's sometimes ok to mix different configs, so this is only
10066 a warning. */
10067 _bfd_error_handler
10068 (_("Warning: %B: Conflicting platform configuration"), ibfd);
10069 }
10070 break;
10071 case Tag_ABI_PCS_R9_use:
10072 if (in_attr[i].i != out_attr[i].i
10073 && out_attr[i].i != AEABI_R9_unused
10074 && in_attr[i].i != AEABI_R9_unused)
10075 {
10076 _bfd_error_handler
10077 (_("error: %B: Conflicting use of R9"), ibfd);
10078 result = FALSE;
10079 }
10080 if (out_attr[i].i == AEABI_R9_unused)
10081 out_attr[i].i = in_attr[i].i;
10082 break;
10083 case Tag_ABI_PCS_RW_data:
10084 if (in_attr[i].i == AEABI_PCS_RW_data_SBrel
10085 && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_SB
10086 && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_unused)
10087 {
10088 _bfd_error_handler
10089 (_("error: %B: SB relative addressing conflicts with use of R9"),
10090 ibfd);
10091 result = FALSE;
10092 }
10093 /* Use the smallest value specified. */
10094 if (in_attr[i].i < out_attr[i].i)
10095 out_attr[i].i = in_attr[i].i;
10096 break;
10097 case Tag_ABI_PCS_wchar_t:
10098 if (out_attr[i].i && in_attr[i].i && out_attr[i].i != in_attr[i].i
10099 && !elf_arm_tdata (obfd)->no_wchar_size_warning)
10100 {
10101 _bfd_error_handler
10102 (_("warning: %B uses %u-byte wchar_t yet the output is to use %u-byte wchar_t; use of wchar_t values across objects may fail"),
10103 ibfd, in_attr[i].i, out_attr[i].i);
10104 }
10105 else if (in_attr[i].i && !out_attr[i].i)
10106 out_attr[i].i = in_attr[i].i;
10107 break;
10108 case Tag_ABI_enum_size:
10109 if (in_attr[i].i != AEABI_enum_unused)
10110 {
10111 if (out_attr[i].i == AEABI_enum_unused
10112 || out_attr[i].i == AEABI_enum_forced_wide)
10113 {
10114 /* The existing object is compatible with anything.
10115 Use whatever requirements the new object has. */
10116 out_attr[i].i = in_attr[i].i;
10117 }
10118 else if (in_attr[i].i != AEABI_enum_forced_wide
10119 && out_attr[i].i != in_attr[i].i
10120 && !elf_arm_tdata (obfd)->no_enum_size_warning)
10121 {
10122 static const char *aeabi_enum_names[] =
10123 { "", "variable-size", "32-bit", "" };
10124 const char *in_name =
10125 in_attr[i].i < ARRAY_SIZE(aeabi_enum_names)
10126 ? aeabi_enum_names[in_attr[i].i]
10127 : "<unknown>";
10128 const char *out_name =
10129 out_attr[i].i < ARRAY_SIZE(aeabi_enum_names)
10130 ? aeabi_enum_names[out_attr[i].i]
10131 : "<unknown>";
10132 _bfd_error_handler
10133 (_("warning: %B uses %s enums yet the output is to use %s enums; use of enum values across objects may fail"),
10134 ibfd, in_name, out_name);
10135 }
10136 }
10137 break;
10138 case Tag_ABI_VFP_args:
10139 /* Aready done. */
10140 break;
10141 case Tag_ABI_WMMX_args:
10142 if (in_attr[i].i != out_attr[i].i)
10143 {
10144 _bfd_error_handler
10145 (_("error: %B uses iWMMXt register arguments, %B does not"),
10146 ibfd, obfd);
10147 result = FALSE;
10148 }
10149 break;
10150 case Tag_compatibility:
10151 /* Merged in target-independent code. */
10152 break;
10153 case Tag_ABI_HardFP_use:
10154 /* 1 (SP) and 2 (DP) conflict, so combine to 3 (SP & DP). */
10155 if ((in_attr[i].i == 1 && out_attr[i].i == 2)
10156 || (in_attr[i].i == 2 && out_attr[i].i == 1))
10157 out_attr[i].i = 3;
10158 else if (in_attr[i].i > out_attr[i].i)
10159 out_attr[i].i = in_attr[i].i;
10160 break;
10161 case Tag_ABI_FP_16bit_format:
10162 if (in_attr[i].i != 0 && out_attr[i].i != 0)
10163 {
10164 if (in_attr[i].i != out_attr[i].i)
10165 {
10166 _bfd_error_handler
10167 (_("error: fp16 format mismatch between %B and %B"),
10168 ibfd, obfd);
10169 result = FALSE;
10170 }
10171 }
10172 if (in_attr[i].i != 0)
10173 out_attr[i].i = in_attr[i].i;
10174 break;
10175
10176 case Tag_DIV_use:
10177 /* This tag is set to zero if we can use UDIV and SDIV in Thumb
10178 mode on a v7-M or v7-R CPU; to one if we can not use UDIV or
10179 SDIV at all; and to two if we can use UDIV or SDIV on a v7-A
10180 CPU. We will merge as follows: If the input attribute's value
10181 is one then the output attribute's value remains unchanged. If
10182 the input attribute's value is zero or two then if the output
10183 attribute's value is one the output value is set to the input
10184 value, otherwise the output value must be the same as the
10185 inputs. */
10186 if (in_attr[i].i != 1 && out_attr[i].i != 1)
10187 {
10188 if (in_attr[i].i != out_attr[i].i)
10189 {
10190 _bfd_error_handler
10191 (_("DIV usage mismatch between %B and %B"),
10192 ibfd, obfd);
10193 result = FALSE;
10194 }
10195 }
10196
10197 if (in_attr[i].i != 1)
10198 out_attr[i].i = in_attr[i].i;
10199
10200 break;
10201
10202 case Tag_MPextension_use_legacy:
10203 /* We don't output objects with Tag_MPextension_use_legacy - we
10204 move the value to Tag_MPextension_use. */
10205 if (in_attr[i].i != 0 && in_attr[Tag_MPextension_use].i != 0)
10206 {
10207 if (in_attr[Tag_MPextension_use].i != in_attr[i].i)
10208 {
10209 _bfd_error_handler
10210 (_("%B has has both the current and legacy "
10211 "Tag_MPextension_use attributes"),
10212 ibfd);
10213 result = FALSE;
10214 }
10215 }
10216
10217 if (in_attr[i].i > out_attr[Tag_MPextension_use].i)
10218 out_attr[Tag_MPextension_use] = in_attr[i];
10219
10220 break;
10221
10222 case Tag_nodefaults:
10223 /* This tag is set if it exists, but the value is unused (and is
10224 typically zero). We don't actually need to do anything here -
10225 the merge happens automatically when the type flags are merged
10226 below. */
10227 break;
10228 case Tag_also_compatible_with:
10229 /* Already done in Tag_CPU_arch. */
10230 break;
10231 case Tag_conformance:
10232 /* Keep the attribute if it matches. Throw it away otherwise.
10233 No attribute means no claim to conform. */
10234 if (!in_attr[i].s || !out_attr[i].s
10235 || strcmp (in_attr[i].s, out_attr[i].s) != 0)
10236 out_attr[i].s = NULL;
10237 break;
10238
10239 default:
10240 {
10241 bfd *err_bfd = NULL;
10242
10243 /* The "known_obj_attributes" table does contain some undefined
10244 attributes. Ensure that there are unused. */
10245 if (out_attr[i].i != 0 || out_attr[i].s != NULL)
10246 err_bfd = obfd;
10247 else if (in_attr[i].i != 0 || in_attr[i].s != NULL)
10248 err_bfd = ibfd;
10249
10250 if (err_bfd != NULL)
10251 {
10252 /* Attribute numbers >=64 (mod 128) can be safely ignored. */
10253 if ((i & 127) < 64)
10254 {
10255 _bfd_error_handler
10256 (_("%B: Unknown mandatory EABI object attribute %d"),
10257 err_bfd, i);
10258 bfd_set_error (bfd_error_bad_value);
10259 result = FALSE;
10260 }
10261 else
10262 {
10263 _bfd_error_handler
10264 (_("Warning: %B: Unknown EABI object attribute %d"),
10265 err_bfd, i);
10266 }
10267 }
10268
10269 /* Only pass on attributes that match in both inputs. */
10270 if (in_attr[i].i != out_attr[i].i
10271 || in_attr[i].s != out_attr[i].s
10272 || (in_attr[i].s != NULL && out_attr[i].s != NULL
10273 && strcmp (in_attr[i].s, out_attr[i].s) != 0))
10274 {
10275 out_attr[i].i = 0;
10276 out_attr[i].s = NULL;
10277 }
10278 }
10279 }
10280
10281 /* If out_attr was copied from in_attr then it won't have a type yet. */
10282 if (in_attr[i].type && !out_attr[i].type)
10283 out_attr[i].type = in_attr[i].type;
10284 }
10285
10286 /* Merge Tag_compatibility attributes and any common GNU ones. */
10287 _bfd_elf_merge_object_attributes (ibfd, obfd);
10288
10289 /* Check for any attributes not known on ARM. */
10290 in_list = elf_other_obj_attributes_proc (ibfd);
10291 out_listp = &elf_other_obj_attributes_proc (obfd);
10292 out_list = *out_listp;
10293
10294 for (; in_list || out_list; )
10295 {
10296 bfd *err_bfd = NULL;
10297 int err_tag = 0;
10298
10299 /* The tags for each list are in numerical order. */
10300 /* If the tags are equal, then merge. */
10301 if (out_list && (!in_list || in_list->tag > out_list->tag))
10302 {
10303 /* This attribute only exists in obfd. We can't merge, and we don't
10304 know what the tag means, so delete it. */
10305 err_bfd = obfd;
10306 err_tag = out_list->tag;
10307 *out_listp = out_list->next;
10308 out_list = *out_listp;
10309 }
10310 else if (in_list && (!out_list || in_list->tag < out_list->tag))
10311 {
10312 /* This attribute only exists in ibfd. We can't merge, and we don't
10313 know what the tag means, so ignore it. */
10314 err_bfd = ibfd;
10315 err_tag = in_list->tag;
10316 in_list = in_list->next;
10317 }
10318 else /* The tags are equal. */
10319 {
10320 /* As present, all attributes in the list are unknown, and
10321 therefore can't be merged meaningfully. */
10322 err_bfd = obfd;
10323 err_tag = out_list->tag;
10324
10325 /* Only pass on attributes that match in both inputs. */
10326 if (in_list->attr.i != out_list->attr.i
10327 || in_list->attr.s != out_list->attr.s
10328 || (in_list->attr.s && out_list->attr.s
10329 && strcmp (in_list->attr.s, out_list->attr.s) != 0))
10330 {
10331 /* No match. Delete the attribute. */
10332 *out_listp = out_list->next;
10333 out_list = *out_listp;
10334 }
10335 else
10336 {
10337 /* Matched. Keep the attribute and move to the next. */
10338 out_list = out_list->next;
10339 in_list = in_list->next;
10340 }
10341 }
10342
10343 if (err_bfd)
10344 {
10345 /* Attribute numbers >=64 (mod 128) can be safely ignored. */
10346 if ((err_tag & 127) < 64)
10347 {
10348 _bfd_error_handler
10349 (_("%B: Unknown mandatory EABI object attribute %d"),
10350 err_bfd, err_tag);
10351 bfd_set_error (bfd_error_bad_value);
10352 result = FALSE;
10353 }
10354 else
10355 {
10356 _bfd_error_handler
10357 (_("Warning: %B: Unknown EABI object attribute %d"),
10358 err_bfd, err_tag);
10359 }
10360 }
10361 }
10362 return result;
10363 }
10364
10365
10366 /* Return TRUE if the two EABI versions are incompatible. */
10367
10368 static bfd_boolean
10369 elf32_arm_versions_compatible (unsigned iver, unsigned over)
10370 {
10371 /* v4 and v5 are the same spec before and after it was released,
10372 so allow mixing them. */
10373 if ((iver == EF_ARM_EABI_VER4 && over == EF_ARM_EABI_VER5)
10374 || (iver == EF_ARM_EABI_VER5 && over == EF_ARM_EABI_VER4))
10375 return TRUE;
10376
10377 return (iver == over);
10378 }
10379
10380 /* Merge backend specific data from an object file to the output
10381 object file when linking. */
10382
10383 static bfd_boolean
10384 elf32_arm_merge_private_bfd_data (bfd * ibfd, bfd * obfd);
10385
10386 /* Display the flags field. */
10387
10388 static bfd_boolean
10389 elf32_arm_print_private_bfd_data (bfd *abfd, void * ptr)
10390 {
10391 FILE * file = (FILE *) ptr;
10392 unsigned long flags;
10393
10394 BFD_ASSERT (abfd != NULL && ptr != NULL);
10395
10396 /* Print normal ELF private data. */
10397 _bfd_elf_print_private_bfd_data (abfd, ptr);
10398
10399 flags = elf_elfheader (abfd)->e_flags;
10400 /* Ignore init flag - it may not be set, despite the flags field
10401 containing valid data. */
10402
10403 /* xgettext:c-format */
10404 fprintf (file, _("private flags = %lx:"), elf_elfheader (abfd)->e_flags);
10405
10406 switch (EF_ARM_EABI_VERSION (flags))
10407 {
10408 case EF_ARM_EABI_UNKNOWN:
10409 /* The following flag bits are GNU extensions and not part of the
10410 official ARM ELF extended ABI. Hence they are only decoded if
10411 the EABI version is not set. */
10412 if (flags & EF_ARM_INTERWORK)
10413 fprintf (file, _(" [interworking enabled]"));
10414
10415 if (flags & EF_ARM_APCS_26)
10416 fprintf (file, " [APCS-26]");
10417 else
10418 fprintf (file, " [APCS-32]");
10419
10420 if (flags & EF_ARM_VFP_FLOAT)
10421 fprintf (file, _(" [VFP float format]"));
10422 else if (flags & EF_ARM_MAVERICK_FLOAT)
10423 fprintf (file, _(" [Maverick float format]"));
10424 else
10425 fprintf (file, _(" [FPA float format]"));
10426
10427 if (flags & EF_ARM_APCS_FLOAT)
10428 fprintf (file, _(" [floats passed in float registers]"));
10429
10430 if (flags & EF_ARM_PIC)
10431 fprintf (file, _(" [position independent]"));
10432
10433 if (flags & EF_ARM_NEW_ABI)
10434 fprintf (file, _(" [new ABI]"));
10435
10436 if (flags & EF_ARM_OLD_ABI)
10437 fprintf (file, _(" [old ABI]"));
10438
10439 if (flags & EF_ARM_SOFT_FLOAT)
10440 fprintf (file, _(" [software FP]"));
10441
10442 flags &= ~(EF_ARM_INTERWORK | EF_ARM_APCS_26 | EF_ARM_APCS_FLOAT
10443 | EF_ARM_PIC | EF_ARM_NEW_ABI | EF_ARM_OLD_ABI
10444 | EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT
10445 | EF_ARM_MAVERICK_FLOAT);
10446 break;
10447
10448 case EF_ARM_EABI_VER1:
10449 fprintf (file, _(" [Version1 EABI]"));
10450
10451 if (flags & EF_ARM_SYMSARESORTED)
10452 fprintf (file, _(" [sorted symbol table]"));
10453 else
10454 fprintf (file, _(" [unsorted symbol table]"));
10455
10456 flags &= ~ EF_ARM_SYMSARESORTED;
10457 break;
10458
10459 case EF_ARM_EABI_VER2:
10460 fprintf (file, _(" [Version2 EABI]"));
10461
10462 if (flags & EF_ARM_SYMSARESORTED)
10463 fprintf (file, _(" [sorted symbol table]"));
10464 else
10465 fprintf (file, _(" [unsorted symbol table]"));
10466
10467 if (flags & EF_ARM_DYNSYMSUSESEGIDX)
10468 fprintf (file, _(" [dynamic symbols use segment index]"));
10469
10470 if (flags & EF_ARM_MAPSYMSFIRST)
10471 fprintf (file, _(" [mapping symbols precede others]"));
10472
10473 flags &= ~(EF_ARM_SYMSARESORTED | EF_ARM_DYNSYMSUSESEGIDX
10474 | EF_ARM_MAPSYMSFIRST);
10475 break;
10476
10477 case EF_ARM_EABI_VER3:
10478 fprintf (file, _(" [Version3 EABI]"));
10479 break;
10480
10481 case EF_ARM_EABI_VER4:
10482 fprintf (file, _(" [Version4 EABI]"));
10483 goto eabi;
10484
10485 case EF_ARM_EABI_VER5:
10486 fprintf (file, _(" [Version5 EABI]"));
10487 eabi:
10488 if (flags & EF_ARM_BE8)
10489 fprintf (file, _(" [BE8]"));
10490
10491 if (flags & EF_ARM_LE8)
10492 fprintf (file, _(" [LE8]"));
10493
10494 flags &= ~(EF_ARM_LE8 | EF_ARM_BE8);
10495 break;
10496
10497 default:
10498 fprintf (file, _(" <EABI version unrecognised>"));
10499 break;
10500 }
10501
10502 flags &= ~ EF_ARM_EABIMASK;
10503
10504 if (flags & EF_ARM_RELEXEC)
10505 fprintf (file, _(" [relocatable executable]"));
10506
10507 if (flags & EF_ARM_HASENTRY)
10508 fprintf (file, _(" [has entry point]"));
10509
10510 flags &= ~ (EF_ARM_RELEXEC | EF_ARM_HASENTRY);
10511
10512 if (flags)
10513 fprintf (file, _("<Unrecognised flag bits set>"));
10514
10515 fputc ('\n', file);
10516
10517 return TRUE;
10518 }
10519
10520 static int
10521 elf32_arm_get_symbol_type (Elf_Internal_Sym * elf_sym, int type)
10522 {
10523 switch (ELF_ST_TYPE (elf_sym->st_info))
10524 {
10525 case STT_ARM_TFUNC:
10526 return ELF_ST_TYPE (elf_sym->st_info);
10527
10528 case STT_ARM_16BIT:
10529 /* If the symbol is not an object, return the STT_ARM_16BIT flag.
10530 This allows us to distinguish between data used by Thumb instructions
10531 and non-data (which is probably code) inside Thumb regions of an
10532 executable. */
10533 if (type != STT_OBJECT && type != STT_TLS)
10534 return ELF_ST_TYPE (elf_sym->st_info);
10535 break;
10536
10537 default:
10538 break;
10539 }
10540
10541 return type;
10542 }
10543
10544 static asection *
10545 elf32_arm_gc_mark_hook (asection *sec,
10546 struct bfd_link_info *info,
10547 Elf_Internal_Rela *rel,
10548 struct elf_link_hash_entry *h,
10549 Elf_Internal_Sym *sym)
10550 {
10551 if (h != NULL)
10552 switch (ELF32_R_TYPE (rel->r_info))
10553 {
10554 case R_ARM_GNU_VTINHERIT:
10555 case R_ARM_GNU_VTENTRY:
10556 return NULL;
10557 }
10558
10559 return _bfd_elf_gc_mark_hook (sec, info, rel, h, sym);
10560 }
10561
10562 /* Update the got entry reference counts for the section being removed. */
10563
10564 static bfd_boolean
10565 elf32_arm_gc_sweep_hook (bfd * abfd,
10566 struct bfd_link_info * info,
10567 asection * sec,
10568 const Elf_Internal_Rela * relocs)
10569 {
10570 Elf_Internal_Shdr *symtab_hdr;
10571 struct elf_link_hash_entry **sym_hashes;
10572 bfd_signed_vma *local_got_refcounts;
10573 const Elf_Internal_Rela *rel, *relend;
10574 struct elf32_arm_link_hash_table * globals;
10575
10576 if (info->relocatable)
10577 return TRUE;
10578
10579 globals = elf32_arm_hash_table (info);
10580 if (globals == NULL)
10581 return FALSE;
10582
10583 elf_section_data (sec)->local_dynrel = NULL;
10584
10585 symtab_hdr = & elf_symtab_hdr (abfd);
10586 sym_hashes = elf_sym_hashes (abfd);
10587 local_got_refcounts = elf_local_got_refcounts (abfd);
10588
10589 check_use_blx (globals);
10590
10591 relend = relocs + sec->reloc_count;
10592 for (rel = relocs; rel < relend; rel++)
10593 {
10594 unsigned long r_symndx;
10595 struct elf_link_hash_entry *h = NULL;
10596 int r_type;
10597
10598 r_symndx = ELF32_R_SYM (rel->r_info);
10599 if (r_symndx >= symtab_hdr->sh_info)
10600 {
10601 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
10602 while (h->root.type == bfd_link_hash_indirect
10603 || h->root.type == bfd_link_hash_warning)
10604 h = (struct elf_link_hash_entry *) h->root.u.i.link;
10605 }
10606
10607 r_type = ELF32_R_TYPE (rel->r_info);
10608 r_type = arm_real_reloc_type (globals, r_type);
10609 switch (r_type)
10610 {
10611 case R_ARM_GOT32:
10612 case R_ARM_GOT_PREL:
10613 case R_ARM_TLS_GD32:
10614 case R_ARM_TLS_IE32:
10615 if (h != NULL)
10616 {
10617 if (h->got.refcount > 0)
10618 h->got.refcount -= 1;
10619 }
10620 else if (local_got_refcounts != NULL)
10621 {
10622 if (local_got_refcounts[r_symndx] > 0)
10623 local_got_refcounts[r_symndx] -= 1;
10624 }
10625 break;
10626
10627 case R_ARM_TLS_LDM32:
10628 globals->tls_ldm_got.refcount -= 1;
10629 break;
10630
10631 case R_ARM_ABS32:
10632 case R_ARM_ABS32_NOI:
10633 case R_ARM_REL32:
10634 case R_ARM_REL32_NOI:
10635 case R_ARM_PC24:
10636 case R_ARM_PLT32:
10637 case R_ARM_CALL:
10638 case R_ARM_JUMP24:
10639 case R_ARM_PREL31:
10640 case R_ARM_THM_CALL:
10641 case R_ARM_THM_JUMP24:
10642 case R_ARM_THM_JUMP19:
10643 case R_ARM_MOVW_ABS_NC:
10644 case R_ARM_MOVT_ABS:
10645 case R_ARM_MOVW_PREL_NC:
10646 case R_ARM_MOVT_PREL:
10647 case R_ARM_THM_MOVW_ABS_NC:
10648 case R_ARM_THM_MOVT_ABS:
10649 case R_ARM_THM_MOVW_PREL_NC:
10650 case R_ARM_THM_MOVT_PREL:
10651 /* Should the interworking branches be here also? */
10652
10653 if (h != NULL)
10654 {
10655 struct elf32_arm_link_hash_entry *eh;
10656 struct elf32_arm_relocs_copied **pp;
10657 struct elf32_arm_relocs_copied *p;
10658
10659 eh = (struct elf32_arm_link_hash_entry *) h;
10660
10661 if (h->plt.refcount > 0)
10662 {
10663 h->plt.refcount -= 1;
10664 if (r_type == R_ARM_THM_CALL)
10665 eh->plt_maybe_thumb_refcount--;
10666
10667 if (r_type == R_ARM_THM_JUMP24
10668 || r_type == R_ARM_THM_JUMP19)
10669 eh->plt_thumb_refcount--;
10670 }
10671
10672 if (r_type == R_ARM_ABS32
10673 || r_type == R_ARM_REL32
10674 || r_type == R_ARM_ABS32_NOI
10675 || r_type == R_ARM_REL32_NOI)
10676 {
10677 for (pp = &eh->relocs_copied; (p = *pp) != NULL;
10678 pp = &p->next)
10679 if (p->section == sec)
10680 {
10681 p->count -= 1;
10682 if (ELF32_R_TYPE (rel->r_info) == R_ARM_REL32
10683 || ELF32_R_TYPE (rel->r_info) == R_ARM_REL32_NOI)
10684 p->pc_count -= 1;
10685 if (p->count == 0)
10686 *pp = p->next;
10687 break;
10688 }
10689 }
10690 }
10691 break;
10692
10693 default:
10694 break;
10695 }
10696 }
10697
10698 return TRUE;
10699 }
10700
10701 /* Look through the relocs for a section during the first phase. */
10702
10703 static bfd_boolean
10704 elf32_arm_check_relocs (bfd *abfd, struct bfd_link_info *info,
10705 asection *sec, const Elf_Internal_Rela *relocs)
10706 {
10707 Elf_Internal_Shdr *symtab_hdr;
10708 struct elf_link_hash_entry **sym_hashes;
10709 const Elf_Internal_Rela *rel;
10710 const Elf_Internal_Rela *rel_end;
10711 bfd *dynobj;
10712 asection *sreloc;
10713 bfd_vma *local_got_offsets;
10714 struct elf32_arm_link_hash_table *htab;
10715 bfd_boolean needs_plt;
10716 unsigned long nsyms;
10717
10718 if (info->relocatable)
10719 return TRUE;
10720
10721 BFD_ASSERT (is_arm_elf (abfd));
10722
10723 htab = elf32_arm_hash_table (info);
10724 if (htab == NULL)
10725 return FALSE;
10726
10727 sreloc = NULL;
10728
10729 /* Create dynamic sections for relocatable executables so that we can
10730 copy relocations. */
10731 if (htab->root.is_relocatable_executable
10732 && ! htab->root.dynamic_sections_created)
10733 {
10734 if (! _bfd_elf_link_create_dynamic_sections (abfd, info))
10735 return FALSE;
10736 }
10737
10738 dynobj = elf_hash_table (info)->dynobj;
10739 local_got_offsets = elf_local_got_offsets (abfd);
10740
10741 symtab_hdr = & elf_symtab_hdr (abfd);
10742 sym_hashes = elf_sym_hashes (abfd);
10743 nsyms = NUM_SHDR_ENTRIES (symtab_hdr);
10744
10745 rel_end = relocs + sec->reloc_count;
10746 for (rel = relocs; rel < rel_end; rel++)
10747 {
10748 struct elf_link_hash_entry *h;
10749 struct elf32_arm_link_hash_entry *eh;
10750 unsigned long r_symndx;
10751 int r_type;
10752
10753 r_symndx = ELF32_R_SYM (rel->r_info);
10754 r_type = ELF32_R_TYPE (rel->r_info);
10755 r_type = arm_real_reloc_type (htab, r_type);
10756
10757 if (r_symndx >= nsyms
10758 /* PR 9934: It is possible to have relocations that do not
10759 refer to symbols, thus it is also possible to have an
10760 object file containing relocations but no symbol table. */
10761 && (r_symndx > 0 || nsyms > 0))
10762 {
10763 (*_bfd_error_handler) (_("%B: bad symbol index: %d"), abfd,
10764 r_symndx);
10765 return FALSE;
10766 }
10767
10768 if (nsyms == 0 || r_symndx < symtab_hdr->sh_info)
10769 h = NULL;
10770 else
10771 {
10772 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
10773 while (h->root.type == bfd_link_hash_indirect
10774 || h->root.type == bfd_link_hash_warning)
10775 h = (struct elf_link_hash_entry *) h->root.u.i.link;
10776 }
10777
10778 eh = (struct elf32_arm_link_hash_entry *) h;
10779
10780 switch (r_type)
10781 {
10782 case R_ARM_GOT32:
10783 case R_ARM_GOT_PREL:
10784 case R_ARM_TLS_GD32:
10785 case R_ARM_TLS_IE32:
10786 /* This symbol requires a global offset table entry. */
10787 {
10788 int tls_type, old_tls_type;
10789
10790 switch (r_type)
10791 {
10792 case R_ARM_TLS_GD32: tls_type = GOT_TLS_GD; break;
10793 case R_ARM_TLS_IE32: tls_type = GOT_TLS_IE; break;
10794 default: tls_type = GOT_NORMAL; break;
10795 }
10796
10797 if (h != NULL)
10798 {
10799 h->got.refcount++;
10800 old_tls_type = elf32_arm_hash_entry (h)->tls_type;
10801 }
10802 else
10803 {
10804 bfd_signed_vma *local_got_refcounts;
10805
10806 /* This is a global offset table entry for a local symbol. */
10807 local_got_refcounts = elf_local_got_refcounts (abfd);
10808 if (local_got_refcounts == NULL)
10809 {
10810 bfd_size_type size;
10811
10812 size = symtab_hdr->sh_info;
10813 size *= (sizeof (bfd_signed_vma) + sizeof (char));
10814 local_got_refcounts = (bfd_signed_vma *)
10815 bfd_zalloc (abfd, size);
10816 if (local_got_refcounts == NULL)
10817 return FALSE;
10818 elf_local_got_refcounts (abfd) = local_got_refcounts;
10819 elf32_arm_local_got_tls_type (abfd)
10820 = (char *) (local_got_refcounts + symtab_hdr->sh_info);
10821 }
10822 local_got_refcounts[r_symndx] += 1;
10823 old_tls_type = elf32_arm_local_got_tls_type (abfd) [r_symndx];
10824 }
10825
10826 /* We will already have issued an error message if there is a
10827 TLS / non-TLS mismatch, based on the symbol type. We don't
10828 support any linker relaxations. So just combine any TLS
10829 types needed. */
10830 if (old_tls_type != GOT_UNKNOWN && old_tls_type != GOT_NORMAL
10831 && tls_type != GOT_NORMAL)
10832 tls_type |= old_tls_type;
10833
10834 if (old_tls_type != tls_type)
10835 {
10836 if (h != NULL)
10837 elf32_arm_hash_entry (h)->tls_type = tls_type;
10838 else
10839 elf32_arm_local_got_tls_type (abfd) [r_symndx] = tls_type;
10840 }
10841 }
10842 /* Fall through. */
10843
10844 case R_ARM_TLS_LDM32:
10845 if (r_type == R_ARM_TLS_LDM32)
10846 htab->tls_ldm_got.refcount++;
10847 /* Fall through. */
10848
10849 case R_ARM_GOTOFF32:
10850 case R_ARM_GOTPC:
10851 if (htab->sgot == NULL)
10852 {
10853 if (htab->root.dynobj == NULL)
10854 htab->root.dynobj = abfd;
10855 if (!create_got_section (htab->root.dynobj, info))
10856 return FALSE;
10857 }
10858 break;
10859
10860 case R_ARM_ABS12:
10861 /* VxWorks uses dynamic R_ARM_ABS12 relocations for
10862 ldr __GOTT_INDEX__ offsets. */
10863 if (!htab->vxworks_p)
10864 break;
10865 /* Fall through. */
10866
10867 case R_ARM_PC24:
10868 case R_ARM_PLT32:
10869 case R_ARM_CALL:
10870 case R_ARM_JUMP24:
10871 case R_ARM_PREL31:
10872 case R_ARM_THM_CALL:
10873 case R_ARM_THM_JUMP24:
10874 case R_ARM_THM_JUMP19:
10875 needs_plt = 1;
10876 goto normal_reloc;
10877
10878 case R_ARM_MOVW_ABS_NC:
10879 case R_ARM_MOVT_ABS:
10880 case R_ARM_THM_MOVW_ABS_NC:
10881 case R_ARM_THM_MOVT_ABS:
10882 if (info->shared)
10883 {
10884 (*_bfd_error_handler)
10885 (_("%B: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"),
10886 abfd, elf32_arm_howto_table_1[r_type].name,
10887 (h) ? h->root.root.string : "a local symbol");
10888 bfd_set_error (bfd_error_bad_value);
10889 return FALSE;
10890 }
10891
10892 /* Fall through. */
10893 case R_ARM_ABS32:
10894 case R_ARM_ABS32_NOI:
10895 case R_ARM_REL32:
10896 case R_ARM_REL32_NOI:
10897 case R_ARM_MOVW_PREL_NC:
10898 case R_ARM_MOVT_PREL:
10899 case R_ARM_THM_MOVW_PREL_NC:
10900 case R_ARM_THM_MOVT_PREL:
10901 needs_plt = 0;
10902 normal_reloc:
10903
10904 /* Should the interworking branches be listed here? */
10905 if (h != NULL)
10906 {
10907 /* If this reloc is in a read-only section, we might
10908 need a copy reloc. We can't check reliably at this
10909 stage whether the section is read-only, as input
10910 sections have not yet been mapped to output sections.
10911 Tentatively set the flag for now, and correct in
10912 adjust_dynamic_symbol. */
10913 if (!info->shared)
10914 h->non_got_ref = 1;
10915
10916 /* We may need a .plt entry if the function this reloc
10917 refers to is in a different object. We can't tell for
10918 sure yet, because something later might force the
10919 symbol local. */
10920 if (needs_plt)
10921 h->needs_plt = 1;
10922
10923 /* If we create a PLT entry, this relocation will reference
10924 it, even if it's an ABS32 relocation. */
10925 h->plt.refcount += 1;
10926
10927 /* It's too early to use htab->use_blx here, so we have to
10928 record possible blx references separately from
10929 relocs that definitely need a thumb stub. */
10930
10931 if (r_type == R_ARM_THM_CALL)
10932 eh->plt_maybe_thumb_refcount += 1;
10933
10934 if (r_type == R_ARM_THM_JUMP24
10935 || r_type == R_ARM_THM_JUMP19)
10936 eh->plt_thumb_refcount += 1;
10937 }
10938
10939 /* If we are creating a shared library or relocatable executable,
10940 and this is a reloc against a global symbol, or a non PC
10941 relative reloc against a local symbol, then we need to copy
10942 the reloc into the shared library. However, if we are linking
10943 with -Bsymbolic, we do not need to copy a reloc against a
10944 global symbol which is defined in an object we are
10945 including in the link (i.e., DEF_REGULAR is set). At
10946 this point we have not seen all the input files, so it is
10947 possible that DEF_REGULAR is not set now but will be set
10948 later (it is never cleared). We account for that
10949 possibility below by storing information in the
10950 relocs_copied field of the hash table entry. */
10951 if ((info->shared || htab->root.is_relocatable_executable)
10952 && (sec->flags & SEC_ALLOC) != 0
10953 && ((r_type == R_ARM_ABS32 || r_type == R_ARM_ABS32_NOI)
10954 || (h != NULL && ! h->needs_plt
10955 && (! info->symbolic || ! h->def_regular))))
10956 {
10957 struct elf32_arm_relocs_copied *p, **head;
10958
10959 /* When creating a shared object, we must copy these
10960 reloc types into the output file. We create a reloc
10961 section in dynobj and make room for this reloc. */
10962 if (sreloc == NULL)
10963 {
10964 sreloc = _bfd_elf_make_dynamic_reloc_section
10965 (sec, dynobj, 2, abfd, ! htab->use_rel);
10966
10967 if (sreloc == NULL)
10968 return FALSE;
10969
10970 /* BPABI objects never have dynamic relocations mapped. */
10971 if (htab->symbian_p)
10972 {
10973 flagword flags;
10974
10975 flags = bfd_get_section_flags (dynobj, sreloc);
10976 flags &= ~(SEC_LOAD | SEC_ALLOC);
10977 bfd_set_section_flags (dynobj, sreloc, flags);
10978 }
10979 }
10980
10981 /* If this is a global symbol, we count the number of
10982 relocations we need for this symbol. */
10983 if (h != NULL)
10984 {
10985 head = &((struct elf32_arm_link_hash_entry *) h)->relocs_copied;
10986 }
10987 else
10988 {
10989 /* Track dynamic relocs needed for local syms too.
10990 We really need local syms available to do this
10991 easily. Oh well. */
10992 asection *s;
10993 void *vpp;
10994 Elf_Internal_Sym *isym;
10995
10996 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
10997 abfd, r_symndx);
10998 if (isym == NULL)
10999 return FALSE;
11000
11001 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
11002 if (s == NULL)
11003 s = sec;
11004
11005 vpp = &elf_section_data (s)->local_dynrel;
11006 head = (struct elf32_arm_relocs_copied **) vpp;
11007 }
11008
11009 p = *head;
11010 if (p == NULL || p->section != sec)
11011 {
11012 bfd_size_type amt = sizeof *p;
11013
11014 p = (struct elf32_arm_relocs_copied *)
11015 bfd_alloc (htab->root.dynobj, amt);
11016 if (p == NULL)
11017 return FALSE;
11018 p->next = *head;
11019 *head = p;
11020 p->section = sec;
11021 p->count = 0;
11022 p->pc_count = 0;
11023 }
11024
11025 if (r_type == R_ARM_REL32 || r_type == R_ARM_REL32_NOI)
11026 p->pc_count += 1;
11027 p->count += 1;
11028 }
11029 break;
11030
11031 /* This relocation describes the C++ object vtable hierarchy.
11032 Reconstruct it for later use during GC. */
11033 case R_ARM_GNU_VTINHERIT:
11034 if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
11035 return FALSE;
11036 break;
11037
11038 /* This relocation describes which C++ vtable entries are actually
11039 used. Record for later use during GC. */
11040 case R_ARM_GNU_VTENTRY:
11041 BFD_ASSERT (h != NULL);
11042 if (h != NULL
11043 && !bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_offset))
11044 return FALSE;
11045 break;
11046 }
11047 }
11048
11049 return TRUE;
11050 }
11051
11052 /* Unwinding tables are not referenced directly. This pass marks them as
11053 required if the corresponding code section is marked. */
11054
11055 static bfd_boolean
11056 elf32_arm_gc_mark_extra_sections (struct bfd_link_info *info,
11057 elf_gc_mark_hook_fn gc_mark_hook)
11058 {
11059 bfd *sub;
11060 Elf_Internal_Shdr **elf_shdrp;
11061 bfd_boolean again;
11062
11063 /* Marking EH data may cause additional code sections to be marked,
11064 requiring multiple passes. */
11065 again = TRUE;
11066 while (again)
11067 {
11068 again = FALSE;
11069 for (sub = info->input_bfds; sub != NULL; sub = sub->link_next)
11070 {
11071 asection *o;
11072
11073 if (! is_arm_elf (sub))
11074 continue;
11075
11076 elf_shdrp = elf_elfsections (sub);
11077 for (o = sub->sections; o != NULL; o = o->next)
11078 {
11079 Elf_Internal_Shdr *hdr;
11080
11081 hdr = &elf_section_data (o)->this_hdr;
11082 if (hdr->sh_type == SHT_ARM_EXIDX
11083 && hdr->sh_link
11084 && hdr->sh_link < elf_numsections (sub)
11085 && !o->gc_mark
11086 && elf_shdrp[hdr->sh_link]->bfd_section->gc_mark)
11087 {
11088 again = TRUE;
11089 if (!_bfd_elf_gc_mark (info, o, gc_mark_hook))
11090 return FALSE;
11091 }
11092 }
11093 }
11094 }
11095
11096 return TRUE;
11097 }
11098
11099 /* Treat mapping symbols as special target symbols. */
11100
11101 static bfd_boolean
11102 elf32_arm_is_target_special_symbol (bfd * abfd ATTRIBUTE_UNUSED, asymbol * sym)
11103 {
11104 return bfd_is_arm_special_symbol_name (sym->name,
11105 BFD_ARM_SPECIAL_SYM_TYPE_ANY);
11106 }
11107
11108 /* This is a copy of elf_find_function() from elf.c except that
11109 ARM mapping symbols are ignored when looking for function names
11110 and STT_ARM_TFUNC is considered to a function type. */
11111
11112 static bfd_boolean
11113 arm_elf_find_function (bfd * abfd ATTRIBUTE_UNUSED,
11114 asection * section,
11115 asymbol ** symbols,
11116 bfd_vma offset,
11117 const char ** filename_ptr,
11118 const char ** functionname_ptr)
11119 {
11120 const char * filename = NULL;
11121 asymbol * func = NULL;
11122 bfd_vma low_func = 0;
11123 asymbol ** p;
11124
11125 for (p = symbols; *p != NULL; p++)
11126 {
11127 elf_symbol_type *q;
11128
11129 q = (elf_symbol_type *) *p;
11130
11131 switch (ELF_ST_TYPE (q->internal_elf_sym.st_info))
11132 {
11133 default:
11134 break;
11135 case STT_FILE:
11136 filename = bfd_asymbol_name (&q->symbol);
11137 break;
11138 case STT_FUNC:
11139 case STT_ARM_TFUNC:
11140 case STT_NOTYPE:
11141 /* Skip mapping symbols. */
11142 if ((q->symbol.flags & BSF_LOCAL)
11143 && bfd_is_arm_special_symbol_name (q->symbol.name,
11144 BFD_ARM_SPECIAL_SYM_TYPE_ANY))
11145 continue;
11146 /* Fall through. */
11147 if (bfd_get_section (&q->symbol) == section
11148 && q->symbol.value >= low_func
11149 && q->symbol.value <= offset)
11150 {
11151 func = (asymbol *) q;
11152 low_func = q->symbol.value;
11153 }
11154 break;
11155 }
11156 }
11157
11158 if (func == NULL)
11159 return FALSE;
11160
11161 if (filename_ptr)
11162 *filename_ptr = filename;
11163 if (functionname_ptr)
11164 *functionname_ptr = bfd_asymbol_name (func);
11165
11166 return TRUE;
11167 }
11168
11169
11170 /* Find the nearest line to a particular section and offset, for error
11171 reporting. This code is a duplicate of the code in elf.c, except
11172 that it uses arm_elf_find_function. */
11173
11174 static bfd_boolean
11175 elf32_arm_find_nearest_line (bfd * abfd,
11176 asection * section,
11177 asymbol ** symbols,
11178 bfd_vma offset,
11179 const char ** filename_ptr,
11180 const char ** functionname_ptr,
11181 unsigned int * line_ptr)
11182 {
11183 bfd_boolean found = FALSE;
11184
11185 /* We skip _bfd_dwarf1_find_nearest_line since no known ARM toolchain uses it. */
11186
11187 if (_bfd_dwarf2_find_nearest_line (abfd, section, symbols, offset,
11188 filename_ptr, functionname_ptr,
11189 line_ptr, 0,
11190 & elf_tdata (abfd)->dwarf2_find_line_info))
11191 {
11192 if (!*functionname_ptr)
11193 arm_elf_find_function (abfd, section, symbols, offset,
11194 *filename_ptr ? NULL : filename_ptr,
11195 functionname_ptr);
11196
11197 return TRUE;
11198 }
11199
11200 if (! _bfd_stab_section_find_nearest_line (abfd, symbols, section, offset,
11201 & found, filename_ptr,
11202 functionname_ptr, line_ptr,
11203 & elf_tdata (abfd)->line_info))
11204 return FALSE;
11205
11206 if (found && (*functionname_ptr || *line_ptr))
11207 return TRUE;
11208
11209 if (symbols == NULL)
11210 return FALSE;
11211
11212 if (! arm_elf_find_function (abfd, section, symbols, offset,
11213 filename_ptr, functionname_ptr))
11214 return FALSE;
11215
11216 *line_ptr = 0;
11217 return TRUE;
11218 }
11219
11220 static bfd_boolean
11221 elf32_arm_find_inliner_info (bfd * abfd,
11222 const char ** filename_ptr,
11223 const char ** functionname_ptr,
11224 unsigned int * line_ptr)
11225 {
11226 bfd_boolean found;
11227 found = _bfd_dwarf2_find_inliner_info (abfd, filename_ptr,
11228 functionname_ptr, line_ptr,
11229 & elf_tdata (abfd)->dwarf2_find_line_info);
11230 return found;
11231 }
11232
11233 /* Adjust a symbol defined by a dynamic object and referenced by a
11234 regular object. The current definition is in some section of the
11235 dynamic object, but we're not including those sections. We have to
11236 change the definition to something the rest of the link can
11237 understand. */
11238
11239 static bfd_boolean
11240 elf32_arm_adjust_dynamic_symbol (struct bfd_link_info * info,
11241 struct elf_link_hash_entry * h)
11242 {
11243 bfd * dynobj;
11244 asection * s;
11245 struct elf32_arm_link_hash_entry * eh;
11246 struct elf32_arm_link_hash_table *globals;
11247
11248 globals = elf32_arm_hash_table (info);
11249 if (globals == NULL)
11250 return FALSE;
11251
11252 dynobj = elf_hash_table (info)->dynobj;
11253
11254 /* Make sure we know what is going on here. */
11255 BFD_ASSERT (dynobj != NULL
11256 && (h->needs_plt
11257 || h->u.weakdef != NULL
11258 || (h->def_dynamic
11259 && h->ref_regular
11260 && !h->def_regular)));
11261
11262 eh = (struct elf32_arm_link_hash_entry *) h;
11263
11264 /* If this is a function, put it in the procedure linkage table. We
11265 will fill in the contents of the procedure linkage table later,
11266 when we know the address of the .got section. */
11267 if (h->type == STT_FUNC || h->type == STT_ARM_TFUNC
11268 || h->needs_plt)
11269 {
11270 if (h->plt.refcount <= 0
11271 || SYMBOL_CALLS_LOCAL (info, h)
11272 || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
11273 && h->root.type == bfd_link_hash_undefweak))
11274 {
11275 /* This case can occur if we saw a PLT32 reloc in an input
11276 file, but the symbol was never referred to by a dynamic
11277 object, or if all references were garbage collected. In
11278 such a case, we don't actually need to build a procedure
11279 linkage table, and we can just do a PC24 reloc instead. */
11280 h->plt.offset = (bfd_vma) -1;
11281 eh->plt_thumb_refcount = 0;
11282 eh->plt_maybe_thumb_refcount = 0;
11283 h->needs_plt = 0;
11284 }
11285
11286 return TRUE;
11287 }
11288 else
11289 {
11290 /* It's possible that we incorrectly decided a .plt reloc was
11291 needed for an R_ARM_PC24 or similar reloc to a non-function sym
11292 in check_relocs. We can't decide accurately between function
11293 and non-function syms in check-relocs; Objects loaded later in
11294 the link may change h->type. So fix it now. */
11295 h->plt.offset = (bfd_vma) -1;
11296 eh->plt_thumb_refcount = 0;
11297 eh->plt_maybe_thumb_refcount = 0;
11298 }
11299
11300 /* If this is a weak symbol, and there is a real definition, the
11301 processor independent code will have arranged for us to see the
11302 real definition first, and we can just use the same value. */
11303 if (h->u.weakdef != NULL)
11304 {
11305 BFD_ASSERT (h->u.weakdef->root.type == bfd_link_hash_defined
11306 || h->u.weakdef->root.type == bfd_link_hash_defweak);
11307 h->root.u.def.section = h->u.weakdef->root.u.def.section;
11308 h->root.u.def.value = h->u.weakdef->root.u.def.value;
11309 return TRUE;
11310 }
11311
11312 /* If there are no non-GOT references, we do not need a copy
11313 relocation. */
11314 if (!h->non_got_ref)
11315 return TRUE;
11316
11317 /* This is a reference to a symbol defined by a dynamic object which
11318 is not a function. */
11319
11320 /* If we are creating a shared library, we must presume that the
11321 only references to the symbol are via the global offset table.
11322 For such cases we need not do anything here; the relocations will
11323 be handled correctly by relocate_section. Relocatable executables
11324 can reference data in shared objects directly, so we don't need to
11325 do anything here. */
11326 if (info->shared || globals->root.is_relocatable_executable)
11327 return TRUE;
11328
11329 if (h->size == 0)
11330 {
11331 (*_bfd_error_handler) (_("dynamic variable `%s' is zero size"),
11332 h->root.root.string);
11333 return TRUE;
11334 }
11335
11336 /* We must allocate the symbol in our .dynbss section, which will
11337 become part of the .bss section of the executable. There will be
11338 an entry for this symbol in the .dynsym section. The dynamic
11339 object will contain position independent code, so all references
11340 from the dynamic object to this symbol will go through the global
11341 offset table. The dynamic linker will use the .dynsym entry to
11342 determine the address it must put in the global offset table, so
11343 both the dynamic object and the regular object will refer to the
11344 same memory location for the variable. */
11345 s = bfd_get_section_by_name (dynobj, ".dynbss");
11346 BFD_ASSERT (s != NULL);
11347
11348 /* We must generate a R_ARM_COPY reloc to tell the dynamic linker to
11349 copy the initial value out of the dynamic object and into the
11350 runtime process image. We need to remember the offset into the
11351 .rel(a).bss section we are going to use. */
11352 if ((h->root.u.def.section->flags & SEC_ALLOC) != 0)
11353 {
11354 asection *srel;
11355
11356 srel = bfd_get_section_by_name (dynobj, RELOC_SECTION (globals, ".bss"));
11357 BFD_ASSERT (srel != NULL);
11358 srel->size += RELOC_SIZE (globals);
11359 h->needs_copy = 1;
11360 }
11361
11362 return _bfd_elf_adjust_dynamic_copy (h, s);
11363 }
11364
11365 /* Allocate space in .plt, .got and associated reloc sections for
11366 dynamic relocs. */
11367
11368 static bfd_boolean
11369 allocate_dynrelocs (struct elf_link_hash_entry *h, void * inf)
11370 {
11371 struct bfd_link_info *info;
11372 struct elf32_arm_link_hash_table *htab;
11373 struct elf32_arm_link_hash_entry *eh;
11374 struct elf32_arm_relocs_copied *p;
11375 bfd_signed_vma thumb_refs;
11376
11377 eh = (struct elf32_arm_link_hash_entry *) h;
11378
11379 if (h->root.type == bfd_link_hash_indirect)
11380 return TRUE;
11381
11382 if (h->root.type == bfd_link_hash_warning)
11383 /* When warning symbols are created, they **replace** the "real"
11384 entry in the hash table, thus we never get to see the real
11385 symbol in a hash traversal. So look at it now. */
11386 h = (struct elf_link_hash_entry *) h->root.u.i.link;
11387
11388 info = (struct bfd_link_info *) inf;
11389 htab = elf32_arm_hash_table (info);
11390 if (htab == NULL)
11391 return FALSE;
11392
11393 if (htab->root.dynamic_sections_created
11394 && h->plt.refcount > 0)
11395 {
11396 /* Make sure this symbol is output as a dynamic symbol.
11397 Undefined weak syms won't yet be marked as dynamic. */
11398 if (h->dynindx == -1
11399 && !h->forced_local)
11400 {
11401 if (! bfd_elf_link_record_dynamic_symbol (info, h))
11402 return FALSE;
11403 }
11404
11405 if (info->shared
11406 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
11407 {
11408 asection *s = htab->splt;
11409
11410 /* If this is the first .plt entry, make room for the special
11411 first entry. */
11412 if (s->size == 0)
11413 s->size += htab->plt_header_size;
11414
11415 h->plt.offset = s->size;
11416
11417 /* If we will insert a Thumb trampoline before this PLT, leave room
11418 for it. */
11419 thumb_refs = eh->plt_thumb_refcount;
11420 if (!htab->use_blx)
11421 thumb_refs += eh->plt_maybe_thumb_refcount;
11422
11423 if (thumb_refs > 0)
11424 {
11425 h->plt.offset += PLT_THUMB_STUB_SIZE;
11426 s->size += PLT_THUMB_STUB_SIZE;
11427 }
11428
11429 /* If this symbol is not defined in a regular file, and we are
11430 not generating a shared library, then set the symbol to this
11431 location in the .plt. This is required to make function
11432 pointers compare as equal between the normal executable and
11433 the shared library. */
11434 if (! info->shared
11435 && !h->def_regular)
11436 {
11437 h->root.u.def.section = s;
11438 h->root.u.def.value = h->plt.offset;
11439
11440 /* Make sure the function is not marked as Thumb, in case
11441 it is the target of an ABS32 relocation, which will
11442 point to the PLT entry. */
11443 if (ELF_ST_TYPE (h->type) == STT_ARM_TFUNC)
11444 h->type = ELF_ST_INFO (ELF_ST_BIND (h->type), STT_FUNC);
11445 }
11446
11447 /* Make room for this entry. */
11448 s->size += htab->plt_entry_size;
11449
11450 if (!htab->symbian_p)
11451 {
11452 /* We also need to make an entry in the .got.plt section, which
11453 will be placed in the .got section by the linker script. */
11454 eh->plt_got_offset = htab->sgotplt->size;
11455 htab->sgotplt->size += 4;
11456 }
11457
11458 /* We also need to make an entry in the .rel(a).plt section. */
11459 htab->srelplt->size += RELOC_SIZE (htab);
11460
11461 /* VxWorks executables have a second set of relocations for
11462 each PLT entry. They go in a separate relocation section,
11463 which is processed by the kernel loader. */
11464 if (htab->vxworks_p && !info->shared)
11465 {
11466 /* There is a relocation for the initial PLT entry:
11467 an R_ARM_32 relocation for _GLOBAL_OFFSET_TABLE_. */
11468 if (h->plt.offset == htab->plt_header_size)
11469 htab->srelplt2->size += RELOC_SIZE (htab);
11470
11471 /* There are two extra relocations for each subsequent
11472 PLT entry: an R_ARM_32 relocation for the GOT entry,
11473 and an R_ARM_32 relocation for the PLT entry. */
11474 htab->srelplt2->size += RELOC_SIZE (htab) * 2;
11475 }
11476 }
11477 else
11478 {
11479 h->plt.offset = (bfd_vma) -1;
11480 h->needs_plt = 0;
11481 }
11482 }
11483 else
11484 {
11485 h->plt.offset = (bfd_vma) -1;
11486 h->needs_plt = 0;
11487 }
11488
11489 if (h->got.refcount > 0)
11490 {
11491 asection *s;
11492 bfd_boolean dyn;
11493 int tls_type = elf32_arm_hash_entry (h)->tls_type;
11494 int indx;
11495
11496 /* Make sure this symbol is output as a dynamic symbol.
11497 Undefined weak syms won't yet be marked as dynamic. */
11498 if (h->dynindx == -1
11499 && !h->forced_local)
11500 {
11501 if (! bfd_elf_link_record_dynamic_symbol (info, h))
11502 return FALSE;
11503 }
11504
11505 if (!htab->symbian_p)
11506 {
11507 s = htab->sgot;
11508 h->got.offset = s->size;
11509
11510 if (tls_type == GOT_UNKNOWN)
11511 abort ();
11512
11513 if (tls_type == GOT_NORMAL)
11514 /* Non-TLS symbols need one GOT slot. */
11515 s->size += 4;
11516 else
11517 {
11518 if (tls_type & GOT_TLS_GD)
11519 /* R_ARM_TLS_GD32 needs 2 consecutive GOT slots. */
11520 s->size += 8;
11521 if (tls_type & GOT_TLS_IE)
11522 /* R_ARM_TLS_IE32 needs one GOT slot. */
11523 s->size += 4;
11524 }
11525
11526 dyn = htab->root.dynamic_sections_created;
11527
11528 indx = 0;
11529 if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
11530 && (!info->shared
11531 || !SYMBOL_REFERENCES_LOCAL (info, h)))
11532 indx = h->dynindx;
11533
11534 if (tls_type != GOT_NORMAL
11535 && (info->shared || indx != 0)
11536 && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
11537 || h->root.type != bfd_link_hash_undefweak))
11538 {
11539 if (tls_type & GOT_TLS_IE)
11540 htab->srelgot->size += RELOC_SIZE (htab);
11541
11542 if (tls_type & GOT_TLS_GD)
11543 htab->srelgot->size += RELOC_SIZE (htab);
11544
11545 if ((tls_type & GOT_TLS_GD) && indx != 0)
11546 htab->srelgot->size += RELOC_SIZE (htab);
11547 }
11548 else if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
11549 || h->root.type != bfd_link_hash_undefweak)
11550 && (info->shared
11551 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
11552 htab->srelgot->size += RELOC_SIZE (htab);
11553 }
11554 }
11555 else
11556 h->got.offset = (bfd_vma) -1;
11557
11558 /* Allocate stubs for exported Thumb functions on v4t. */
11559 if (!htab->use_blx && h->dynindx != -1
11560 && h->def_regular
11561 && ELF_ST_TYPE (h->type) == STT_ARM_TFUNC
11562 && ELF_ST_VISIBILITY (h->other) == STV_DEFAULT)
11563 {
11564 struct elf_link_hash_entry * th;
11565 struct bfd_link_hash_entry * bh;
11566 struct elf_link_hash_entry * myh;
11567 char name[1024];
11568 asection *s;
11569 bh = NULL;
11570 /* Create a new symbol to regist the real location of the function. */
11571 s = h->root.u.def.section;
11572 sprintf (name, "__real_%s", h->root.root.string);
11573 _bfd_generic_link_add_one_symbol (info, s->owner,
11574 name, BSF_GLOBAL, s,
11575 h->root.u.def.value,
11576 NULL, TRUE, FALSE, &bh);
11577
11578 myh = (struct elf_link_hash_entry *) bh;
11579 myh->type = ELF_ST_INFO (STB_LOCAL, STT_ARM_TFUNC);
11580 myh->forced_local = 1;
11581 eh->export_glue = myh;
11582 th = record_arm_to_thumb_glue (info, h);
11583 /* Point the symbol at the stub. */
11584 h->type = ELF_ST_INFO (ELF_ST_BIND (h->type), STT_FUNC);
11585 h->root.u.def.section = th->root.u.def.section;
11586 h->root.u.def.value = th->root.u.def.value & ~1;
11587 }
11588
11589 if (eh->relocs_copied == NULL)
11590 return TRUE;
11591
11592 /* In the shared -Bsymbolic case, discard space allocated for
11593 dynamic pc-relative relocs against symbols which turn out to be
11594 defined in regular objects. For the normal shared case, discard
11595 space for pc-relative relocs that have become local due to symbol
11596 visibility changes. */
11597
11598 if (info->shared || htab->root.is_relocatable_executable)
11599 {
11600 /* The only relocs that use pc_count are R_ARM_REL32 and
11601 R_ARM_REL32_NOI, which will appear on something like
11602 ".long foo - .". We want calls to protected symbols to resolve
11603 directly to the function rather than going via the plt. If people
11604 want function pointer comparisons to work as expected then they
11605 should avoid writing assembly like ".long foo - .". */
11606 if (SYMBOL_CALLS_LOCAL (info, h))
11607 {
11608 struct elf32_arm_relocs_copied **pp;
11609
11610 for (pp = &eh->relocs_copied; (p = *pp) != NULL; )
11611 {
11612 p->count -= p->pc_count;
11613 p->pc_count = 0;
11614 if (p->count == 0)
11615 *pp = p->next;
11616 else
11617 pp = &p->next;
11618 }
11619 }
11620
11621 if (htab->vxworks_p)
11622 {
11623 struct elf32_arm_relocs_copied **pp;
11624
11625 for (pp = &eh->relocs_copied; (p = *pp) != NULL; )
11626 {
11627 if (strcmp (p->section->output_section->name, ".tls_vars") == 0)
11628 *pp = p->next;
11629 else
11630 pp = &p->next;
11631 }
11632 }
11633
11634 /* Also discard relocs on undefined weak syms with non-default
11635 visibility. */
11636 if (eh->relocs_copied != NULL
11637 && h->root.type == bfd_link_hash_undefweak)
11638 {
11639 if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
11640 eh->relocs_copied = NULL;
11641
11642 /* Make sure undefined weak symbols are output as a dynamic
11643 symbol in PIEs. */
11644 else if (h->dynindx == -1
11645 && !h->forced_local)
11646 {
11647 if (! bfd_elf_link_record_dynamic_symbol (info, h))
11648 return FALSE;
11649 }
11650 }
11651
11652 else if (htab->root.is_relocatable_executable && h->dynindx == -1
11653 && h->root.type == bfd_link_hash_new)
11654 {
11655 /* Output absolute symbols so that we can create relocations
11656 against them. For normal symbols we output a relocation
11657 against the section that contains them. */
11658 if (! bfd_elf_link_record_dynamic_symbol (info, h))
11659 return FALSE;
11660 }
11661
11662 }
11663 else
11664 {
11665 /* For the non-shared case, discard space for relocs against
11666 symbols which turn out to need copy relocs or are not
11667 dynamic. */
11668
11669 if (!h->non_got_ref
11670 && ((h->def_dynamic
11671 && !h->def_regular)
11672 || (htab->root.dynamic_sections_created
11673 && (h->root.type == bfd_link_hash_undefweak
11674 || h->root.type == bfd_link_hash_undefined))))
11675 {
11676 /* Make sure this symbol is output as a dynamic symbol.
11677 Undefined weak syms won't yet be marked as dynamic. */
11678 if (h->dynindx == -1
11679 && !h->forced_local)
11680 {
11681 if (! bfd_elf_link_record_dynamic_symbol (info, h))
11682 return FALSE;
11683 }
11684
11685 /* If that succeeded, we know we'll be keeping all the
11686 relocs. */
11687 if (h->dynindx != -1)
11688 goto keep;
11689 }
11690
11691 eh->relocs_copied = NULL;
11692
11693 keep: ;
11694 }
11695
11696 /* Finally, allocate space. */
11697 for (p = eh->relocs_copied; p != NULL; p = p->next)
11698 {
11699 asection *sreloc = elf_section_data (p->section)->sreloc;
11700 sreloc->size += p->count * RELOC_SIZE (htab);
11701 }
11702
11703 return TRUE;
11704 }
11705
11706 /* Find any dynamic relocs that apply to read-only sections. */
11707
11708 static bfd_boolean
11709 elf32_arm_readonly_dynrelocs (struct elf_link_hash_entry * h, void * inf)
11710 {
11711 struct elf32_arm_link_hash_entry * eh;
11712 struct elf32_arm_relocs_copied * p;
11713
11714 if (h->root.type == bfd_link_hash_warning)
11715 h = (struct elf_link_hash_entry *) h->root.u.i.link;
11716
11717 eh = (struct elf32_arm_link_hash_entry *) h;
11718 for (p = eh->relocs_copied; p != NULL; p = p->next)
11719 {
11720 asection *s = p->section;
11721
11722 if (s != NULL && (s->flags & SEC_READONLY) != 0)
11723 {
11724 struct bfd_link_info *info = (struct bfd_link_info *) inf;
11725
11726 info->flags |= DF_TEXTREL;
11727
11728 /* Not an error, just cut short the traversal. */
11729 return FALSE;
11730 }
11731 }
11732 return TRUE;
11733 }
11734
11735 void
11736 bfd_elf32_arm_set_byteswap_code (struct bfd_link_info *info,
11737 int byteswap_code)
11738 {
11739 struct elf32_arm_link_hash_table *globals;
11740
11741 globals = elf32_arm_hash_table (info);
11742 if (globals == NULL)
11743 return;
11744
11745 globals->byteswap_code = byteswap_code;
11746 }
11747
11748 /* Set the sizes of the dynamic sections. */
11749
11750 static bfd_boolean
11751 elf32_arm_size_dynamic_sections (bfd * output_bfd ATTRIBUTE_UNUSED,
11752 struct bfd_link_info * info)
11753 {
11754 bfd * dynobj;
11755 asection * s;
11756 bfd_boolean plt;
11757 bfd_boolean relocs;
11758 bfd *ibfd;
11759 struct elf32_arm_link_hash_table *htab;
11760
11761 htab = elf32_arm_hash_table (info);
11762 if (htab == NULL)
11763 return FALSE;
11764
11765 dynobj = elf_hash_table (info)->dynobj;
11766 BFD_ASSERT (dynobj != NULL);
11767 check_use_blx (htab);
11768
11769 if (elf_hash_table (info)->dynamic_sections_created)
11770 {
11771 /* Set the contents of the .interp section to the interpreter. */
11772 if (info->executable)
11773 {
11774 s = bfd_get_section_by_name (dynobj, ".interp");
11775 BFD_ASSERT (s != NULL);
11776 s->size = sizeof ELF_DYNAMIC_INTERPRETER;
11777 s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER;
11778 }
11779 }
11780
11781 /* Set up .got offsets for local syms, and space for local dynamic
11782 relocs. */
11783 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
11784 {
11785 bfd_signed_vma *local_got;
11786 bfd_signed_vma *end_local_got;
11787 char *local_tls_type;
11788 bfd_size_type locsymcount;
11789 Elf_Internal_Shdr *symtab_hdr;
11790 asection *srel;
11791 bfd_boolean is_vxworks = htab->vxworks_p;
11792
11793 if (! is_arm_elf (ibfd))
11794 continue;
11795
11796 for (s = ibfd->sections; s != NULL; s = s->next)
11797 {
11798 struct elf32_arm_relocs_copied *p;
11799
11800 for (p = (struct elf32_arm_relocs_copied *)
11801 elf_section_data (s)->local_dynrel; p != NULL; p = p->next)
11802 {
11803 if (!bfd_is_abs_section (p->section)
11804 && bfd_is_abs_section (p->section->output_section))
11805 {
11806 /* Input section has been discarded, either because
11807 it is a copy of a linkonce section or due to
11808 linker script /DISCARD/, so we'll be discarding
11809 the relocs too. */
11810 }
11811 else if (is_vxworks
11812 && strcmp (p->section->output_section->name,
11813 ".tls_vars") == 0)
11814 {
11815 /* Relocations in vxworks .tls_vars sections are
11816 handled specially by the loader. */
11817 }
11818 else if (p->count != 0)
11819 {
11820 srel = elf_section_data (p->section)->sreloc;
11821 srel->size += p->count * RELOC_SIZE (htab);
11822 if ((p->section->output_section->flags & SEC_READONLY) != 0)
11823 info->flags |= DF_TEXTREL;
11824 }
11825 }
11826 }
11827
11828 local_got = elf_local_got_refcounts (ibfd);
11829 if (!local_got)
11830 continue;
11831
11832 symtab_hdr = & elf_symtab_hdr (ibfd);
11833 locsymcount = symtab_hdr->sh_info;
11834 end_local_got = local_got + locsymcount;
11835 local_tls_type = elf32_arm_local_got_tls_type (ibfd);
11836 s = htab->sgot;
11837 srel = htab->srelgot;
11838 for (; local_got < end_local_got; ++local_got, ++local_tls_type)
11839 {
11840 if (*local_got > 0)
11841 {
11842 *local_got = s->size;
11843 if (*local_tls_type & GOT_TLS_GD)
11844 /* TLS_GD relocs need an 8-byte structure in the GOT. */
11845 s->size += 8;
11846 if (*local_tls_type & GOT_TLS_IE)
11847 s->size += 4;
11848 if (*local_tls_type == GOT_NORMAL)
11849 s->size += 4;
11850
11851 if (info->shared || *local_tls_type == GOT_TLS_GD)
11852 srel->size += RELOC_SIZE (htab);
11853 }
11854 else
11855 *local_got = (bfd_vma) -1;
11856 }
11857 }
11858
11859 if (htab->tls_ldm_got.refcount > 0)
11860 {
11861 /* Allocate two GOT entries and one dynamic relocation (if necessary)
11862 for R_ARM_TLS_LDM32 relocations. */
11863 htab->tls_ldm_got.offset = htab->sgot->size;
11864 htab->sgot->size += 8;
11865 if (info->shared)
11866 htab->srelgot->size += RELOC_SIZE (htab);
11867 }
11868 else
11869 htab->tls_ldm_got.offset = -1;
11870
11871 /* Allocate global sym .plt and .got entries, and space for global
11872 sym dynamic relocs. */
11873 elf_link_hash_traverse (& htab->root, allocate_dynrelocs, info);
11874
11875 /* Here we rummage through the found bfds to collect glue information. */
11876 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
11877 {
11878 if (! is_arm_elf (ibfd))
11879 continue;
11880
11881 /* Initialise mapping tables for code/data. */
11882 bfd_elf32_arm_init_maps (ibfd);
11883
11884 if (!bfd_elf32_arm_process_before_allocation (ibfd, info)
11885 || !bfd_elf32_arm_vfp11_erratum_scan (ibfd, info))
11886 /* xgettext:c-format */
11887 _bfd_error_handler (_("Errors encountered processing file %s"),
11888 ibfd->filename);
11889 }
11890
11891 /* Allocate space for the glue sections now that we've sized them. */
11892 bfd_elf32_arm_allocate_interworking_sections (info);
11893
11894 /* The check_relocs and adjust_dynamic_symbol entry points have
11895 determined the sizes of the various dynamic sections. Allocate
11896 memory for them. */
11897 plt = FALSE;
11898 relocs = FALSE;
11899 for (s = dynobj->sections; s != NULL; s = s->next)
11900 {
11901 const char * name;
11902
11903 if ((s->flags & SEC_LINKER_CREATED) == 0)
11904 continue;
11905
11906 /* It's OK to base decisions on the section name, because none
11907 of the dynobj section names depend upon the input files. */
11908 name = bfd_get_section_name (dynobj, s);
11909
11910 if (strcmp (name, ".plt") == 0)
11911 {
11912 /* Remember whether there is a PLT. */
11913 plt = s->size != 0;
11914 }
11915 else if (CONST_STRNEQ (name, ".rel"))
11916 {
11917 if (s->size != 0)
11918 {
11919 /* Remember whether there are any reloc sections other
11920 than .rel(a).plt and .rela.plt.unloaded. */
11921 if (s != htab->srelplt && s != htab->srelplt2)
11922 relocs = TRUE;
11923
11924 /* We use the reloc_count field as a counter if we need
11925 to copy relocs into the output file. */
11926 s->reloc_count = 0;
11927 }
11928 }
11929 else if (! CONST_STRNEQ (name, ".got")
11930 && strcmp (name, ".dynbss") != 0)
11931 {
11932 /* It's not one of our sections, so don't allocate space. */
11933 continue;
11934 }
11935
11936 if (s->size == 0)
11937 {
11938 /* If we don't need this section, strip it from the
11939 output file. This is mostly to handle .rel(a).bss and
11940 .rel(a).plt. We must create both sections in
11941 create_dynamic_sections, because they must be created
11942 before the linker maps input sections to output
11943 sections. The linker does that before
11944 adjust_dynamic_symbol is called, and it is that
11945 function which decides whether anything needs to go
11946 into these sections. */
11947 s->flags |= SEC_EXCLUDE;
11948 continue;
11949 }
11950
11951 if ((s->flags & SEC_HAS_CONTENTS) == 0)
11952 continue;
11953
11954 /* Allocate memory for the section contents. */
11955 s->contents = (unsigned char *) bfd_zalloc (dynobj, s->size);
11956 if (s->contents == NULL)
11957 return FALSE;
11958 }
11959
11960 if (elf_hash_table (info)->dynamic_sections_created)
11961 {
11962 /* Add some entries to the .dynamic section. We fill in the
11963 values later, in elf32_arm_finish_dynamic_sections, but we
11964 must add the entries now so that we get the correct size for
11965 the .dynamic section. The DT_DEBUG entry is filled in by the
11966 dynamic linker and used by the debugger. */
11967 #define add_dynamic_entry(TAG, VAL) \
11968 _bfd_elf_add_dynamic_entry (info, TAG, VAL)
11969
11970 if (info->executable)
11971 {
11972 if (!add_dynamic_entry (DT_DEBUG, 0))
11973 return FALSE;
11974 }
11975
11976 if (plt)
11977 {
11978 if ( !add_dynamic_entry (DT_PLTGOT, 0)
11979 || !add_dynamic_entry (DT_PLTRELSZ, 0)
11980 || !add_dynamic_entry (DT_PLTREL,
11981 htab->use_rel ? DT_REL : DT_RELA)
11982 || !add_dynamic_entry (DT_JMPREL, 0))
11983 return FALSE;
11984 }
11985
11986 if (relocs)
11987 {
11988 if (htab->use_rel)
11989 {
11990 if (!add_dynamic_entry (DT_REL, 0)
11991 || !add_dynamic_entry (DT_RELSZ, 0)
11992 || !add_dynamic_entry (DT_RELENT, RELOC_SIZE (htab)))
11993 return FALSE;
11994 }
11995 else
11996 {
11997 if (!add_dynamic_entry (DT_RELA, 0)
11998 || !add_dynamic_entry (DT_RELASZ, 0)
11999 || !add_dynamic_entry (DT_RELAENT, RELOC_SIZE (htab)))
12000 return FALSE;
12001 }
12002 }
12003
12004 /* If any dynamic relocs apply to a read-only section,
12005 then we need a DT_TEXTREL entry. */
12006 if ((info->flags & DF_TEXTREL) == 0)
12007 elf_link_hash_traverse (& htab->root, elf32_arm_readonly_dynrelocs,
12008 info);
12009
12010 if ((info->flags & DF_TEXTREL) != 0)
12011 {
12012 if (!add_dynamic_entry (DT_TEXTREL, 0))
12013 return FALSE;
12014 }
12015 if (htab->vxworks_p
12016 && !elf_vxworks_add_dynamic_entries (output_bfd, info))
12017 return FALSE;
12018 }
12019 #undef add_dynamic_entry
12020
12021 return TRUE;
12022 }
12023
12024 /* Finish up dynamic symbol handling. We set the contents of various
12025 dynamic sections here. */
12026
12027 static bfd_boolean
12028 elf32_arm_finish_dynamic_symbol (bfd * output_bfd,
12029 struct bfd_link_info * info,
12030 struct elf_link_hash_entry * h,
12031 Elf_Internal_Sym * sym)
12032 {
12033 bfd * dynobj;
12034 struct elf32_arm_link_hash_table *htab;
12035 struct elf32_arm_link_hash_entry *eh;
12036
12037 dynobj = elf_hash_table (info)->dynobj;
12038 htab = elf32_arm_hash_table (info);
12039 if (htab == NULL)
12040 return FALSE;
12041
12042 eh = (struct elf32_arm_link_hash_entry *) h;
12043
12044 if (h->plt.offset != (bfd_vma) -1)
12045 {
12046 asection * splt;
12047 asection * srel;
12048 bfd_byte *loc;
12049 bfd_vma plt_index;
12050 Elf_Internal_Rela rel;
12051
12052 /* This symbol has an entry in the procedure linkage table. Set
12053 it up. */
12054
12055 BFD_ASSERT (h->dynindx != -1);
12056
12057 splt = bfd_get_section_by_name (dynobj, ".plt");
12058 srel = bfd_get_section_by_name (dynobj, RELOC_SECTION (htab, ".plt"));
12059 BFD_ASSERT (splt != NULL && srel != NULL);
12060
12061 /* Fill in the entry in the procedure linkage table. */
12062 if (htab->symbian_p)
12063 {
12064 put_arm_insn (htab, output_bfd,
12065 elf32_arm_symbian_plt_entry[0],
12066 splt->contents + h->plt.offset);
12067 bfd_put_32 (output_bfd,
12068 elf32_arm_symbian_plt_entry[1],
12069 splt->contents + h->plt.offset + 4);
12070
12071 /* Fill in the entry in the .rel.plt section. */
12072 rel.r_offset = (splt->output_section->vma
12073 + splt->output_offset
12074 + h->plt.offset + 4);
12075 rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_GLOB_DAT);
12076
12077 /* Get the index in the procedure linkage table which
12078 corresponds to this symbol. This is the index of this symbol
12079 in all the symbols for which we are making plt entries. The
12080 first entry in the procedure linkage table is reserved. */
12081 plt_index = ((h->plt.offset - htab->plt_header_size)
12082 / htab->plt_entry_size);
12083 }
12084 else
12085 {
12086 bfd_vma got_offset, got_address, plt_address;
12087 bfd_vma got_displacement;
12088 asection * sgot;
12089 bfd_byte * ptr;
12090
12091 sgot = bfd_get_section_by_name (dynobj, ".got.plt");
12092 BFD_ASSERT (sgot != NULL);
12093
12094 /* Get the offset into the .got.plt table of the entry that
12095 corresponds to this function. */
12096 got_offset = eh->plt_got_offset;
12097
12098 /* Get the index in the procedure linkage table which
12099 corresponds to this symbol. This is the index of this symbol
12100 in all the symbols for which we are making plt entries. The
12101 first three entries in .got.plt are reserved; after that
12102 symbols appear in the same order as in .plt. */
12103 plt_index = (got_offset - 12) / 4;
12104
12105 /* Calculate the address of the GOT entry. */
12106 got_address = (sgot->output_section->vma
12107 + sgot->output_offset
12108 + got_offset);
12109
12110 /* ...and the address of the PLT entry. */
12111 plt_address = (splt->output_section->vma
12112 + splt->output_offset
12113 + h->plt.offset);
12114
12115 ptr = htab->splt->contents + h->plt.offset;
12116 if (htab->vxworks_p && info->shared)
12117 {
12118 unsigned int i;
12119 bfd_vma val;
12120
12121 for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
12122 {
12123 val = elf32_arm_vxworks_shared_plt_entry[i];
12124 if (i == 2)
12125 val |= got_address - sgot->output_section->vma;
12126 if (i == 5)
12127 val |= plt_index * RELOC_SIZE (htab);
12128 if (i == 2 || i == 5)
12129 bfd_put_32 (output_bfd, val, ptr);
12130 else
12131 put_arm_insn (htab, output_bfd, val, ptr);
12132 }
12133 }
12134 else if (htab->vxworks_p)
12135 {
12136 unsigned int i;
12137 bfd_vma val;
12138
12139 for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
12140 {
12141 val = elf32_arm_vxworks_exec_plt_entry[i];
12142 if (i == 2)
12143 val |= got_address;
12144 if (i == 4)
12145 val |= 0xffffff & -((h->plt.offset + i * 4 + 8) >> 2);
12146 if (i == 5)
12147 val |= plt_index * RELOC_SIZE (htab);
12148 if (i == 2 || i == 5)
12149 bfd_put_32 (output_bfd, val, ptr);
12150 else
12151 put_arm_insn (htab, output_bfd, val, ptr);
12152 }
12153
12154 loc = (htab->srelplt2->contents
12155 + (plt_index * 2 + 1) * RELOC_SIZE (htab));
12156
12157 /* Create the .rela.plt.unloaded R_ARM_ABS32 relocation
12158 referencing the GOT for this PLT entry. */
12159 rel.r_offset = plt_address + 8;
12160 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
12161 rel.r_addend = got_offset;
12162 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
12163 loc += RELOC_SIZE (htab);
12164
12165 /* Create the R_ARM_ABS32 relocation referencing the
12166 beginning of the PLT for this GOT entry. */
12167 rel.r_offset = got_address;
12168 rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
12169 rel.r_addend = 0;
12170 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
12171 }
12172 else
12173 {
12174 bfd_signed_vma thumb_refs;
12175 /* Calculate the displacement between the PLT slot and the
12176 entry in the GOT. The eight-byte offset accounts for the
12177 value produced by adding to pc in the first instruction
12178 of the PLT stub. */
12179 got_displacement = got_address - (plt_address + 8);
12180
12181 BFD_ASSERT ((got_displacement & 0xf0000000) == 0);
12182
12183 thumb_refs = eh->plt_thumb_refcount;
12184 if (!htab->use_blx)
12185 thumb_refs += eh->plt_maybe_thumb_refcount;
12186
12187 if (thumb_refs > 0)
12188 {
12189 put_thumb_insn (htab, output_bfd,
12190 elf32_arm_plt_thumb_stub[0], ptr - 4);
12191 put_thumb_insn (htab, output_bfd,
12192 elf32_arm_plt_thumb_stub[1], ptr - 2);
12193 }
12194
12195 put_arm_insn (htab, output_bfd,
12196 elf32_arm_plt_entry[0]
12197 | ((got_displacement & 0x0ff00000) >> 20),
12198 ptr + 0);
12199 put_arm_insn (htab, output_bfd,
12200 elf32_arm_plt_entry[1]
12201 | ((got_displacement & 0x000ff000) >> 12),
12202 ptr+ 4);
12203 put_arm_insn (htab, output_bfd,
12204 elf32_arm_plt_entry[2]
12205 | (got_displacement & 0x00000fff),
12206 ptr + 8);
12207 #ifdef FOUR_WORD_PLT
12208 bfd_put_32 (output_bfd, elf32_arm_plt_entry[3], ptr + 12);
12209 #endif
12210 }
12211
12212 /* Fill in the entry in the global offset table. */
12213 bfd_put_32 (output_bfd,
12214 (splt->output_section->vma
12215 + splt->output_offset),
12216 sgot->contents + got_offset);
12217
12218 /* Fill in the entry in the .rel(a).plt section. */
12219 rel.r_addend = 0;
12220 rel.r_offset = got_address;
12221 rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_JUMP_SLOT);
12222 }
12223
12224 loc = srel->contents + plt_index * RELOC_SIZE (htab);
12225 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
12226
12227 if (!h->def_regular)
12228 {
12229 /* Mark the symbol as undefined, rather than as defined in
12230 the .plt section. Leave the value alone. */
12231 sym->st_shndx = SHN_UNDEF;
12232 /* If the symbol is weak, we do need to clear the value.
12233 Otherwise, the PLT entry would provide a definition for
12234 the symbol even if the symbol wasn't defined anywhere,
12235 and so the symbol would never be NULL. */
12236 if (!h->ref_regular_nonweak)
12237 sym->st_value = 0;
12238 }
12239 }
12240
12241 if (h->got.offset != (bfd_vma) -1
12242 && (elf32_arm_hash_entry (h)->tls_type & GOT_TLS_GD) == 0
12243 && (elf32_arm_hash_entry (h)->tls_type & GOT_TLS_IE) == 0)
12244 {
12245 asection * sgot;
12246 asection * srel;
12247 Elf_Internal_Rela rel;
12248 bfd_byte *loc;
12249 bfd_vma offset;
12250
12251 /* This symbol has an entry in the global offset table. Set it
12252 up. */
12253 sgot = bfd_get_section_by_name (dynobj, ".got");
12254 srel = bfd_get_section_by_name (dynobj, RELOC_SECTION (htab, ".got"));
12255 BFD_ASSERT (sgot != NULL && srel != NULL);
12256
12257 offset = (h->got.offset & ~(bfd_vma) 1);
12258 rel.r_addend = 0;
12259 rel.r_offset = (sgot->output_section->vma
12260 + sgot->output_offset
12261 + offset);
12262
12263 /* If this is a static link, or it is a -Bsymbolic link and the
12264 symbol is defined locally or was forced to be local because
12265 of a version file, we just want to emit a RELATIVE reloc.
12266 The entry in the global offset table will already have been
12267 initialized in the relocate_section function. */
12268 if (info->shared
12269 && SYMBOL_REFERENCES_LOCAL (info, h))
12270 {
12271 BFD_ASSERT ((h->got.offset & 1) != 0);
12272 rel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
12273 if (!htab->use_rel)
12274 {
12275 rel.r_addend = bfd_get_32 (output_bfd, sgot->contents + offset);
12276 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + offset);
12277 }
12278 }
12279 else
12280 {
12281 BFD_ASSERT ((h->got.offset & 1) == 0);
12282 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + offset);
12283 rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_GLOB_DAT);
12284 }
12285
12286 loc = srel->contents + srel->reloc_count++ * RELOC_SIZE (htab);
12287 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
12288 }
12289
12290 if (h->needs_copy)
12291 {
12292 asection * s;
12293 Elf_Internal_Rela rel;
12294 bfd_byte *loc;
12295
12296 /* This symbol needs a copy reloc. Set it up. */
12297 BFD_ASSERT (h->dynindx != -1
12298 && (h->root.type == bfd_link_hash_defined
12299 || h->root.type == bfd_link_hash_defweak));
12300
12301 s = bfd_get_section_by_name (h->root.u.def.section->owner,
12302 RELOC_SECTION (htab, ".bss"));
12303 BFD_ASSERT (s != NULL);
12304
12305 rel.r_addend = 0;
12306 rel.r_offset = (h->root.u.def.value
12307 + h->root.u.def.section->output_section->vma
12308 + h->root.u.def.section->output_offset);
12309 rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_COPY);
12310 loc = s->contents + s->reloc_count++ * RELOC_SIZE (htab);
12311 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
12312 }
12313
12314 /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute. On VxWorks,
12315 the _GLOBAL_OFFSET_TABLE_ symbol is not absolute: it is relative
12316 to the ".got" section. */
12317 if (strcmp (h->root.root.string, "_DYNAMIC") == 0
12318 || (!htab->vxworks_p && h == htab->root.hgot))
12319 sym->st_shndx = SHN_ABS;
12320
12321 return TRUE;
12322 }
12323
12324 /* Finish up the dynamic sections. */
12325
12326 static bfd_boolean
12327 elf32_arm_finish_dynamic_sections (bfd * output_bfd, struct bfd_link_info * info)
12328 {
12329 bfd * dynobj;
12330 asection * sgot;
12331 asection * sdyn;
12332 struct elf32_arm_link_hash_table *htab;
12333
12334 htab = elf32_arm_hash_table (info);
12335 if (htab == NULL)
12336 return FALSE;
12337
12338 dynobj = elf_hash_table (info)->dynobj;
12339
12340 sgot = bfd_get_section_by_name (dynobj, ".got.plt");
12341 BFD_ASSERT (htab->symbian_p || sgot != NULL);
12342 sdyn = bfd_get_section_by_name (dynobj, ".dynamic");
12343
12344 if (elf_hash_table (info)->dynamic_sections_created)
12345 {
12346 asection *splt;
12347 Elf32_External_Dyn *dyncon, *dynconend;
12348
12349 splt = bfd_get_section_by_name (dynobj, ".plt");
12350 BFD_ASSERT (splt != NULL && sdyn != NULL);
12351
12352 dyncon = (Elf32_External_Dyn *) sdyn->contents;
12353 dynconend = (Elf32_External_Dyn *) (sdyn->contents + sdyn->size);
12354
12355 for (; dyncon < dynconend; dyncon++)
12356 {
12357 Elf_Internal_Dyn dyn;
12358 const char * name;
12359 asection * s;
12360
12361 bfd_elf32_swap_dyn_in (dynobj, dyncon, &dyn);
12362
12363 switch (dyn.d_tag)
12364 {
12365 unsigned int type;
12366
12367 default:
12368 if (htab->vxworks_p
12369 && elf_vxworks_finish_dynamic_entry (output_bfd, &dyn))
12370 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12371 break;
12372
12373 case DT_HASH:
12374 name = ".hash";
12375 goto get_vma_if_bpabi;
12376 case DT_STRTAB:
12377 name = ".dynstr";
12378 goto get_vma_if_bpabi;
12379 case DT_SYMTAB:
12380 name = ".dynsym";
12381 goto get_vma_if_bpabi;
12382 case DT_VERSYM:
12383 name = ".gnu.version";
12384 goto get_vma_if_bpabi;
12385 case DT_VERDEF:
12386 name = ".gnu.version_d";
12387 goto get_vma_if_bpabi;
12388 case DT_VERNEED:
12389 name = ".gnu.version_r";
12390 goto get_vma_if_bpabi;
12391
12392 case DT_PLTGOT:
12393 name = ".got";
12394 goto get_vma;
12395 case DT_JMPREL:
12396 name = RELOC_SECTION (htab, ".plt");
12397 get_vma:
12398 s = bfd_get_section_by_name (output_bfd, name);
12399 BFD_ASSERT (s != NULL);
12400 if (!htab->symbian_p)
12401 dyn.d_un.d_ptr = s->vma;
12402 else
12403 /* In the BPABI, tags in the PT_DYNAMIC section point
12404 at the file offset, not the memory address, for the
12405 convenience of the post linker. */
12406 dyn.d_un.d_ptr = s->filepos;
12407 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12408 break;
12409
12410 get_vma_if_bpabi:
12411 if (htab->symbian_p)
12412 goto get_vma;
12413 break;
12414
12415 case DT_PLTRELSZ:
12416 s = bfd_get_section_by_name (output_bfd,
12417 RELOC_SECTION (htab, ".plt"));
12418 BFD_ASSERT (s != NULL);
12419 dyn.d_un.d_val = s->size;
12420 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12421 break;
12422
12423 case DT_RELSZ:
12424 case DT_RELASZ:
12425 if (!htab->symbian_p)
12426 {
12427 /* My reading of the SVR4 ABI indicates that the
12428 procedure linkage table relocs (DT_JMPREL) should be
12429 included in the overall relocs (DT_REL). This is
12430 what Solaris does. However, UnixWare can not handle
12431 that case. Therefore, we override the DT_RELSZ entry
12432 here to make it not include the JMPREL relocs. Since
12433 the linker script arranges for .rel(a).plt to follow all
12434 other relocation sections, we don't have to worry
12435 about changing the DT_REL entry. */
12436 s = bfd_get_section_by_name (output_bfd,
12437 RELOC_SECTION (htab, ".plt"));
12438 if (s != NULL)
12439 dyn.d_un.d_val -= s->size;
12440 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12441 break;
12442 }
12443 /* Fall through. */
12444
12445 case DT_REL:
12446 case DT_RELA:
12447 /* In the BPABI, the DT_REL tag must point at the file
12448 offset, not the VMA, of the first relocation
12449 section. So, we use code similar to that in
12450 elflink.c, but do not check for SHF_ALLOC on the
12451 relcoation section, since relocations sections are
12452 never allocated under the BPABI. The comments above
12453 about Unixware notwithstanding, we include all of the
12454 relocations here. */
12455 if (htab->symbian_p)
12456 {
12457 unsigned int i;
12458 type = ((dyn.d_tag == DT_REL || dyn.d_tag == DT_RELSZ)
12459 ? SHT_REL : SHT_RELA);
12460 dyn.d_un.d_val = 0;
12461 for (i = 1; i < elf_numsections (output_bfd); i++)
12462 {
12463 Elf_Internal_Shdr *hdr
12464 = elf_elfsections (output_bfd)[i];
12465 if (hdr->sh_type == type)
12466 {
12467 if (dyn.d_tag == DT_RELSZ
12468 || dyn.d_tag == DT_RELASZ)
12469 dyn.d_un.d_val += hdr->sh_size;
12470 else if ((ufile_ptr) hdr->sh_offset
12471 <= dyn.d_un.d_val - 1)
12472 dyn.d_un.d_val = hdr->sh_offset;
12473 }
12474 }
12475 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12476 }
12477 break;
12478
12479 /* Set the bottom bit of DT_INIT/FINI if the
12480 corresponding function is Thumb. */
12481 case DT_INIT:
12482 name = info->init_function;
12483 goto get_sym;
12484 case DT_FINI:
12485 name = info->fini_function;
12486 get_sym:
12487 /* If it wasn't set by elf_bfd_final_link
12488 then there is nothing to adjust. */
12489 if (dyn.d_un.d_val != 0)
12490 {
12491 struct elf_link_hash_entry * eh;
12492
12493 eh = elf_link_hash_lookup (elf_hash_table (info), name,
12494 FALSE, FALSE, TRUE);
12495 if (eh != NULL
12496 && ELF_ST_TYPE (eh->type) == STT_ARM_TFUNC)
12497 {
12498 dyn.d_un.d_val |= 1;
12499 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12500 }
12501 }
12502 break;
12503 }
12504 }
12505
12506 /* Fill in the first entry in the procedure linkage table. */
12507 if (splt->size > 0 && htab->plt_header_size)
12508 {
12509 const bfd_vma *plt0_entry;
12510 bfd_vma got_address, plt_address, got_displacement;
12511
12512 /* Calculate the addresses of the GOT and PLT. */
12513 got_address = sgot->output_section->vma + sgot->output_offset;
12514 plt_address = splt->output_section->vma + splt->output_offset;
12515
12516 if (htab->vxworks_p)
12517 {
12518 /* The VxWorks GOT is relocated by the dynamic linker.
12519 Therefore, we must emit relocations rather than simply
12520 computing the values now. */
12521 Elf_Internal_Rela rel;
12522
12523 plt0_entry = elf32_arm_vxworks_exec_plt0_entry;
12524 put_arm_insn (htab, output_bfd, plt0_entry[0],
12525 splt->contents + 0);
12526 put_arm_insn (htab, output_bfd, plt0_entry[1],
12527 splt->contents + 4);
12528 put_arm_insn (htab, output_bfd, plt0_entry[2],
12529 splt->contents + 8);
12530 bfd_put_32 (output_bfd, got_address, splt->contents + 12);
12531
12532 /* Generate a relocation for _GLOBAL_OFFSET_TABLE_. */
12533 rel.r_offset = plt_address + 12;
12534 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
12535 rel.r_addend = 0;
12536 SWAP_RELOC_OUT (htab) (output_bfd, &rel,
12537 htab->srelplt2->contents);
12538 }
12539 else
12540 {
12541 got_displacement = got_address - (plt_address + 16);
12542
12543 plt0_entry = elf32_arm_plt0_entry;
12544 put_arm_insn (htab, output_bfd, plt0_entry[0],
12545 splt->contents + 0);
12546 put_arm_insn (htab, output_bfd, plt0_entry[1],
12547 splt->contents + 4);
12548 put_arm_insn (htab, output_bfd, plt0_entry[2],
12549 splt->contents + 8);
12550 put_arm_insn (htab, output_bfd, plt0_entry[3],
12551 splt->contents + 12);
12552
12553 #ifdef FOUR_WORD_PLT
12554 /* The displacement value goes in the otherwise-unused
12555 last word of the second entry. */
12556 bfd_put_32 (output_bfd, got_displacement, splt->contents + 28);
12557 #else
12558 bfd_put_32 (output_bfd, got_displacement, splt->contents + 16);
12559 #endif
12560 }
12561 }
12562
12563 /* UnixWare sets the entsize of .plt to 4, although that doesn't
12564 really seem like the right value. */
12565 if (splt->output_section->owner == output_bfd)
12566 elf_section_data (splt->output_section)->this_hdr.sh_entsize = 4;
12567
12568 if (htab->vxworks_p && !info->shared && htab->splt->size > 0)
12569 {
12570 /* Correct the .rel(a).plt.unloaded relocations. They will have
12571 incorrect symbol indexes. */
12572 int num_plts;
12573 unsigned char *p;
12574
12575 num_plts = ((htab->splt->size - htab->plt_header_size)
12576 / htab->plt_entry_size);
12577 p = htab->srelplt2->contents + RELOC_SIZE (htab);
12578
12579 for (; num_plts; num_plts--)
12580 {
12581 Elf_Internal_Rela rel;
12582
12583 SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
12584 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
12585 SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
12586 p += RELOC_SIZE (htab);
12587
12588 SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
12589 rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
12590 SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
12591 p += RELOC_SIZE (htab);
12592 }
12593 }
12594 }
12595
12596 /* Fill in the first three entries in the global offset table. */
12597 if (sgot)
12598 {
12599 if (sgot->size > 0)
12600 {
12601 if (sdyn == NULL)
12602 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents);
12603 else
12604 bfd_put_32 (output_bfd,
12605 sdyn->output_section->vma + sdyn->output_offset,
12606 sgot->contents);
12607 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 4);
12608 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 8);
12609 }
12610
12611 elf_section_data (sgot->output_section)->this_hdr.sh_entsize = 4;
12612 }
12613
12614 return TRUE;
12615 }
12616
12617 static void
12618 elf32_arm_post_process_headers (bfd * abfd, struct bfd_link_info * link_info ATTRIBUTE_UNUSED)
12619 {
12620 Elf_Internal_Ehdr * i_ehdrp; /* ELF file header, internal form. */
12621 struct elf32_arm_link_hash_table *globals;
12622
12623 i_ehdrp = elf_elfheader (abfd);
12624
12625 if (EF_ARM_EABI_VERSION (i_ehdrp->e_flags) == EF_ARM_EABI_UNKNOWN)
12626 i_ehdrp->e_ident[EI_OSABI] = ELFOSABI_ARM;
12627 else
12628 i_ehdrp->e_ident[EI_OSABI] = 0;
12629 i_ehdrp->e_ident[EI_ABIVERSION] = ARM_ELF_ABI_VERSION;
12630
12631 if (link_info)
12632 {
12633 globals = elf32_arm_hash_table (link_info);
12634 if (globals != NULL && globals->byteswap_code)
12635 i_ehdrp->e_flags |= EF_ARM_BE8;
12636 }
12637 }
12638
12639 static enum elf_reloc_type_class
12640 elf32_arm_reloc_type_class (const Elf_Internal_Rela *rela)
12641 {
12642 switch ((int) ELF32_R_TYPE (rela->r_info))
12643 {
12644 case R_ARM_RELATIVE:
12645 return reloc_class_relative;
12646 case R_ARM_JUMP_SLOT:
12647 return reloc_class_plt;
12648 case R_ARM_COPY:
12649 return reloc_class_copy;
12650 default:
12651 return reloc_class_normal;
12652 }
12653 }
12654
12655 /* Set the right machine number for an Arm ELF file. */
12656
12657 static bfd_boolean
12658 elf32_arm_section_flags (flagword *flags, const Elf_Internal_Shdr *hdr)
12659 {
12660 if (hdr->sh_type == SHT_NOTE)
12661 *flags |= SEC_LINK_ONCE | SEC_LINK_DUPLICATES_SAME_CONTENTS;
12662
12663 return TRUE;
12664 }
12665
12666 static void
12667 elf32_arm_final_write_processing (bfd *abfd, bfd_boolean linker ATTRIBUTE_UNUSED)
12668 {
12669 bfd_arm_update_notes (abfd, ARM_NOTE_SECTION);
12670 }
12671
12672 /* Return TRUE if this is an unwinding table entry. */
12673
12674 static bfd_boolean
12675 is_arm_elf_unwind_section_name (bfd * abfd ATTRIBUTE_UNUSED, const char * name)
12676 {
12677 return (CONST_STRNEQ (name, ELF_STRING_ARM_unwind)
12678 || CONST_STRNEQ (name, ELF_STRING_ARM_unwind_once));
12679 }
12680
12681
12682 /* Set the type and flags for an ARM section. We do this by
12683 the section name, which is a hack, but ought to work. */
12684
12685 static bfd_boolean
12686 elf32_arm_fake_sections (bfd * abfd, Elf_Internal_Shdr * hdr, asection * sec)
12687 {
12688 const char * name;
12689
12690 name = bfd_get_section_name (abfd, sec);
12691
12692 if (is_arm_elf_unwind_section_name (abfd, name))
12693 {
12694 hdr->sh_type = SHT_ARM_EXIDX;
12695 hdr->sh_flags |= SHF_LINK_ORDER;
12696 }
12697 return TRUE;
12698 }
12699
12700 /* Handle an ARM specific section when reading an object file. This is
12701 called when bfd_section_from_shdr finds a section with an unknown
12702 type. */
12703
12704 static bfd_boolean
12705 elf32_arm_section_from_shdr (bfd *abfd,
12706 Elf_Internal_Shdr * hdr,
12707 const char *name,
12708 int shindex)
12709 {
12710 /* There ought to be a place to keep ELF backend specific flags, but
12711 at the moment there isn't one. We just keep track of the
12712 sections by their name, instead. Fortunately, the ABI gives
12713 names for all the ARM specific sections, so we will probably get
12714 away with this. */
12715 switch (hdr->sh_type)
12716 {
12717 case SHT_ARM_EXIDX:
12718 case SHT_ARM_PREEMPTMAP:
12719 case SHT_ARM_ATTRIBUTES:
12720 break;
12721
12722 default:
12723 return FALSE;
12724 }
12725
12726 if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
12727 return FALSE;
12728
12729 return TRUE;
12730 }
12731
12732 /* A structure used to record a list of sections, independently
12733 of the next and prev fields in the asection structure. */
12734 typedef struct section_list
12735 {
12736 asection * sec;
12737 struct section_list * next;
12738 struct section_list * prev;
12739 }
12740 section_list;
12741
12742 /* Unfortunately we need to keep a list of sections for which
12743 an _arm_elf_section_data structure has been allocated. This
12744 is because it is possible for functions like elf32_arm_write_section
12745 to be called on a section which has had an elf_data_structure
12746 allocated for it (and so the used_by_bfd field is valid) but
12747 for which the ARM extended version of this structure - the
12748 _arm_elf_section_data structure - has not been allocated. */
12749 static section_list * sections_with_arm_elf_section_data = NULL;
12750
12751 static void
12752 record_section_with_arm_elf_section_data (asection * sec)
12753 {
12754 struct section_list * entry;
12755
12756 entry = (struct section_list *) bfd_malloc (sizeof (* entry));
12757 if (entry == NULL)
12758 return;
12759 entry->sec = sec;
12760 entry->next = sections_with_arm_elf_section_data;
12761 entry->prev = NULL;
12762 if (entry->next != NULL)
12763 entry->next->prev = entry;
12764 sections_with_arm_elf_section_data = entry;
12765 }
12766
12767 static struct section_list *
12768 find_arm_elf_section_entry (asection * sec)
12769 {
12770 struct section_list * entry;
12771 static struct section_list * last_entry = NULL;
12772
12773 /* This is a short cut for the typical case where the sections are added
12774 to the sections_with_arm_elf_section_data list in forward order and
12775 then looked up here in backwards order. This makes a real difference
12776 to the ld-srec/sec64k.exp linker test. */
12777 entry = sections_with_arm_elf_section_data;
12778 if (last_entry != NULL)
12779 {
12780 if (last_entry->sec == sec)
12781 entry = last_entry;
12782 else if (last_entry->next != NULL
12783 && last_entry->next->sec == sec)
12784 entry = last_entry->next;
12785 }
12786
12787 for (; entry; entry = entry->next)
12788 if (entry->sec == sec)
12789 break;
12790
12791 if (entry)
12792 /* Record the entry prior to this one - it is the entry we are most
12793 likely to want to locate next time. Also this way if we have been
12794 called from unrecord_section_with_arm_elf_section_data() we will not
12795 be caching a pointer that is about to be freed. */
12796 last_entry = entry->prev;
12797
12798 return entry;
12799 }
12800
12801 static _arm_elf_section_data *
12802 get_arm_elf_section_data (asection * sec)
12803 {
12804 struct section_list * entry;
12805
12806 entry = find_arm_elf_section_entry (sec);
12807
12808 if (entry)
12809 return elf32_arm_section_data (entry->sec);
12810 else
12811 return NULL;
12812 }
12813
12814 static void
12815 unrecord_section_with_arm_elf_section_data (asection * sec)
12816 {
12817 struct section_list * entry;
12818
12819 entry = find_arm_elf_section_entry (sec);
12820
12821 if (entry)
12822 {
12823 if (entry->prev != NULL)
12824 entry->prev->next = entry->next;
12825 if (entry->next != NULL)
12826 entry->next->prev = entry->prev;
12827 if (entry == sections_with_arm_elf_section_data)
12828 sections_with_arm_elf_section_data = entry->next;
12829 free (entry);
12830 }
12831 }
12832
12833
12834 typedef struct
12835 {
12836 void *finfo;
12837 struct bfd_link_info *info;
12838 asection *sec;
12839 int sec_shndx;
12840 int (*func) (void *, const char *, Elf_Internal_Sym *,
12841 asection *, struct elf_link_hash_entry *);
12842 } output_arch_syminfo;
12843
12844 enum map_symbol_type
12845 {
12846 ARM_MAP_ARM,
12847 ARM_MAP_THUMB,
12848 ARM_MAP_DATA
12849 };
12850
12851
12852 /* Output a single mapping symbol. */
12853
12854 static bfd_boolean
12855 elf32_arm_output_map_sym (output_arch_syminfo *osi,
12856 enum map_symbol_type type,
12857 bfd_vma offset)
12858 {
12859 static const char *names[3] = {"$a", "$t", "$d"};
12860 Elf_Internal_Sym sym;
12861
12862 sym.st_value = osi->sec->output_section->vma
12863 + osi->sec->output_offset
12864 + offset;
12865 sym.st_size = 0;
12866 sym.st_other = 0;
12867 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
12868 sym.st_shndx = osi->sec_shndx;
12869 return osi->func (osi->finfo, names[type], &sym, osi->sec, NULL) == 1;
12870 }
12871
12872
12873 /* Output mapping symbols for PLT entries associated with H. */
12874
12875 static bfd_boolean
12876 elf32_arm_output_plt_map (struct elf_link_hash_entry *h, void *inf)
12877 {
12878 output_arch_syminfo *osi = (output_arch_syminfo *) inf;
12879 struct elf32_arm_link_hash_table *htab;
12880 struct elf32_arm_link_hash_entry *eh;
12881 bfd_vma addr;
12882
12883 if (h->root.type == bfd_link_hash_indirect)
12884 return TRUE;
12885
12886 if (h->root.type == bfd_link_hash_warning)
12887 /* When warning symbols are created, they **replace** the "real"
12888 entry in the hash table, thus we never get to see the real
12889 symbol in a hash traversal. So look at it now. */
12890 h = (struct elf_link_hash_entry *) h->root.u.i.link;
12891
12892 if (h->plt.offset == (bfd_vma) -1)
12893 return TRUE;
12894
12895 htab = elf32_arm_hash_table (osi->info);
12896 if (htab == NULL)
12897 return FALSE;
12898
12899 eh = (struct elf32_arm_link_hash_entry *) h;
12900 addr = h->plt.offset;
12901 if (htab->symbian_p)
12902 {
12903 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
12904 return FALSE;
12905 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 4))
12906 return FALSE;
12907 }
12908 else if (htab->vxworks_p)
12909 {
12910 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
12911 return FALSE;
12912 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 8))
12913 return FALSE;
12914 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr + 12))
12915 return FALSE;
12916 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 20))
12917 return FALSE;
12918 }
12919 else
12920 {
12921 bfd_signed_vma thumb_refs;
12922
12923 thumb_refs = eh->plt_thumb_refcount;
12924 if (!htab->use_blx)
12925 thumb_refs += eh->plt_maybe_thumb_refcount;
12926
12927 if (thumb_refs > 0)
12928 {
12929 if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr - 4))
12930 return FALSE;
12931 }
12932 #ifdef FOUR_WORD_PLT
12933 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
12934 return FALSE;
12935 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 12))
12936 return FALSE;
12937 #else
12938 /* A three-word PLT with no Thumb thunk contains only Arm code,
12939 so only need to output a mapping symbol for the first PLT entry and
12940 entries with thumb thunks. */
12941 if (thumb_refs > 0 || addr == 20)
12942 {
12943 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
12944 return FALSE;
12945 }
12946 #endif
12947 }
12948
12949 return TRUE;
12950 }
12951
12952 /* Output a single local symbol for a generated stub. */
12953
12954 static bfd_boolean
12955 elf32_arm_output_stub_sym (output_arch_syminfo *osi, const char *name,
12956 bfd_vma offset, bfd_vma size)
12957 {
12958 Elf_Internal_Sym sym;
12959
12960 sym.st_value = osi->sec->output_section->vma
12961 + osi->sec->output_offset
12962 + offset;
12963 sym.st_size = size;
12964 sym.st_other = 0;
12965 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
12966 sym.st_shndx = osi->sec_shndx;
12967 return osi->func (osi->finfo, name, &sym, osi->sec, NULL) == 1;
12968 }
12969
12970 static bfd_boolean
12971 arm_map_one_stub (struct bfd_hash_entry * gen_entry,
12972 void * in_arg)
12973 {
12974 struct elf32_arm_stub_hash_entry *stub_entry;
12975 struct bfd_link_info *info;
12976 asection *stub_sec;
12977 bfd_vma addr;
12978 char *stub_name;
12979 output_arch_syminfo *osi;
12980 const insn_sequence *template_sequence;
12981 enum stub_insn_type prev_type;
12982 int size;
12983 int i;
12984 enum map_symbol_type sym_type;
12985
12986 /* Massage our args to the form they really have. */
12987 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
12988 osi = (output_arch_syminfo *) in_arg;
12989
12990 info = osi->info;
12991
12992 stub_sec = stub_entry->stub_sec;
12993
12994 /* Ensure this stub is attached to the current section being
12995 processed. */
12996 if (stub_sec != osi->sec)
12997 return TRUE;
12998
12999 addr = (bfd_vma) stub_entry->stub_offset;
13000 stub_name = stub_entry->output_name;
13001
13002 template_sequence = stub_entry->stub_template;
13003 switch (template_sequence[0].type)
13004 {
13005 case ARM_TYPE:
13006 if (!elf32_arm_output_stub_sym (osi, stub_name, addr, stub_entry->stub_size))
13007 return FALSE;
13008 break;
13009 case THUMB16_TYPE:
13010 case THUMB32_TYPE:
13011 if (!elf32_arm_output_stub_sym (osi, stub_name, addr | 1,
13012 stub_entry->stub_size))
13013 return FALSE;
13014 break;
13015 default:
13016 BFD_FAIL ();
13017 return 0;
13018 }
13019
13020 prev_type = DATA_TYPE;
13021 size = 0;
13022 for (i = 0; i < stub_entry->stub_template_size; i++)
13023 {
13024 switch (template_sequence[i].type)
13025 {
13026 case ARM_TYPE:
13027 sym_type = ARM_MAP_ARM;
13028 break;
13029
13030 case THUMB16_TYPE:
13031 case THUMB32_TYPE:
13032 sym_type = ARM_MAP_THUMB;
13033 break;
13034
13035 case DATA_TYPE:
13036 sym_type = ARM_MAP_DATA;
13037 break;
13038
13039 default:
13040 BFD_FAIL ();
13041 return FALSE;
13042 }
13043
13044 if (template_sequence[i].type != prev_type)
13045 {
13046 prev_type = template_sequence[i].type;
13047 if (!elf32_arm_output_map_sym (osi, sym_type, addr + size))
13048 return FALSE;
13049 }
13050
13051 switch (template_sequence[i].type)
13052 {
13053 case ARM_TYPE:
13054 case THUMB32_TYPE:
13055 size += 4;
13056 break;
13057
13058 case THUMB16_TYPE:
13059 size += 2;
13060 break;
13061
13062 case DATA_TYPE:
13063 size += 4;
13064 break;
13065
13066 default:
13067 BFD_FAIL ();
13068 return FALSE;
13069 }
13070 }
13071
13072 return TRUE;
13073 }
13074
13075 /* Output mapping symbols for linker generated sections,
13076 and for those data-only sections that do not have a
13077 $d. */
13078
13079 static bfd_boolean
13080 elf32_arm_output_arch_local_syms (bfd *output_bfd,
13081 struct bfd_link_info *info,
13082 void *finfo,
13083 int (*func) (void *, const char *,
13084 Elf_Internal_Sym *,
13085 asection *,
13086 struct elf_link_hash_entry *))
13087 {
13088 output_arch_syminfo osi;
13089 struct elf32_arm_link_hash_table *htab;
13090 bfd_vma offset;
13091 bfd_size_type size;
13092 bfd *input_bfd;
13093
13094 htab = elf32_arm_hash_table (info);
13095 if (htab == NULL)
13096 return FALSE;
13097
13098 check_use_blx (htab);
13099
13100 osi.finfo = finfo;
13101 osi.info = info;
13102 osi.func = func;
13103
13104 /* Add a $d mapping symbol to data-only sections that
13105 don't have any mapping symbol. This may result in (harmless) redundant
13106 mapping symbols. */
13107 for (input_bfd = info->input_bfds;
13108 input_bfd != NULL;
13109 input_bfd = input_bfd->link_next)
13110 {
13111 if ((input_bfd->flags & (BFD_LINKER_CREATED | HAS_SYMS)) == HAS_SYMS)
13112 for (osi.sec = input_bfd->sections;
13113 osi.sec != NULL;
13114 osi.sec = osi.sec->next)
13115 {
13116 if (osi.sec->output_section != NULL
13117 && (osi.sec->flags & (SEC_HAS_CONTENTS | SEC_LINKER_CREATED))
13118 == SEC_HAS_CONTENTS
13119 && get_arm_elf_section_data (osi.sec) != NULL
13120 && get_arm_elf_section_data (osi.sec)->mapcount == 0)
13121 {
13122 osi.sec_shndx = _bfd_elf_section_from_bfd_section
13123 (output_bfd, osi.sec->output_section);
13124 if (osi.sec_shndx != (int)SHN_BAD)
13125 elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 0);
13126 }
13127 }
13128 }
13129
13130 /* ARM->Thumb glue. */
13131 if (htab->arm_glue_size > 0)
13132 {
13133 osi.sec = bfd_get_section_by_name (htab->bfd_of_glue_owner,
13134 ARM2THUMB_GLUE_SECTION_NAME);
13135
13136 osi.sec_shndx = _bfd_elf_section_from_bfd_section
13137 (output_bfd, osi.sec->output_section);
13138 if (info->shared || htab->root.is_relocatable_executable
13139 || htab->pic_veneer)
13140 size = ARM2THUMB_PIC_GLUE_SIZE;
13141 else if (htab->use_blx)
13142 size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
13143 else
13144 size = ARM2THUMB_STATIC_GLUE_SIZE;
13145
13146 for (offset = 0; offset < htab->arm_glue_size; offset += size)
13147 {
13148 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset);
13149 elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, offset + size - 4);
13150 }
13151 }
13152
13153 /* Thumb->ARM glue. */
13154 if (htab->thumb_glue_size > 0)
13155 {
13156 osi.sec = bfd_get_section_by_name (htab->bfd_of_glue_owner,
13157 THUMB2ARM_GLUE_SECTION_NAME);
13158
13159 osi.sec_shndx = _bfd_elf_section_from_bfd_section
13160 (output_bfd, osi.sec->output_section);
13161 size = THUMB2ARM_GLUE_SIZE;
13162
13163 for (offset = 0; offset < htab->thumb_glue_size; offset += size)
13164 {
13165 elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, offset);
13166 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset + 4);
13167 }
13168 }
13169
13170 /* ARMv4 BX veneers. */
13171 if (htab->bx_glue_size > 0)
13172 {
13173 osi.sec = bfd_get_section_by_name (htab->bfd_of_glue_owner,
13174 ARM_BX_GLUE_SECTION_NAME);
13175
13176 osi.sec_shndx = _bfd_elf_section_from_bfd_section
13177 (output_bfd, osi.sec->output_section);
13178
13179 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0);
13180 }
13181
13182 /* Long calls stubs. */
13183 if (htab->stub_bfd && htab->stub_bfd->sections)
13184 {
13185 asection* stub_sec;
13186
13187 for (stub_sec = htab->stub_bfd->sections;
13188 stub_sec != NULL;
13189 stub_sec = stub_sec->next)
13190 {
13191 /* Ignore non-stub sections. */
13192 if (!strstr (stub_sec->name, STUB_SUFFIX))
13193 continue;
13194
13195 osi.sec = stub_sec;
13196
13197 osi.sec_shndx = _bfd_elf_section_from_bfd_section
13198 (output_bfd, osi.sec->output_section);
13199
13200 bfd_hash_traverse (&htab->stub_hash_table, arm_map_one_stub, &osi);
13201 }
13202 }
13203
13204 /* Finally, output mapping symbols for the PLT. */
13205 if (!htab->splt || htab->splt->size == 0)
13206 return TRUE;
13207
13208 osi.sec_shndx = _bfd_elf_section_from_bfd_section (output_bfd,
13209 htab->splt->output_section);
13210 osi.sec = htab->splt;
13211 /* Output mapping symbols for the plt header. SymbianOS does not have a
13212 plt header. */
13213 if (htab->vxworks_p)
13214 {
13215 /* VxWorks shared libraries have no PLT header. */
13216 if (!info->shared)
13217 {
13218 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
13219 return FALSE;
13220 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 12))
13221 return FALSE;
13222 }
13223 }
13224 else if (!htab->symbian_p)
13225 {
13226 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
13227 return FALSE;
13228 #ifndef FOUR_WORD_PLT
13229 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 16))
13230 return FALSE;
13231 #endif
13232 }
13233
13234 elf_link_hash_traverse (&htab->root, elf32_arm_output_plt_map, (void *) &osi);
13235 return TRUE;
13236 }
13237
13238 /* Allocate target specific section data. */
13239
13240 static bfd_boolean
13241 elf32_arm_new_section_hook (bfd *abfd, asection *sec)
13242 {
13243 if (!sec->used_by_bfd)
13244 {
13245 _arm_elf_section_data *sdata;
13246 bfd_size_type amt = sizeof (*sdata);
13247
13248 sdata = (_arm_elf_section_data *) bfd_zalloc (abfd, amt);
13249 if (sdata == NULL)
13250 return FALSE;
13251 sec->used_by_bfd = sdata;
13252 }
13253
13254 record_section_with_arm_elf_section_data (sec);
13255
13256 return _bfd_elf_new_section_hook (abfd, sec);
13257 }
13258
13259
13260 /* Used to order a list of mapping symbols by address. */
13261
13262 static int
13263 elf32_arm_compare_mapping (const void * a, const void * b)
13264 {
13265 const elf32_arm_section_map *amap = (const elf32_arm_section_map *) a;
13266 const elf32_arm_section_map *bmap = (const elf32_arm_section_map *) b;
13267
13268 if (amap->vma > bmap->vma)
13269 return 1;
13270 else if (amap->vma < bmap->vma)
13271 return -1;
13272 else if (amap->type > bmap->type)
13273 /* Ensure results do not depend on the host qsort for objects with
13274 multiple mapping symbols at the same address by sorting on type
13275 after vma. */
13276 return 1;
13277 else if (amap->type < bmap->type)
13278 return -1;
13279 else
13280 return 0;
13281 }
13282
13283 /* Add OFFSET to lower 31 bits of ADDR, leaving other bits unmodified. */
13284
13285 static unsigned long
13286 offset_prel31 (unsigned long addr, bfd_vma offset)
13287 {
13288 return (addr & ~0x7ffffffful) | ((addr + offset) & 0x7ffffffful);
13289 }
13290
13291 /* Copy an .ARM.exidx table entry, adding OFFSET to (applied) PREL31
13292 relocations. */
13293
13294 static void
13295 copy_exidx_entry (bfd *output_bfd, bfd_byte *to, bfd_byte *from, bfd_vma offset)
13296 {
13297 unsigned long first_word = bfd_get_32 (output_bfd, from);
13298 unsigned long second_word = bfd_get_32 (output_bfd, from + 4);
13299
13300 /* High bit of first word is supposed to be zero. */
13301 if ((first_word & 0x80000000ul) == 0)
13302 first_word = offset_prel31 (first_word, offset);
13303
13304 /* If the high bit of the first word is clear, and the bit pattern is not 0x1
13305 (EXIDX_CANTUNWIND), this is an offset to an .ARM.extab entry. */
13306 if ((second_word != 0x1) && ((second_word & 0x80000000ul) == 0))
13307 second_word = offset_prel31 (second_word, offset);
13308
13309 bfd_put_32 (output_bfd, first_word, to);
13310 bfd_put_32 (output_bfd, second_word, to + 4);
13311 }
13312
13313 /* Data for make_branch_to_a8_stub(). */
13314
13315 struct a8_branch_to_stub_data {
13316 asection *writing_section;
13317 bfd_byte *contents;
13318 };
13319
13320
13321 /* Helper to insert branches to Cortex-A8 erratum stubs in the right
13322 places for a particular section. */
13323
13324 static bfd_boolean
13325 make_branch_to_a8_stub (struct bfd_hash_entry *gen_entry,
13326 void *in_arg)
13327 {
13328 struct elf32_arm_stub_hash_entry *stub_entry;
13329 struct a8_branch_to_stub_data *data;
13330 bfd_byte *contents;
13331 unsigned long branch_insn;
13332 bfd_vma veneered_insn_loc, veneer_entry_loc;
13333 bfd_signed_vma branch_offset;
13334 bfd *abfd;
13335 unsigned int target;
13336
13337 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
13338 data = (struct a8_branch_to_stub_data *) in_arg;
13339
13340 if (stub_entry->target_section != data->writing_section
13341 || stub_entry->stub_type < arm_stub_a8_veneer_b_cond)
13342 return TRUE;
13343
13344 contents = data->contents;
13345
13346 veneered_insn_loc = stub_entry->target_section->output_section->vma
13347 + stub_entry->target_section->output_offset
13348 + stub_entry->target_value;
13349
13350 veneer_entry_loc = stub_entry->stub_sec->output_section->vma
13351 + stub_entry->stub_sec->output_offset
13352 + stub_entry->stub_offset;
13353
13354 if (stub_entry->stub_type == arm_stub_a8_veneer_blx)
13355 veneered_insn_loc &= ~3u;
13356
13357 branch_offset = veneer_entry_loc - veneered_insn_loc - 4;
13358
13359 abfd = stub_entry->target_section->owner;
13360 target = stub_entry->target_value;
13361
13362 /* We attempt to avoid this condition by setting stubs_always_after_branch
13363 in elf32_arm_size_stubs if we've enabled the Cortex-A8 erratum workaround.
13364 This check is just to be on the safe side... */
13365 if ((veneered_insn_loc & ~0xfff) == (veneer_entry_loc & ~0xfff))
13366 {
13367 (*_bfd_error_handler) (_("%B: error: Cortex-A8 erratum stub is "
13368 "allocated in unsafe location"), abfd);
13369 return FALSE;
13370 }
13371
13372 switch (stub_entry->stub_type)
13373 {
13374 case arm_stub_a8_veneer_b:
13375 case arm_stub_a8_veneer_b_cond:
13376 branch_insn = 0xf0009000;
13377 goto jump24;
13378
13379 case arm_stub_a8_veneer_blx:
13380 branch_insn = 0xf000e800;
13381 goto jump24;
13382
13383 case arm_stub_a8_veneer_bl:
13384 {
13385 unsigned int i1, j1, i2, j2, s;
13386
13387 branch_insn = 0xf000d000;
13388
13389 jump24:
13390 if (branch_offset < -16777216 || branch_offset > 16777214)
13391 {
13392 /* There's not much we can do apart from complain if this
13393 happens. */
13394 (*_bfd_error_handler) (_("%B: error: Cortex-A8 erratum stub out "
13395 "of range (input file too large)"), abfd);
13396 return FALSE;
13397 }
13398
13399 /* i1 = not(j1 eor s), so:
13400 not i1 = j1 eor s
13401 j1 = (not i1) eor s. */
13402
13403 branch_insn |= (branch_offset >> 1) & 0x7ff;
13404 branch_insn |= ((branch_offset >> 12) & 0x3ff) << 16;
13405 i2 = (branch_offset >> 22) & 1;
13406 i1 = (branch_offset >> 23) & 1;
13407 s = (branch_offset >> 24) & 1;
13408 j1 = (!i1) ^ s;
13409 j2 = (!i2) ^ s;
13410 branch_insn |= j2 << 11;
13411 branch_insn |= j1 << 13;
13412 branch_insn |= s << 26;
13413 }
13414 break;
13415
13416 default:
13417 BFD_FAIL ();
13418 return FALSE;
13419 }
13420
13421 bfd_put_16 (abfd, (branch_insn >> 16) & 0xffff, &contents[target]);
13422 bfd_put_16 (abfd, branch_insn & 0xffff, &contents[target + 2]);
13423
13424 return TRUE;
13425 }
13426
13427 /* Do code byteswapping. Return FALSE afterwards so that the section is
13428 written out as normal. */
13429
13430 static bfd_boolean
13431 elf32_arm_write_section (bfd *output_bfd,
13432 struct bfd_link_info *link_info,
13433 asection *sec,
13434 bfd_byte *contents)
13435 {
13436 unsigned int mapcount, errcount;
13437 _arm_elf_section_data *arm_data;
13438 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
13439 elf32_arm_section_map *map;
13440 elf32_vfp11_erratum_list *errnode;
13441 bfd_vma ptr;
13442 bfd_vma end;
13443 bfd_vma offset = sec->output_section->vma + sec->output_offset;
13444 bfd_byte tmp;
13445 unsigned int i;
13446
13447 if (globals == NULL)
13448 return FALSE;
13449
13450 /* If this section has not been allocated an _arm_elf_section_data
13451 structure then we cannot record anything. */
13452 arm_data = get_arm_elf_section_data (sec);
13453 if (arm_data == NULL)
13454 return FALSE;
13455
13456 mapcount = arm_data->mapcount;
13457 map = arm_data->map;
13458 errcount = arm_data->erratumcount;
13459
13460 if (errcount != 0)
13461 {
13462 unsigned int endianflip = bfd_big_endian (output_bfd) ? 3 : 0;
13463
13464 for (errnode = arm_data->erratumlist; errnode != 0;
13465 errnode = errnode->next)
13466 {
13467 bfd_vma target = errnode->vma - offset;
13468
13469 switch (errnode->type)
13470 {
13471 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
13472 {
13473 bfd_vma branch_to_veneer;
13474 /* Original condition code of instruction, plus bit mask for
13475 ARM B instruction. */
13476 unsigned int insn = (errnode->u.b.vfp_insn & 0xf0000000)
13477 | 0x0a000000;
13478
13479 /* The instruction is before the label. */
13480 target -= 4;
13481
13482 /* Above offset included in -4 below. */
13483 branch_to_veneer = errnode->u.b.veneer->vma
13484 - errnode->vma - 4;
13485
13486 if ((signed) branch_to_veneer < -(1 << 25)
13487 || (signed) branch_to_veneer >= (1 << 25))
13488 (*_bfd_error_handler) (_("%B: error: VFP11 veneer out of "
13489 "range"), output_bfd);
13490
13491 insn |= (branch_to_veneer >> 2) & 0xffffff;
13492 contents[endianflip ^ target] = insn & 0xff;
13493 contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
13494 contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
13495 contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;
13496 }
13497 break;
13498
13499 case VFP11_ERRATUM_ARM_VENEER:
13500 {
13501 bfd_vma branch_from_veneer;
13502 unsigned int insn;
13503
13504 /* Take size of veneer into account. */
13505 branch_from_veneer = errnode->u.v.branch->vma
13506 - errnode->vma - 12;
13507
13508 if ((signed) branch_from_veneer < -(1 << 25)
13509 || (signed) branch_from_veneer >= (1 << 25))
13510 (*_bfd_error_handler) (_("%B: error: VFP11 veneer out of "
13511 "range"), output_bfd);
13512
13513 /* Original instruction. */
13514 insn = errnode->u.v.branch->u.b.vfp_insn;
13515 contents[endianflip ^ target] = insn & 0xff;
13516 contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
13517 contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
13518 contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;
13519
13520 /* Branch back to insn after original insn. */
13521 insn = 0xea000000 | ((branch_from_veneer >> 2) & 0xffffff);
13522 contents[endianflip ^ (target + 4)] = insn & 0xff;
13523 contents[endianflip ^ (target + 5)] = (insn >> 8) & 0xff;
13524 contents[endianflip ^ (target + 6)] = (insn >> 16) & 0xff;
13525 contents[endianflip ^ (target + 7)] = (insn >> 24) & 0xff;
13526 }
13527 break;
13528
13529 default:
13530 abort ();
13531 }
13532 }
13533 }
13534
13535 if (arm_data->elf.this_hdr.sh_type == SHT_ARM_EXIDX)
13536 {
13537 arm_unwind_table_edit *edit_node
13538 = arm_data->u.exidx.unwind_edit_list;
13539 /* Now, sec->size is the size of the section we will write. The original
13540 size (before we merged duplicate entries and inserted EXIDX_CANTUNWIND
13541 markers) was sec->rawsize. (This isn't the case if we perform no
13542 edits, then rawsize will be zero and we should use size). */
13543 bfd_byte *edited_contents = (bfd_byte *) bfd_malloc (sec->size);
13544 unsigned int input_size = sec->rawsize ? sec->rawsize : sec->size;
13545 unsigned int in_index, out_index;
13546 bfd_vma add_to_offsets = 0;
13547
13548 for (in_index = 0, out_index = 0; in_index * 8 < input_size || edit_node;)
13549 {
13550 if (edit_node)
13551 {
13552 unsigned int edit_index = edit_node->index;
13553
13554 if (in_index < edit_index && in_index * 8 < input_size)
13555 {
13556 copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
13557 contents + in_index * 8, add_to_offsets);
13558 out_index++;
13559 in_index++;
13560 }
13561 else if (in_index == edit_index
13562 || (in_index * 8 >= input_size
13563 && edit_index == UINT_MAX))
13564 {
13565 switch (edit_node->type)
13566 {
13567 case DELETE_EXIDX_ENTRY:
13568 in_index++;
13569 add_to_offsets += 8;
13570 break;
13571
13572 case INSERT_EXIDX_CANTUNWIND_AT_END:
13573 {
13574 asection *text_sec = edit_node->linked_section;
13575 bfd_vma text_offset = text_sec->output_section->vma
13576 + text_sec->output_offset
13577 + text_sec->size;
13578 bfd_vma exidx_offset = offset + out_index * 8;
13579 unsigned long prel31_offset;
13580
13581 /* Note: this is meant to be equivalent to an
13582 R_ARM_PREL31 relocation. These synthetic
13583 EXIDX_CANTUNWIND markers are not relocated by the
13584 usual BFD method. */
13585 prel31_offset = (text_offset - exidx_offset)
13586 & 0x7ffffffful;
13587
13588 /* First address we can't unwind. */
13589 bfd_put_32 (output_bfd, prel31_offset,
13590 &edited_contents[out_index * 8]);
13591
13592 /* Code for EXIDX_CANTUNWIND. */
13593 bfd_put_32 (output_bfd, 0x1,
13594 &edited_contents[out_index * 8 + 4]);
13595
13596 out_index++;
13597 add_to_offsets -= 8;
13598 }
13599 break;
13600 }
13601
13602 edit_node = edit_node->next;
13603 }
13604 }
13605 else
13606 {
13607 /* No more edits, copy remaining entries verbatim. */
13608 copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
13609 contents + in_index * 8, add_to_offsets);
13610 out_index++;
13611 in_index++;
13612 }
13613 }
13614
13615 if (!(sec->flags & SEC_EXCLUDE) && !(sec->flags & SEC_NEVER_LOAD))
13616 bfd_set_section_contents (output_bfd, sec->output_section,
13617 edited_contents,
13618 (file_ptr) sec->output_offset, sec->size);
13619
13620 return TRUE;
13621 }
13622
13623 /* Fix code to point to Cortex-A8 erratum stubs. */
13624 if (globals->fix_cortex_a8)
13625 {
13626 struct a8_branch_to_stub_data data;
13627
13628 data.writing_section = sec;
13629 data.contents = contents;
13630
13631 bfd_hash_traverse (&globals->stub_hash_table, make_branch_to_a8_stub,
13632 &data);
13633 }
13634
13635 if (mapcount == 0)
13636 return FALSE;
13637
13638 if (globals->byteswap_code)
13639 {
13640 qsort (map, mapcount, sizeof (* map), elf32_arm_compare_mapping);
13641
13642 ptr = map[0].vma;
13643 for (i = 0; i < mapcount; i++)
13644 {
13645 if (i == mapcount - 1)
13646 end = sec->size;
13647 else
13648 end = map[i + 1].vma;
13649
13650 switch (map[i].type)
13651 {
13652 case 'a':
13653 /* Byte swap code words. */
13654 while (ptr + 3 < end)
13655 {
13656 tmp = contents[ptr];
13657 contents[ptr] = contents[ptr + 3];
13658 contents[ptr + 3] = tmp;
13659 tmp = contents[ptr + 1];
13660 contents[ptr + 1] = contents[ptr + 2];
13661 contents[ptr + 2] = tmp;
13662 ptr += 4;
13663 }
13664 break;
13665
13666 case 't':
13667 /* Byte swap code halfwords. */
13668 while (ptr + 1 < end)
13669 {
13670 tmp = contents[ptr];
13671 contents[ptr] = contents[ptr + 1];
13672 contents[ptr + 1] = tmp;
13673 ptr += 2;
13674 }
13675 break;
13676
13677 case 'd':
13678 /* Leave data alone. */
13679 break;
13680 }
13681 ptr = end;
13682 }
13683 }
13684
13685 free (map);
13686 arm_data->mapcount = 0;
13687 arm_data->mapsize = 0;
13688 arm_data->map = NULL;
13689 unrecord_section_with_arm_elf_section_data (sec);
13690
13691 return FALSE;
13692 }
13693
13694 static void
13695 unrecord_section_via_map_over_sections (bfd * abfd ATTRIBUTE_UNUSED,
13696 asection * sec,
13697 void * ignore ATTRIBUTE_UNUSED)
13698 {
13699 unrecord_section_with_arm_elf_section_data (sec);
13700 }
13701
13702 static bfd_boolean
13703 elf32_arm_close_and_cleanup (bfd * abfd)
13704 {
13705 if (abfd->sections)
13706 bfd_map_over_sections (abfd,
13707 unrecord_section_via_map_over_sections,
13708 NULL);
13709
13710 return _bfd_elf_close_and_cleanup (abfd);
13711 }
13712
13713 static bfd_boolean
13714 elf32_arm_bfd_free_cached_info (bfd * abfd)
13715 {
13716 if (abfd->sections)
13717 bfd_map_over_sections (abfd,
13718 unrecord_section_via_map_over_sections,
13719 NULL);
13720
13721 return _bfd_free_cached_info (abfd);
13722 }
13723
13724 /* Display STT_ARM_TFUNC symbols as functions. */
13725
13726 static void
13727 elf32_arm_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED,
13728 asymbol *asym)
13729 {
13730 elf_symbol_type *elfsym = (elf_symbol_type *) asym;
13731
13732 if (ELF_ST_TYPE (elfsym->internal_elf_sym.st_info) == STT_ARM_TFUNC)
13733 elfsym->symbol.flags |= BSF_FUNCTION;
13734 }
13735
13736
13737 /* Mangle thumb function symbols as we read them in. */
13738
13739 static bfd_boolean
13740 elf32_arm_swap_symbol_in (bfd * abfd,
13741 const void *psrc,
13742 const void *pshn,
13743 Elf_Internal_Sym *dst)
13744 {
13745 if (!bfd_elf32_swap_symbol_in (abfd, psrc, pshn, dst))
13746 return FALSE;
13747
13748 /* New EABI objects mark thumb function symbols by setting the low bit of
13749 the address. Turn these into STT_ARM_TFUNC. */
13750 if ((ELF_ST_TYPE (dst->st_info) == STT_FUNC)
13751 && (dst->st_value & 1))
13752 {
13753 dst->st_info = ELF_ST_INFO (ELF_ST_BIND (dst->st_info), STT_ARM_TFUNC);
13754 dst->st_value &= ~(bfd_vma) 1;
13755 }
13756 return TRUE;
13757 }
13758
13759
13760 /* Mangle thumb function symbols as we write them out. */
13761
13762 static void
13763 elf32_arm_swap_symbol_out (bfd *abfd,
13764 const Elf_Internal_Sym *src,
13765 void *cdst,
13766 void *shndx)
13767 {
13768 Elf_Internal_Sym newsym;
13769
13770 /* We convert STT_ARM_TFUNC symbols into STT_FUNC with the low bit
13771 of the address set, as per the new EABI. We do this unconditionally
13772 because objcopy does not set the elf header flags until after
13773 it writes out the symbol table. */
13774 if (ELF_ST_TYPE (src->st_info) == STT_ARM_TFUNC)
13775 {
13776 newsym = *src;
13777 newsym.st_info = ELF_ST_INFO (ELF_ST_BIND (src->st_info), STT_FUNC);
13778 if (newsym.st_shndx != SHN_UNDEF)
13779 {
13780 /* Do this only for defined symbols. At link type, the static
13781 linker will simulate the work of dynamic linker of resolving
13782 symbols and will carry over the thumbness of found symbols to
13783 the output symbol table. It's not clear how it happens, but
13784 the thumbness of undefined symbols can well be different at
13785 runtime, and writing '1' for them will be confusing for users
13786 and possibly for dynamic linker itself.
13787 */
13788 newsym.st_value |= 1;
13789 }
13790
13791 src = &newsym;
13792 }
13793 bfd_elf32_swap_symbol_out (abfd, src, cdst, shndx);
13794 }
13795
13796 /* Add the PT_ARM_EXIDX program header. */
13797
13798 static bfd_boolean
13799 elf32_arm_modify_segment_map (bfd *abfd,
13800 struct bfd_link_info *info ATTRIBUTE_UNUSED)
13801 {
13802 struct elf_segment_map *m;
13803 asection *sec;
13804
13805 sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
13806 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
13807 {
13808 /* If there is already a PT_ARM_EXIDX header, then we do not
13809 want to add another one. This situation arises when running
13810 "strip"; the input binary already has the header. */
13811 m = elf_tdata (abfd)->segment_map;
13812 while (m && m->p_type != PT_ARM_EXIDX)
13813 m = m->next;
13814 if (!m)
13815 {
13816 m = (struct elf_segment_map *)
13817 bfd_zalloc (abfd, sizeof (struct elf_segment_map));
13818 if (m == NULL)
13819 return FALSE;
13820 m->p_type = PT_ARM_EXIDX;
13821 m->count = 1;
13822 m->sections[0] = sec;
13823
13824 m->next = elf_tdata (abfd)->segment_map;
13825 elf_tdata (abfd)->segment_map = m;
13826 }
13827 }
13828
13829 return TRUE;
13830 }
13831
13832 /* We may add a PT_ARM_EXIDX program header. */
13833
13834 static int
13835 elf32_arm_additional_program_headers (bfd *abfd,
13836 struct bfd_link_info *info ATTRIBUTE_UNUSED)
13837 {
13838 asection *sec;
13839
13840 sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
13841 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
13842 return 1;
13843 else
13844 return 0;
13845 }
13846
13847 /* We have two function types: STT_FUNC and STT_ARM_TFUNC. */
13848
13849 static bfd_boolean
13850 elf32_arm_is_function_type (unsigned int type)
13851 {
13852 return (type == STT_FUNC) || (type == STT_ARM_TFUNC);
13853 }
13854
13855 /* We use this to override swap_symbol_in and swap_symbol_out. */
13856 const struct elf_size_info elf32_arm_size_info =
13857 {
13858 sizeof (Elf32_External_Ehdr),
13859 sizeof (Elf32_External_Phdr),
13860 sizeof (Elf32_External_Shdr),
13861 sizeof (Elf32_External_Rel),
13862 sizeof (Elf32_External_Rela),
13863 sizeof (Elf32_External_Sym),
13864 sizeof (Elf32_External_Dyn),
13865 sizeof (Elf_External_Note),
13866 4,
13867 1,
13868 32, 2,
13869 ELFCLASS32, EV_CURRENT,
13870 bfd_elf32_write_out_phdrs,
13871 bfd_elf32_write_shdrs_and_ehdr,
13872 bfd_elf32_checksum_contents,
13873 bfd_elf32_write_relocs,
13874 elf32_arm_swap_symbol_in,
13875 elf32_arm_swap_symbol_out,
13876 bfd_elf32_slurp_reloc_table,
13877 bfd_elf32_slurp_symbol_table,
13878 bfd_elf32_swap_dyn_in,
13879 bfd_elf32_swap_dyn_out,
13880 bfd_elf32_swap_reloc_in,
13881 bfd_elf32_swap_reloc_out,
13882 bfd_elf32_swap_reloca_in,
13883 bfd_elf32_swap_reloca_out
13884 };
13885
13886 #define ELF_ARCH bfd_arch_arm
13887 #define ELF_MACHINE_CODE EM_ARM
13888 #ifdef __QNXTARGET__
13889 #define ELF_MAXPAGESIZE 0x1000
13890 #else
13891 #define ELF_MAXPAGESIZE 0x8000
13892 #endif
13893 #define ELF_MINPAGESIZE 0x1000
13894 #define ELF_COMMONPAGESIZE 0x1000
13895
13896 #define bfd_elf32_mkobject elf32_arm_mkobject
13897
13898 #define bfd_elf32_bfd_copy_private_bfd_data elf32_arm_copy_private_bfd_data
13899 #define bfd_elf32_bfd_merge_private_bfd_data elf32_arm_merge_private_bfd_data
13900 #define bfd_elf32_bfd_set_private_flags elf32_arm_set_private_flags
13901 #define bfd_elf32_bfd_print_private_bfd_data elf32_arm_print_private_bfd_data
13902 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_link_hash_table_create
13903 #define bfd_elf32_bfd_link_hash_table_free elf32_arm_hash_table_free
13904 #define bfd_elf32_bfd_reloc_type_lookup elf32_arm_reloc_type_lookup
13905 #define bfd_elf32_bfd_reloc_name_lookup elf32_arm_reloc_name_lookup
13906 #define bfd_elf32_find_nearest_line elf32_arm_find_nearest_line
13907 #define bfd_elf32_find_inliner_info elf32_arm_find_inliner_info
13908 #define bfd_elf32_new_section_hook elf32_arm_new_section_hook
13909 #define bfd_elf32_bfd_is_target_special_symbol elf32_arm_is_target_special_symbol
13910 #define bfd_elf32_close_and_cleanup elf32_arm_close_and_cleanup
13911 #define bfd_elf32_bfd_free_cached_info elf32_arm_bfd_free_cached_info
13912 #define bfd_elf32_bfd_final_link elf32_arm_final_link
13913
13914 #define elf_backend_get_symbol_type elf32_arm_get_symbol_type
13915 #define elf_backend_gc_mark_hook elf32_arm_gc_mark_hook
13916 #define elf_backend_gc_mark_extra_sections elf32_arm_gc_mark_extra_sections
13917 #define elf_backend_gc_sweep_hook elf32_arm_gc_sweep_hook
13918 #define elf_backend_check_relocs elf32_arm_check_relocs
13919 #define elf_backend_relocate_section elf32_arm_relocate_section
13920 #define elf_backend_write_section elf32_arm_write_section
13921 #define elf_backend_adjust_dynamic_symbol elf32_arm_adjust_dynamic_symbol
13922 #define elf_backend_create_dynamic_sections elf32_arm_create_dynamic_sections
13923 #define elf_backend_finish_dynamic_symbol elf32_arm_finish_dynamic_symbol
13924 #define elf_backend_finish_dynamic_sections elf32_arm_finish_dynamic_sections
13925 #define elf_backend_size_dynamic_sections elf32_arm_size_dynamic_sections
13926 #define elf_backend_init_index_section _bfd_elf_init_2_index_sections
13927 #define elf_backend_post_process_headers elf32_arm_post_process_headers
13928 #define elf_backend_reloc_type_class elf32_arm_reloc_type_class
13929 #define elf_backend_object_p elf32_arm_object_p
13930 #define elf_backend_section_flags elf32_arm_section_flags
13931 #define elf_backend_fake_sections elf32_arm_fake_sections
13932 #define elf_backend_section_from_shdr elf32_arm_section_from_shdr
13933 #define elf_backend_final_write_processing elf32_arm_final_write_processing
13934 #define elf_backend_copy_indirect_symbol elf32_arm_copy_indirect_symbol
13935 #define elf_backend_symbol_processing elf32_arm_symbol_processing
13936 #define elf_backend_size_info elf32_arm_size_info
13937 #define elf_backend_modify_segment_map elf32_arm_modify_segment_map
13938 #define elf_backend_additional_program_headers elf32_arm_additional_program_headers
13939 #define elf_backend_output_arch_local_syms elf32_arm_output_arch_local_syms
13940 #define elf_backend_begin_write_processing elf32_arm_begin_write_processing
13941 #define elf_backend_is_function_type elf32_arm_is_function_type
13942
13943 #define elf_backend_can_refcount 1
13944 #define elf_backend_can_gc_sections 1
13945 #define elf_backend_plt_readonly 1
13946 #define elf_backend_want_got_plt 1
13947 #define elf_backend_want_plt_sym 0
13948 #define elf_backend_may_use_rel_p 1
13949 #define elf_backend_may_use_rela_p 0
13950 #define elf_backend_default_use_rela_p 0
13951
13952 #define elf_backend_got_header_size 12
13953
13954 #undef elf_backend_obj_attrs_vendor
13955 #define elf_backend_obj_attrs_vendor "aeabi"
13956 #undef elf_backend_obj_attrs_section
13957 #define elf_backend_obj_attrs_section ".ARM.attributes"
13958 #undef elf_backend_obj_attrs_arg_type
13959 #define elf_backend_obj_attrs_arg_type elf32_arm_obj_attrs_arg_type
13960 #undef elf_backend_obj_attrs_section_type
13961 #define elf_backend_obj_attrs_section_type SHT_ARM_ATTRIBUTES
13962 #define elf_backend_obj_attrs_order elf32_arm_obj_attrs_order
13963
13964 #include "elf32-target.h"
13965
13966 /* VxWorks Targets. */
13967
13968 #undef TARGET_LITTLE_SYM
13969 #define TARGET_LITTLE_SYM bfd_elf32_littlearm_vxworks_vec
13970 #undef TARGET_LITTLE_NAME
13971 #define TARGET_LITTLE_NAME "elf32-littlearm-vxworks"
13972 #undef TARGET_BIG_SYM
13973 #define TARGET_BIG_SYM bfd_elf32_bigarm_vxworks_vec
13974 #undef TARGET_BIG_NAME
13975 #define TARGET_BIG_NAME "elf32-bigarm-vxworks"
13976
13977 /* Like elf32_arm_link_hash_table_create -- but overrides
13978 appropriately for VxWorks. */
13979
13980 static struct bfd_link_hash_table *
13981 elf32_arm_vxworks_link_hash_table_create (bfd *abfd)
13982 {
13983 struct bfd_link_hash_table *ret;
13984
13985 ret = elf32_arm_link_hash_table_create (abfd);
13986 if (ret)
13987 {
13988 struct elf32_arm_link_hash_table *htab
13989 = (struct elf32_arm_link_hash_table *) ret;
13990 htab->use_rel = 0;
13991 htab->vxworks_p = 1;
13992 }
13993 return ret;
13994 }
13995
13996 static void
13997 elf32_arm_vxworks_final_write_processing (bfd *abfd, bfd_boolean linker)
13998 {
13999 elf32_arm_final_write_processing (abfd, linker);
14000 elf_vxworks_final_write_processing (abfd, linker);
14001 }
14002
14003 #undef elf32_bed
14004 #define elf32_bed elf32_arm_vxworks_bed
14005
14006 #undef bfd_elf32_bfd_link_hash_table_create
14007 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_vxworks_link_hash_table_create
14008 #undef elf_backend_add_symbol_hook
14009 #define elf_backend_add_symbol_hook elf_vxworks_add_symbol_hook
14010 #undef elf_backend_final_write_processing
14011 #define elf_backend_final_write_processing elf32_arm_vxworks_final_write_processing
14012 #undef elf_backend_emit_relocs
14013 #define elf_backend_emit_relocs elf_vxworks_emit_relocs
14014
14015 #undef elf_backend_may_use_rel_p
14016 #define elf_backend_may_use_rel_p 0
14017 #undef elf_backend_may_use_rela_p
14018 #define elf_backend_may_use_rela_p 1
14019 #undef elf_backend_default_use_rela_p
14020 #define elf_backend_default_use_rela_p 1
14021 #undef elf_backend_want_plt_sym
14022 #define elf_backend_want_plt_sym 1
14023 #undef ELF_MAXPAGESIZE
14024 #define ELF_MAXPAGESIZE 0x1000
14025
14026 #include "elf32-target.h"
14027
14028
14029 /* Merge backend specific data from an object file to the output
14030 object file when linking. */
14031
14032 static bfd_boolean
14033 elf32_arm_merge_private_bfd_data (bfd * ibfd, bfd * obfd)
14034 {
14035 flagword out_flags;
14036 flagword in_flags;
14037 bfd_boolean flags_compatible = TRUE;
14038 asection *sec;
14039
14040 /* Check if we have the same endianess. */
14041 if (! _bfd_generic_verify_endian_match (ibfd, obfd))
14042 return FALSE;
14043
14044 if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
14045 return TRUE;
14046
14047 if (!elf32_arm_merge_eabi_attributes (ibfd, obfd))
14048 return FALSE;
14049
14050 /* The input BFD must have had its flags initialised. */
14051 /* The following seems bogus to me -- The flags are initialized in
14052 the assembler but I don't think an elf_flags_init field is
14053 written into the object. */
14054 /* BFD_ASSERT (elf_flags_init (ibfd)); */
14055
14056 in_flags = elf_elfheader (ibfd)->e_flags;
14057 out_flags = elf_elfheader (obfd)->e_flags;
14058
14059 /* In theory there is no reason why we couldn't handle this. However
14060 in practice it isn't even close to working and there is no real
14061 reason to want it. */
14062 if (EF_ARM_EABI_VERSION (in_flags) >= EF_ARM_EABI_VER4
14063 && !(ibfd->flags & DYNAMIC)
14064 && (in_flags & EF_ARM_BE8))
14065 {
14066 _bfd_error_handler (_("error: %B is already in final BE8 format"),
14067 ibfd);
14068 return FALSE;
14069 }
14070
14071 if (!elf_flags_init (obfd))
14072 {
14073 /* If the input is the default architecture and had the default
14074 flags then do not bother setting the flags for the output
14075 architecture, instead allow future merges to do this. If no
14076 future merges ever set these flags then they will retain their
14077 uninitialised values, which surprise surprise, correspond
14078 to the default values. */
14079 if (bfd_get_arch_info (ibfd)->the_default
14080 && elf_elfheader (ibfd)->e_flags == 0)
14081 return TRUE;
14082
14083 elf_flags_init (obfd) = TRUE;
14084 elf_elfheader (obfd)->e_flags = in_flags;
14085
14086 if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
14087 && bfd_get_arch_info (obfd)->the_default)
14088 return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd), bfd_get_mach (ibfd));
14089
14090 return TRUE;
14091 }
14092
14093 /* Determine what should happen if the input ARM architecture
14094 does not match the output ARM architecture. */
14095 if (! bfd_arm_merge_machines (ibfd, obfd))
14096 return FALSE;
14097
14098 /* Identical flags must be compatible. */
14099 if (in_flags == out_flags)
14100 return TRUE;
14101
14102 /* Check to see if the input BFD actually contains any sections. If
14103 not, its flags may not have been initialised either, but it
14104 cannot actually cause any incompatiblity. Do not short-circuit
14105 dynamic objects; their section list may be emptied by
14106 elf_link_add_object_symbols.
14107
14108 Also check to see if there are no code sections in the input.
14109 In this case there is no need to check for code specific flags.
14110 XXX - do we need to worry about floating-point format compatability
14111 in data sections ? */
14112 if (!(ibfd->flags & DYNAMIC))
14113 {
14114 bfd_boolean null_input_bfd = TRUE;
14115 bfd_boolean only_data_sections = TRUE;
14116
14117 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
14118 {
14119 /* Ignore synthetic glue sections. */
14120 if (strcmp (sec->name, ".glue_7")
14121 && strcmp (sec->name, ".glue_7t"))
14122 {
14123 if ((bfd_get_section_flags (ibfd, sec)
14124 & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
14125 == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
14126 only_data_sections = FALSE;
14127
14128 null_input_bfd = FALSE;
14129 break;
14130 }
14131 }
14132
14133 if (null_input_bfd || only_data_sections)
14134 return TRUE;
14135 }
14136
14137 /* Complain about various flag mismatches. */
14138 if (!elf32_arm_versions_compatible (EF_ARM_EABI_VERSION (in_flags),
14139 EF_ARM_EABI_VERSION (out_flags)))
14140 {
14141 _bfd_error_handler
14142 (_("error: Source object %B has EABI version %d, but target %B has EABI version %d"),
14143 ibfd, obfd,
14144 (in_flags & EF_ARM_EABIMASK) >> 24,
14145 (out_flags & EF_ARM_EABIMASK) >> 24);
14146 return FALSE;
14147 }
14148
14149 /* Not sure what needs to be checked for EABI versions >= 1. */
14150 /* VxWorks libraries do not use these flags. */
14151 if (get_elf_backend_data (obfd) != &elf32_arm_vxworks_bed
14152 && get_elf_backend_data (ibfd) != &elf32_arm_vxworks_bed
14153 && EF_ARM_EABI_VERSION (in_flags) == EF_ARM_EABI_UNKNOWN)
14154 {
14155 if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
14156 {
14157 _bfd_error_handler
14158 (_("error: %B is compiled for APCS-%d, whereas target %B uses APCS-%d"),
14159 ibfd, obfd,
14160 in_flags & EF_ARM_APCS_26 ? 26 : 32,
14161 out_flags & EF_ARM_APCS_26 ? 26 : 32);
14162 flags_compatible = FALSE;
14163 }
14164
14165 if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
14166 {
14167 if (in_flags & EF_ARM_APCS_FLOAT)
14168 _bfd_error_handler
14169 (_("error: %B passes floats in float registers, whereas %B passes them in integer registers"),
14170 ibfd, obfd);
14171 else
14172 _bfd_error_handler
14173 (_("error: %B passes floats in integer registers, whereas %B passes them in float registers"),
14174 ibfd, obfd);
14175
14176 flags_compatible = FALSE;
14177 }
14178
14179 if ((in_flags & EF_ARM_VFP_FLOAT) != (out_flags & EF_ARM_VFP_FLOAT))
14180 {
14181 if (in_flags & EF_ARM_VFP_FLOAT)
14182 _bfd_error_handler
14183 (_("error: %B uses VFP instructions, whereas %B does not"),
14184 ibfd, obfd);
14185 else
14186 _bfd_error_handler
14187 (_("error: %B uses FPA instructions, whereas %B does not"),
14188 ibfd, obfd);
14189
14190 flags_compatible = FALSE;
14191 }
14192
14193 if ((in_flags & EF_ARM_MAVERICK_FLOAT) != (out_flags & EF_ARM_MAVERICK_FLOAT))
14194 {
14195 if (in_flags & EF_ARM_MAVERICK_FLOAT)
14196 _bfd_error_handler
14197 (_("error: %B uses Maverick instructions, whereas %B does not"),
14198 ibfd, obfd);
14199 else
14200 _bfd_error_handler
14201 (_("error: %B does not use Maverick instructions, whereas %B does"),
14202 ibfd, obfd);
14203
14204 flags_compatible = FALSE;
14205 }
14206
14207 #ifdef EF_ARM_SOFT_FLOAT
14208 if ((in_flags & EF_ARM_SOFT_FLOAT) != (out_flags & EF_ARM_SOFT_FLOAT))
14209 {
14210 /* We can allow interworking between code that is VFP format
14211 layout, and uses either soft float or integer regs for
14212 passing floating point arguments and results. We already
14213 know that the APCS_FLOAT flags match; similarly for VFP
14214 flags. */
14215 if ((in_flags & EF_ARM_APCS_FLOAT) != 0
14216 || (in_flags & EF_ARM_VFP_FLOAT) == 0)
14217 {
14218 if (in_flags & EF_ARM_SOFT_FLOAT)
14219 _bfd_error_handler
14220 (_("error: %B uses software FP, whereas %B uses hardware FP"),
14221 ibfd, obfd);
14222 else
14223 _bfd_error_handler
14224 (_("error: %B uses hardware FP, whereas %B uses software FP"),
14225 ibfd, obfd);
14226
14227 flags_compatible = FALSE;
14228 }
14229 }
14230 #endif
14231
14232 /* Interworking mismatch is only a warning. */
14233 if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
14234 {
14235 if (in_flags & EF_ARM_INTERWORK)
14236 {
14237 _bfd_error_handler
14238 (_("Warning: %B supports interworking, whereas %B does not"),
14239 ibfd, obfd);
14240 }
14241 else
14242 {
14243 _bfd_error_handler
14244 (_("Warning: %B does not support interworking, whereas %B does"),
14245 ibfd, obfd);
14246 }
14247 }
14248 }
14249
14250 return flags_compatible;
14251 }
14252
14253
14254 /* Symbian OS Targets. */
14255
14256 #undef TARGET_LITTLE_SYM
14257 #define TARGET_LITTLE_SYM bfd_elf32_littlearm_symbian_vec
14258 #undef TARGET_LITTLE_NAME
14259 #define TARGET_LITTLE_NAME "elf32-littlearm-symbian"
14260 #undef TARGET_BIG_SYM
14261 #define TARGET_BIG_SYM bfd_elf32_bigarm_symbian_vec
14262 #undef TARGET_BIG_NAME
14263 #define TARGET_BIG_NAME "elf32-bigarm-symbian"
14264
14265 /* Like elf32_arm_link_hash_table_create -- but overrides
14266 appropriately for Symbian OS. */
14267
14268 static struct bfd_link_hash_table *
14269 elf32_arm_symbian_link_hash_table_create (bfd *abfd)
14270 {
14271 struct bfd_link_hash_table *ret;
14272
14273 ret = elf32_arm_link_hash_table_create (abfd);
14274 if (ret)
14275 {
14276 struct elf32_arm_link_hash_table *htab
14277 = (struct elf32_arm_link_hash_table *)ret;
14278 /* There is no PLT header for Symbian OS. */
14279 htab->plt_header_size = 0;
14280 /* The PLT entries are each one instruction and one word. */
14281 htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry);
14282 htab->symbian_p = 1;
14283 /* Symbian uses armv5t or above, so use_blx is always true. */
14284 htab->use_blx = 1;
14285 htab->root.is_relocatable_executable = 1;
14286 }
14287 return ret;
14288 }
14289
14290 static const struct bfd_elf_special_section
14291 elf32_arm_symbian_special_sections[] =
14292 {
14293 /* In a BPABI executable, the dynamic linking sections do not go in
14294 the loadable read-only segment. The post-linker may wish to
14295 refer to these sections, but they are not part of the final
14296 program image. */
14297 { STRING_COMMA_LEN (".dynamic"), 0, SHT_DYNAMIC, 0 },
14298 { STRING_COMMA_LEN (".dynstr"), 0, SHT_STRTAB, 0 },
14299 { STRING_COMMA_LEN (".dynsym"), 0, SHT_DYNSYM, 0 },
14300 { STRING_COMMA_LEN (".got"), 0, SHT_PROGBITS, 0 },
14301 { STRING_COMMA_LEN (".hash"), 0, SHT_HASH, 0 },
14302 /* These sections do not need to be writable as the SymbianOS
14303 postlinker will arrange things so that no dynamic relocation is
14304 required. */
14305 { STRING_COMMA_LEN (".init_array"), 0, SHT_INIT_ARRAY, SHF_ALLOC },
14306 { STRING_COMMA_LEN (".fini_array"), 0, SHT_FINI_ARRAY, SHF_ALLOC },
14307 { STRING_COMMA_LEN (".preinit_array"), 0, SHT_PREINIT_ARRAY, SHF_ALLOC },
14308 { NULL, 0, 0, 0, 0 }
14309 };
14310
14311 static void
14312 elf32_arm_symbian_begin_write_processing (bfd *abfd,
14313 struct bfd_link_info *link_info)
14314 {
14315 /* BPABI objects are never loaded directly by an OS kernel; they are
14316 processed by a postlinker first, into an OS-specific format. If
14317 the D_PAGED bit is set on the file, BFD will align segments on
14318 page boundaries, so that an OS can directly map the file. With
14319 BPABI objects, that just results in wasted space. In addition,
14320 because we clear the D_PAGED bit, map_sections_to_segments will
14321 recognize that the program headers should not be mapped into any
14322 loadable segment. */
14323 abfd->flags &= ~D_PAGED;
14324 elf32_arm_begin_write_processing (abfd, link_info);
14325 }
14326
14327 static bfd_boolean
14328 elf32_arm_symbian_modify_segment_map (bfd *abfd,
14329 struct bfd_link_info *info)
14330 {
14331 struct elf_segment_map *m;
14332 asection *dynsec;
14333
14334 /* BPABI shared libraries and executables should have a PT_DYNAMIC
14335 segment. However, because the .dynamic section is not marked
14336 with SEC_LOAD, the generic ELF code will not create such a
14337 segment. */
14338 dynsec = bfd_get_section_by_name (abfd, ".dynamic");
14339 if (dynsec)
14340 {
14341 for (m = elf_tdata (abfd)->segment_map; m != NULL; m = m->next)
14342 if (m->p_type == PT_DYNAMIC)
14343 break;
14344
14345 if (m == NULL)
14346 {
14347 m = _bfd_elf_make_dynamic_segment (abfd, dynsec);
14348 m->next = elf_tdata (abfd)->segment_map;
14349 elf_tdata (abfd)->segment_map = m;
14350 }
14351 }
14352
14353 /* Also call the generic arm routine. */
14354 return elf32_arm_modify_segment_map (abfd, info);
14355 }
14356
14357 /* Return address for Ith PLT stub in section PLT, for relocation REL
14358 or (bfd_vma) -1 if it should not be included. */
14359
14360 static bfd_vma
14361 elf32_arm_symbian_plt_sym_val (bfd_vma i, const asection *plt,
14362 const arelent *rel ATTRIBUTE_UNUSED)
14363 {
14364 return plt->vma + 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry) * i;
14365 }
14366
14367
14368 #undef elf32_bed
14369 #define elf32_bed elf32_arm_symbian_bed
14370
14371 /* The dynamic sections are not allocated on SymbianOS; the postlinker
14372 will process them and then discard them. */
14373 #undef ELF_DYNAMIC_SEC_FLAGS
14374 #define ELF_DYNAMIC_SEC_FLAGS \
14375 (SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_LINKER_CREATED)
14376
14377 #undef elf_backend_add_symbol_hook
14378 #undef elf_backend_emit_relocs
14379
14380 #undef bfd_elf32_bfd_link_hash_table_create
14381 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_symbian_link_hash_table_create
14382 #undef elf_backend_special_sections
14383 #define elf_backend_special_sections elf32_arm_symbian_special_sections
14384 #undef elf_backend_begin_write_processing
14385 #define elf_backend_begin_write_processing elf32_arm_symbian_begin_write_processing
14386 #undef elf_backend_final_write_processing
14387 #define elf_backend_final_write_processing elf32_arm_final_write_processing
14388
14389 #undef elf_backend_modify_segment_map
14390 #define elf_backend_modify_segment_map elf32_arm_symbian_modify_segment_map
14391
14392 /* There is no .got section for BPABI objects, and hence no header. */
14393 #undef elf_backend_got_header_size
14394 #define elf_backend_got_header_size 0
14395
14396 /* Similarly, there is no .got.plt section. */
14397 #undef elf_backend_want_got_plt
14398 #define elf_backend_want_got_plt 0
14399
14400 #undef elf_backend_plt_sym_val
14401 #define elf_backend_plt_sym_val elf32_arm_symbian_plt_sym_val
14402
14403 #undef elf_backend_may_use_rel_p
14404 #define elf_backend_may_use_rel_p 1
14405 #undef elf_backend_may_use_rela_p
14406 #define elf_backend_may_use_rela_p 0
14407 #undef elf_backend_default_use_rela_p
14408 #define elf_backend_default_use_rela_p 0
14409 #undef elf_backend_want_plt_sym
14410 #define elf_backend_want_plt_sym 0
14411 #undef ELF_MAXPAGESIZE
14412 #define ELF_MAXPAGESIZE 0x8000
14413
14414 #include "elf32-target.h"