1 /* SPU specific support for 32-bit ELF
3 Copyright 2006, 2007 Free Software Foundation, Inc.
5 This file is part of BFD, the Binary File Descriptor library.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License along
18 with this program; if not, write to the Free Software Foundation, Inc.,
19 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */
27 #include "elf32-spu.h"
29 /* We use RELA style relocs. Don't define USE_REL. */
31 static bfd_reloc_status_type
spu_elf_rel9 (bfd
*, arelent
*, asymbol
*,
35 /* Values of type 'enum elf_spu_reloc_type' are used to index this
36 array, so it must be declared in the order of that type. */
38 static reloc_howto_type elf_howto_table
[] = {
39 HOWTO (R_SPU_NONE
, 0, 0, 0, FALSE
, 0, complain_overflow_dont
,
40 bfd_elf_generic_reloc
, "SPU_NONE",
41 FALSE
, 0, 0x00000000, FALSE
),
42 HOWTO (R_SPU_ADDR10
, 4, 2, 10, FALSE
, 14, complain_overflow_bitfield
,
43 bfd_elf_generic_reloc
, "SPU_ADDR10",
44 FALSE
, 0, 0x00ffc000, FALSE
),
45 HOWTO (R_SPU_ADDR16
, 2, 2, 16, FALSE
, 7, complain_overflow_bitfield
,
46 bfd_elf_generic_reloc
, "SPU_ADDR16",
47 FALSE
, 0, 0x007fff80, FALSE
),
48 HOWTO (R_SPU_ADDR16_HI
, 16, 2, 16, FALSE
, 7, complain_overflow_bitfield
,
49 bfd_elf_generic_reloc
, "SPU_ADDR16_HI",
50 FALSE
, 0, 0x007fff80, FALSE
),
51 HOWTO (R_SPU_ADDR16_LO
, 0, 2, 16, FALSE
, 7, complain_overflow_dont
,
52 bfd_elf_generic_reloc
, "SPU_ADDR16_LO",
53 FALSE
, 0, 0x007fff80, FALSE
),
54 HOWTO (R_SPU_ADDR18
, 0, 2, 18, FALSE
, 7, complain_overflow_bitfield
,
55 bfd_elf_generic_reloc
, "SPU_ADDR18",
56 FALSE
, 0, 0x01ffff80, FALSE
),
57 HOWTO (R_SPU_ADDR32
, 0, 2, 32, FALSE
, 0, complain_overflow_dont
,
58 bfd_elf_generic_reloc
, "SPU_ADDR32",
59 FALSE
, 0, 0xffffffff, FALSE
),
60 HOWTO (R_SPU_REL16
, 2, 2, 16, TRUE
, 7, complain_overflow_bitfield
,
61 bfd_elf_generic_reloc
, "SPU_REL16",
62 FALSE
, 0, 0x007fff80, TRUE
),
63 HOWTO (R_SPU_ADDR7
, 0, 2, 7, FALSE
, 14, complain_overflow_dont
,
64 bfd_elf_generic_reloc
, "SPU_ADDR7",
65 FALSE
, 0, 0x001fc000, FALSE
),
66 HOWTO (R_SPU_REL9
, 2, 2, 9, TRUE
, 0, complain_overflow_signed
,
67 spu_elf_rel9
, "SPU_REL9",
68 FALSE
, 0, 0x0180007f, TRUE
),
69 HOWTO (R_SPU_REL9I
, 2, 2, 9, TRUE
, 0, complain_overflow_signed
,
70 spu_elf_rel9
, "SPU_REL9I",
71 FALSE
, 0, 0x0000c07f, TRUE
),
72 HOWTO (R_SPU_ADDR10I
, 0, 2, 10, FALSE
, 14, complain_overflow_signed
,
73 bfd_elf_generic_reloc
, "SPU_ADDR10I",
74 FALSE
, 0, 0x00ffc000, FALSE
),
75 HOWTO (R_SPU_ADDR16I
, 0, 2, 16, FALSE
, 7, complain_overflow_signed
,
76 bfd_elf_generic_reloc
, "SPU_ADDR16I",
77 FALSE
, 0, 0x007fff80, FALSE
),
78 HOWTO (R_SPU_REL32
, 0, 2, 32, TRUE
, 0, complain_overflow_dont
,
79 bfd_elf_generic_reloc
, "SPU_REL32",
80 FALSE
, 0, 0xffffffff, TRUE
),
81 HOWTO (R_SPU_ADDR16X
, 0, 2, 16, FALSE
, 7, complain_overflow_bitfield
,
82 bfd_elf_generic_reloc
, "SPU_ADDR16X",
83 FALSE
, 0, 0x007fff80, FALSE
),
84 HOWTO (R_SPU_PPU32
, 0, 2, 32, FALSE
, 0, complain_overflow_dont
,
85 bfd_elf_generic_reloc
, "SPU_PPU32",
86 FALSE
, 0, 0xffffffff, FALSE
),
87 HOWTO (R_SPU_PPU64
, 0, 4, 64, FALSE
, 0, complain_overflow_dont
,
88 bfd_elf_generic_reloc
, "SPU_PPU64",
92 static struct bfd_elf_special_section
const spu_elf_special_sections
[] = {
93 { ".toe", 4, 0, SHT_NOBITS
, SHF_ALLOC
},
97 static enum elf_spu_reloc_type
98 spu_elf_bfd_to_reloc_type (bfd_reloc_code_real_type code
)
104 case BFD_RELOC_SPU_IMM10W
:
106 case BFD_RELOC_SPU_IMM16W
:
108 case BFD_RELOC_SPU_LO16
:
109 return R_SPU_ADDR16_LO
;
110 case BFD_RELOC_SPU_HI16
:
111 return R_SPU_ADDR16_HI
;
112 case BFD_RELOC_SPU_IMM18
:
114 case BFD_RELOC_SPU_PCREL16
:
116 case BFD_RELOC_SPU_IMM7
:
118 case BFD_RELOC_SPU_IMM8
:
120 case BFD_RELOC_SPU_PCREL9a
:
122 case BFD_RELOC_SPU_PCREL9b
:
124 case BFD_RELOC_SPU_IMM10
:
125 return R_SPU_ADDR10I
;
126 case BFD_RELOC_SPU_IMM16
:
127 return R_SPU_ADDR16I
;
130 case BFD_RELOC_32_PCREL
:
132 case BFD_RELOC_SPU_PPU32
:
134 case BFD_RELOC_SPU_PPU64
:
140 spu_elf_info_to_howto (bfd
*abfd ATTRIBUTE_UNUSED
,
142 Elf_Internal_Rela
*dst
)
144 enum elf_spu_reloc_type r_type
;
146 r_type
= (enum elf_spu_reloc_type
) ELF32_R_TYPE (dst
->r_info
);
147 BFD_ASSERT (r_type
< R_SPU_max
);
148 cache_ptr
->howto
= &elf_howto_table
[(int) r_type
];
151 static reloc_howto_type
*
152 spu_elf_reloc_type_lookup (bfd
*abfd ATTRIBUTE_UNUSED
,
153 bfd_reloc_code_real_type code
)
155 enum elf_spu_reloc_type r_type
= spu_elf_bfd_to_reloc_type (code
);
157 if (r_type
== R_SPU_NONE
)
160 return elf_howto_table
+ r_type
;
163 static reloc_howto_type
*
164 spu_elf_reloc_name_lookup (bfd
*abfd ATTRIBUTE_UNUSED
,
169 for (i
= 0; i
< sizeof (elf_howto_table
) / sizeof (elf_howto_table
[0]); i
++)
170 if (elf_howto_table
[i
].name
!= NULL
171 && strcasecmp (elf_howto_table
[i
].name
, r_name
) == 0)
172 return &elf_howto_table
[i
];
177 /* Apply R_SPU_REL9 and R_SPU_REL9I relocs. */
179 static bfd_reloc_status_type
180 spu_elf_rel9 (bfd
*abfd
, arelent
*reloc_entry
, asymbol
*symbol
,
181 void *data
, asection
*input_section
,
182 bfd
*output_bfd
, char **error_message
)
184 bfd_size_type octets
;
188 /* If this is a relocatable link (output_bfd test tells us), just
189 call the generic function. Any adjustment will be done at final
191 if (output_bfd
!= NULL
)
192 return bfd_elf_generic_reloc (abfd
, reloc_entry
, symbol
, data
,
193 input_section
, output_bfd
, error_message
);
195 if (reloc_entry
->address
> bfd_get_section_limit (abfd
, input_section
))
196 return bfd_reloc_outofrange
;
197 octets
= reloc_entry
->address
* bfd_octets_per_byte (abfd
);
199 /* Get symbol value. */
201 if (!bfd_is_com_section (symbol
->section
))
203 if (symbol
->section
->output_section
)
204 val
+= symbol
->section
->output_section
->vma
;
206 val
+= reloc_entry
->addend
;
208 /* Make it pc-relative. */
209 val
-= input_section
->output_section
->vma
+ input_section
->output_offset
;
212 if (val
+ 256 >= 512)
213 return bfd_reloc_overflow
;
215 insn
= bfd_get_32 (abfd
, (bfd_byte
*) data
+ octets
);
217 /* Move two high bits of value to REL9I and REL9 position.
218 The mask will take care of selecting the right field. */
219 val
= (val
& 0x7f) | ((val
& 0x180) << 7) | ((val
& 0x180) << 16);
220 insn
&= ~reloc_entry
->howto
->dst_mask
;
221 insn
|= val
& reloc_entry
->howto
->dst_mask
;
222 bfd_put_32 (abfd
, insn
, (bfd_byte
*) data
+ octets
);
227 spu_elf_new_section_hook (bfd
*abfd
, asection
*sec
)
229 if (!sec
->used_by_bfd
)
231 struct _spu_elf_section_data
*sdata
;
233 sdata
= bfd_zalloc (abfd
, sizeof (*sdata
));
236 sec
->used_by_bfd
= sdata
;
239 return _bfd_elf_new_section_hook (abfd
, sec
);
242 /* Specially mark defined symbols named _EAR_* with BSF_KEEP so that
243 strip --strip-unneeded will not remove them. */
246 spu_elf_backend_symbol_processing (bfd
*abfd ATTRIBUTE_UNUSED
, asymbol
*sym
)
248 if (sym
->name
!= NULL
249 && sym
->section
!= bfd_abs_section_ptr
250 && strncmp (sym
->name
, "_EAR_", 5) == 0)
251 sym
->flags
|= BSF_KEEP
;
254 /* SPU ELF linker hash table. */
256 struct spu_link_hash_table
258 struct elf_link_hash_table elf
;
260 /* The stub hash table. */
261 struct bfd_hash_table stub_hash_table
;
263 /* Sorted array of stubs. */
265 struct spu_stub_hash_entry
**sh
;
270 /* Shortcuts to overlay sections. */
274 struct elf_link_hash_entry
*ovly_load
;
276 /* An array of two output sections per overlay region, chosen such that
277 the first section vma is the overlay buffer vma (ie. the section has
278 the lowest vma in the group that occupy the region), and the second
279 section vma+size specifies the end of the region. We keep pointers
280 to sections like this because section vmas may change when laying
282 asection
**ovl_region
;
284 /* Number of overlay buffers. */
285 unsigned int num_buf
;
287 /* Total number of overlays. */
288 unsigned int num_overlays
;
290 /* Set if we should emit symbols for stubs. */
291 unsigned int emit_stub_syms
:1;
293 /* Set if we want stubs on calls out of overlay regions to
294 non-overlay regions. */
295 unsigned int non_overlay_stubs
: 1;
298 unsigned int stub_overflow
: 1;
300 /* Set if stack size analysis should be done. */
301 unsigned int stack_analysis
: 1;
303 /* Set if __stack_* syms will be emitted. */
304 unsigned int emit_stack_syms
: 1;
307 #define spu_hash_table(p) \
308 ((struct spu_link_hash_table *) ((p)->hash))
310 struct spu_stub_hash_entry
312 struct bfd_hash_entry root
;
314 /* Destination of this stub. */
315 asection
*target_section
;
318 /* Offset of entry in stub section. */
321 /* Offset from this stub to stub that loads the overlay index. */
325 /* Create an entry in a spu stub hash table. */
327 static struct bfd_hash_entry
*
328 stub_hash_newfunc (struct bfd_hash_entry
*entry
,
329 struct bfd_hash_table
*table
,
332 /* Allocate the structure if it has not already been allocated by a
336 entry
= bfd_hash_allocate (table
, sizeof (struct spu_stub_hash_entry
));
341 /* Call the allocation method of the superclass. */
342 entry
= bfd_hash_newfunc (entry
, table
, string
);
345 struct spu_stub_hash_entry
*sh
= (struct spu_stub_hash_entry
*) entry
;
347 sh
->target_section
= NULL
;
356 /* Create a spu ELF linker hash table. */
358 static struct bfd_link_hash_table
*
359 spu_elf_link_hash_table_create (bfd
*abfd
)
361 struct spu_link_hash_table
*htab
;
363 htab
= bfd_malloc (sizeof (*htab
));
367 if (!_bfd_elf_link_hash_table_init (&htab
->elf
, abfd
,
368 _bfd_elf_link_hash_newfunc
,
369 sizeof (struct elf_link_hash_entry
)))
375 /* Init the stub hash table too. */
376 if (!bfd_hash_table_init (&htab
->stub_hash_table
, stub_hash_newfunc
,
377 sizeof (struct spu_stub_hash_entry
)))
380 memset (&htab
->stubs
, 0,
381 sizeof (*htab
) - offsetof (struct spu_link_hash_table
, stubs
));
383 return &htab
->elf
.root
;
386 /* Free the derived linker hash table. */
389 spu_elf_link_hash_table_free (struct bfd_link_hash_table
*hash
)
391 struct spu_link_hash_table
*ret
= (struct spu_link_hash_table
*) hash
;
393 bfd_hash_table_free (&ret
->stub_hash_table
);
394 _bfd_generic_link_hash_table_free (hash
);
397 /* Find the symbol for the given R_SYMNDX in IBFD and set *HP and *SYMP
398 to (hash, NULL) for global symbols, and (NULL, sym) for locals. Set
399 *SYMSECP to the symbol's section. *LOCSYMSP caches local syms. */
402 get_sym_h (struct elf_link_hash_entry
**hp
,
403 Elf_Internal_Sym
**symp
,
405 Elf_Internal_Sym
**locsymsp
,
406 unsigned long r_symndx
,
409 Elf_Internal_Shdr
*symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
411 if (r_symndx
>= symtab_hdr
->sh_info
)
413 struct elf_link_hash_entry
**sym_hashes
= elf_sym_hashes (ibfd
);
414 struct elf_link_hash_entry
*h
;
416 h
= sym_hashes
[r_symndx
- symtab_hdr
->sh_info
];
417 while (h
->root
.type
== bfd_link_hash_indirect
418 || h
->root
.type
== bfd_link_hash_warning
)
419 h
= (struct elf_link_hash_entry
*) h
->root
.u
.i
.link
;
429 asection
*symsec
= NULL
;
430 if (h
->root
.type
== bfd_link_hash_defined
431 || h
->root
.type
== bfd_link_hash_defweak
)
432 symsec
= h
->root
.u
.def
.section
;
438 Elf_Internal_Sym
*sym
;
439 Elf_Internal_Sym
*locsyms
= *locsymsp
;
443 locsyms
= (Elf_Internal_Sym
*) symtab_hdr
->contents
;
446 size_t symcount
= symtab_hdr
->sh_info
;
448 /* If we are reading symbols into the contents, then
449 read the global syms too. This is done to cache
450 syms for later stack analysis. */
451 if ((unsigned char **) locsymsp
== &symtab_hdr
->contents
)
452 symcount
= symtab_hdr
->sh_size
/ symtab_hdr
->sh_entsize
;
453 locsyms
= bfd_elf_get_elf_syms (ibfd
, symtab_hdr
, symcount
, 0,
460 sym
= locsyms
+ r_symndx
;
470 asection
*symsec
= NULL
;
471 if ((sym
->st_shndx
!= SHN_UNDEF
472 && sym
->st_shndx
< SHN_LORESERVE
)
473 || sym
->st_shndx
> SHN_HIRESERVE
)
474 symsec
= bfd_section_from_elf_index (ibfd
, sym
->st_shndx
);
482 /* Build a name for an entry in the stub hash table. We can't use a
483 local symbol name because ld -r might generate duplicate local symbols. */
486 spu_stub_name (const asection
*sym_sec
,
487 const struct elf_link_hash_entry
*h
,
488 const Elf_Internal_Rela
*rel
)
495 len
= strlen (h
->root
.root
.string
) + 1 + 8 + 1;
496 stub_name
= bfd_malloc (len
);
497 if (stub_name
== NULL
)
500 sprintf (stub_name
, "%s+%x",
502 (int) rel
->r_addend
& 0xffffffff);
507 len
= 8 + 1 + 8 + 1 + 8 + 1;
508 stub_name
= bfd_malloc (len
);
509 if (stub_name
== NULL
)
512 sprintf (stub_name
, "%x:%x+%x",
513 sym_sec
->id
& 0xffffffff,
514 (int) ELF32_R_SYM (rel
->r_info
) & 0xffffffff,
515 (int) rel
->r_addend
& 0xffffffff);
516 len
= strlen (stub_name
);
519 if (stub_name
[len
- 2] == '+'
520 && stub_name
[len
- 1] == '0'
521 && stub_name
[len
] == 0)
522 stub_name
[len
- 2] = 0;
527 /* Create the note section if not already present. This is done early so
528 that the linker maps the sections to the right place in the output. */
531 spu_elf_create_sections (bfd
*output_bfd
,
532 struct bfd_link_info
*info
,
537 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
539 /* Stash some options away where we can get at them later. */
540 htab
->stack_analysis
= stack_analysis
;
541 htab
->emit_stack_syms
= emit_stack_syms
;
543 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
544 if (bfd_get_section_by_name (ibfd
, SPU_PTNOTE_SPUNAME
) != NULL
)
549 /* Make SPU_PTNOTE_SPUNAME section. */
556 ibfd
= info
->input_bfds
;
557 flags
= SEC_LOAD
| SEC_READONLY
| SEC_HAS_CONTENTS
| SEC_IN_MEMORY
;
558 s
= bfd_make_section_anyway_with_flags (ibfd
, SPU_PTNOTE_SPUNAME
, flags
);
560 || !bfd_set_section_alignment (ibfd
, s
, 4))
563 name_len
= strlen (bfd_get_filename (output_bfd
)) + 1;
564 size
= 12 + ((sizeof (SPU_PLUGIN_NAME
) + 3) & -4);
565 size
+= (name_len
+ 3) & -4;
567 if (!bfd_set_section_size (ibfd
, s
, size
))
570 data
= bfd_zalloc (ibfd
, size
);
574 bfd_put_32 (ibfd
, sizeof (SPU_PLUGIN_NAME
), data
+ 0);
575 bfd_put_32 (ibfd
, name_len
, data
+ 4);
576 bfd_put_32 (ibfd
, 1, data
+ 8);
577 memcpy (data
+ 12, SPU_PLUGIN_NAME
, sizeof (SPU_PLUGIN_NAME
));
578 memcpy (data
+ 12 + ((sizeof (SPU_PLUGIN_NAME
) + 3) & -4),
579 bfd_get_filename (output_bfd
), name_len
);
586 /* qsort predicate to sort sections by vma. */
589 sort_sections (const void *a
, const void *b
)
591 const asection
*const *s1
= a
;
592 const asection
*const *s2
= b
;
593 bfd_signed_vma delta
= (*s1
)->vma
- (*s2
)->vma
;
596 return delta
< 0 ? -1 : 1;
598 return (*s1
)->index
- (*s2
)->index
;
601 /* Identify overlays in the output bfd, and number them. */
604 spu_elf_find_overlays (bfd
*output_bfd
, struct bfd_link_info
*info
)
606 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
607 asection
**alloc_sec
;
608 unsigned int i
, n
, ovl_index
, num_buf
;
612 if (output_bfd
->section_count
< 2)
615 alloc_sec
= bfd_malloc (output_bfd
->section_count
* sizeof (*alloc_sec
));
616 if (alloc_sec
== NULL
)
619 /* Pick out all the alloced sections. */
620 for (n
= 0, s
= output_bfd
->sections
; s
!= NULL
; s
= s
->next
)
621 if ((s
->flags
& SEC_ALLOC
) != 0
622 && (s
->flags
& (SEC_LOAD
| SEC_THREAD_LOCAL
)) != SEC_THREAD_LOCAL
632 /* Sort them by vma. */
633 qsort (alloc_sec
, n
, sizeof (*alloc_sec
), sort_sections
);
635 /* Look for overlapping vmas. Any with overlap must be overlays.
636 Count them. Also count the number of overlay regions and for
637 each region save a section from that region with the lowest vma
638 and another section with the highest end vma. */
639 ovl_end
= alloc_sec
[0]->vma
+ alloc_sec
[0]->size
;
640 for (ovl_index
= 0, num_buf
= 0, i
= 1; i
< n
; i
++)
643 if (s
->vma
< ovl_end
)
645 asection
*s0
= alloc_sec
[i
- 1];
647 if (spu_elf_section_data (s0
)->ovl_index
== 0)
649 spu_elf_section_data (s0
)->ovl_index
= ++ovl_index
;
650 alloc_sec
[num_buf
* 2] = s0
;
651 alloc_sec
[num_buf
* 2 + 1] = s0
;
654 spu_elf_section_data (s
)->ovl_index
= ++ovl_index
;
655 if (ovl_end
< s
->vma
+ s
->size
)
657 ovl_end
= s
->vma
+ s
->size
;
658 alloc_sec
[num_buf
* 2 - 1] = s
;
662 ovl_end
= s
->vma
+ s
->size
;
665 htab
->num_overlays
= ovl_index
;
666 htab
->num_buf
= num_buf
;
673 alloc_sec
= bfd_realloc (alloc_sec
, num_buf
* 2 * sizeof (*alloc_sec
));
674 if (alloc_sec
== NULL
)
677 htab
->ovl_region
= alloc_sec
;
681 /* One of these per stub. */
682 #define SIZEOF_STUB1 8
683 #define ILA_79 0x4200004f /* ila $79,function_address */
684 #define BR 0x32000000 /* br stub2 */
686 /* One of these per overlay. */
687 #define SIZEOF_STUB2 8
688 #define ILA_78 0x4200004e /* ila $78,overlay_number */
690 #define NOP 0x40200000
692 /* Return true for all relative and absolute branch instructions.
700 brhnz 00100011 0.. */
703 is_branch (const unsigned char *insn
)
705 return (insn
[0] & 0xec) == 0x20 && (insn
[1] & 0x80) == 0;
708 /* Return true for all indirect branch instructions.
716 bihnz 00100101 011 */
719 is_indirect_branch (const unsigned char *insn
)
721 return (insn
[0] & 0xef) == 0x25 && (insn
[1] & 0x80) == 0;
724 /* Return true for branch hint instructions.
729 is_hint (const unsigned char *insn
)
731 return (insn
[0] & 0xfc) == 0x10;
734 /* Return TRUE if this reloc symbol should possibly go via an overlay stub. */
737 needs_ovl_stub (const char *sym_name
,
739 asection
*input_section
,
740 struct spu_link_hash_table
*htab
,
741 bfd_boolean is_branch
)
743 if (htab
->num_overlays
== 0)
747 || sym_sec
->output_section
== NULL
748 || spu_elf_section_data (sym_sec
->output_section
) == NULL
)
751 /* setjmp always goes via an overlay stub, because then the return
752 and hence the longjmp goes via __ovly_return. That magically
753 makes setjmp/longjmp between overlays work. */
754 if (strncmp (sym_name
, "setjmp", 6) == 0
755 && (sym_name
[6] == '\0' || sym_name
[6] == '@'))
758 /* Usually, symbols in non-overlay sections don't need stubs. */
759 if (spu_elf_section_data (sym_sec
->output_section
)->ovl_index
== 0
760 && !htab
->non_overlay_stubs
)
763 /* A reference from some other section to a symbol in an overlay
764 section needs a stub. */
765 if (spu_elf_section_data (sym_sec
->output_section
)->ovl_index
766 != spu_elf_section_data (input_section
->output_section
)->ovl_index
)
769 /* If this insn isn't a branch then we are possibly taking the
770 address of a function and passing it out somehow. */
774 /* Called via elf_link_hash_traverse to allocate stubs for any _SPUEAR_
778 allocate_spuear_stubs (struct elf_link_hash_entry
*h
, void *inf
)
780 /* Symbols starting with _SPUEAR_ need a stub because they may be
781 invoked by the PPU. */
782 if ((h
->root
.type
== bfd_link_hash_defined
783 || h
->root
.type
== bfd_link_hash_defweak
)
785 && strncmp (h
->root
.root
.string
, "_SPUEAR_", 8) == 0)
787 struct spu_link_hash_table
*htab
= inf
;
788 static Elf_Internal_Rela zero_rel
;
789 char *stub_name
= spu_stub_name (h
->root
.u
.def
.section
, h
, &zero_rel
);
790 struct spu_stub_hash_entry
*sh
;
792 if (stub_name
== NULL
)
798 sh
= (struct spu_stub_hash_entry
*)
799 bfd_hash_lookup (&htab
->stub_hash_table
, stub_name
, TRUE
, FALSE
);
806 /* If this entry isn't new, we already have a stub. */
807 if (sh
->target_section
!= NULL
)
813 sh
->target_section
= h
->root
.u
.def
.section
;
814 sh
->target_off
= h
->root
.u
.def
.value
;
815 htab
->stubs
.count
+= 1;
821 /* Called via bfd_hash_traverse to set up pointers to all symbols
822 in the stub hash table. */
825 populate_stubs (struct bfd_hash_entry
*bh
, void *inf
)
827 struct spu_link_hash_table
*htab
= inf
;
829 htab
->stubs
.sh
[--htab
->stubs
.count
] = (struct spu_stub_hash_entry
*) bh
;
833 /* qsort predicate to sort stubs by overlay number. */
836 sort_stubs (const void *a
, const void *b
)
838 const struct spu_stub_hash_entry
*const *sa
= a
;
839 const struct spu_stub_hash_entry
*const *sb
= b
;
843 i
= spu_elf_section_data ((*sa
)->target_section
->output_section
)->ovl_index
;
844 i
-= spu_elf_section_data ((*sb
)->target_section
->output_section
)->ovl_index
;
848 d
= ((*sa
)->target_section
->output_section
->vma
849 + (*sa
)->target_section
->output_offset
851 - (*sb
)->target_section
->output_section
->vma
852 - (*sb
)->target_section
->output_offset
853 - (*sb
)->target_off
);
855 return d
< 0 ? -1 : 1;
857 /* Two functions at the same address. Aliases perhaps. */
858 i
= strcmp ((*sb
)->root
.string
, (*sa
)->root
.string
);
863 /* Allocate space for overlay call and return stubs. */
866 spu_elf_size_stubs (bfd
*output_bfd
,
867 struct bfd_link_info
*info
,
868 int non_overlay_stubs
,
874 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
879 htab
->non_overlay_stubs
= non_overlay_stubs
;
880 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
882 extern const bfd_target bfd_elf32_spu_vec
;
883 Elf_Internal_Shdr
*symtab_hdr
;
885 Elf_Internal_Sym
*local_syms
= NULL
;
888 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
891 /* We'll need the symbol table in a second. */
892 symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
893 if (symtab_hdr
->sh_info
== 0)
896 /* Arrange to read and keep global syms for later stack analysis. */
899 psyms
= &symtab_hdr
->contents
;
901 /* Walk over each section attached to the input bfd. */
902 for (section
= ibfd
->sections
; section
!= NULL
; section
= section
->next
)
904 Elf_Internal_Rela
*internal_relocs
, *irelaend
, *irela
;
906 /* If there aren't any relocs, then there's nothing more to do. */
907 if ((section
->flags
& SEC_RELOC
) == 0
908 || (section
->flags
& SEC_ALLOC
) == 0
909 || (section
->flags
& SEC_LOAD
) == 0
910 || section
->reloc_count
== 0)
913 /* If this section is a link-once section that will be
914 discarded, then don't create any stubs. */
915 if (section
->output_section
== NULL
916 || section
->output_section
->owner
!= output_bfd
)
919 /* Get the relocs. */
921 = _bfd_elf_link_read_relocs (ibfd
, section
, NULL
, NULL
,
923 if (internal_relocs
== NULL
)
924 goto error_ret_free_local
;
926 /* Now examine each relocation. */
927 irela
= internal_relocs
;
928 irelaend
= irela
+ section
->reloc_count
;
929 for (; irela
< irelaend
; irela
++)
931 enum elf_spu_reloc_type r_type
;
934 Elf_Internal_Sym
*sym
;
935 struct elf_link_hash_entry
*h
;
936 const char *sym_name
;
938 struct spu_stub_hash_entry
*sh
;
939 unsigned int sym_type
;
940 enum _insn_type
{ non_branch
, branch
, call
} insn_type
;
942 r_type
= ELF32_R_TYPE (irela
->r_info
);
943 r_indx
= ELF32_R_SYM (irela
->r_info
);
945 if (r_type
>= R_SPU_max
)
947 bfd_set_error (bfd_error_bad_value
);
948 goto error_ret_free_internal
;
951 /* Determine the reloc target section. */
952 if (!get_sym_h (&h
, &sym
, &sym_sec
, psyms
, r_indx
, ibfd
))
953 goto error_ret_free_internal
;
956 || sym_sec
->output_section
== NULL
957 || sym_sec
->output_section
->owner
!= output_bfd
)
960 /* Ensure no stubs for user supplied overlay manager syms. */
962 && (strcmp (h
->root
.root
.string
, "__ovly_load") == 0
963 || strcmp (h
->root
.root
.string
, "__ovly_return") == 0))
966 insn_type
= non_branch
;
967 if (r_type
== R_SPU_REL16
968 || r_type
== R_SPU_ADDR16
)
970 unsigned char insn
[4];
972 if (!bfd_get_section_contents (ibfd
, section
, insn
,
974 goto error_ret_free_internal
;
976 if (is_branch (insn
) || is_hint (insn
))
979 if ((insn
[0] & 0xfd) == 0x31)
984 /* We are only interested in function symbols. */
988 sym_name
= h
->root
.root
.string
;
992 sym_type
= ELF_ST_TYPE (sym
->st_info
);
993 sym_name
= bfd_elf_sym_name (sym_sec
->owner
,
998 if (sym_type
!= STT_FUNC
)
1000 /* It's common for people to write assembly and forget
1001 to give function symbols the right type. Handle
1002 calls to such symbols, but warn so that (hopefully)
1003 people will fix their code. We need the symbol
1004 type to be correct to distinguish function pointer
1005 initialisation from other pointer initialisation. */
1006 if (insn_type
== call
)
1007 (*_bfd_error_handler
) (_("warning: call to non-function"
1008 " symbol %s defined in %B"),
1009 sym_sec
->owner
, sym_name
);
1014 if (!needs_ovl_stub (sym_name
, sym_sec
, section
, htab
,
1015 insn_type
!= non_branch
))
1018 stub_name
= spu_stub_name (sym_sec
, h
, irela
);
1019 if (stub_name
== NULL
)
1020 goto error_ret_free_internal
;
1022 sh
= (struct spu_stub_hash_entry
*)
1023 bfd_hash_lookup (&htab
->stub_hash_table
, stub_name
,
1028 error_ret_free_internal
:
1029 if (elf_section_data (section
)->relocs
!= internal_relocs
)
1030 free (internal_relocs
);
1031 error_ret_free_local
:
1032 if (local_syms
!= NULL
1033 && (symtab_hdr
->contents
1034 != (unsigned char *) local_syms
))
1039 /* If this entry isn't new, we already have a stub. */
1040 if (sh
->target_section
!= NULL
)
1046 sh
->target_section
= sym_sec
;
1048 sh
->target_off
= h
->root
.u
.def
.value
;
1050 sh
->target_off
= sym
->st_value
;
1051 sh
->target_off
+= irela
->r_addend
;
1053 htab
->stubs
.count
+= 1;
1056 /* We're done with the internal relocs, free them. */
1057 if (elf_section_data (section
)->relocs
!= internal_relocs
)
1058 free (internal_relocs
);
1061 if (local_syms
!= NULL
1062 && symtab_hdr
->contents
!= (unsigned char *) local_syms
)
1064 if (!info
->keep_memory
)
1067 symtab_hdr
->contents
= (unsigned char *) local_syms
;
1071 elf_link_hash_traverse (&htab
->elf
, allocate_spuear_stubs
, htab
);
1072 if (htab
->stubs
.err
)
1076 if (htab
->stubs
.count
== 0)
1079 ibfd
= info
->input_bfds
;
1080 flags
= (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
| SEC_READONLY
1081 | SEC_HAS_CONTENTS
| SEC_IN_MEMORY
);
1082 htab
->stub
= bfd_make_section_anyway_with_flags (ibfd
, ".stub", flags
);
1084 if (htab
->stub
== NULL
1085 || !bfd_set_section_alignment (ibfd
, htab
->stub
, 2))
1088 flags
= (SEC_ALLOC
| SEC_LOAD
1089 | SEC_HAS_CONTENTS
| SEC_IN_MEMORY
);
1090 htab
->ovtab
= bfd_make_section_anyway_with_flags (ibfd
, ".ovtab", flags
);
1091 *ovtab
= htab
->ovtab
;
1092 if (htab
->ovtab
== NULL
1093 || !bfd_set_section_alignment (ibfd
, htab
->stub
, 4))
1096 *toe
= bfd_make_section_anyway_with_flags (ibfd
, ".toe", SEC_ALLOC
);
1098 || !bfd_set_section_alignment (ibfd
, *toe
, 4))
1102 /* Retrieve all the stubs and sort. */
1103 htab
->stubs
.sh
= bfd_malloc (htab
->stubs
.count
* sizeof (*htab
->stubs
.sh
));
1104 if (htab
->stubs
.sh
== NULL
)
1106 i
= htab
->stubs
.count
;
1107 bfd_hash_traverse (&htab
->stub_hash_table
, populate_stubs
, htab
);
1108 BFD_ASSERT (htab
->stubs
.count
== 0);
1110 htab
->stubs
.count
= i
;
1111 qsort (htab
->stubs
.sh
, htab
->stubs
.count
, sizeof (*htab
->stubs
.sh
),
1114 /* Now that the stubs are sorted, place them in the stub section.
1115 Stubs are grouped per overlay
1129 for (i
= 0; i
< htab
->stubs
.count
; i
++)
1131 if (spu_elf_section_data (htab
->stubs
.sh
[group
]->target_section
1132 ->output_section
)->ovl_index
1133 != spu_elf_section_data (htab
->stubs
.sh
[i
]->target_section
1134 ->output_section
)->ovl_index
)
1136 htab
->stub
->size
+= SIZEOF_STUB2
;
1137 for (; group
!= i
; group
++)
1138 htab
->stubs
.sh
[group
]->delta
1139 = htab
->stubs
.sh
[i
- 1]->off
- htab
->stubs
.sh
[group
]->off
;
1142 || ((htab
->stubs
.sh
[i
- 1]->target_section
->output_section
->vma
1143 + htab
->stubs
.sh
[i
- 1]->target_section
->output_offset
1144 + htab
->stubs
.sh
[i
- 1]->target_off
)
1145 != (htab
->stubs
.sh
[i
]->target_section
->output_section
->vma
1146 + htab
->stubs
.sh
[i
]->target_section
->output_offset
1147 + htab
->stubs
.sh
[i
]->target_off
)))
1149 htab
->stubs
.sh
[i
]->off
= htab
->stub
->size
;
1150 htab
->stub
->size
+= SIZEOF_STUB1
;
1153 htab
->stubs
.sh
[i
]->off
= htab
->stubs
.sh
[i
- 1]->off
;
1156 htab
->stub
->size
+= SIZEOF_STUB2
;
1157 for (; group
!= i
; group
++)
1158 htab
->stubs
.sh
[group
]->delta
1159 = htab
->stubs
.sh
[i
- 1]->off
- htab
->stubs
.sh
[group
]->off
;
1161 /* htab->ovtab consists of two arrays.
1171 . } _ovly_buf_table[]; */
1173 htab
->ovtab
->alignment_power
= 4;
1174 htab
->ovtab
->size
= htab
->num_overlays
* 16 + htab
->num_buf
* 4;
1179 /* Functions to handle embedded spu_ovl.o object. */
1182 ovl_mgr_open (struct bfd
*nbfd ATTRIBUTE_UNUSED
, void *stream
)
1188 ovl_mgr_pread (struct bfd
*abfd ATTRIBUTE_UNUSED
,
1194 struct _ovl_stream
*os
;
1198 os
= (struct _ovl_stream
*) stream
;
1199 max
= (const char *) os
->end
- (const char *) os
->start
;
1201 if ((ufile_ptr
) offset
>= max
)
1205 if (count
> max
- offset
)
1206 count
= max
- offset
;
1208 memcpy (buf
, (const char *) os
->start
+ offset
, count
);
1213 spu_elf_open_builtin_lib (bfd
**ovl_bfd
, const struct _ovl_stream
*stream
)
1215 *ovl_bfd
= bfd_openr_iovec ("builtin ovl_mgr",
1222 return *ovl_bfd
!= NULL
;
1225 /* Fill in the ila and br for a stub. On the last stub for a group,
1226 write the stub that sets the overlay number too. */
1229 write_one_stub (struct spu_stub_hash_entry
*ent
, struct bfd_link_info
*info
)
1231 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1232 asection
*sec
= htab
->stub
;
1233 asection
*s
= ent
->target_section
;
1237 val
= ent
->target_off
+ s
->output_offset
+ s
->output_section
->vma
;
1238 bfd_put_32 (sec
->owner
, ILA_79
+ ((val
<< 7) & 0x01ffff80),
1239 sec
->contents
+ ent
->off
);
1240 val
= ent
->delta
+ 4;
1241 bfd_put_32 (sec
->owner
, BR
+ ((val
<< 5) & 0x007fff80),
1242 sec
->contents
+ ent
->off
+ 4);
1244 /* If this is the last stub of this group, write stub2. */
1245 if (ent
->delta
== 0)
1247 bfd_put_32 (sec
->owner
, NOP
,
1248 sec
->contents
+ ent
->off
+ 4);
1250 ovl
= spu_elf_section_data (s
->output_section
)->ovl_index
;
1251 bfd_put_32 (sec
->owner
, ILA_78
+ ((ovl
<< 7) & 0x01ffff80),
1252 sec
->contents
+ ent
->off
+ 8);
1254 val
= (htab
->ovly_load
->root
.u
.def
.section
->output_section
->vma
1255 + htab
->ovly_load
->root
.u
.def
.section
->output_offset
1256 + htab
->ovly_load
->root
.u
.def
.value
1257 - (sec
->output_section
->vma
1258 + sec
->output_offset
1261 if (val
+ 0x20000 >= 0x40000)
1262 htab
->stub_overflow
= TRUE
;
1264 bfd_put_32 (sec
->owner
, BR
+ ((val
<< 5) & 0x007fff80),
1265 sec
->contents
+ ent
->off
+ 12);
1268 if (htab
->emit_stub_syms
)
1270 struct elf_link_hash_entry
*h
;
1274 len1
= sizeof ("00000000.ovl_call.") - 1;
1275 len2
= strlen (ent
->root
.string
);
1276 name
= bfd_malloc (len1
+ len2
+ 1);
1279 memcpy (name
, "00000000.ovl_call.", len1
);
1280 memcpy (name
+ len1
, ent
->root
.string
, len2
+ 1);
1281 h
= elf_link_hash_lookup (&htab
->elf
, name
, TRUE
, TRUE
, FALSE
);
1285 if (h
->root
.type
== bfd_link_hash_new
)
1287 h
->root
.type
= bfd_link_hash_defined
;
1288 h
->root
.u
.def
.section
= sec
;
1289 h
->root
.u
.def
.value
= ent
->off
;
1290 h
->size
= (ent
->delta
== 0
1291 ? SIZEOF_STUB1
+ SIZEOF_STUB2
: SIZEOF_STUB1
);
1295 h
->ref_regular_nonweak
= 1;
1296 h
->forced_local
= 1;
1304 /* Define an STT_OBJECT symbol. */
1306 static struct elf_link_hash_entry
*
1307 define_ovtab_symbol (struct spu_link_hash_table
*htab
, const char *name
)
1309 struct elf_link_hash_entry
*h
;
1311 h
= elf_link_hash_lookup (&htab
->elf
, name
, TRUE
, FALSE
, FALSE
);
1315 if (h
->root
.type
!= bfd_link_hash_defined
1318 h
->root
.type
= bfd_link_hash_defined
;
1319 h
->root
.u
.def
.section
= htab
->ovtab
;
1320 h
->type
= STT_OBJECT
;
1323 h
->ref_regular_nonweak
= 1;
1328 (*_bfd_error_handler
) (_("%B is not allowed to define %s"),
1329 h
->root
.u
.def
.section
->owner
,
1330 h
->root
.root
.string
);
1331 bfd_set_error (bfd_error_bad_value
);
1338 /* Fill in all stubs and the overlay tables. */
1341 spu_elf_build_stubs (struct bfd_link_info
*info
, int emit_syms
, asection
*toe
)
1343 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1344 struct elf_link_hash_entry
*h
;
1350 htab
->emit_stub_syms
= emit_syms
;
1351 htab
->stub
->contents
= bfd_zalloc (htab
->stub
->owner
, htab
->stub
->size
);
1352 if (htab
->stub
->contents
== NULL
)
1355 h
= elf_link_hash_lookup (&htab
->elf
, "__ovly_load", FALSE
, FALSE
, FALSE
);
1356 htab
->ovly_load
= h
;
1357 BFD_ASSERT (h
!= NULL
1358 && (h
->root
.type
== bfd_link_hash_defined
1359 || h
->root
.type
== bfd_link_hash_defweak
)
1362 s
= h
->root
.u
.def
.section
->output_section
;
1363 if (spu_elf_section_data (s
)->ovl_index
)
1365 (*_bfd_error_handler
) (_("%s in overlay section"),
1366 h
->root
.u
.def
.section
->owner
);
1367 bfd_set_error (bfd_error_bad_value
);
1371 /* Write out all the stubs. */
1372 for (i
= 0; i
< htab
->stubs
.count
; i
++)
1373 write_one_stub (htab
->stubs
.sh
[i
], info
);
1375 if (htab
->stub_overflow
)
1377 (*_bfd_error_handler
) (_("overlay stub relocation overflow"));
1378 bfd_set_error (bfd_error_bad_value
);
1382 htab
->ovtab
->contents
= bfd_zalloc (htab
->ovtab
->owner
, htab
->ovtab
->size
);
1383 if (htab
->ovtab
->contents
== NULL
)
1386 /* Write out _ovly_table. */
1387 p
= htab
->ovtab
->contents
;
1388 obfd
= htab
->ovtab
->output_section
->owner
;
1389 for (s
= obfd
->sections
; s
!= NULL
; s
= s
->next
)
1391 unsigned int ovl_index
= spu_elf_section_data (s
)->ovl_index
;
1395 unsigned int lo
, hi
, mid
;
1396 unsigned long off
= (ovl_index
- 1) * 16;
1397 bfd_put_32 (htab
->ovtab
->owner
, s
->vma
, p
+ off
);
1398 bfd_put_32 (htab
->ovtab
->owner
, (s
->size
+ 15) & -16, p
+ off
+ 4);
1399 /* file_off written later in spu_elf_modify_program_headers. */
1405 mid
= (lo
+ hi
) >> 1;
1406 if (htab
->ovl_region
[2 * mid
+ 1]->vma
1407 + htab
->ovl_region
[2 * mid
+ 1]->size
<= s
->vma
)
1409 else if (htab
->ovl_region
[2 * mid
]->vma
> s
->vma
)
1413 bfd_put_32 (htab
->ovtab
->owner
, mid
+ 1, p
+ off
+ 12);
1417 BFD_ASSERT (lo
< hi
);
1421 /* Write out _ovly_buf_table. */
1422 p
= htab
->ovtab
->contents
+ htab
->num_overlays
* 16;
1423 for (i
= 0; i
< htab
->num_buf
; i
++)
1425 bfd_put_32 (htab
->ovtab
->owner
, 0, p
);
1429 h
= define_ovtab_symbol (htab
, "_ovly_table");
1432 h
->root
.u
.def
.value
= 0;
1433 h
->size
= htab
->num_overlays
* 16;
1435 h
= define_ovtab_symbol (htab
, "_ovly_table_end");
1438 h
->root
.u
.def
.value
= htab
->num_overlays
* 16;
1441 h
= define_ovtab_symbol (htab
, "_ovly_buf_table");
1444 h
->root
.u
.def
.value
= htab
->num_overlays
* 16;
1445 h
->size
= htab
->num_buf
* 4;
1447 h
= define_ovtab_symbol (htab
, "_ovly_buf_table_end");
1450 h
->root
.u
.def
.value
= htab
->num_overlays
* 16 + htab
->num_buf
* 4;
1453 h
= define_ovtab_symbol (htab
, "_EAR_");
1456 h
->root
.u
.def
.section
= toe
;
1457 h
->root
.u
.def
.value
= 0;
1463 /* OFFSET in SEC (presumably) is the beginning of a function prologue.
1464 Search for stack adjusting insns, and return the sp delta. */
1467 find_function_stack_adjust (asection
*sec
, bfd_vma offset
)
1472 memset (reg
, 0, sizeof (reg
));
1473 for (unrecog
= 0; offset
+ 4 <= sec
->size
&& unrecog
< 32; offset
+= 4)
1475 unsigned char buf
[4];
1479 /* Assume no relocs on stack adjusing insns. */
1480 if (!bfd_get_section_contents (sec
->owner
, sec
, buf
, offset
, 4))
1483 if (buf
[0] == 0x24 /* stqd */)
1487 ra
= ((buf
[2] & 0x3f) << 1) | (buf
[3] >> 7);
1488 /* Partly decoded immediate field. */
1489 imm
= (buf
[1] << 9) | (buf
[2] << 1) | (buf
[3] >> 7);
1491 if (buf
[0] == 0x1c /* ai */)
1494 imm
= (imm
^ 0x200) - 0x200;
1495 reg
[rt
] = reg
[ra
] + imm
;
1497 if (rt
== 1 /* sp */)
1504 else if (buf
[0] == 0x18 && (buf
[1] & 0xe0) == 0 /* a */)
1506 int rb
= ((buf
[1] & 0x1f) << 2) | ((buf
[2] & 0xc0) >> 6);
1508 reg
[rt
] = reg
[ra
] + reg
[rb
];
1512 else if ((buf
[0] & 0xfc) == 0x40 /* il, ilh, ilhu, ila */)
1514 if (buf
[0] >= 0x42 /* ila */)
1515 imm
|= (buf
[0] & 1) << 17;
1520 if (buf
[0] == 0x40 /* il */)
1522 if ((buf
[1] & 0x80) == 0)
1524 imm
= (imm
^ 0x8000) - 0x8000;
1526 else if ((buf
[1] & 0x80) == 0 /* ilhu */)
1532 else if (buf
[0] == 0x60 && (buf
[1] & 0x80) != 0 /* iohl */)
1534 reg
[rt
] |= imm
& 0xffff;
1537 else if (buf
[0] == 0x04 /* ori */)
1540 imm
= (imm
^ 0x200) - 0x200;
1541 reg
[rt
] = reg
[ra
] | imm
;
1544 else if ((buf
[0] == 0x33 && imm
== 1 /* brsl .+4 */)
1545 || (buf
[0] == 0x08 && (buf
[1] & 0xe0) == 0 /* sf */))
1547 /* Used in pic reg load. Say rt is trashed. */
1551 else if (is_branch (buf
) || is_indirect_branch (buf
))
1552 /* If we hit a branch then we must be out of the prologue. */
1561 /* qsort predicate to sort symbols by section and value. */
1563 static Elf_Internal_Sym
*sort_syms_syms
;
1564 static asection
**sort_syms_psecs
;
1567 sort_syms (const void *a
, const void *b
)
1569 Elf_Internal_Sym
*const *s1
= a
;
1570 Elf_Internal_Sym
*const *s2
= b
;
1571 asection
*sec1
,*sec2
;
1572 bfd_signed_vma delta
;
1574 sec1
= sort_syms_psecs
[*s1
- sort_syms_syms
];
1575 sec2
= sort_syms_psecs
[*s2
- sort_syms_syms
];
1578 return sec1
->index
- sec2
->index
;
1580 delta
= (*s1
)->st_value
- (*s2
)->st_value
;
1582 return delta
< 0 ? -1 : 1;
1584 delta
= (*s2
)->st_size
- (*s1
)->st_size
;
1586 return delta
< 0 ? -1 : 1;
1588 return *s1
< *s2
? -1 : 1;
1593 struct function_info
*fun
;
1594 struct call_info
*next
;
1598 struct function_info
1600 /* List of functions called. Also branches to hot/cold part of
1602 struct call_info
*call_list
;
1603 /* For hot/cold part of function, point to owner. */
1604 struct function_info
*start
;
1605 /* Symbol at start of function. */
1607 Elf_Internal_Sym
*sym
;
1608 struct elf_link_hash_entry
*h
;
1610 /* Function section. */
1612 /* Address range of (this part of) function. */
1616 /* Set if global symbol. */
1617 unsigned int global
: 1;
1618 /* Set if known to be start of function (as distinct from a hunk
1619 in hot/cold section. */
1620 unsigned int is_func
: 1;
1621 /* Flags used during call tree traversal. */
1622 unsigned int visit1
: 1;
1623 unsigned int non_root
: 1;
1624 unsigned int visit2
: 1;
1625 unsigned int marking
: 1;
1626 unsigned int visit3
: 1;
1629 struct spu_elf_stack_info
1633 /* Variable size array describing functions, one per contiguous
1634 address range belonging to a function. */
1635 struct function_info fun
[1];
1638 /* Allocate a struct spu_elf_stack_info with MAX_FUN struct function_info
1639 entries for section SEC. */
1641 static struct spu_elf_stack_info
*
1642 alloc_stack_info (asection
*sec
, int max_fun
)
1644 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
1647 amt
= sizeof (struct spu_elf_stack_info
);
1648 amt
+= (max_fun
- 1) * sizeof (struct function_info
);
1649 sec_data
->stack_info
= bfd_zmalloc (amt
);
1650 if (sec_data
->stack_info
!= NULL
)
1651 sec_data
->stack_info
->max_fun
= max_fun
;
1652 return sec_data
->stack_info
;
1655 /* Add a new struct function_info describing a (part of a) function
1656 starting at SYM_H. Keep the array sorted by address. */
1658 static struct function_info
*
1659 maybe_insert_function (asection
*sec
,
1662 bfd_boolean is_func
)
1664 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
1665 struct spu_elf_stack_info
*sinfo
= sec_data
->stack_info
;
1671 sinfo
= alloc_stack_info (sec
, 20);
1678 Elf_Internal_Sym
*sym
= sym_h
;
1679 off
= sym
->st_value
;
1680 size
= sym
->st_size
;
1684 struct elf_link_hash_entry
*h
= sym_h
;
1685 off
= h
->root
.u
.def
.value
;
1689 for (i
= sinfo
->num_fun
; --i
>= 0; )
1690 if (sinfo
->fun
[i
].lo
<= off
)
1695 /* Don't add another entry for an alias, but do update some
1697 if (sinfo
->fun
[i
].lo
== off
)
1699 /* Prefer globals over local syms. */
1700 if (global
&& !sinfo
->fun
[i
].global
)
1702 sinfo
->fun
[i
].global
= TRUE
;
1703 sinfo
->fun
[i
].u
.h
= sym_h
;
1706 sinfo
->fun
[i
].is_func
= TRUE
;
1707 return &sinfo
->fun
[i
];
1709 /* Ignore a zero-size symbol inside an existing function. */
1710 else if (sinfo
->fun
[i
].hi
> off
&& size
== 0)
1711 return &sinfo
->fun
[i
];
1714 if (++i
< sinfo
->num_fun
)
1715 memmove (&sinfo
->fun
[i
+ 1], &sinfo
->fun
[i
],
1716 (sinfo
->num_fun
- i
) * sizeof (sinfo
->fun
[i
]));
1717 else if (i
>= sinfo
->max_fun
)
1719 bfd_size_type amt
= sizeof (struct spu_elf_stack_info
);
1720 bfd_size_type old
= amt
;
1722 old
+= (sinfo
->max_fun
- 1) * sizeof (struct function_info
);
1723 sinfo
->max_fun
+= 20 + (sinfo
->max_fun
>> 1);
1724 amt
+= (sinfo
->max_fun
- 1) * sizeof (struct function_info
);
1725 sinfo
= bfd_realloc (sinfo
, amt
);
1728 memset ((char *) sinfo
+ old
, 0, amt
- old
);
1729 sec_data
->stack_info
= sinfo
;
1731 sinfo
->fun
[i
].is_func
= is_func
;
1732 sinfo
->fun
[i
].global
= global
;
1733 sinfo
->fun
[i
].sec
= sec
;
1735 sinfo
->fun
[i
].u
.h
= sym_h
;
1737 sinfo
->fun
[i
].u
.sym
= sym_h
;
1738 sinfo
->fun
[i
].lo
= off
;
1739 sinfo
->fun
[i
].hi
= off
+ size
;
1740 sinfo
->fun
[i
].stack
= -find_function_stack_adjust (sec
, off
);
1741 sinfo
->num_fun
+= 1;
1742 return &sinfo
->fun
[i
];
1745 /* Return the name of FUN. */
1748 func_name (struct function_info
*fun
)
1752 Elf_Internal_Shdr
*symtab_hdr
;
1754 while (fun
->start
!= NULL
)
1758 return fun
->u
.h
->root
.root
.string
;
1761 if (fun
->u
.sym
->st_name
== 0)
1763 size_t len
= strlen (sec
->name
);
1764 char *name
= bfd_malloc (len
+ 10);
1767 sprintf (name
, "%s+%lx", sec
->name
,
1768 (unsigned long) fun
->u
.sym
->st_value
& 0xffffffff);
1772 symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
1773 return bfd_elf_sym_name (ibfd
, symtab_hdr
, fun
->u
.sym
, sec
);
1776 /* Read the instruction at OFF in SEC. Return true iff the instruction
1777 is a nop, lnop, or stop 0 (all zero insn). */
1780 is_nop (asection
*sec
, bfd_vma off
)
1782 unsigned char insn
[4];
1784 if (off
+ 4 > sec
->size
1785 || !bfd_get_section_contents (sec
->owner
, sec
, insn
, off
, 4))
1787 if ((insn
[0] & 0xbf) == 0 && (insn
[1] & 0xe0) == 0x20)
1789 if (insn
[0] == 0 && insn
[1] == 0 && insn
[2] == 0 && insn
[3] == 0)
1794 /* Extend the range of FUN to cover nop padding up to LIMIT.
1795 Return TRUE iff some instruction other than a NOP was found. */
1798 insns_at_end (struct function_info
*fun
, bfd_vma limit
)
1800 bfd_vma off
= (fun
->hi
+ 3) & -4;
1802 while (off
< limit
&& is_nop (fun
->sec
, off
))
1813 /* Check and fix overlapping function ranges. Return TRUE iff there
1814 are gaps in the current info we have about functions in SEC. */
1817 check_function_ranges (asection
*sec
, struct bfd_link_info
*info
)
1819 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
1820 struct spu_elf_stack_info
*sinfo
= sec_data
->stack_info
;
1822 bfd_boolean gaps
= FALSE
;
1827 for (i
= 1; i
< sinfo
->num_fun
; i
++)
1828 if (sinfo
->fun
[i
- 1].hi
> sinfo
->fun
[i
].lo
)
1830 /* Fix overlapping symbols. */
1831 const char *f1
= func_name (&sinfo
->fun
[i
- 1]);
1832 const char *f2
= func_name (&sinfo
->fun
[i
]);
1834 info
->callbacks
->einfo (_("warning: %s overlaps %s\n"), f1
, f2
);
1835 sinfo
->fun
[i
- 1].hi
= sinfo
->fun
[i
].lo
;
1837 else if (insns_at_end (&sinfo
->fun
[i
- 1], sinfo
->fun
[i
].lo
))
1840 if (sinfo
->num_fun
== 0)
1844 if (sinfo
->fun
[0].lo
!= 0)
1846 if (sinfo
->fun
[sinfo
->num_fun
- 1].hi
> sec
->size
)
1848 const char *f1
= func_name (&sinfo
->fun
[sinfo
->num_fun
- 1]);
1850 info
->callbacks
->einfo (_("warning: %s exceeds section size\n"), f1
);
1851 sinfo
->fun
[sinfo
->num_fun
- 1].hi
= sec
->size
;
1853 else if (insns_at_end (&sinfo
->fun
[sinfo
->num_fun
- 1], sec
->size
))
1859 /* Search current function info for a function that contains address
1860 OFFSET in section SEC. */
1862 static struct function_info
*
1863 find_function (asection
*sec
, bfd_vma offset
, struct bfd_link_info
*info
)
1865 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
1866 struct spu_elf_stack_info
*sinfo
= sec_data
->stack_info
;
1870 hi
= sinfo
->num_fun
;
1873 mid
= (lo
+ hi
) / 2;
1874 if (offset
< sinfo
->fun
[mid
].lo
)
1876 else if (offset
>= sinfo
->fun
[mid
].hi
)
1879 return &sinfo
->fun
[mid
];
1881 info
->callbacks
->einfo (_("%A:0x%v not found in function table\n"),
1886 /* Add CALLEE to CALLER call list if not already present. */
1889 insert_callee (struct function_info
*caller
, struct call_info
*callee
)
1891 struct call_info
*p
;
1892 for (p
= caller
->call_list
; p
!= NULL
; p
= p
->next
)
1893 if (p
->fun
== callee
->fun
)
1895 /* Tail calls use less stack than normal calls. Retain entry
1896 for normal call over one for tail call. */
1897 if (p
->is_tail
> callee
->is_tail
)
1898 p
->is_tail
= callee
->is_tail
;
1901 callee
->next
= caller
->call_list
;
1902 caller
->call_list
= callee
;
1906 /* Rummage through the relocs for SEC, looking for function calls.
1907 If CALL_TREE is true, fill in call graph. If CALL_TREE is false,
1908 mark destination symbols on calls as being functions. Also
1909 look at branches, which may be tail calls or go to hot/cold
1910 section part of same function. */
1913 mark_functions_via_relocs (asection
*sec
,
1914 struct bfd_link_info
*info
,
1917 Elf_Internal_Rela
*internal_relocs
, *irelaend
, *irela
;
1918 Elf_Internal_Shdr
*symtab_hdr
= &elf_tdata (sec
->owner
)->symtab_hdr
;
1919 Elf_Internal_Sym
*syms
;
1921 static bfd_boolean warned
;
1923 internal_relocs
= _bfd_elf_link_read_relocs (sec
->owner
, sec
, NULL
, NULL
,
1925 if (internal_relocs
== NULL
)
1928 symtab_hdr
= &elf_tdata (sec
->owner
)->symtab_hdr
;
1929 psyms
= &symtab_hdr
->contents
;
1930 syms
= *(Elf_Internal_Sym
**) psyms
;
1931 irela
= internal_relocs
;
1932 irelaend
= irela
+ sec
->reloc_count
;
1933 for (; irela
< irelaend
; irela
++)
1935 enum elf_spu_reloc_type r_type
;
1936 unsigned int r_indx
;
1938 Elf_Internal_Sym
*sym
;
1939 struct elf_link_hash_entry
*h
;
1941 unsigned char insn
[4];
1942 bfd_boolean is_call
;
1943 struct function_info
*caller
;
1944 struct call_info
*callee
;
1946 r_type
= ELF32_R_TYPE (irela
->r_info
);
1947 if (r_type
!= R_SPU_REL16
1948 && r_type
!= R_SPU_ADDR16
)
1951 r_indx
= ELF32_R_SYM (irela
->r_info
);
1952 if (!get_sym_h (&h
, &sym
, &sym_sec
, psyms
, r_indx
, sec
->owner
))
1956 || sym_sec
->output_section
== NULL
1957 || sym_sec
->output_section
->owner
!= sec
->output_section
->owner
)
1960 if (!bfd_get_section_contents (sec
->owner
, sec
, insn
,
1961 irela
->r_offset
, 4))
1963 if (!is_branch (insn
))
1966 if ((sym_sec
->flags
& (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
))
1967 != (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
))
1971 if (!call_tree
|| !warned
)
1972 info
->callbacks
->einfo (_("%B(%A+0x%v): call to non-code section"
1973 " %B(%A), stack analysis incomplete\n"),
1974 sec
->owner
, sec
, irela
->r_offset
,
1975 sym_sec
->owner
, sym_sec
);
1979 is_call
= (insn
[0] & 0xfd) == 0x31;
1982 val
= h
->root
.u
.def
.value
;
1984 val
= sym
->st_value
;
1985 val
+= irela
->r_addend
;
1989 struct function_info
*fun
;
1991 if (irela
->r_addend
!= 0)
1993 Elf_Internal_Sym
*fake
= bfd_zmalloc (sizeof (*fake
));
1996 fake
->st_value
= val
;
1998 = _bfd_elf_section_from_bfd_section (sym_sec
->owner
, sym_sec
);
2002 fun
= maybe_insert_function (sym_sec
, sym
, FALSE
, is_call
);
2004 fun
= maybe_insert_function (sym_sec
, h
, TRUE
, is_call
);
2007 if (irela
->r_addend
!= 0
2008 && fun
->u
.sym
!= sym
)
2013 caller
= find_function (sec
, irela
->r_offset
, info
);
2016 callee
= bfd_malloc (sizeof *callee
);
2020 callee
->fun
= find_function (sym_sec
, val
, info
);
2021 if (callee
->fun
== NULL
)
2023 callee
->is_tail
= !is_call
;
2024 if (!insert_callee (caller
, callee
))
2027 && !callee
->fun
->is_func
2028 && callee
->fun
->stack
== 0)
2030 /* This is either a tail call or a branch from one part of
2031 the function to another, ie. hot/cold section. If the
2032 destination has been called by some other function then
2033 it is a separate function. We also assume that functions
2034 are not split across input files. */
2035 if (callee
->fun
->start
!= NULL
2036 || sec
->owner
!= sym_sec
->owner
)
2038 callee
->fun
->start
= NULL
;
2039 callee
->fun
->is_func
= TRUE
;
2042 callee
->fun
->start
= caller
;
2049 /* Handle something like .init or .fini, which has a piece of a function.
2050 These sections are pasted together to form a single function. */
2053 pasted_function (asection
*sec
, struct bfd_link_info
*info
)
2055 struct bfd_link_order
*l
;
2056 struct _spu_elf_section_data
*sec_data
;
2057 struct spu_elf_stack_info
*sinfo
;
2058 Elf_Internal_Sym
*fake
;
2059 struct function_info
*fun
, *fun_start
;
2061 fake
= bfd_zmalloc (sizeof (*fake
));
2065 fake
->st_size
= sec
->size
;
2067 = _bfd_elf_section_from_bfd_section (sec
->owner
, sec
);
2068 fun
= maybe_insert_function (sec
, fake
, FALSE
, FALSE
);
2072 /* Find a function immediately preceding this section. */
2074 for (l
= sec
->output_section
->map_head
.link_order
; l
!= NULL
; l
= l
->next
)
2076 if (l
->u
.indirect
.section
== sec
)
2078 if (fun_start
!= NULL
)
2080 if (fun_start
->start
)
2081 fun_start
= fun_start
->start
;
2082 fun
->start
= fun_start
;
2086 if (l
->type
== bfd_indirect_link_order
2087 && (sec_data
= spu_elf_section_data (l
->u
.indirect
.section
)) != NULL
2088 && (sinfo
= sec_data
->stack_info
) != NULL
2089 && sinfo
->num_fun
!= 0)
2090 fun_start
= &sinfo
->fun
[sinfo
->num_fun
- 1];
2093 info
->callbacks
->einfo (_("%A link_order not found\n"), sec
);
2097 /* We're only interested in code sections. */
2100 interesting_section (asection
*s
, bfd
*obfd
, struct spu_link_hash_table
*htab
)
2102 return (s
!= htab
->stub
2103 && s
->output_section
!= NULL
2104 && s
->output_section
->owner
== obfd
2105 && ((s
->flags
& (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
))
2106 == (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
))
2110 /* Map address ranges in code sections to functions. */
2113 discover_functions (bfd
*output_bfd
, struct bfd_link_info
*info
)
2115 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
2118 Elf_Internal_Sym
***psym_arr
;
2119 asection
***sec_arr
;
2120 bfd_boolean gaps
= FALSE
;
2123 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
2126 psym_arr
= bfd_zmalloc (bfd_idx
* sizeof (*psym_arr
));
2127 if (psym_arr
== NULL
)
2129 sec_arr
= bfd_zmalloc (bfd_idx
* sizeof (*sec_arr
));
2130 if (sec_arr
== NULL
)
2134 for (ibfd
= info
->input_bfds
, bfd_idx
= 0;
2136 ibfd
= ibfd
->link_next
, bfd_idx
++)
2138 extern const bfd_target bfd_elf32_spu_vec
;
2139 Elf_Internal_Shdr
*symtab_hdr
;
2142 Elf_Internal_Sym
*syms
, *sy
, **psyms
, **psy
;
2143 asection
**psecs
, **p
;
2145 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
2148 /* Read all the symbols. */
2149 symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
2150 symcount
= symtab_hdr
->sh_size
/ symtab_hdr
->sh_entsize
;
2154 syms
= (Elf_Internal_Sym
*) symtab_hdr
->contents
;
2157 syms
= bfd_elf_get_elf_syms (ibfd
, symtab_hdr
, symcount
, 0,
2159 symtab_hdr
->contents
= (void *) syms
;
2164 /* Select defined function symbols that are going to be output. */
2165 psyms
= bfd_malloc ((symcount
+ 1) * sizeof (*psyms
));
2168 psym_arr
[bfd_idx
] = psyms
;
2169 psecs
= bfd_malloc (symcount
* sizeof (*psecs
));
2172 sec_arr
[bfd_idx
] = psecs
;
2173 for (psy
= psyms
, p
= psecs
, sy
= syms
; sy
< syms
+ symcount
; ++p
, ++sy
)
2174 if (ELF_ST_TYPE (sy
->st_info
) == STT_NOTYPE
2175 || ELF_ST_TYPE (sy
->st_info
) == STT_FUNC
)
2179 *p
= s
= bfd_section_from_elf_index (ibfd
, sy
->st_shndx
);
2180 if (s
!= NULL
&& interesting_section (s
, output_bfd
, htab
))
2183 symcount
= psy
- psyms
;
2186 /* Sort them by section and offset within section. */
2187 sort_syms_syms
= syms
;
2188 sort_syms_psecs
= psecs
;
2189 qsort (psyms
, symcount
, sizeof (*psyms
), sort_syms
);
2191 /* Now inspect the function symbols. */
2192 for (psy
= psyms
; psy
< psyms
+ symcount
; )
2194 asection
*s
= psecs
[*psy
- syms
];
2195 Elf_Internal_Sym
**psy2
;
2197 for (psy2
= psy
; ++psy2
< psyms
+ symcount
; )
2198 if (psecs
[*psy2
- syms
] != s
)
2201 if (!alloc_stack_info (s
, psy2
- psy
))
2206 /* First install info about properly typed and sized functions.
2207 In an ideal world this will cover all code sections, except
2208 when partitioning functions into hot and cold sections,
2209 and the horrible pasted together .init and .fini functions. */
2210 for (psy
= psyms
; psy
< psyms
+ symcount
; ++psy
)
2213 if (ELF_ST_TYPE (sy
->st_info
) == STT_FUNC
)
2215 asection
*s
= psecs
[sy
- syms
];
2216 if (!maybe_insert_function (s
, sy
, FALSE
, TRUE
))
2221 for (sec
= ibfd
->sections
; sec
!= NULL
&& !gaps
; sec
= sec
->next
)
2222 if (interesting_section (sec
, output_bfd
, htab
))
2223 gaps
|= check_function_ranges (sec
, info
);
2228 /* See if we can discover more function symbols by looking at
2230 for (ibfd
= info
->input_bfds
, bfd_idx
= 0;
2232 ibfd
= ibfd
->link_next
, bfd_idx
++)
2236 if (psym_arr
[bfd_idx
] == NULL
)
2239 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
2240 if (interesting_section (sec
, output_bfd
, htab
)
2241 && sec
->reloc_count
!= 0)
2243 if (!mark_functions_via_relocs (sec
, info
, FALSE
))
2248 for (ibfd
= info
->input_bfds
, bfd_idx
= 0;
2250 ibfd
= ibfd
->link_next
, bfd_idx
++)
2252 Elf_Internal_Shdr
*symtab_hdr
;
2254 Elf_Internal_Sym
*syms
, *sy
, **psyms
, **psy
;
2257 if ((psyms
= psym_arr
[bfd_idx
]) == NULL
)
2260 psecs
= sec_arr
[bfd_idx
];
2262 symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
2263 syms
= (Elf_Internal_Sym
*) symtab_hdr
->contents
;
2266 for (sec
= ibfd
->sections
; sec
!= NULL
&& !gaps
; sec
= sec
->next
)
2267 if (interesting_section (sec
, output_bfd
, htab
))
2268 gaps
|= check_function_ranges (sec
, info
);
2272 /* Finally, install all globals. */
2273 for (psy
= psyms
; (sy
= *psy
) != NULL
; ++psy
)
2277 s
= psecs
[sy
- syms
];
2279 /* Global syms might be improperly typed functions. */
2280 if (ELF_ST_TYPE (sy
->st_info
) != STT_FUNC
2281 && ELF_ST_BIND (sy
->st_info
) == STB_GLOBAL
)
2283 if (!maybe_insert_function (s
, sy
, FALSE
, FALSE
))
2288 /* Some of the symbols we've installed as marking the
2289 beginning of functions may have a size of zero. Extend
2290 the range of such functions to the beginning of the
2291 next symbol of interest. */
2292 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
2293 if (interesting_section (sec
, output_bfd
, htab
))
2295 struct _spu_elf_section_data
*sec_data
;
2296 struct spu_elf_stack_info
*sinfo
;
2298 sec_data
= spu_elf_section_data (sec
);
2299 sinfo
= sec_data
->stack_info
;
2303 bfd_vma hi
= sec
->size
;
2305 for (fun_idx
= sinfo
->num_fun
; --fun_idx
>= 0; )
2307 sinfo
->fun
[fun_idx
].hi
= hi
;
2308 hi
= sinfo
->fun
[fun_idx
].lo
;
2311 /* No symbols in this section. Must be .init or .fini
2312 or something similar. */
2313 else if (!pasted_function (sec
, info
))
2319 for (ibfd
= info
->input_bfds
, bfd_idx
= 0;
2321 ibfd
= ibfd
->link_next
, bfd_idx
++)
2323 if (psym_arr
[bfd_idx
] == NULL
)
2326 free (psym_arr
[bfd_idx
]);
2327 free (sec_arr
[bfd_idx
]);
2336 /* Mark nodes in the call graph that are called by some other node. */
2339 mark_non_root (struct function_info
*fun
)
2341 struct call_info
*call
;
2344 for (call
= fun
->call_list
; call
; call
= call
->next
)
2346 call
->fun
->non_root
= TRUE
;
2347 if (!call
->fun
->visit1
)
2348 mark_non_root (call
->fun
);
2352 /* Remove cycles from the call graph. */
2355 call_graph_traverse (struct function_info
*fun
, struct bfd_link_info
*info
)
2357 struct call_info
**callp
, *call
;
2360 fun
->marking
= TRUE
;
2362 callp
= &fun
->call_list
;
2363 while ((call
= *callp
) != NULL
)
2365 if (!call
->fun
->visit2
)
2366 call_graph_traverse (call
->fun
, info
);
2367 else if (call
->fun
->marking
)
2369 const char *f1
= func_name (fun
);
2370 const char *f2
= func_name (call
->fun
);
2372 info
->callbacks
->info (_("Stack analysis will ignore the call "
2375 *callp
= call
->next
;
2378 callp
= &call
->next
;
2380 fun
->marking
= FALSE
;
2383 /* Populate call_list for each function. */
2386 build_call_tree (bfd
*output_bfd
, struct bfd_link_info
*info
)
2388 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
2391 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
2393 extern const bfd_target bfd_elf32_spu_vec
;
2396 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
2399 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
2401 if (!interesting_section (sec
, output_bfd
, htab
)
2402 || sec
->reloc_count
== 0)
2405 if (!mark_functions_via_relocs (sec
, info
, TRUE
))
2409 /* Transfer call info from hot/cold section part of function
2411 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
2413 struct _spu_elf_section_data
*sec_data
;
2414 struct spu_elf_stack_info
*sinfo
;
2416 if ((sec_data
= spu_elf_section_data (sec
)) != NULL
2417 && (sinfo
= sec_data
->stack_info
) != NULL
)
2420 for (i
= 0; i
< sinfo
->num_fun
; ++i
)
2422 if (sinfo
->fun
[i
].start
!= NULL
)
2424 struct call_info
*call
= sinfo
->fun
[i
].call_list
;
2426 while (call
!= NULL
)
2428 struct call_info
*call_next
= call
->next
;
2429 if (!insert_callee (sinfo
->fun
[i
].start
, call
))
2433 sinfo
->fun
[i
].call_list
= NULL
;
2434 sinfo
->fun
[i
].non_root
= TRUE
;
2441 /* Find the call graph root(s). */
2442 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
2444 extern const bfd_target bfd_elf32_spu_vec
;
2447 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
2450 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
2452 struct _spu_elf_section_data
*sec_data
;
2453 struct spu_elf_stack_info
*sinfo
;
2455 if ((sec_data
= spu_elf_section_data (sec
)) != NULL
2456 && (sinfo
= sec_data
->stack_info
) != NULL
)
2459 for (i
= 0; i
< sinfo
->num_fun
; ++i
)
2460 if (!sinfo
->fun
[i
].visit1
)
2461 mark_non_root (&sinfo
->fun
[i
]);
2466 /* Remove cycles from the call graph. We start from the root node(s)
2467 so that we break cycles in a reasonable place. */
2468 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
2470 extern const bfd_target bfd_elf32_spu_vec
;
2473 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
2476 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
2478 struct _spu_elf_section_data
*sec_data
;
2479 struct spu_elf_stack_info
*sinfo
;
2481 if ((sec_data
= spu_elf_section_data (sec
)) != NULL
2482 && (sinfo
= sec_data
->stack_info
) != NULL
)
2485 for (i
= 0; i
< sinfo
->num_fun
; ++i
)
2486 if (!sinfo
->fun
[i
].non_root
)
2487 call_graph_traverse (&sinfo
->fun
[i
], info
);
2495 /* Descend the call graph for FUN, accumulating total stack required. */
2498 sum_stack (struct function_info
*fun
,
2499 struct bfd_link_info
*info
,
2500 int emit_stack_syms
)
2502 struct call_info
*call
;
2503 struct function_info
*max
= NULL
;
2504 bfd_vma max_stack
= fun
->stack
;
2511 for (call
= fun
->call_list
; call
; call
= call
->next
)
2513 stack
= sum_stack (call
->fun
, info
, emit_stack_syms
);
2514 /* Include caller stack for normal calls, don't do so for
2515 tail calls. fun->stack here is local stack usage for
2518 stack
+= fun
->stack
;
2519 if (max_stack
< stack
)
2526 f1
= func_name (fun
);
2527 info
->callbacks
->minfo (_("%s: 0x%v 0x%v\n"),
2528 f1
, (bfd_vma
) fun
->stack
, max_stack
);
2532 info
->callbacks
->minfo (_(" calls:\n"));
2533 for (call
= fun
->call_list
; call
; call
= call
->next
)
2535 const char *f2
= func_name (call
->fun
);
2536 const char *ann1
= call
->fun
== max
? "*" : " ";
2537 const char *ann2
= call
->is_tail
? "t" : " ";
2539 info
->callbacks
->minfo (_(" %s%s %s\n"), ann1
, ann2
, f2
);
2543 /* Now fun->stack holds cumulative stack. */
2544 fun
->stack
= max_stack
;
2547 if (emit_stack_syms
)
2549 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
2550 char *name
= bfd_malloc (18 + strlen (f1
));
2551 struct elf_link_hash_entry
*h
;
2555 if (fun
->global
|| ELF_ST_BIND (fun
->u
.sym
->st_info
) == STB_GLOBAL
)
2556 sprintf (name
, "__stack_%s", f1
);
2558 sprintf (name
, "__stack_%x_%s", fun
->sec
->id
& 0xffffffff, f1
);
2560 h
= elf_link_hash_lookup (&htab
->elf
, name
, TRUE
, TRUE
, FALSE
);
2563 && (h
->root
.type
== bfd_link_hash_new
2564 || h
->root
.type
== bfd_link_hash_undefined
2565 || h
->root
.type
== bfd_link_hash_undefweak
))
2567 h
->root
.type
= bfd_link_hash_defined
;
2568 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
2569 h
->root
.u
.def
.value
= max_stack
;
2574 h
->ref_regular_nonweak
= 1;
2575 h
->forced_local
= 1;
2584 /* Provide an estimate of total stack required. */
2587 spu_elf_stack_analysis (bfd
*output_bfd
,
2588 struct bfd_link_info
*info
,
2589 int emit_stack_syms
)
2592 bfd_vma max_stack
= 0;
2594 if (!discover_functions (output_bfd
, info
))
2597 if (!build_call_tree (output_bfd
, info
))
2600 info
->callbacks
->info (_("Stack size for call graph root nodes.\n"));
2601 info
->callbacks
->minfo (_("\nStack size for functions. "
2602 "Annotations: '*' max stack, 't' tail call\n"));
2603 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
2605 extern const bfd_target bfd_elf32_spu_vec
;
2608 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
2611 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
2613 struct _spu_elf_section_data
*sec_data
;
2614 struct spu_elf_stack_info
*sinfo
;
2616 if ((sec_data
= spu_elf_section_data (sec
)) != NULL
2617 && (sinfo
= sec_data
->stack_info
) != NULL
)
2620 for (i
= 0; i
< sinfo
->num_fun
; ++i
)
2622 if (!sinfo
->fun
[i
].non_root
)
2627 stack
= sum_stack (&sinfo
->fun
[i
], info
,
2629 f1
= func_name (&sinfo
->fun
[i
]);
2630 info
->callbacks
->info (_(" %s: 0x%v\n"),
2632 if (max_stack
< stack
)
2640 info
->callbacks
->info (_("Maximum stack required is 0x%v\n"), max_stack
);
2644 /* Perform a final link. */
2647 spu_elf_final_link (bfd
*output_bfd
, struct bfd_link_info
*info
)
2649 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
2651 if (htab
->stack_analysis
2652 && !spu_elf_stack_analysis (output_bfd
, info
, htab
->emit_stack_syms
))
2653 info
->callbacks
->einfo ("%X%P: stack analysis error: %E\n");
2655 return bfd_elf_final_link (output_bfd
, info
);
2658 /* Called when not normally emitting relocs, ie. !info->relocatable
2659 and !info->emitrelocations. Returns a count of special relocs
2660 that need to be emitted. */
2663 spu_elf_count_relocs (asection
*sec
, Elf_Internal_Rela
*relocs
)
2665 unsigned int count
= 0;
2666 Elf_Internal_Rela
*relend
= relocs
+ sec
->reloc_count
;
2668 for (; relocs
< relend
; relocs
++)
2670 int r_type
= ELF32_R_TYPE (relocs
->r_info
);
2671 if (r_type
== R_SPU_PPU32
|| r_type
== R_SPU_PPU64
)
2678 /* Apply RELOCS to CONTENTS of INPUT_SECTION from INPUT_BFD. */
2681 spu_elf_relocate_section (bfd
*output_bfd
,
2682 struct bfd_link_info
*info
,
2684 asection
*input_section
,
2686 Elf_Internal_Rela
*relocs
,
2687 Elf_Internal_Sym
*local_syms
,
2688 asection
**local_sections
)
2690 Elf_Internal_Shdr
*symtab_hdr
;
2691 struct elf_link_hash_entry
**sym_hashes
;
2692 Elf_Internal_Rela
*rel
, *relend
;
2693 struct spu_link_hash_table
*htab
;
2694 bfd_boolean ret
= TRUE
;
2695 bfd_boolean emit_these_relocs
= FALSE
;
2697 htab
= spu_hash_table (info
);
2698 symtab_hdr
= &elf_tdata (input_bfd
)->symtab_hdr
;
2699 sym_hashes
= (struct elf_link_hash_entry
**) (elf_sym_hashes (input_bfd
));
2702 relend
= relocs
+ input_section
->reloc_count
;
2703 for (; rel
< relend
; rel
++)
2706 reloc_howto_type
*howto
;
2707 unsigned long r_symndx
;
2708 Elf_Internal_Sym
*sym
;
2710 struct elf_link_hash_entry
*h
;
2711 const char *sym_name
;
2714 bfd_reloc_status_type r
;
2715 bfd_boolean unresolved_reloc
;
2719 r_symndx
= ELF32_R_SYM (rel
->r_info
);
2720 r_type
= ELF32_R_TYPE (rel
->r_info
);
2721 if (r_type
== R_SPU_PPU32
|| r_type
== R_SPU_PPU64
)
2723 emit_these_relocs
= TRUE
;
2727 howto
= elf_howto_table
+ r_type
;
2728 unresolved_reloc
= FALSE
;
2733 if (r_symndx
< symtab_hdr
->sh_info
)
2735 sym
= local_syms
+ r_symndx
;
2736 sec
= local_sections
[r_symndx
];
2737 sym_name
= bfd_elf_sym_name (input_bfd
, symtab_hdr
, sym
, sec
);
2738 relocation
= _bfd_elf_rela_local_sym (output_bfd
, sym
, &sec
, rel
);
2742 RELOC_FOR_GLOBAL_SYMBOL (info
, input_bfd
, input_section
, rel
,
2743 r_symndx
, symtab_hdr
, sym_hashes
,
2745 unresolved_reloc
, warned
);
2746 sym_name
= h
->root
.root
.string
;
2749 if (sec
!= NULL
&& elf_discarded_section (sec
))
2751 /* For relocs against symbols from removed linkonce sections,
2752 or sections discarded by a linker script, we just want the
2753 section contents zeroed. Avoid any special processing. */
2754 _bfd_clear_contents (howto
, input_bfd
, contents
+ rel
->r_offset
);
2760 if (info
->relocatable
)
2763 if (unresolved_reloc
)
2765 (*_bfd_error_handler
)
2766 (_("%B(%s+0x%lx): unresolvable %s relocation against symbol `%s'"),
2768 bfd_get_section_name (input_bfd
, input_section
),
2769 (long) rel
->r_offset
,
2775 /* If this symbol is in an overlay area, we may need to relocate
2776 to the overlay stub. */
2777 addend
= rel
->r_addend
;
2778 branch
= (is_branch (contents
+ rel
->r_offset
)
2779 || is_hint (contents
+ rel
->r_offset
));
2780 if (needs_ovl_stub (sym_name
, sec
, input_section
, htab
, branch
))
2783 struct spu_stub_hash_entry
*sh
;
2785 stub_name
= spu_stub_name (sec
, h
, rel
);
2786 if (stub_name
== NULL
)
2789 sh
= (struct spu_stub_hash_entry
*)
2790 bfd_hash_lookup (&htab
->stub_hash_table
, stub_name
, FALSE
, FALSE
);
2793 relocation
= (htab
->stub
->output_section
->vma
2794 + htab
->stub
->output_offset
2801 r
= _bfd_final_link_relocate (howto
,
2805 rel
->r_offset
, relocation
, addend
);
2807 if (r
!= bfd_reloc_ok
)
2809 const char *msg
= (const char *) 0;
2813 case bfd_reloc_overflow
:
2814 if (!((*info
->callbacks
->reloc_overflow
)
2815 (info
, (h
? &h
->root
: NULL
), sym_name
, howto
->name
,
2816 (bfd_vma
) 0, input_bfd
, input_section
, rel
->r_offset
)))
2820 case bfd_reloc_undefined
:
2821 if (!((*info
->callbacks
->undefined_symbol
)
2822 (info
, sym_name
, input_bfd
, input_section
,
2823 rel
->r_offset
, TRUE
)))
2827 case bfd_reloc_outofrange
:
2828 msg
= _("internal error: out of range error");
2831 case bfd_reloc_notsupported
:
2832 msg
= _("internal error: unsupported relocation error");
2835 case bfd_reloc_dangerous
:
2836 msg
= _("internal error: dangerous error");
2840 msg
= _("internal error: unknown error");
2844 if (!((*info
->callbacks
->warning
)
2845 (info
, msg
, sym_name
, input_bfd
, input_section
,
2854 && emit_these_relocs
2855 && !info
->relocatable
2856 && !info
->emitrelocations
)
2858 Elf_Internal_Rela
*wrel
;
2859 Elf_Internal_Shdr
*rel_hdr
;
2861 wrel
= rel
= relocs
;
2862 relend
= relocs
+ input_section
->reloc_count
;
2863 for (; rel
< relend
; rel
++)
2867 r_type
= ELF32_R_TYPE (rel
->r_info
);
2868 if (r_type
== R_SPU_PPU32
|| r_type
== R_SPU_PPU64
)
2871 input_section
->reloc_count
= wrel
- relocs
;
2872 /* Backflips for _bfd_elf_link_output_relocs. */
2873 rel_hdr
= &elf_section_data (input_section
)->rel_hdr
;
2874 rel_hdr
->sh_size
= input_section
->reloc_count
* rel_hdr
->sh_entsize
;
2881 /* Adjust _SPUEAR_ syms to point at their overlay stubs. */
2884 spu_elf_output_symbol_hook (struct bfd_link_info
*info
,
2885 const char *sym_name ATTRIBUTE_UNUSED
,
2886 Elf_Internal_Sym
*sym
,
2887 asection
*sym_sec ATTRIBUTE_UNUSED
,
2888 struct elf_link_hash_entry
*h
)
2890 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
2892 if (!info
->relocatable
2893 && htab
->num_overlays
!= 0
2895 && (h
->root
.type
== bfd_link_hash_defined
2896 || h
->root
.type
== bfd_link_hash_defweak
)
2898 && strncmp (h
->root
.root
.string
, "_SPUEAR_", 8) == 0)
2900 static Elf_Internal_Rela zero_rel
;
2901 char *stub_name
= spu_stub_name (h
->root
.u
.def
.section
, h
, &zero_rel
);
2902 struct spu_stub_hash_entry
*sh
;
2904 if (stub_name
== NULL
)
2906 sh
= (struct spu_stub_hash_entry
*)
2907 bfd_hash_lookup (&htab
->stub_hash_table
, stub_name
, FALSE
, FALSE
);
2912 = _bfd_elf_section_from_bfd_section (htab
->stub
->output_section
->owner
,
2913 htab
->stub
->output_section
);
2914 sym
->st_value
= (htab
->stub
->output_section
->vma
2915 + htab
->stub
->output_offset
/* Nonzero when linking a SPU "plugin" (an embedded SPU image loaded
   by PPU code); set from the linker via spu_elf_plugin.  */
static int spu_plugin = 0;

/* Record whether we are producing a plugin.  VAL is nonzero for a
   plugin; it changes the ELF header e_type written later.  */

void
spu_elf_plugin (int val)
{
  spu_plugin = val;
}
2930 /* Set ELF header e_type for plugins. */
2933 spu_elf_post_process_headers (bfd
*abfd
,
2934 struct bfd_link_info
*info ATTRIBUTE_UNUSED
)
2938 Elf_Internal_Ehdr
*i_ehdrp
= elf_elfheader (abfd
);
2940 i_ehdrp
->e_type
= ET_DYN
;
2944 /* We may add an extra PT_LOAD segment for .toe. We also need extra
2945 segments for overlays. */
2948 spu_elf_additional_program_headers (bfd
*abfd
, struct bfd_link_info
*info
)
2950 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
2951 int extra
= htab
->num_overlays
;
2957 sec
= bfd_get_section_by_name (abfd
, ".toe");
2958 if (sec
!= NULL
&& (sec
->flags
& SEC_LOAD
) != 0)
2964 /* Remove .toe section from other PT_LOAD segments and put it in
2965 a segment of its own. Put overlays in separate segments too. */
2968 spu_elf_modify_segment_map (bfd
*abfd
, struct bfd_link_info
*info
)
2971 struct elf_segment_map
*m
;
2977 toe
= bfd_get_section_by_name (abfd
, ".toe");
2978 for (m
= elf_tdata (abfd
)->segment_map
; m
!= NULL
; m
= m
->next
)
2979 if (m
->p_type
== PT_LOAD
&& m
->count
> 1)
2980 for (i
= 0; i
< m
->count
; i
++)
2981 if ((s
= m
->sections
[i
]) == toe
2982 || spu_elf_section_data (s
)->ovl_index
!= 0)
2984 struct elf_segment_map
*m2
;
2987 if (i
+ 1 < m
->count
)
2989 amt
= sizeof (struct elf_segment_map
);
2990 amt
+= (m
->count
- (i
+ 2)) * sizeof (m
->sections
[0]);
2991 m2
= bfd_zalloc (abfd
, amt
);
2994 m2
->count
= m
->count
- (i
+ 1);
2995 memcpy (m2
->sections
, m
->sections
+ i
+ 1,
2996 m2
->count
* sizeof (m
->sections
[0]));
2997 m2
->p_type
= PT_LOAD
;
3005 amt
= sizeof (struct elf_segment_map
);
3006 m2
= bfd_zalloc (abfd
, amt
);
3009 m2
->p_type
= PT_LOAD
;
3011 m2
->sections
[0] = s
;
3021 /* Check that all loadable section VMAs lie in the range
3022 LO .. HI inclusive. */
3025 spu_elf_check_vma (bfd
*abfd
, bfd_vma lo
, bfd_vma hi
)
3027 struct elf_segment_map
*m
;
3030 for (m
= elf_tdata (abfd
)->segment_map
; m
!= NULL
; m
= m
->next
)
3031 if (m
->p_type
== PT_LOAD
)
3032 for (i
= 0; i
< m
->count
; i
++)
3033 if (m
->sections
[i
]->size
!= 0
3034 && (m
->sections
[i
]->vma
< lo
3035 || m
->sections
[i
]->vma
> hi
3036 || m
->sections
[i
]->vma
+ m
->sections
[i
]->size
- 1 > hi
))
3037 return m
->sections
[i
];
3042 /* Tweak the section type of .note.spu_name. */
3045 spu_elf_fake_sections (bfd
*obfd ATTRIBUTE_UNUSED
,
3046 Elf_Internal_Shdr
*hdr
,
3049 if (strcmp (sec
->name
, SPU_PTNOTE_SPUNAME
) == 0)
3050 hdr
->sh_type
= SHT_NOTE
;
3054 /* Tweak phdrs before writing them out. */
3057 spu_elf_modify_program_headers (bfd
*abfd
, struct bfd_link_info
*info
)
3059 const struct elf_backend_data
*bed
;
3060 struct elf_obj_tdata
*tdata
;
3061 Elf_Internal_Phdr
*phdr
, *last
;
3062 struct spu_link_hash_table
*htab
;
3069 bed
= get_elf_backend_data (abfd
);
3070 tdata
= elf_tdata (abfd
);
3072 count
= tdata
->program_header_size
/ bed
->s
->sizeof_phdr
;
3073 htab
= spu_hash_table (info
);
3074 if (htab
->num_overlays
!= 0)
3076 struct elf_segment_map
*m
;
3079 for (i
= 0, m
= elf_tdata (abfd
)->segment_map
; m
; ++i
, m
= m
->next
)
3081 && (o
= spu_elf_section_data (m
->sections
[0])->ovl_index
) != 0)
3083 /* Mark this as an overlay header. */
3084 phdr
[i
].p_flags
|= PF_OVERLAY
;
3086 if (htab
->ovtab
!= NULL
&& htab
->ovtab
->size
!= 0)
3088 bfd_byte
*p
= htab
->ovtab
->contents
;
3089 unsigned int off
= (o
- 1) * 16 + 8;
3091 /* Write file_off into _ovly_table. */
3092 bfd_put_32 (htab
->ovtab
->owner
, phdr
[i
].p_offset
, p
+ off
);
3097 /* Round up p_filesz and p_memsz of PT_LOAD segments to multiples
3098 of 16. This should always be possible when using the standard
3099 linker scripts, but don't create overlapping segments if
3100 someone is playing games with linker scripts. */
3102 for (i
= count
; i
-- != 0; )
3103 if (phdr
[i
].p_type
== PT_LOAD
)
3107 adjust
= -phdr
[i
].p_filesz
& 15;
3110 && phdr
[i
].p_offset
+ phdr
[i
].p_filesz
> last
->p_offset
- adjust
)
3113 adjust
= -phdr
[i
].p_memsz
& 15;
3116 && phdr
[i
].p_filesz
!= 0
3117 && phdr
[i
].p_vaddr
+ phdr
[i
].p_memsz
> last
->p_vaddr
- adjust
3118 && phdr
[i
].p_vaddr
+ phdr
[i
].p_memsz
<= last
->p_vaddr
)
3121 if (phdr
[i
].p_filesz
!= 0)
3125 if (i
== (unsigned int) -1)
3126 for (i
= count
; i
-- != 0; )
3127 if (phdr
[i
].p_type
== PT_LOAD
)
3131 adjust
= -phdr
[i
].p_filesz
& 15;
3132 phdr
[i
].p_filesz
+= adjust
;
3134 adjust
= -phdr
[i
].p_memsz
& 15;
3135 phdr
[i
].p_memsz
+= adjust
;
3141 #define TARGET_BIG_SYM bfd_elf32_spu_vec
3142 #define TARGET_BIG_NAME "elf32-spu"
3143 #define ELF_ARCH bfd_arch_spu
3144 #define ELF_MACHINE_CODE EM_SPU
3145 /* This matches the alignment need for DMA. */
3146 #define ELF_MAXPAGESIZE 0x80
3147 #define elf_backend_rela_normal 1
3148 #define elf_backend_can_gc_sections 1
3150 #define bfd_elf32_bfd_reloc_type_lookup spu_elf_reloc_type_lookup
3151 #define bfd_elf32_bfd_reloc_name_lookup spu_elf_reloc_name_lookup
3152 #define elf_info_to_howto spu_elf_info_to_howto
3153 #define elf_backend_count_relocs spu_elf_count_relocs
3154 #define elf_backend_relocate_section spu_elf_relocate_section
3155 #define elf_backend_symbol_processing spu_elf_backend_symbol_processing
3156 #define elf_backend_link_output_symbol_hook spu_elf_output_symbol_hook
3157 #define bfd_elf32_new_section_hook spu_elf_new_section_hook
3158 #define bfd_elf32_bfd_link_hash_table_create spu_elf_link_hash_table_create
3159 #define bfd_elf32_bfd_link_hash_table_free spu_elf_link_hash_table_free
3161 #define elf_backend_additional_program_headers spu_elf_additional_program_headers
3162 #define elf_backend_modify_segment_map spu_elf_modify_segment_map
3163 #define elf_backend_modify_program_headers spu_elf_modify_program_headers
3164 #define elf_backend_post_process_headers spu_elf_post_process_headers
3165 #define elf_backend_fake_sections spu_elf_fake_sections
3166 #define elf_backend_special_sections spu_elf_special_sections
3167 #define bfd_elf32_bfd_final_link spu_elf_final_link
3169 #include "elf32-target.h"