1 /* SPU specific support for 32-bit ELF
3 Copyright 2006, 2007, 2008 Free Software Foundation, Inc.
5 This file is part of BFD, the Binary File Descriptor library.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License along
18 with this program; if not, write to the Free Software Foundation, Inc.,
19 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */
27 #include "elf32-spu.h"
29 /* We use RELA style relocs. Don't define USE_REL. */
31 static bfd_reloc_status_type
spu_elf_rel9 (bfd
*, arelent
*, asymbol
*,
35 /* Values of type 'enum elf_spu_reloc_type' are used to index this
36 array, so it must be declared in the order of that type. */
38 static reloc_howto_type elf_howto_table
[] = {
39 HOWTO (R_SPU_NONE
, 0, 0, 0, FALSE
, 0, complain_overflow_dont
,
40 bfd_elf_generic_reloc
, "SPU_NONE",
41 FALSE
, 0, 0x00000000, FALSE
),
42 HOWTO (R_SPU_ADDR10
, 4, 2, 10, FALSE
, 14, complain_overflow_bitfield
,
43 bfd_elf_generic_reloc
, "SPU_ADDR10",
44 FALSE
, 0, 0x00ffc000, FALSE
),
45 HOWTO (R_SPU_ADDR16
, 2, 2, 16, FALSE
, 7, complain_overflow_bitfield
,
46 bfd_elf_generic_reloc
, "SPU_ADDR16",
47 FALSE
, 0, 0x007fff80, FALSE
),
48 HOWTO (R_SPU_ADDR16_HI
, 16, 2, 16, FALSE
, 7, complain_overflow_bitfield
,
49 bfd_elf_generic_reloc
, "SPU_ADDR16_HI",
50 FALSE
, 0, 0x007fff80, FALSE
),
51 HOWTO (R_SPU_ADDR16_LO
, 0, 2, 16, FALSE
, 7, complain_overflow_dont
,
52 bfd_elf_generic_reloc
, "SPU_ADDR16_LO",
53 FALSE
, 0, 0x007fff80, FALSE
),
54 HOWTO (R_SPU_ADDR18
, 0, 2, 18, FALSE
, 7, complain_overflow_bitfield
,
55 bfd_elf_generic_reloc
, "SPU_ADDR18",
56 FALSE
, 0, 0x01ffff80, FALSE
),
57 HOWTO (R_SPU_ADDR32
, 0, 2, 32, FALSE
, 0, complain_overflow_dont
,
58 bfd_elf_generic_reloc
, "SPU_ADDR32",
59 FALSE
, 0, 0xffffffff, FALSE
),
60 HOWTO (R_SPU_REL16
, 2, 2, 16, TRUE
, 7, complain_overflow_bitfield
,
61 bfd_elf_generic_reloc
, "SPU_REL16",
62 FALSE
, 0, 0x007fff80, TRUE
),
63 HOWTO (R_SPU_ADDR7
, 0, 2, 7, FALSE
, 14, complain_overflow_dont
,
64 bfd_elf_generic_reloc
, "SPU_ADDR7",
65 FALSE
, 0, 0x001fc000, FALSE
),
66 HOWTO (R_SPU_REL9
, 2, 2, 9, TRUE
, 0, complain_overflow_signed
,
67 spu_elf_rel9
, "SPU_REL9",
68 FALSE
, 0, 0x0180007f, TRUE
),
69 HOWTO (R_SPU_REL9I
, 2, 2, 9, TRUE
, 0, complain_overflow_signed
,
70 spu_elf_rel9
, "SPU_REL9I",
71 FALSE
, 0, 0x0000c07f, TRUE
),
72 HOWTO (R_SPU_ADDR10I
, 0, 2, 10, FALSE
, 14, complain_overflow_signed
,
73 bfd_elf_generic_reloc
, "SPU_ADDR10I",
74 FALSE
, 0, 0x00ffc000, FALSE
),
75 HOWTO (R_SPU_ADDR16I
, 0, 2, 16, FALSE
, 7, complain_overflow_signed
,
76 bfd_elf_generic_reloc
, "SPU_ADDR16I",
77 FALSE
, 0, 0x007fff80, FALSE
),
78 HOWTO (R_SPU_REL32
, 0, 2, 32, TRUE
, 0, complain_overflow_dont
,
79 bfd_elf_generic_reloc
, "SPU_REL32",
80 FALSE
, 0, 0xffffffff, TRUE
),
81 HOWTO (R_SPU_ADDR16X
, 0, 2, 16, FALSE
, 7, complain_overflow_bitfield
,
82 bfd_elf_generic_reloc
, "SPU_ADDR16X",
83 FALSE
, 0, 0x007fff80, FALSE
),
84 HOWTO (R_SPU_PPU32
, 0, 2, 32, FALSE
, 0, complain_overflow_dont
,
85 bfd_elf_generic_reloc
, "SPU_PPU32",
86 FALSE
, 0, 0xffffffff, FALSE
),
87 HOWTO (R_SPU_PPU64
, 0, 4, 64, FALSE
, 0, complain_overflow_dont
,
88 bfd_elf_generic_reloc
, "SPU_PPU64",
92 static struct bfd_elf_special_section
const spu_elf_special_sections
[] = {
93 { ".toe", 4, 0, SHT_NOBITS
, SHF_ALLOC
},
97 static enum elf_spu_reloc_type
98 spu_elf_bfd_to_reloc_type (bfd_reloc_code_real_type code
)
104 case BFD_RELOC_SPU_IMM10W
:
106 case BFD_RELOC_SPU_IMM16W
:
108 case BFD_RELOC_SPU_LO16
:
109 return R_SPU_ADDR16_LO
;
110 case BFD_RELOC_SPU_HI16
:
111 return R_SPU_ADDR16_HI
;
112 case BFD_RELOC_SPU_IMM18
:
114 case BFD_RELOC_SPU_PCREL16
:
116 case BFD_RELOC_SPU_IMM7
:
118 case BFD_RELOC_SPU_IMM8
:
120 case BFD_RELOC_SPU_PCREL9a
:
122 case BFD_RELOC_SPU_PCREL9b
:
124 case BFD_RELOC_SPU_IMM10
:
125 return R_SPU_ADDR10I
;
126 case BFD_RELOC_SPU_IMM16
:
127 return R_SPU_ADDR16I
;
130 case BFD_RELOC_32_PCREL
:
132 case BFD_RELOC_SPU_PPU32
:
134 case BFD_RELOC_SPU_PPU64
:
140 spu_elf_info_to_howto (bfd
*abfd ATTRIBUTE_UNUSED
,
142 Elf_Internal_Rela
*dst
)
144 enum elf_spu_reloc_type r_type
;
146 r_type
= (enum elf_spu_reloc_type
) ELF32_R_TYPE (dst
->r_info
);
147 BFD_ASSERT (r_type
< R_SPU_max
);
148 cache_ptr
->howto
= &elf_howto_table
[(int) r_type
];
151 static reloc_howto_type
*
152 spu_elf_reloc_type_lookup (bfd
*abfd ATTRIBUTE_UNUSED
,
153 bfd_reloc_code_real_type code
)
155 enum elf_spu_reloc_type r_type
= spu_elf_bfd_to_reloc_type (code
);
157 if (r_type
== R_SPU_NONE
)
160 return elf_howto_table
+ r_type
;
163 static reloc_howto_type
*
164 spu_elf_reloc_name_lookup (bfd
*abfd ATTRIBUTE_UNUSED
,
169 for (i
= 0; i
< sizeof (elf_howto_table
) / sizeof (elf_howto_table
[0]); i
++)
170 if (elf_howto_table
[i
].name
!= NULL
171 && strcasecmp (elf_howto_table
[i
].name
, r_name
) == 0)
172 return &elf_howto_table
[i
];
177 /* Apply R_SPU_REL9 and R_SPU_REL9I relocs. */
179 static bfd_reloc_status_type
180 spu_elf_rel9 (bfd
*abfd
, arelent
*reloc_entry
, asymbol
*symbol
,
181 void *data
, asection
*input_section
,
182 bfd
*output_bfd
, char **error_message
)
184 bfd_size_type octets
;
188 /* If this is a relocatable link (output_bfd test tells us), just
189 call the generic function. Any adjustment will be done at final
191 if (output_bfd
!= NULL
)
192 return bfd_elf_generic_reloc (abfd
, reloc_entry
, symbol
, data
,
193 input_section
, output_bfd
, error_message
);
195 if (reloc_entry
->address
> bfd_get_section_limit (abfd
, input_section
))
196 return bfd_reloc_outofrange
;
197 octets
= reloc_entry
->address
* bfd_octets_per_byte (abfd
);
199 /* Get symbol value. */
201 if (!bfd_is_com_section (symbol
->section
))
203 if (symbol
->section
->output_section
)
204 val
+= symbol
->section
->output_section
->vma
;
206 val
+= reloc_entry
->addend
;
208 /* Make it pc-relative. */
209 val
-= input_section
->output_section
->vma
+ input_section
->output_offset
;
212 if (val
+ 256 >= 512)
213 return bfd_reloc_overflow
;
215 insn
= bfd_get_32 (abfd
, (bfd_byte
*) data
+ octets
);
217 /* Move two high bits of value to REL9I and REL9 position.
218 The mask will take care of selecting the right field. */
219 val
= (val
& 0x7f) | ((val
& 0x180) << 7) | ((val
& 0x180) << 16);
220 insn
&= ~reloc_entry
->howto
->dst_mask
;
221 insn
|= val
& reloc_entry
->howto
->dst_mask
;
222 bfd_put_32 (abfd
, insn
, (bfd_byte
*) data
+ octets
);
227 spu_elf_new_section_hook (bfd
*abfd
, asection
*sec
)
229 if (!sec
->used_by_bfd
)
231 struct _spu_elf_section_data
*sdata
;
233 sdata
= bfd_zalloc (abfd
, sizeof (*sdata
));
236 sec
->used_by_bfd
= sdata
;
239 return _bfd_elf_new_section_hook (abfd
, sec
);
242 /* Specially mark defined symbols named _EAR_* with BSF_KEEP so that
243 strip --strip-unneeded will not remove them. */
246 spu_elf_backend_symbol_processing (bfd
*abfd ATTRIBUTE_UNUSED
, asymbol
*sym
)
248 if (sym
->name
!= NULL
249 && sym
->section
!= bfd_abs_section_ptr
250 && strncmp (sym
->name
, "_EAR_", 5) == 0)
251 sym
->flags
|= BSF_KEEP
;
254 /* SPU ELF linker hash table. */
256 struct spu_link_hash_table
258 struct elf_link_hash_table elf
;
260 /* Shortcuts to overlay sections. */
265 /* Count of stubs in each overlay section. */
266 unsigned int *stub_count
;
268 /* The stub section for each overlay section. */
271 struct elf_link_hash_entry
*ovly_load
;
272 struct elf_link_hash_entry
*ovly_return
;
273 unsigned long ovly_load_r_symndx
;
275 /* Number of overlay buffers. */
276 unsigned int num_buf
;
278 /* Total number of overlays. */
279 unsigned int num_overlays
;
281 /* Set if we should emit symbols for stubs. */
282 unsigned int emit_stub_syms
:1;
284 /* Set if we want stubs on calls out of overlay regions to
285 non-overlay regions. */
286 unsigned int non_overlay_stubs
: 1;
289 unsigned int stub_err
: 1;
291 /* Set if stack size analysis should be done. */
292 unsigned int stack_analysis
: 1;
294 /* Set if __stack_* syms will be emitted. */
295 unsigned int emit_stack_syms
: 1;
298 /* Hijack the generic got fields for overlay stub accounting. */
302 struct got_entry
*next
;
307 #define spu_hash_table(p) \
308 ((struct spu_link_hash_table *) ((p)->hash))
310 /* Create a spu ELF linker hash table. */
312 static struct bfd_link_hash_table
*
313 spu_elf_link_hash_table_create (bfd
*abfd
)
315 struct spu_link_hash_table
*htab
;
317 htab
= bfd_malloc (sizeof (*htab
));
321 if (!_bfd_elf_link_hash_table_init (&htab
->elf
, abfd
,
322 _bfd_elf_link_hash_newfunc
,
323 sizeof (struct elf_link_hash_entry
)))
329 memset (&htab
->ovtab
, 0,
330 sizeof (*htab
) - offsetof (struct spu_link_hash_table
, ovtab
));
332 htab
->elf
.init_got_refcount
.refcount
= 0;
333 htab
->elf
.init_got_refcount
.glist
= NULL
;
334 htab
->elf
.init_got_offset
.offset
= 0;
335 htab
->elf
.init_got_offset
.glist
= NULL
;
336 return &htab
->elf
.root
;
339 /* Find the symbol for the given R_SYMNDX in IBFD and set *HP and *SYMP
340 to (hash, NULL) for global symbols, and (NULL, sym) for locals. Set
341 *SYMSECP to the symbol's section. *LOCSYMSP caches local syms. */
344 get_sym_h (struct elf_link_hash_entry
**hp
,
345 Elf_Internal_Sym
**symp
,
347 Elf_Internal_Sym
**locsymsp
,
348 unsigned long r_symndx
,
351 Elf_Internal_Shdr
*symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
353 if (r_symndx
>= symtab_hdr
->sh_info
)
355 struct elf_link_hash_entry
**sym_hashes
= elf_sym_hashes (ibfd
);
356 struct elf_link_hash_entry
*h
;
358 h
= sym_hashes
[r_symndx
- symtab_hdr
->sh_info
];
359 while (h
->root
.type
== bfd_link_hash_indirect
360 || h
->root
.type
== bfd_link_hash_warning
)
361 h
= (struct elf_link_hash_entry
*) h
->root
.u
.i
.link
;
371 asection
*symsec
= NULL
;
372 if (h
->root
.type
== bfd_link_hash_defined
373 || h
->root
.type
== bfd_link_hash_defweak
)
374 symsec
= h
->root
.u
.def
.section
;
380 Elf_Internal_Sym
*sym
;
381 Elf_Internal_Sym
*locsyms
= *locsymsp
;
385 locsyms
= (Elf_Internal_Sym
*) symtab_hdr
->contents
;
388 size_t symcount
= symtab_hdr
->sh_info
;
390 /* If we are reading symbols into the contents, then
391 read the global syms too. This is done to cache
392 syms for later stack analysis. */
393 if ((unsigned char **) locsymsp
== &symtab_hdr
->contents
)
394 symcount
= symtab_hdr
->sh_size
/ symtab_hdr
->sh_entsize
;
395 locsyms
= bfd_elf_get_elf_syms (ibfd
, symtab_hdr
, symcount
, 0,
402 sym
= locsyms
+ r_symndx
;
412 asection
*symsec
= NULL
;
413 if ((sym
->st_shndx
!= SHN_UNDEF
414 && sym
->st_shndx
< SHN_LORESERVE
)
415 || sym
->st_shndx
> SHN_HIRESERVE
)
416 symsec
= bfd_section_from_elf_index (ibfd
, sym
->st_shndx
);
424 /* Create the note section if not already present. This is done early so
425 that the linker maps the sections to the right place in the output. */
428 spu_elf_create_sections (bfd
*output_bfd
,
429 struct bfd_link_info
*info
,
434 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
436 /* Stash some options away where we can get at them later. */
437 htab
->stack_analysis
= stack_analysis
;
438 htab
->emit_stack_syms
= emit_stack_syms
;
440 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
441 if (bfd_get_section_by_name (ibfd
, SPU_PTNOTE_SPUNAME
) != NULL
)
446 /* Make SPU_PTNOTE_SPUNAME section. */
453 ibfd
= info
->input_bfds
;
454 flags
= SEC_LOAD
| SEC_READONLY
| SEC_HAS_CONTENTS
| SEC_IN_MEMORY
;
455 s
= bfd_make_section_anyway_with_flags (ibfd
, SPU_PTNOTE_SPUNAME
, flags
);
457 || !bfd_set_section_alignment (ibfd
, s
, 4))
460 name_len
= strlen (bfd_get_filename (output_bfd
)) + 1;
461 size
= 12 + ((sizeof (SPU_PLUGIN_NAME
) + 3) & -4);
462 size
+= (name_len
+ 3) & -4;
464 if (!bfd_set_section_size (ibfd
, s
, size
))
467 data
= bfd_zalloc (ibfd
, size
);
471 bfd_put_32 (ibfd
, sizeof (SPU_PLUGIN_NAME
), data
+ 0);
472 bfd_put_32 (ibfd
, name_len
, data
+ 4);
473 bfd_put_32 (ibfd
, 1, data
+ 8);
474 memcpy (data
+ 12, SPU_PLUGIN_NAME
, sizeof (SPU_PLUGIN_NAME
));
475 memcpy (data
+ 12 + ((sizeof (SPU_PLUGIN_NAME
) + 3) & -4),
476 bfd_get_filename (output_bfd
), name_len
);
483 /* qsort predicate to sort sections by vma. */
486 sort_sections (const void *a
, const void *b
)
488 const asection
*const *s1
= a
;
489 const asection
*const *s2
= b
;
490 bfd_signed_vma delta
= (*s1
)->vma
- (*s2
)->vma
;
493 return delta
< 0 ? -1 : 1;
495 return (*s1
)->index
- (*s2
)->index
;
498 /* Identify overlays in the output bfd, and number them. */
501 spu_elf_find_overlays (bfd
*output_bfd
, struct bfd_link_info
*info
)
503 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
504 asection
**alloc_sec
;
505 unsigned int i
, n
, ovl_index
, num_buf
;
509 if (output_bfd
->section_count
< 2)
512 alloc_sec
= bfd_malloc (output_bfd
->section_count
* sizeof (*alloc_sec
));
513 if (alloc_sec
== NULL
)
516 /* Pick out all the alloced sections. */
517 for (n
= 0, s
= output_bfd
->sections
; s
!= NULL
; s
= s
->next
)
518 if ((s
->flags
& SEC_ALLOC
) != 0
519 && (s
->flags
& (SEC_LOAD
| SEC_THREAD_LOCAL
)) != SEC_THREAD_LOCAL
529 /* Sort them by vma. */
530 qsort (alloc_sec
, n
, sizeof (*alloc_sec
), sort_sections
);
532 /* Look for overlapping vmas. Any with overlap must be overlays.
533 Count them. Also count the number of overlay regions. */
534 ovl_end
= alloc_sec
[0]->vma
+ alloc_sec
[0]->size
;
535 for (ovl_index
= 0, num_buf
= 0, i
= 1; i
< n
; i
++)
538 if (s
->vma
< ovl_end
)
540 asection
*s0
= alloc_sec
[i
- 1];
542 if (spu_elf_section_data (s0
)->u
.o
.ovl_index
== 0)
544 alloc_sec
[ovl_index
] = s0
;
545 spu_elf_section_data (s0
)->u
.o
.ovl_index
= ++ovl_index
;
546 spu_elf_section_data (s0
)->u
.o
.ovl_buf
= ++num_buf
;
548 alloc_sec
[ovl_index
] = s
;
549 spu_elf_section_data (s
)->u
.o
.ovl_index
= ++ovl_index
;
550 spu_elf_section_data (s
)->u
.o
.ovl_buf
= num_buf
;
551 if (s0
->vma
!= s
->vma
)
553 info
->callbacks
->einfo (_("%X%P: overlay sections %A and %A "
554 "do not start at the same address.\n"),
558 if (ovl_end
< s
->vma
+ s
->size
)
559 ovl_end
= s
->vma
+ s
->size
;
562 ovl_end
= s
->vma
+ s
->size
;
565 htab
->num_overlays
= ovl_index
;
566 htab
->num_buf
= num_buf
;
567 htab
->ovl_sec
= alloc_sec
;
568 return ovl_index
!= 0;
571 /* Support two sizes of overlay stubs, a slower more compact stub of two
572 intructions, and a faster stub of four instructions. */
573 #ifndef OVL_STUB_SIZE
574 /* Default to faster. */
575 #define OVL_STUB_SIZE 16
576 /* #define OVL_STUB_SIZE 8 */
578 #define BRSL 0x33000000
579 #define BR 0x32000000
580 #define NOP 0x40200000
581 #define LNOP 0x00200000
582 #define ILA 0x42000000
584 /* Return true for all relative and absolute branch instructions.
592 brhnz 00100011 0.. */
595 is_branch (const unsigned char *insn
)
597 return (insn
[0] & 0xec) == 0x20 && (insn
[1] & 0x80) == 0;
600 /* Return true for all indirect branch instructions.
608 bihnz 00100101 011 */
611 is_indirect_branch (const unsigned char *insn
)
613 return (insn
[0] & 0xef) == 0x25 && (insn
[1] & 0x80) == 0;
616 /* Return true for branch hint instructions.
621 is_hint (const unsigned char *insn
)
623 return (insn
[0] & 0xfc) == 0x10;
626 /* Return TRUE if this reloc symbol should possibly go via an overlay stub. */
629 needs_ovl_stub (const char *sym_name
,
631 asection
*input_section
,
632 struct spu_link_hash_table
*htab
,
633 bfd_boolean is_branch
)
635 if (htab
->num_overlays
== 0)
639 || sym_sec
->output_section
== NULL
640 || spu_elf_section_data (sym_sec
->output_section
) == NULL
)
643 /* setjmp always goes via an overlay stub, because then the return
644 and hence the longjmp goes via __ovly_return. That magically
645 makes setjmp/longjmp between overlays work. */
646 if (strncmp (sym_name
, "setjmp", 6) == 0
647 && (sym_name
[6] == '\0' || sym_name
[6] == '@'))
650 /* Usually, symbols in non-overlay sections don't need stubs. */
651 if (spu_elf_section_data (sym_sec
->output_section
)->u
.o
.ovl_index
== 0
652 && !htab
->non_overlay_stubs
)
655 /* A reference from some other section to a symbol in an overlay
656 section needs a stub. */
657 if (spu_elf_section_data (sym_sec
->output_section
)->u
.o
.ovl_index
658 != spu_elf_section_data (input_section
->output_section
)->u
.o
.ovl_index
)
661 /* If this insn isn't a branch then we are possibly taking the
662 address of a function and passing it out somehow. */
666 enum _insn_type
{ non_branch
, branch
, call
};
669 count_stub (struct spu_link_hash_table
*htab
,
672 enum _insn_type insn_type
,
673 struct elf_link_hash_entry
*h
,
674 const Elf_Internal_Rela
*irela
)
676 unsigned int ovl
= 0;
677 struct got_entry
*g
, **head
;
679 /* If this instruction is a branch or call, we need a stub
680 for it. One stub per function per overlay.
681 If it isn't a branch, then we are taking the address of
682 this function so need a stub in the non-overlay area
683 for it. One stub per function. */
684 if (insn_type
!= non_branch
)
685 ovl
= spu_elf_section_data (isec
->output_section
)->u
.o
.ovl_index
;
688 head
= &h
->got
.glist
;
691 if (elf_local_got_ents (ibfd
) == NULL
)
693 bfd_size_type amt
= (elf_tdata (ibfd
)->symtab_hdr
.sh_info
694 * sizeof (*elf_local_got_ents (ibfd
)));
695 elf_local_got_ents (ibfd
) = bfd_zmalloc (amt
);
696 if (elf_local_got_ents (ibfd
) == NULL
)
699 head
= elf_local_got_ents (ibfd
) + ELF32_R_SYM (irela
->r_info
);
702 /* If we have a stub in the non-overlay area then there's no need
703 for one in overlays. */
705 if (g
!= NULL
&& g
->ovl
== 0)
710 struct got_entry
*gnext
;
712 /* Need a new non-overlay area stub. Zap other stubs. */
713 for (; g
!= NULL
; g
= gnext
)
715 htab
->stub_count
[g
->ovl
] -= 1;
722 for (; g
!= NULL
; g
= g
->next
)
729 g
= bfd_malloc (sizeof *g
);
733 g
->stub_addr
= (bfd_vma
) -1;
737 htab
->stub_count
[ovl
] += 1;
743 /* Two instruction overlay stubs look like:
746 .word target_ovl_and_address
748 ovl_and_address is a word with the overlay number in the top 14 bits
749 and local store address in the bottom 18 bits.
751 Four instruction overlay stubs look like:
755 ila $79,target_address
759 build_stub (struct spu_link_hash_table
*htab
,
762 enum _insn_type insn_type
,
763 struct elf_link_hash_entry
*h
,
764 const Elf_Internal_Rela
*irela
,
769 struct got_entry
*g
, **head
;
771 bfd_vma val
, from
, to
;
774 if (insn_type
!= non_branch
)
775 ovl
= spu_elf_section_data (isec
->output_section
)->u
.o
.ovl_index
;
778 head
= &h
->got
.glist
;
780 head
= elf_local_got_ents (ibfd
) + ELF32_R_SYM (irela
->r_info
);
783 if (g
!= NULL
&& g
->ovl
== 0 && ovl
!= 0)
786 for (; g
!= NULL
; g
= g
->next
)
792 if (g
->stub_addr
!= (bfd_vma
) -1)
795 sec
= htab
->stub_sec
[ovl
];
796 dest
+= dest_sec
->output_offset
+ dest_sec
->output_section
->vma
;
797 from
= sec
->size
+ sec
->output_offset
+ sec
->output_section
->vma
;
799 to
= (htab
->ovly_load
->root
.u
.def
.value
800 + htab
->ovly_load
->root
.u
.def
.section
->output_offset
801 + htab
->ovly_load
->root
.u
.def
.section
->output_section
->vma
);
803 if (OVL_STUB_SIZE
== 16)
805 if (((dest
| to
| from
) & 3) != 0
806 || val
+ 0x20000 >= 0x40000)
811 ovl
= spu_elf_section_data (dest_sec
->output_section
)->u
.o
.ovl_index
;
813 if (OVL_STUB_SIZE
== 16)
815 bfd_put_32 (sec
->owner
, ILA
+ ((ovl
<< 7) & 0x01ffff80) + 78,
816 sec
->contents
+ sec
->size
);
817 bfd_put_32 (sec
->owner
, LNOP
,
818 sec
->contents
+ sec
->size
+ 4);
819 bfd_put_32 (sec
->owner
, ILA
+ ((dest
<< 7) & 0x01ffff80) + 79,
820 sec
->contents
+ sec
->size
+ 8);
821 bfd_put_32 (sec
->owner
, BR
+ ((val
<< 5) & 0x007fff80),
822 sec
->contents
+ sec
->size
+ 12);
824 else if (OVL_STUB_SIZE
== 8)
826 bfd_put_32 (sec
->owner
, BRSL
+ ((val
<< 5) & 0x007fff80) + 75,
827 sec
->contents
+ sec
->size
);
829 val
= (dest
& 0x3ffff) | (ovl
<< 14);
830 bfd_put_32 (sec
->owner
, val
,
831 sec
->contents
+ sec
->size
+ 4);
835 sec
->size
+= OVL_STUB_SIZE
;
837 if (htab
->emit_stub_syms
)
843 len
= 8 + sizeof (".ovl_call.") - 1;
845 len
+= strlen (h
->root
.root
.string
);
850 add
= (int) irela
->r_addend
& 0xffffffff;
853 name
= bfd_malloc (len
);
857 sprintf (name
, "%08x.ovl_call.", g
->ovl
);
859 strcpy (name
+ 8 + sizeof (".ovl_call.") - 1, h
->root
.root
.string
);
861 sprintf (name
+ 8 + sizeof (".ovl_call.") - 1, "%x:%x",
862 dest_sec
->id
& 0xffffffff,
863 (int) ELF32_R_SYM (irela
->r_info
) & 0xffffffff);
865 sprintf (name
+ len
- 9, "+%x", add
);
867 h
= elf_link_hash_lookup (&htab
->elf
, name
, TRUE
, TRUE
, FALSE
);
871 if (h
->root
.type
== bfd_link_hash_new
)
873 h
->root
.type
= bfd_link_hash_defined
;
874 h
->root
.u
.def
.section
= sec
;
875 h
->root
.u
.def
.value
= sec
->size
- OVL_STUB_SIZE
;
876 h
->size
= OVL_STUB_SIZE
;
880 h
->ref_regular_nonweak
= 1;
889 /* Called via elf_link_hash_traverse to allocate stubs for any _SPUEAR_
893 allocate_spuear_stubs (struct elf_link_hash_entry
*h
, void *inf
)
895 /* Symbols starting with _SPUEAR_ need a stub because they may be
896 invoked by the PPU. */
897 if ((h
->root
.type
== bfd_link_hash_defined
898 || h
->root
.type
== bfd_link_hash_defweak
)
900 && strncmp (h
->root
.root
.string
, "_SPUEAR_", 8) == 0)
902 struct spu_link_hash_table
*htab
= inf
;
904 count_stub (htab
, NULL
, NULL
, non_branch
, h
, NULL
);
911 build_spuear_stubs (struct elf_link_hash_entry
*h
, void *inf
)
913 /* Symbols starting with _SPUEAR_ need a stub because they may be
914 invoked by the PPU. */
915 if ((h
->root
.type
== bfd_link_hash_defined
916 || h
->root
.type
== bfd_link_hash_defweak
)
918 && strncmp (h
->root
.root
.string
, "_SPUEAR_", 8) == 0)
920 struct spu_link_hash_table
*htab
= inf
;
922 build_stub (htab
, NULL
, NULL
, non_branch
, h
, NULL
,
923 h
->root
.u
.def
.value
, h
->root
.u
.def
.section
);
929 /* Size or build stubs. */
932 process_stubs (bfd
*output_bfd
,
933 struct bfd_link_info
*info
,
936 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
939 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
941 extern const bfd_target bfd_elf32_spu_vec
;
942 Elf_Internal_Shdr
*symtab_hdr
;
944 Elf_Internal_Sym
*local_syms
= NULL
;
947 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
950 /* We'll need the symbol table in a second. */
951 symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
952 if (symtab_hdr
->sh_info
== 0)
955 /* Arrange to read and keep global syms for later stack analysis. */
957 if (htab
->stack_analysis
)
958 psyms
= &symtab_hdr
->contents
;
960 /* Walk over each section attached to the input bfd. */
961 for (isec
= ibfd
->sections
; isec
!= NULL
; isec
= isec
->next
)
963 Elf_Internal_Rela
*internal_relocs
, *irelaend
, *irela
;
965 /* If there aren't any relocs, then there's nothing more to do. */
966 if ((isec
->flags
& SEC_RELOC
) == 0
967 || (isec
->flags
& SEC_ALLOC
) == 0
968 || (isec
->flags
& SEC_LOAD
) == 0
969 || isec
->reloc_count
== 0)
972 /* If this section is a link-once section that will be
973 discarded, then don't create any stubs. */
974 if (isec
->output_section
== NULL
975 || isec
->output_section
->owner
!= output_bfd
)
978 /* Get the relocs. */
979 internal_relocs
= _bfd_elf_link_read_relocs (ibfd
, isec
, NULL
, NULL
,
981 if (internal_relocs
== NULL
)
982 goto error_ret_free_local
;
984 /* Now examine each relocation. */
985 irela
= internal_relocs
;
986 irelaend
= irela
+ isec
->reloc_count
;
987 for (; irela
< irelaend
; irela
++)
989 enum elf_spu_reloc_type r_type
;
992 Elf_Internal_Sym
*sym
;
993 struct elf_link_hash_entry
*h
;
994 const char *sym_name
;
995 unsigned int sym_type
;
996 enum _insn_type insn_type
;
998 r_type
= ELF32_R_TYPE (irela
->r_info
);
999 r_indx
= ELF32_R_SYM (irela
->r_info
);
1001 if (r_type
>= R_SPU_max
)
1003 bfd_set_error (bfd_error_bad_value
);
1004 error_ret_free_internal
:
1005 if (elf_section_data (isec
)->relocs
!= internal_relocs
)
1006 free (internal_relocs
);
1007 error_ret_free_local
:
1008 if (local_syms
!= NULL
1009 && (symtab_hdr
->contents
1010 != (unsigned char *) local_syms
))
1015 /* Determine the reloc target section. */
1016 if (!get_sym_h (&h
, &sym
, &sym_sec
, psyms
, r_indx
, ibfd
))
1017 goto error_ret_free_internal
;
1020 || sym_sec
->output_section
== NULL
1021 || sym_sec
->output_section
->owner
!= output_bfd
)
1024 /* Ensure no stubs for user supplied overlay manager syms. */
1026 && (strcmp (h
->root
.root
.string
, "__ovly_load") == 0
1027 || strcmp (h
->root
.root
.string
, "__ovly_return") == 0))
1030 insn_type
= non_branch
;
1031 if (r_type
== R_SPU_REL16
1032 || r_type
== R_SPU_ADDR16
)
1034 unsigned char insn
[4];
1036 if (!bfd_get_section_contents (ibfd
, isec
, insn
,
1037 irela
->r_offset
, 4))
1038 goto error_ret_free_internal
;
1040 if (is_branch (insn
) || is_hint (insn
))
1043 if ((insn
[0] & 0xfd) == 0x31)
1048 /* We are only interested in function symbols. */
1052 sym_name
= h
->root
.root
.string
;
1056 sym_type
= ELF_ST_TYPE (sym
->st_info
);
1057 sym_name
= bfd_elf_sym_name (sym_sec
->owner
,
1063 if (sym_type
!= STT_FUNC
)
1065 /* It's common for people to write assembly and forget
1066 to give function symbols the right type. Handle
1067 calls to such symbols, but warn so that (hopefully)
1068 people will fix their code. We need the symbol
1069 type to be correct to distinguish function pointer
1070 initialisation from other pointer initialisation. */
1071 if (insn_type
== call
)
1072 (*_bfd_error_handler
) (_("warning: call to non-function"
1073 " symbol %s defined in %B"),
1074 sym_sec
->owner
, sym_name
);
1075 else if (insn_type
== non_branch
)
1079 if (!needs_ovl_stub (sym_name
, sym_sec
, isec
, htab
,
1080 insn_type
!= non_branch
))
1083 if (htab
->stub_count
== NULL
)
1086 amt
= (htab
->num_overlays
+ 1) * sizeof (*htab
->stub_count
);
1087 htab
->stub_count
= bfd_zmalloc (amt
);
1088 if (htab
->stub_count
== NULL
)
1089 goto error_ret_free_internal
;
1094 if (!count_stub (htab
, ibfd
, isec
, insn_type
, h
, irela
))
1095 goto error_ret_free_internal
;
1102 dest
= h
->root
.u
.def
.value
;
1104 dest
= sym
->st_value
;
1105 if (!build_stub (htab
, ibfd
, isec
, insn_type
, h
, irela
,
1107 goto error_ret_free_internal
;
1111 /* We're done with the internal relocs, free them. */
1112 if (elf_section_data (isec
)->relocs
!= internal_relocs
)
1113 free (internal_relocs
);
1116 if (local_syms
!= NULL
1117 && symtab_hdr
->contents
!= (unsigned char *) local_syms
)
1119 if (!info
->keep_memory
)
1122 symtab_hdr
->contents
= (unsigned char *) local_syms
;
1129 /* Allocate space for overlay call and return stubs. */
1132 spu_elf_size_stubs (bfd
*output_bfd
,
1133 struct bfd_link_info
*info
,
1134 void (*place_spu_section
) (asection
*, asection
*,
1136 int non_overlay_stubs
)
1138 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1145 htab
->non_overlay_stubs
= non_overlay_stubs
;
1146 if (!process_stubs (output_bfd
, info
, FALSE
))
1149 elf_link_hash_traverse (&htab
->elf
, allocate_spuear_stubs
, htab
);
1153 if (htab
->stub_count
== NULL
)
1156 ibfd
= info
->input_bfds
;
1157 amt
= (htab
->num_overlays
+ 1) * sizeof (*htab
->stub_sec
);
1158 htab
->stub_sec
= bfd_zmalloc (amt
);
1159 if (htab
->stub_sec
== NULL
)
1162 flags
= (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
| SEC_READONLY
1163 | SEC_HAS_CONTENTS
| SEC_IN_MEMORY
);
1164 stub
= bfd_make_section_anyway_with_flags (ibfd
, ".stub", flags
);
1165 htab
->stub_sec
[0] = stub
;
1167 || !bfd_set_section_alignment (ibfd
, stub
, 3 + (OVL_STUB_SIZE
> 8)))
1169 stub
->size
= htab
->stub_count
[0] * OVL_STUB_SIZE
;
1170 (*place_spu_section
) (stub
, NULL
, ".text");
1172 for (i
= 0; i
< htab
->num_overlays
; ++i
)
1174 asection
*osec
= htab
->ovl_sec
[i
];
1175 unsigned int ovl
= spu_elf_section_data (osec
)->u
.o
.ovl_index
;
1176 stub
= bfd_make_section_anyway_with_flags (ibfd
, ".stub", flags
);
1177 htab
->stub_sec
[ovl
] = stub
;
1179 || !bfd_set_section_alignment (ibfd
, stub
, 3 + (OVL_STUB_SIZE
> 8)))
1181 stub
->size
= htab
->stub_count
[ovl
] * OVL_STUB_SIZE
;
1182 (*place_spu_section
) (stub
, osec
, NULL
);
1185 /* htab->ovtab consists of two arrays.
1195 . } _ovly_buf_table[];
1198 flags
= (SEC_ALLOC
| SEC_LOAD
1199 | SEC_HAS_CONTENTS
| SEC_IN_MEMORY
);
1200 htab
->ovtab
= bfd_make_section_anyway_with_flags (ibfd
, ".ovtab", flags
);
1201 if (htab
->ovtab
== NULL
1202 || !bfd_set_section_alignment (ibfd
, htab
->ovtab
, 4))
1205 htab
->ovtab
->size
= htab
->num_overlays
* 16 + 16 + htab
->num_buf
* 4;
1206 (*place_spu_section
) (htab
->ovtab
, NULL
, ".data");
1208 htab
->toe
= bfd_make_section_anyway_with_flags (ibfd
, ".toe", SEC_ALLOC
);
1209 if (htab
->toe
== NULL
1210 || !bfd_set_section_alignment (ibfd
, htab
->toe
, 4))
1212 htab
->toe
->size
= 16;
1213 (*place_spu_section
) (htab
->toe
, NULL
, ".toe");
1218 /* Functions to handle embedded spu_ovl.o object. */
1221 ovl_mgr_open (struct bfd
*nbfd ATTRIBUTE_UNUSED
, void *stream
)
1227 ovl_mgr_pread (struct bfd
*abfd ATTRIBUTE_UNUSED
,
1233 struct _ovl_stream
*os
;
1237 os
= (struct _ovl_stream
*) stream
;
1238 max
= (const char *) os
->end
- (const char *) os
->start
;
1240 if ((ufile_ptr
) offset
>= max
)
1244 if (count
> max
- offset
)
1245 count
= max
- offset
;
1247 memcpy (buf
, (const char *) os
->start
+ offset
, count
);
1252 spu_elf_open_builtin_lib (bfd
**ovl_bfd
, const struct _ovl_stream
*stream
)
1254 *ovl_bfd
= bfd_openr_iovec ("builtin ovl_mgr",
1261 return *ovl_bfd
!= NULL
;
1264 /* Define an STT_OBJECT symbol. */
1266 static struct elf_link_hash_entry
*
1267 define_ovtab_symbol (struct spu_link_hash_table
*htab
, const char *name
)
1269 struct elf_link_hash_entry
*h
;
1271 h
= elf_link_hash_lookup (&htab
->elf
, name
, TRUE
, FALSE
, FALSE
);
1275 if (h
->root
.type
!= bfd_link_hash_defined
1278 h
->root
.type
= bfd_link_hash_defined
;
1279 h
->root
.u
.def
.section
= htab
->ovtab
;
1280 h
->type
= STT_OBJECT
;
1283 h
->ref_regular_nonweak
= 1;
1288 (*_bfd_error_handler
) (_("%B is not allowed to define %s"),
1289 h
->root
.u
.def
.section
->owner
,
1290 h
->root
.root
.string
);
1291 bfd_set_error (bfd_error_bad_value
);
1298 /* Fill in all stubs and the overlay tables. */
1301 spu_elf_build_stubs (struct bfd_link_info
*info
, int emit_syms
)
1303 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1304 struct elf_link_hash_entry
*h
;
1310 htab
->emit_stub_syms
= emit_syms
;
1311 if (htab
->stub_count
== NULL
)
1314 for (i
= 0; i
<= htab
->num_overlays
; i
++)
1315 if (htab
->stub_sec
[i
]->size
!= 0)
1317 htab
->stub_sec
[i
]->contents
= bfd_zalloc (htab
->stub_sec
[i
]->owner
,
1318 htab
->stub_sec
[i
]->size
);
1319 if (htab
->stub_sec
[i
]->contents
== NULL
)
1321 htab
->stub_sec
[i
]->rawsize
= htab
->stub_sec
[i
]->size
;
1322 htab
->stub_sec
[i
]->size
= 0;
1325 h
= elf_link_hash_lookup (&htab
->elf
, "__ovly_load", FALSE
, FALSE
, FALSE
);
1326 htab
->ovly_load
= h
;
1327 BFD_ASSERT (h
!= NULL
1328 && (h
->root
.type
== bfd_link_hash_defined
1329 || h
->root
.type
== bfd_link_hash_defweak
)
1332 s
= h
->root
.u
.def
.section
->output_section
;
1333 if (spu_elf_section_data (s
)->u
.o
.ovl_index
)
1335 (*_bfd_error_handler
) (_("%s in overlay section"),
1336 h
->root
.u
.def
.section
->owner
);
1337 bfd_set_error (bfd_error_bad_value
);
1341 h
= elf_link_hash_lookup (&htab
->elf
, "__ovly_return", FALSE
, FALSE
, FALSE
);
1342 htab
->ovly_return
= h
;
1344 /* Write out all the stubs. */
1345 obfd
= htab
->ovtab
->output_section
->owner
;
1346 process_stubs (obfd
, info
, TRUE
);
1348 elf_link_hash_traverse (&htab
->elf
, build_spuear_stubs
, htab
);
1352 for (i
= 0; i
<= htab
->num_overlays
; i
++)
1354 if (htab
->stub_sec
[i
]->size
!= htab
->stub_sec
[i
]->rawsize
)
1356 (*_bfd_error_handler
) (_("stubs don't match calculated size"));
1357 bfd_set_error (bfd_error_bad_value
);
1360 htab
->stub_sec
[i
]->rawsize
= 0;
1365 (*_bfd_error_handler
) (_("overlay stub relocation overflow"));
1366 bfd_set_error (bfd_error_bad_value
);
1370 htab
->ovtab
->contents
= bfd_zalloc (htab
->ovtab
->owner
, htab
->ovtab
->size
);
1371 if (htab
->ovtab
->contents
== NULL
)
1374 /* Write out _ovly_table. */
1375 p
= htab
->ovtab
->contents
;
1376 /* set low bit of .size to mark non-overlay area as present. */
1378 for (s
= obfd
->sections
; s
!= NULL
; s
= s
->next
)
1380 unsigned int ovl_index
= spu_elf_section_data (s
)->u
.o
.ovl_index
;
1384 unsigned long off
= ovl_index
* 16;
1385 unsigned int ovl_buf
= spu_elf_section_data (s
)->u
.o
.ovl_buf
;
1387 bfd_put_32 (htab
->ovtab
->owner
, s
->vma
, p
+ off
);
1388 bfd_put_32 (htab
->ovtab
->owner
, (s
->size
+ 15) & -16, p
+ off
+ 4);
1389 /* file_off written later in spu_elf_modify_program_headers. */
1390 bfd_put_32 (htab
->ovtab
->owner
, ovl_buf
, p
+ off
+ 12);
1394 h
= define_ovtab_symbol (htab
, "_ovly_table");
1397 h
->root
.u
.def
.value
= 16;
1398 h
->size
= htab
->num_overlays
* 16;
1400 h
= define_ovtab_symbol (htab
, "_ovly_table_end");
1403 h
->root
.u
.def
.value
= htab
->num_overlays
* 16 + 16;
1406 h
= define_ovtab_symbol (htab
, "_ovly_buf_table");
1409 h
->root
.u
.def
.value
= htab
->num_overlays
* 16 + 16;
1410 h
->size
= htab
->num_buf
* 4;
1412 h
= define_ovtab_symbol (htab
, "_ovly_buf_table_end");
1415 h
->root
.u
.def
.value
= htab
->num_overlays
* 16 + 16 + htab
->num_buf
* 4;
1418 h
= define_ovtab_symbol (htab
, "_EAR_");
1421 h
->root
.u
.def
.section
= htab
->toe
;
1422 h
->root
.u
.def
.value
= 0;
1428 /* OFFSET in SEC (presumably) is the beginning of a function prologue.
1429 Search for stack adjusting insns, and return the sp delta. */
1432 find_function_stack_adjust (asection
*sec
, bfd_vma offset
)
1437 memset (reg
, 0, sizeof (reg
));
1438 for (unrecog
= 0; offset
+ 4 <= sec
->size
&& unrecog
< 32; offset
+= 4)
1440 unsigned char buf
[4];
1444 /* Assume no relocs on stack adjusing insns. */
1445 if (!bfd_get_section_contents (sec
->owner
, sec
, buf
, offset
, 4))
1448 if (buf
[0] == 0x24 /* stqd */)
1452 ra
= ((buf
[2] & 0x3f) << 1) | (buf
[3] >> 7);
1453 /* Partly decoded immediate field. */
1454 imm
= (buf
[1] << 9) | (buf
[2] << 1) | (buf
[3] >> 7);
1456 if (buf
[0] == 0x1c /* ai */)
1459 imm
= (imm
^ 0x200) - 0x200;
1460 reg
[rt
] = reg
[ra
] + imm
;
1462 if (rt
== 1 /* sp */)
1469 else if (buf
[0] == 0x18 && (buf
[1] & 0xe0) == 0 /* a */)
1471 int rb
= ((buf
[1] & 0x1f) << 2) | ((buf
[2] & 0xc0) >> 6);
1473 reg
[rt
] = reg
[ra
] + reg
[rb
];
1477 else if ((buf
[0] & 0xfc) == 0x40 /* il, ilh, ilhu, ila */)
1479 if (buf
[0] >= 0x42 /* ila */)
1480 imm
|= (buf
[0] & 1) << 17;
1485 if (buf
[0] == 0x40 /* il */)
1487 if ((buf
[1] & 0x80) == 0)
1489 imm
= (imm
^ 0x8000) - 0x8000;
1491 else if ((buf
[1] & 0x80) == 0 /* ilhu */)
1497 else if (buf
[0] == 0x60 && (buf
[1] & 0x80) != 0 /* iohl */)
1499 reg
[rt
] |= imm
& 0xffff;
1502 else if (buf
[0] == 0x04 /* ori */)
1505 imm
= (imm
^ 0x200) - 0x200;
1506 reg
[rt
] = reg
[ra
] | imm
;
1509 else if ((buf
[0] == 0x33 && imm
== 1 /* brsl .+4 */)
1510 || (buf
[0] == 0x08 && (buf
[1] & 0xe0) == 0 /* sf */))
1512 /* Used in pic reg load. Say rt is trashed. */
1516 else if (is_branch (buf
) || is_indirect_branch (buf
))
1517 /* If we hit a branch then we must be out of the prologue. */
1526 /* qsort predicate to sort symbols by section and value. */
1528 static Elf_Internal_Sym
*sort_syms_syms
;
1529 static asection
**sort_syms_psecs
;
1532 sort_syms (const void *a
, const void *b
)
1534 Elf_Internal_Sym
*const *s1
= a
;
1535 Elf_Internal_Sym
*const *s2
= b
;
1536 asection
*sec1
,*sec2
;
1537 bfd_signed_vma delta
;
1539 sec1
= sort_syms_psecs
[*s1
- sort_syms_syms
];
1540 sec2
= sort_syms_psecs
[*s2
- sort_syms_syms
];
1543 return sec1
->index
- sec2
->index
;
1545 delta
= (*s1
)->st_value
- (*s2
)->st_value
;
1547 return delta
< 0 ? -1 : 1;
1549 delta
= (*s2
)->st_size
- (*s1
)->st_size
;
1551 return delta
< 0 ? -1 : 1;
1553 return *s1
< *s2
? -1 : 1;
1558 struct function_info
*fun
;
1559 struct call_info
*next
;
1563 struct function_info
1565 /* List of functions called. Also branches to hot/cold part of
1567 struct call_info
*call_list
;
1568 /* For hot/cold part of function, point to owner. */
1569 struct function_info
*start
;
1570 /* Symbol at start of function. */
1572 Elf_Internal_Sym
*sym
;
1573 struct elf_link_hash_entry
*h
;
1575 /* Function section. */
1577 /* Address range of (this part of) function. */
1581 /* Set if global symbol. */
1582 unsigned int global
: 1;
1583 /* Set if known to be start of function (as distinct from a hunk
1584 in hot/cold section. */
1585 unsigned int is_func
: 1;
1586 /* Flags used during call tree traversal. */
1587 unsigned int visit1
: 1;
1588 unsigned int non_root
: 1;
1589 unsigned int visit2
: 1;
1590 unsigned int marking
: 1;
1591 unsigned int visit3
: 1;
1594 struct spu_elf_stack_info
1598 /* Variable size array describing functions, one per contiguous
1599 address range belonging to a function. */
1600 struct function_info fun
[1];
1603 /* Allocate a struct spu_elf_stack_info with MAX_FUN struct function_info
1604 entries for section SEC. */
1606 static struct spu_elf_stack_info
*
1607 alloc_stack_info (asection
*sec
, int max_fun
)
1609 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
1612 amt
= sizeof (struct spu_elf_stack_info
);
1613 amt
+= (max_fun
- 1) * sizeof (struct function_info
);
1614 sec_data
->u
.i
.stack_info
= bfd_zmalloc (amt
);
1615 if (sec_data
->u
.i
.stack_info
!= NULL
)
1616 sec_data
->u
.i
.stack_info
->max_fun
= max_fun
;
1617 return sec_data
->u
.i
.stack_info
;
1620 /* Add a new struct function_info describing a (part of a) function
1621 starting at SYM_H. Keep the array sorted by address. */
1623 static struct function_info
*
1624 maybe_insert_function (asection
*sec
,
1627 bfd_boolean is_func
)
1629 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
1630 struct spu_elf_stack_info
*sinfo
= sec_data
->u
.i
.stack_info
;
1636 sinfo
= alloc_stack_info (sec
, 20);
1643 Elf_Internal_Sym
*sym
= sym_h
;
1644 off
= sym
->st_value
;
1645 size
= sym
->st_size
;
1649 struct elf_link_hash_entry
*h
= sym_h
;
1650 off
= h
->root
.u
.def
.value
;
1654 for (i
= sinfo
->num_fun
; --i
>= 0; )
1655 if (sinfo
->fun
[i
].lo
<= off
)
1660 /* Don't add another entry for an alias, but do update some
1662 if (sinfo
->fun
[i
].lo
== off
)
1664 /* Prefer globals over local syms. */
1665 if (global
&& !sinfo
->fun
[i
].global
)
1667 sinfo
->fun
[i
].global
= TRUE
;
1668 sinfo
->fun
[i
].u
.h
= sym_h
;
1671 sinfo
->fun
[i
].is_func
= TRUE
;
1672 return &sinfo
->fun
[i
];
1674 /* Ignore a zero-size symbol inside an existing function. */
1675 else if (sinfo
->fun
[i
].hi
> off
&& size
== 0)
1676 return &sinfo
->fun
[i
];
1679 if (++i
< sinfo
->num_fun
)
1680 memmove (&sinfo
->fun
[i
+ 1], &sinfo
->fun
[i
],
1681 (sinfo
->num_fun
- i
) * sizeof (sinfo
->fun
[i
]));
1682 else if (i
>= sinfo
->max_fun
)
1684 bfd_size_type amt
= sizeof (struct spu_elf_stack_info
);
1685 bfd_size_type old
= amt
;
1687 old
+= (sinfo
->max_fun
- 1) * sizeof (struct function_info
);
1688 sinfo
->max_fun
+= 20 + (sinfo
->max_fun
>> 1);
1689 amt
+= (sinfo
->max_fun
- 1) * sizeof (struct function_info
);
1690 sinfo
= bfd_realloc (sinfo
, amt
);
1693 memset ((char *) sinfo
+ old
, 0, amt
- old
);
1694 sec_data
->u
.i
.stack_info
= sinfo
;
1696 sinfo
->fun
[i
].is_func
= is_func
;
1697 sinfo
->fun
[i
].global
= global
;
1698 sinfo
->fun
[i
].sec
= sec
;
1700 sinfo
->fun
[i
].u
.h
= sym_h
;
1702 sinfo
->fun
[i
].u
.sym
= sym_h
;
1703 sinfo
->fun
[i
].lo
= off
;
1704 sinfo
->fun
[i
].hi
= off
+ size
;
1705 sinfo
->fun
[i
].stack
= -find_function_stack_adjust (sec
, off
);
1706 sinfo
->num_fun
+= 1;
1707 return &sinfo
->fun
[i
];
1710 /* Return the name of FUN. */
1713 func_name (struct function_info
*fun
)
1717 Elf_Internal_Shdr
*symtab_hdr
;
1719 while (fun
->start
!= NULL
)
1723 return fun
->u
.h
->root
.root
.string
;
1726 if (fun
->u
.sym
->st_name
== 0)
1728 size_t len
= strlen (sec
->name
);
1729 char *name
= bfd_malloc (len
+ 10);
1732 sprintf (name
, "%s+%lx", sec
->name
,
1733 (unsigned long) fun
->u
.sym
->st_value
& 0xffffffff);
1737 symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
1738 return bfd_elf_sym_name (ibfd
, symtab_hdr
, fun
->u
.sym
, sec
);
1741 /* Read the instruction at OFF in SEC. Return true iff the instruction
1742 is a nop, lnop, or stop 0 (all zero insn). */
1745 is_nop (asection
*sec
, bfd_vma off
)
1747 unsigned char insn
[4];
1749 if (off
+ 4 > sec
->size
1750 || !bfd_get_section_contents (sec
->owner
, sec
, insn
, off
, 4))
1752 if ((insn
[0] & 0xbf) == 0 && (insn
[1] & 0xe0) == 0x20)
1754 if (insn
[0] == 0 && insn
[1] == 0 && insn
[2] == 0 && insn
[3] == 0)
1759 /* Extend the range of FUN to cover nop padding up to LIMIT.
1760 Return TRUE iff some instruction other than a NOP was found. */
1763 insns_at_end (struct function_info
*fun
, bfd_vma limit
)
1765 bfd_vma off
= (fun
->hi
+ 3) & -4;
1767 while (off
< limit
&& is_nop (fun
->sec
, off
))
1778 /* Check and fix overlapping function ranges. Return TRUE iff there
1779 are gaps in the current info we have about functions in SEC. */
1782 check_function_ranges (asection
*sec
, struct bfd_link_info
*info
)
1784 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
1785 struct spu_elf_stack_info
*sinfo
= sec_data
->u
.i
.stack_info
;
1787 bfd_boolean gaps
= FALSE
;
1792 for (i
= 1; i
< sinfo
->num_fun
; i
++)
1793 if (sinfo
->fun
[i
- 1].hi
> sinfo
->fun
[i
].lo
)
1795 /* Fix overlapping symbols. */
1796 const char *f1
= func_name (&sinfo
->fun
[i
- 1]);
1797 const char *f2
= func_name (&sinfo
->fun
[i
]);
1799 info
->callbacks
->einfo (_("warning: %s overlaps %s\n"), f1
, f2
);
1800 sinfo
->fun
[i
- 1].hi
= sinfo
->fun
[i
].lo
;
1802 else if (insns_at_end (&sinfo
->fun
[i
- 1], sinfo
->fun
[i
].lo
))
1805 if (sinfo
->num_fun
== 0)
1809 if (sinfo
->fun
[0].lo
!= 0)
1811 if (sinfo
->fun
[sinfo
->num_fun
- 1].hi
> sec
->size
)
1813 const char *f1
= func_name (&sinfo
->fun
[sinfo
->num_fun
- 1]);
1815 info
->callbacks
->einfo (_("warning: %s exceeds section size\n"), f1
);
1816 sinfo
->fun
[sinfo
->num_fun
- 1].hi
= sec
->size
;
1818 else if (insns_at_end (&sinfo
->fun
[sinfo
->num_fun
- 1], sec
->size
))
1824 /* Search current function info for a function that contains address
1825 OFFSET in section SEC. */
1827 static struct function_info
*
1828 find_function (asection
*sec
, bfd_vma offset
, struct bfd_link_info
*info
)
1830 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
1831 struct spu_elf_stack_info
*sinfo
= sec_data
->u
.i
.stack_info
;
1835 hi
= sinfo
->num_fun
;
1838 mid
= (lo
+ hi
) / 2;
1839 if (offset
< sinfo
->fun
[mid
].lo
)
1841 else if (offset
>= sinfo
->fun
[mid
].hi
)
1844 return &sinfo
->fun
[mid
];
1846 info
->callbacks
->einfo (_("%A:0x%v not found in function table\n"),
1851 /* Add CALLEE to CALLER call list if not already present. */
1854 insert_callee (struct function_info
*caller
, struct call_info
*callee
)
1856 struct call_info
*p
;
1857 for (p
= caller
->call_list
; p
!= NULL
; p
= p
->next
)
1858 if (p
->fun
== callee
->fun
)
1860 /* Tail calls use less stack than normal calls. Retain entry
1861 for normal call over one for tail call. */
1862 if (p
->is_tail
> callee
->is_tail
)
1863 p
->is_tail
= callee
->is_tail
;
1866 callee
->next
= caller
->call_list
;
1867 caller
->call_list
= callee
;
1871 /* Rummage through the relocs for SEC, looking for function calls.
1872 If CALL_TREE is true, fill in call graph. If CALL_TREE is false,
1873 mark destination symbols on calls as being functions. Also
1874 look at branches, which may be tail calls or go to hot/cold
1875 section part of same function. */
1878 mark_functions_via_relocs (asection
*sec
,
1879 struct bfd_link_info
*info
,
1882 Elf_Internal_Rela
*internal_relocs
, *irelaend
, *irela
;
1883 Elf_Internal_Shdr
*symtab_hdr
= &elf_tdata (sec
->owner
)->symtab_hdr
;
1884 Elf_Internal_Sym
*syms
;
1886 static bfd_boolean warned
;
1888 internal_relocs
= _bfd_elf_link_read_relocs (sec
->owner
, sec
, NULL
, NULL
,
1890 if (internal_relocs
== NULL
)
1893 symtab_hdr
= &elf_tdata (sec
->owner
)->symtab_hdr
;
1894 psyms
= &symtab_hdr
->contents
;
1895 syms
= *(Elf_Internal_Sym
**) psyms
;
1896 irela
= internal_relocs
;
1897 irelaend
= irela
+ sec
->reloc_count
;
1898 for (; irela
< irelaend
; irela
++)
1900 enum elf_spu_reloc_type r_type
;
1901 unsigned int r_indx
;
1903 Elf_Internal_Sym
*sym
;
1904 struct elf_link_hash_entry
*h
;
1906 unsigned char insn
[4];
1907 bfd_boolean is_call
;
1908 struct function_info
*caller
;
1909 struct call_info
*callee
;
1911 r_type
= ELF32_R_TYPE (irela
->r_info
);
1912 if (r_type
!= R_SPU_REL16
1913 && r_type
!= R_SPU_ADDR16
)
1916 r_indx
= ELF32_R_SYM (irela
->r_info
);
1917 if (!get_sym_h (&h
, &sym
, &sym_sec
, psyms
, r_indx
, sec
->owner
))
1921 || sym_sec
->output_section
== NULL
1922 || sym_sec
->output_section
->owner
!= sec
->output_section
->owner
)
1925 if (!bfd_get_section_contents (sec
->owner
, sec
, insn
,
1926 irela
->r_offset
, 4))
1928 if (!is_branch (insn
))
1931 if ((sym_sec
->flags
& (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
))
1932 != (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
))
1936 if (!call_tree
|| !warned
)
1937 info
->callbacks
->einfo (_("%B(%A+0x%v): call to non-code section"
1938 " %B(%A), stack analysis incomplete\n"),
1939 sec
->owner
, sec
, irela
->r_offset
,
1940 sym_sec
->owner
, sym_sec
);
1944 is_call
= (insn
[0] & 0xfd) == 0x31;
1947 val
= h
->root
.u
.def
.value
;
1949 val
= sym
->st_value
;
1950 val
+= irela
->r_addend
;
1954 struct function_info
*fun
;
1956 if (irela
->r_addend
!= 0)
1958 Elf_Internal_Sym
*fake
= bfd_zmalloc (sizeof (*fake
));
1961 fake
->st_value
= val
;
1963 = _bfd_elf_section_from_bfd_section (sym_sec
->owner
, sym_sec
);
1967 fun
= maybe_insert_function (sym_sec
, sym
, FALSE
, is_call
);
1969 fun
= maybe_insert_function (sym_sec
, h
, TRUE
, is_call
);
1972 if (irela
->r_addend
!= 0
1973 && fun
->u
.sym
!= sym
)
1978 caller
= find_function (sec
, irela
->r_offset
, info
);
1981 callee
= bfd_malloc (sizeof *callee
);
1985 callee
->fun
= find_function (sym_sec
, val
, info
);
1986 if (callee
->fun
== NULL
)
1988 callee
->is_tail
= !is_call
;
1989 if (!insert_callee (caller
, callee
))
1992 && !callee
->fun
->is_func
1993 && callee
->fun
->stack
== 0)
1995 /* This is either a tail call or a branch from one part of
1996 the function to another, ie. hot/cold section. If the
1997 destination has been called by some other function then
1998 it is a separate function. We also assume that functions
1999 are not split across input files. */
2000 if (sec
->owner
!= sym_sec
->owner
)
2002 callee
->fun
->start
= NULL
;
2003 callee
->fun
->is_func
= TRUE
;
2005 else if (callee
->fun
->start
== NULL
)
2006 callee
->fun
->start
= caller
;
2009 struct function_info
*callee_start
;
2010 struct function_info
*caller_start
;
2011 callee_start
= callee
->fun
;
2012 while (callee_start
->start
)
2013 callee_start
= callee_start
->start
;
2014 caller_start
= caller
;
2015 while (caller_start
->start
)
2016 caller_start
= caller_start
->start
;
2017 if (caller_start
!= callee_start
)
2019 callee
->fun
->start
= NULL
;
2020 callee
->fun
->is_func
= TRUE
;
2029 /* Handle something like .init or .fini, which has a piece of a function.
2030 These sections are pasted together to form a single function. */
2033 pasted_function (asection
*sec
, struct bfd_link_info
*info
)
2035 struct bfd_link_order
*l
;
2036 struct _spu_elf_section_data
*sec_data
;
2037 struct spu_elf_stack_info
*sinfo
;
2038 Elf_Internal_Sym
*fake
;
2039 struct function_info
*fun
, *fun_start
;
2041 fake
= bfd_zmalloc (sizeof (*fake
));
2045 fake
->st_size
= sec
->size
;
2047 = _bfd_elf_section_from_bfd_section (sec
->owner
, sec
);
2048 fun
= maybe_insert_function (sec
, fake
, FALSE
, FALSE
);
2052 /* Find a function immediately preceding this section. */
2054 for (l
= sec
->output_section
->map_head
.link_order
; l
!= NULL
; l
= l
->next
)
2056 if (l
->u
.indirect
.section
== sec
)
2058 if (fun_start
!= NULL
)
2059 fun
->start
= fun_start
;
2062 if (l
->type
== bfd_indirect_link_order
2063 && (sec_data
= spu_elf_section_data (l
->u
.indirect
.section
)) != NULL
2064 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
2065 && sinfo
->num_fun
!= 0)
2066 fun_start
= &sinfo
->fun
[sinfo
->num_fun
- 1];
2069 info
->callbacks
->einfo (_("%A link_order not found\n"), sec
);
2073 /* We're only interested in code sections. Testing SEC_IN_MEMORY excludes
2074 overlay stub sections. */
2077 interesting_section (asection
*s
, bfd
*obfd
)
2079 return (s
->output_section
!= NULL
2080 && s
->output_section
->owner
== obfd
2081 && ((s
->flags
& (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
| SEC_IN_MEMORY
))
2082 == (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
))
2086 /* Map address ranges in code sections to functions. */
2089 discover_functions (bfd
*output_bfd
, struct bfd_link_info
*info
)
2093 Elf_Internal_Sym
***psym_arr
;
2094 asection
***sec_arr
;
2095 bfd_boolean gaps
= FALSE
;
2098 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
2101 psym_arr
= bfd_zmalloc (bfd_idx
* sizeof (*psym_arr
));
2102 if (psym_arr
== NULL
)
2104 sec_arr
= bfd_zmalloc (bfd_idx
* sizeof (*sec_arr
));
2105 if (sec_arr
== NULL
)
2109 for (ibfd
= info
->input_bfds
, bfd_idx
= 0;
2111 ibfd
= ibfd
->link_next
, bfd_idx
++)
2113 extern const bfd_target bfd_elf32_spu_vec
;
2114 Elf_Internal_Shdr
*symtab_hdr
;
2117 Elf_Internal_Sym
*syms
, *sy
, **psyms
, **psy
;
2118 asection
**psecs
, **p
;
2120 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
2123 /* Read all the symbols. */
2124 symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
2125 symcount
= symtab_hdr
->sh_size
/ symtab_hdr
->sh_entsize
;
2129 syms
= (Elf_Internal_Sym
*) symtab_hdr
->contents
;
2132 syms
= bfd_elf_get_elf_syms (ibfd
, symtab_hdr
, symcount
, 0,
2134 symtab_hdr
->contents
= (void *) syms
;
2139 /* Select defined function symbols that are going to be output. */
2140 psyms
= bfd_malloc ((symcount
+ 1) * sizeof (*psyms
));
2143 psym_arr
[bfd_idx
] = psyms
;
2144 psecs
= bfd_malloc (symcount
* sizeof (*psecs
));
2147 sec_arr
[bfd_idx
] = psecs
;
2148 for (psy
= psyms
, p
= psecs
, sy
= syms
; sy
< syms
+ symcount
; ++p
, ++sy
)
2149 if (ELF_ST_TYPE (sy
->st_info
) == STT_NOTYPE
2150 || ELF_ST_TYPE (sy
->st_info
) == STT_FUNC
)
2154 *p
= s
= bfd_section_from_elf_index (ibfd
, sy
->st_shndx
);
2155 if (s
!= NULL
&& interesting_section (s
, output_bfd
))
2158 symcount
= psy
- psyms
;
2161 /* Sort them by section and offset within section. */
2162 sort_syms_syms
= syms
;
2163 sort_syms_psecs
= psecs
;
2164 qsort (psyms
, symcount
, sizeof (*psyms
), sort_syms
);
2166 /* Now inspect the function symbols. */
2167 for (psy
= psyms
; psy
< psyms
+ symcount
; )
2169 asection
*s
= psecs
[*psy
- syms
];
2170 Elf_Internal_Sym
**psy2
;
2172 for (psy2
= psy
; ++psy2
< psyms
+ symcount
; )
2173 if (psecs
[*psy2
- syms
] != s
)
2176 if (!alloc_stack_info (s
, psy2
- psy
))
2181 /* First install info about properly typed and sized functions.
2182 In an ideal world this will cover all code sections, except
2183 when partitioning functions into hot and cold sections,
2184 and the horrible pasted together .init and .fini functions. */
2185 for (psy
= psyms
; psy
< psyms
+ symcount
; ++psy
)
2188 if (ELF_ST_TYPE (sy
->st_info
) == STT_FUNC
)
2190 asection
*s
= psecs
[sy
- syms
];
2191 if (!maybe_insert_function (s
, sy
, FALSE
, TRUE
))
2196 for (sec
= ibfd
->sections
; sec
!= NULL
&& !gaps
; sec
= sec
->next
)
2197 if (interesting_section (sec
, output_bfd
))
2198 gaps
|= check_function_ranges (sec
, info
);
2203 /* See if we can discover more function symbols by looking at
2205 for (ibfd
= info
->input_bfds
, bfd_idx
= 0;
2207 ibfd
= ibfd
->link_next
, bfd_idx
++)
2211 if (psym_arr
[bfd_idx
] == NULL
)
2214 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
2215 if (interesting_section (sec
, output_bfd
)
2216 && sec
->reloc_count
!= 0)
2218 if (!mark_functions_via_relocs (sec
, info
, FALSE
))
2223 for (ibfd
= info
->input_bfds
, bfd_idx
= 0;
2225 ibfd
= ibfd
->link_next
, bfd_idx
++)
2227 Elf_Internal_Shdr
*symtab_hdr
;
2229 Elf_Internal_Sym
*syms
, *sy
, **psyms
, **psy
;
2232 if ((psyms
= psym_arr
[bfd_idx
]) == NULL
)
2235 psecs
= sec_arr
[bfd_idx
];
2237 symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
2238 syms
= (Elf_Internal_Sym
*) symtab_hdr
->contents
;
2241 for (sec
= ibfd
->sections
; sec
!= NULL
&& !gaps
; sec
= sec
->next
)
2242 if (interesting_section (sec
, output_bfd
))
2243 gaps
|= check_function_ranges (sec
, info
);
2247 /* Finally, install all globals. */
2248 for (psy
= psyms
; (sy
= *psy
) != NULL
; ++psy
)
2252 s
= psecs
[sy
- syms
];
2254 /* Global syms might be improperly typed functions. */
2255 if (ELF_ST_TYPE (sy
->st_info
) != STT_FUNC
2256 && ELF_ST_BIND (sy
->st_info
) == STB_GLOBAL
)
2258 if (!maybe_insert_function (s
, sy
, FALSE
, FALSE
))
2263 /* Some of the symbols we've installed as marking the
2264 beginning of functions may have a size of zero. Extend
2265 the range of such functions to the beginning of the
2266 next symbol of interest. */
2267 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
2268 if (interesting_section (sec
, output_bfd
))
2270 struct _spu_elf_section_data
*sec_data
;
2271 struct spu_elf_stack_info
*sinfo
;
2273 sec_data
= spu_elf_section_data (sec
);
2274 sinfo
= sec_data
->u
.i
.stack_info
;
2278 bfd_vma hi
= sec
->size
;
2280 for (fun_idx
= sinfo
->num_fun
; --fun_idx
>= 0; )
2282 sinfo
->fun
[fun_idx
].hi
= hi
;
2283 hi
= sinfo
->fun
[fun_idx
].lo
;
2286 /* No symbols in this section. Must be .init or .fini
2287 or something similar. */
2288 else if (!pasted_function (sec
, info
))
2294 for (ibfd
= info
->input_bfds
, bfd_idx
= 0;
2296 ibfd
= ibfd
->link_next
, bfd_idx
++)
2298 if (psym_arr
[bfd_idx
] == NULL
)
2301 free (psym_arr
[bfd_idx
]);
2302 free (sec_arr
[bfd_idx
]);
2311 /* Mark nodes in the call graph that are called by some other node. */
2314 mark_non_root (struct function_info
*fun
)
2316 struct call_info
*call
;
2319 for (call
= fun
->call_list
; call
; call
= call
->next
)
2321 call
->fun
->non_root
= TRUE
;
2322 if (!call
->fun
->visit1
)
2323 mark_non_root (call
->fun
);
2327 /* Remove cycles from the call graph. */
2330 call_graph_traverse (struct function_info
*fun
, struct bfd_link_info
*info
)
2332 struct call_info
**callp
, *call
;
2335 fun
->marking
= TRUE
;
2337 callp
= &fun
->call_list
;
2338 while ((call
= *callp
) != NULL
)
2340 if (!call
->fun
->visit2
)
2341 call_graph_traverse (call
->fun
, info
);
2342 else if (call
->fun
->marking
)
2344 const char *f1
= func_name (fun
);
2345 const char *f2
= func_name (call
->fun
);
2347 info
->callbacks
->info (_("Stack analysis will ignore the call "
2350 *callp
= call
->next
;
2353 callp
= &call
->next
;
2355 fun
->marking
= FALSE
;
2358 /* Populate call_list for each function. */
2361 build_call_tree (bfd
*output_bfd
, struct bfd_link_info
*info
)
2365 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
2367 extern const bfd_target bfd_elf32_spu_vec
;
2370 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
2373 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
2375 if (!interesting_section (sec
, output_bfd
)
2376 || sec
->reloc_count
== 0)
2379 if (!mark_functions_via_relocs (sec
, info
, TRUE
))
2383 /* Transfer call info from hot/cold section part of function
2385 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
2387 struct _spu_elf_section_data
*sec_data
;
2388 struct spu_elf_stack_info
*sinfo
;
2390 if ((sec_data
= spu_elf_section_data (sec
)) != NULL
2391 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
2394 for (i
= 0; i
< sinfo
->num_fun
; ++i
)
2396 struct function_info
*start
= sinfo
->fun
[i
].start
;
2400 struct call_info
*call
;
2402 while (start
->start
!= NULL
)
2403 start
= start
->start
;
2404 call
= sinfo
->fun
[i
].call_list
;
2405 while (call
!= NULL
)
2407 struct call_info
*call_next
= call
->next
;
2408 if (!insert_callee (start
, call
))
2412 sinfo
->fun
[i
].call_list
= NULL
;
2413 sinfo
->fun
[i
].non_root
= TRUE
;
2420 /* Find the call graph root(s). */
2421 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
2423 extern const bfd_target bfd_elf32_spu_vec
;
2426 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
2429 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
2431 struct _spu_elf_section_data
*sec_data
;
2432 struct spu_elf_stack_info
*sinfo
;
2434 if ((sec_data
= spu_elf_section_data (sec
)) != NULL
2435 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
2438 for (i
= 0; i
< sinfo
->num_fun
; ++i
)
2439 if (!sinfo
->fun
[i
].visit1
)
2440 mark_non_root (&sinfo
->fun
[i
]);
2445 /* Remove cycles from the call graph. We start from the root node(s)
2446 so that we break cycles in a reasonable place. */
2447 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
2449 extern const bfd_target bfd_elf32_spu_vec
;
2452 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
2455 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
2457 struct _spu_elf_section_data
*sec_data
;
2458 struct spu_elf_stack_info
*sinfo
;
2460 if ((sec_data
= spu_elf_section_data (sec
)) != NULL
2461 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
2464 for (i
= 0; i
< sinfo
->num_fun
; ++i
)
2465 if (!sinfo
->fun
[i
].non_root
)
2466 call_graph_traverse (&sinfo
->fun
[i
], info
);
2474 /* Descend the call graph for FUN, accumulating total stack required. */
2477 sum_stack (struct function_info
*fun
,
2478 struct bfd_link_info
*info
,
2479 int emit_stack_syms
)
2481 struct call_info
*call
;
2482 struct function_info
*max
= NULL
;
2483 bfd_vma max_stack
= fun
->stack
;
2490 for (call
= fun
->call_list
; call
; call
= call
->next
)
2492 stack
= sum_stack (call
->fun
, info
, emit_stack_syms
);
2493 /* Include caller stack for normal calls, don't do so for
2494 tail calls. fun->stack here is local stack usage for
2497 stack
+= fun
->stack
;
2498 if (max_stack
< stack
)
2505 f1
= func_name (fun
);
2506 info
->callbacks
->minfo (_("%s: 0x%v 0x%v\n"),
2507 f1
, (bfd_vma
) fun
->stack
, max_stack
);
2511 info
->callbacks
->minfo (_(" calls:\n"));
2512 for (call
= fun
->call_list
; call
; call
= call
->next
)
2514 const char *f2
= func_name (call
->fun
);
2515 const char *ann1
= call
->fun
== max
? "*" : " ";
2516 const char *ann2
= call
->is_tail
? "t" : " ";
2518 info
->callbacks
->minfo (_(" %s%s %s\n"), ann1
, ann2
, f2
);
2522 /* Now fun->stack holds cumulative stack. */
2523 fun
->stack
= max_stack
;
2526 if (emit_stack_syms
)
2528 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
2529 char *name
= bfd_malloc (18 + strlen (f1
));
2530 struct elf_link_hash_entry
*h
;
2534 if (fun
->global
|| ELF_ST_BIND (fun
->u
.sym
->st_info
) == STB_GLOBAL
)
2535 sprintf (name
, "__stack_%s", f1
);
2537 sprintf (name
, "__stack_%x_%s", fun
->sec
->id
& 0xffffffff, f1
);
2539 h
= elf_link_hash_lookup (&htab
->elf
, name
, TRUE
, TRUE
, FALSE
);
2542 && (h
->root
.type
== bfd_link_hash_new
2543 || h
->root
.type
== bfd_link_hash_undefined
2544 || h
->root
.type
== bfd_link_hash_undefweak
))
2546 h
->root
.type
= bfd_link_hash_defined
;
2547 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
2548 h
->root
.u
.def
.value
= max_stack
;
2553 h
->ref_regular_nonweak
= 1;
2554 h
->forced_local
= 1;
2563 /* Provide an estimate of total stack required. */
2566 spu_elf_stack_analysis (bfd
*output_bfd
,
2567 struct bfd_link_info
*info
,
2568 int emit_stack_syms
)
2571 bfd_vma max_stack
= 0;
2573 if (!discover_functions (output_bfd
, info
))
2576 if (!build_call_tree (output_bfd
, info
))
2579 info
->callbacks
->info (_("Stack size for call graph root nodes.\n"));
2580 info
->callbacks
->minfo (_("\nStack size for functions. "
2581 "Annotations: '*' max stack, 't' tail call\n"));
2582 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
2584 extern const bfd_target bfd_elf32_spu_vec
;
2587 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
2590 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
2592 struct _spu_elf_section_data
*sec_data
;
2593 struct spu_elf_stack_info
*sinfo
;
2595 if ((sec_data
= spu_elf_section_data (sec
)) != NULL
2596 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
2599 for (i
= 0; i
< sinfo
->num_fun
; ++i
)
2601 if (!sinfo
->fun
[i
].non_root
)
2606 stack
= sum_stack (&sinfo
->fun
[i
], info
,
2608 f1
= func_name (&sinfo
->fun
[i
]);
2609 info
->callbacks
->info (_(" %s: 0x%v\n"),
2611 if (max_stack
< stack
)
2619 info
->callbacks
->info (_("Maximum stack required is 0x%v\n"), max_stack
);
2623 /* Perform a final link. */
2626 spu_elf_final_link (bfd
*output_bfd
, struct bfd_link_info
*info
)
2628 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
2630 if (htab
->stack_analysis
2631 && !spu_elf_stack_analysis (output_bfd
, info
, htab
->emit_stack_syms
))
2632 info
->callbacks
->einfo ("%X%P: stack analysis error: %E\n");
2634 return bfd_elf_final_link (output_bfd
, info
);
2637 /* Called when not normally emitting relocs, ie. !info->relocatable
2638 and !info->emitrelocations. Returns a count of special relocs
2639 that need to be emitted. */
2642 spu_elf_count_relocs (asection
*sec
, Elf_Internal_Rela
*relocs
)
2644 unsigned int count
= 0;
2645 Elf_Internal_Rela
*relend
= relocs
+ sec
->reloc_count
;
2647 for (; relocs
< relend
; relocs
++)
2649 int r_type
= ELF32_R_TYPE (relocs
->r_info
);
2650 if (r_type
== R_SPU_PPU32
|| r_type
== R_SPU_PPU64
)
2657 /* Apply RELOCS to CONTENTS of INPUT_SECTION from INPUT_BFD. */
2660 spu_elf_relocate_section (bfd
*output_bfd
,
2661 struct bfd_link_info
*info
,
2663 asection
*input_section
,
2665 Elf_Internal_Rela
*relocs
,
2666 Elf_Internal_Sym
*local_syms
,
2667 asection
**local_sections
)
2669 Elf_Internal_Shdr
*symtab_hdr
;
2670 struct elf_link_hash_entry
**sym_hashes
;
2671 Elf_Internal_Rela
*rel
, *relend
;
2672 struct spu_link_hash_table
*htab
;
2674 bfd_boolean emit_these_relocs
= FALSE
;
2676 htab
= spu_hash_table (info
);
2677 symtab_hdr
= &elf_tdata (input_bfd
)->symtab_hdr
;
2678 sym_hashes
= (struct elf_link_hash_entry
**) (elf_sym_hashes (input_bfd
));
2681 relend
= relocs
+ input_section
->reloc_count
;
2682 for (; rel
< relend
; rel
++)
2685 reloc_howto_type
*howto
;
2686 unsigned long r_symndx
;
2687 Elf_Internal_Sym
*sym
;
2689 struct elf_link_hash_entry
*h
;
2690 const char *sym_name
;
2693 bfd_reloc_status_type r
;
2694 bfd_boolean unresolved_reloc
;
2698 r_symndx
= ELF32_R_SYM (rel
->r_info
);
2699 r_type
= ELF32_R_TYPE (rel
->r_info
);
2700 if (r_type
== R_SPU_PPU32
|| r_type
== R_SPU_PPU64
)
2702 emit_these_relocs
= TRUE
;
2706 howto
= elf_howto_table
+ r_type
;
2707 unresolved_reloc
= FALSE
;
2712 if (r_symndx
< symtab_hdr
->sh_info
)
2714 sym
= local_syms
+ r_symndx
;
2715 sec
= local_sections
[r_symndx
];
2716 sym_name
= bfd_elf_sym_name (input_bfd
, symtab_hdr
, sym
, sec
);
2717 relocation
= _bfd_elf_rela_local_sym (output_bfd
, sym
, &sec
, rel
);
2721 RELOC_FOR_GLOBAL_SYMBOL (info
, input_bfd
, input_section
, rel
,
2722 r_symndx
, symtab_hdr
, sym_hashes
,
2724 unresolved_reloc
, warned
);
2725 sym_name
= h
->root
.root
.string
;
2728 if (sec
!= NULL
&& elf_discarded_section (sec
))
2730 /* For relocs against symbols from removed linkonce sections,
2731 or sections discarded by a linker script, we just want the
2732 section contents zeroed. Avoid any special processing. */
2733 _bfd_clear_contents (howto
, input_bfd
, contents
+ rel
->r_offset
);
2739 if (info
->relocatable
)
2742 if (unresolved_reloc
)
2744 (*_bfd_error_handler
)
2745 (_("%B(%s+0x%lx): unresolvable %s relocation against symbol `%s'"),
2747 bfd_get_section_name (input_bfd
, input_section
),
2748 (long) rel
->r_offset
,
2754 /* If this symbol is in an overlay area, we may need to relocate
2755 to the overlay stub. */
2756 addend
= rel
->r_addend
;
2757 branch
= (is_branch (contents
+ rel
->r_offset
)
2758 || is_hint (contents
+ rel
->r_offset
));
2759 if (htab
->stub_sec
!= NULL
2760 && needs_ovl_stub (sym_name
, sec
, input_section
, htab
, branch
)
2762 || (h
!= htab
->ovly_load
&& h
!= htab
->ovly_return
)))
2764 unsigned int ovl
= 0;
2765 struct got_entry
*g
, **head
;
2768 ovl
= (spu_elf_section_data (input_section
->output_section
)
2772 head
= &h
->got
.glist
;
2774 head
= elf_local_got_ents (input_bfd
) + r_symndx
;
2776 for (g
= *head
; g
!= NULL
; g
= g
->next
)
2777 if (g
->ovl
== ovl
|| g
->ovl
== 0)
2782 relocation
= g
->stub_addr
;
2786 r
= _bfd_final_link_relocate (howto
,
2790 rel
->r_offset
, relocation
, addend
);
2792 if (r
!= bfd_reloc_ok
)
2794 const char *msg
= (const char *) 0;
2798 case bfd_reloc_overflow
:
2799 if (!((*info
->callbacks
->reloc_overflow
)
2800 (info
, (h
? &h
->root
: NULL
), sym_name
, howto
->name
,
2801 (bfd_vma
) 0, input_bfd
, input_section
, rel
->r_offset
)))
2805 case bfd_reloc_undefined
:
2806 if (!((*info
->callbacks
->undefined_symbol
)
2807 (info
, sym_name
, input_bfd
, input_section
,
2808 rel
->r_offset
, TRUE
)))
2812 case bfd_reloc_outofrange
:
2813 msg
= _("internal error: out of range error");
2816 case bfd_reloc_notsupported
:
2817 msg
= _("internal error: unsupported relocation error");
2820 case bfd_reloc_dangerous
:
2821 msg
= _("internal error: dangerous error");
2825 msg
= _("internal error: unknown error");
2830 if (!((*info
->callbacks
->warning
)
2831 (info
, msg
, sym_name
, input_bfd
, input_section
,
2840 && emit_these_relocs
2841 && !info
->relocatable
2842 && !info
->emitrelocations
)
2844 Elf_Internal_Rela
*wrel
;
2845 Elf_Internal_Shdr
*rel_hdr
;
2847 wrel
= rel
= relocs
;
2848 relend
= relocs
+ input_section
->reloc_count
;
2849 for (; rel
< relend
; rel
++)
2853 r_type
= ELF32_R_TYPE (rel
->r_info
);
2854 if (r_type
== R_SPU_PPU32
|| r_type
== R_SPU_PPU64
)
2857 input_section
->reloc_count
= wrel
- relocs
;
2858 /* Backflips for _bfd_elf_link_output_relocs. */
2859 rel_hdr
= &elf_section_data (input_section
)->rel_hdr
;
2860 rel_hdr
->sh_size
= input_section
->reloc_count
* rel_hdr
->sh_entsize
;
2867 /* Adjust _SPUEAR_ syms to point at their overlay stubs. */
2870 spu_elf_output_symbol_hook (struct bfd_link_info
*info
,
2871 const char *sym_name ATTRIBUTE_UNUSED
,
2872 Elf_Internal_Sym
*sym
,
2873 asection
*sym_sec ATTRIBUTE_UNUSED
,
2874 struct elf_link_hash_entry
*h
)
2876 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
2878 if (!info
->relocatable
2879 && htab
->stub_sec
!= NULL
2881 && (h
->root
.type
== bfd_link_hash_defined
2882 || h
->root
.type
== bfd_link_hash_defweak
)
2884 && strncmp (h
->root
.root
.string
, "_SPUEAR_", 8) == 0)
2886 struct got_entry
*g
= h
->got
.glist
;
2888 if (g
!= NULL
&& g
->ovl
== 0)
2890 sym
->st_shndx
= (_bfd_elf_section_from_bfd_section
2891 (htab
->stub_sec
[0]->output_section
->owner
,
2892 htab
->stub_sec
[0]->output_section
));
2893 sym
->st_value
= g
->stub_addr
;
/* Non-zero when the output is being linked as a plugin; checked when
   post-processing the ELF header.  */
static int spu_plugin = 0;

/* Record whether the output should be marked as a plugin.  */

void
spu_elf_plugin (int val)
{
  spu_plugin = val;
}
2908 /* Set ELF header e_type for plugins. */
2911 spu_elf_post_process_headers (bfd
*abfd
,
2912 struct bfd_link_info
*info ATTRIBUTE_UNUSED
)
2916 Elf_Internal_Ehdr
*i_ehdrp
= elf_elfheader (abfd
);
2918 i_ehdrp
->e_type
= ET_DYN
;
2922 /* We may add an extra PT_LOAD segment for .toe. We also need extra
2923 segments for overlays. */
2926 spu_elf_additional_program_headers (bfd
*abfd
, struct bfd_link_info
*info
)
2928 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
2929 int extra
= htab
->num_overlays
;
2935 sec
= bfd_get_section_by_name (abfd
, ".toe");
2936 if (sec
!= NULL
&& (sec
->flags
& SEC_LOAD
) != 0)
2942 /* Remove .toe section from other PT_LOAD segments and put it in
2943 a segment of its own. Put overlays in separate segments too. */
2946 spu_elf_modify_segment_map (bfd
*abfd
, struct bfd_link_info
*info
)
2949 struct elf_segment_map
*m
;
2955 toe
= bfd_get_section_by_name (abfd
, ".toe");
2956 for (m
= elf_tdata (abfd
)->segment_map
; m
!= NULL
; m
= m
->next
)
2957 if (m
->p_type
== PT_LOAD
&& m
->count
> 1)
2958 for (i
= 0; i
< m
->count
; i
++)
2959 if ((s
= m
->sections
[i
]) == toe
2960 || spu_elf_section_data (s
)->u
.o
.ovl_index
!= 0)
2962 struct elf_segment_map
*m2
;
2965 if (i
+ 1 < m
->count
)
2967 amt
= sizeof (struct elf_segment_map
);
2968 amt
+= (m
->count
- (i
+ 2)) * sizeof (m
->sections
[0]);
2969 m2
= bfd_zalloc (abfd
, amt
);
2972 m2
->count
= m
->count
- (i
+ 1);
2973 memcpy (m2
->sections
, m
->sections
+ i
+ 1,
2974 m2
->count
* sizeof (m
->sections
[0]));
2975 m2
->p_type
= PT_LOAD
;
2983 amt
= sizeof (struct elf_segment_map
);
2984 m2
= bfd_zalloc (abfd
, amt
);
2987 m2
->p_type
= PT_LOAD
;
2989 m2
->sections
[0] = s
;
2999 /* Check that all loadable section VMAs lie in the range
3000 LO .. HI inclusive. */
3003 spu_elf_check_vma (bfd
*abfd
, bfd_vma lo
, bfd_vma hi
)
3005 struct elf_segment_map
*m
;
3008 for (m
= elf_tdata (abfd
)->segment_map
; m
!= NULL
; m
= m
->next
)
3009 if (m
->p_type
== PT_LOAD
)
3010 for (i
= 0; i
< m
->count
; i
++)
3011 if (m
->sections
[i
]->size
!= 0
3012 && (m
->sections
[i
]->vma
< lo
3013 || m
->sections
[i
]->vma
> hi
3014 || m
->sections
[i
]->vma
+ m
->sections
[i
]->size
- 1 > hi
))
3015 return m
->sections
[i
];
3020 /* Tweak the section type of .note.spu_name. */
3023 spu_elf_fake_sections (bfd
*obfd ATTRIBUTE_UNUSED
,
3024 Elf_Internal_Shdr
*hdr
,
3027 if (strcmp (sec
->name
, SPU_PTNOTE_SPUNAME
) == 0)
3028 hdr
->sh_type
= SHT_NOTE
;
3032 /* Tweak phdrs before writing them out. */
3035 spu_elf_modify_program_headers (bfd
*abfd
, struct bfd_link_info
*info
)
3037 const struct elf_backend_data
*bed
;
3038 struct elf_obj_tdata
*tdata
;
3039 Elf_Internal_Phdr
*phdr
, *last
;
3040 struct spu_link_hash_table
*htab
;
3047 bed
= get_elf_backend_data (abfd
);
3048 tdata
= elf_tdata (abfd
);
3050 count
= tdata
->program_header_size
/ bed
->s
->sizeof_phdr
;
3051 htab
= spu_hash_table (info
);
3052 if (htab
->num_overlays
!= 0)
3054 struct elf_segment_map
*m
;
3057 for (i
= 0, m
= elf_tdata (abfd
)->segment_map
; m
; ++i
, m
= m
->next
)
3059 && (o
= spu_elf_section_data (m
->sections
[0])->u
.o
.ovl_index
) != 0)
3061 /* Mark this as an overlay header. */
3062 phdr
[i
].p_flags
|= PF_OVERLAY
;
3064 if (htab
->ovtab
!= NULL
&& htab
->ovtab
->size
!= 0)
3066 bfd_byte
*p
= htab
->ovtab
->contents
;
3067 unsigned int off
= o
* 16 + 8;
3069 /* Write file_off into _ovly_table. */
3070 bfd_put_32 (htab
->ovtab
->owner
, phdr
[i
].p_offset
, p
+ off
);
3075 /* Round up p_filesz and p_memsz of PT_LOAD segments to multiples
3076 of 16. This should always be possible when using the standard
3077 linker scripts, but don't create overlapping segments if
3078 someone is playing games with linker scripts. */
3080 for (i
= count
; i
-- != 0; )
3081 if (phdr
[i
].p_type
== PT_LOAD
)
3085 adjust
= -phdr
[i
].p_filesz
& 15;
3088 && phdr
[i
].p_offset
+ phdr
[i
].p_filesz
> last
->p_offset
- adjust
)
3091 adjust
= -phdr
[i
].p_memsz
& 15;
3094 && phdr
[i
].p_filesz
!= 0
3095 && phdr
[i
].p_vaddr
+ phdr
[i
].p_memsz
> last
->p_vaddr
- adjust
3096 && phdr
[i
].p_vaddr
+ phdr
[i
].p_memsz
<= last
->p_vaddr
)
3099 if (phdr
[i
].p_filesz
!= 0)
3103 if (i
== (unsigned int) -1)
3104 for (i
= count
; i
-- != 0; )
3105 if (phdr
[i
].p_type
== PT_LOAD
)
3109 adjust
= -phdr
[i
].p_filesz
& 15;
3110 phdr
[i
].p_filesz
+= adjust
;
3112 adjust
= -phdr
[i
].p_memsz
& 15;
3113 phdr
[i
].p_memsz
+= adjust
;
3119 #define TARGET_BIG_SYM bfd_elf32_spu_vec
3120 #define TARGET_BIG_NAME "elf32-spu"
3121 #define ELF_ARCH bfd_arch_spu
3122 #define ELF_MACHINE_CODE EM_SPU
3123 /* This matches the alignment need for DMA. */
3124 #define ELF_MAXPAGESIZE 0x80
3125 #define elf_backend_rela_normal 1
3126 #define elf_backend_can_gc_sections 1
3128 #define bfd_elf32_bfd_reloc_type_lookup spu_elf_reloc_type_lookup
3129 #define bfd_elf32_bfd_reloc_name_lookup spu_elf_reloc_name_lookup
3130 #define elf_info_to_howto spu_elf_info_to_howto
3131 #define elf_backend_count_relocs spu_elf_count_relocs
3132 #define elf_backend_relocate_section spu_elf_relocate_section
3133 #define elf_backend_symbol_processing spu_elf_backend_symbol_processing
3134 #define elf_backend_link_output_symbol_hook spu_elf_output_symbol_hook
3135 #define bfd_elf32_new_section_hook spu_elf_new_section_hook
3136 #define bfd_elf32_bfd_link_hash_table_create spu_elf_link_hash_table_create
3138 #define elf_backend_additional_program_headers spu_elf_additional_program_headers
3139 #define elf_backend_modify_segment_map spu_elf_modify_segment_map
3140 #define elf_backend_modify_program_headers spu_elf_modify_program_headers
3141 #define elf_backend_post_process_headers spu_elf_post_process_headers
3142 #define elf_backend_fake_sections spu_elf_fake_sections
3143 #define elf_backend_special_sections spu_elf_special_sections
3144 #define bfd_elf32_bfd_final_link spu_elf_final_link
3146 #include "elf32-target.h"