* elf32-spu.c (struct spu_link_hash_table): Add "stubs".
[binutils-gdb.git] / bfd / elf32-spu.c
1 /* SPU specific support for 32-bit ELF
2
3 Copyright 2006, 2007 Free Software Foundation, Inc.
4
5 This file is part of BFD, the Binary File Descriptor library.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License along
18 with this program; if not, write to the Free Software Foundation, Inc.,
19 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */
20
21 #include "sysdep.h"
22 #include "bfd.h"
23 #include "bfdlink.h"
24 #include "libbfd.h"
25 #include "elf-bfd.h"
26 #include "elf/spu.h"
27 #include "elf32-spu.h"
28
29 /* We use RELA style relocs. Don't define USE_REL. */
30
31 static bfd_reloc_status_type spu_elf_rel9 (bfd *, arelent *, asymbol *,
32 void *, asection *,
33 bfd *, char **);
34
/* Values of type 'enum elf_spu_reloc_type' are used to index this
   array, so it must be declared in the order of that type.

   HOWTO arguments are, in order: type, rightshift, size, bitsize,
   pc_relative, bitpos, complain_on_overflow, special_function, name,
   partial_inplace, src_mask, dst_mask, pcrel_offset (see bfd's
   reloc.c).  The dst_mask picks out the instruction field the
   relocated value is inserted into.  */

static reloc_howto_type elf_howto_table[] = {
  HOWTO (R_SPU_NONE,       0, 0,  0, FALSE,  0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_NONE",
	 FALSE, 0, 0x00000000, FALSE),
  HOWTO (R_SPU_ADDR10,     4, 2, 10, FALSE, 14, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_ADDR10",
	 FALSE, 0, 0x00ffc000, FALSE),
  HOWTO (R_SPU_ADDR16,     2, 2, 16, FALSE,  7, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_ADDR16",
	 FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_ADDR16_HI, 16, 2, 16, FALSE,  7, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_ADDR16_HI",
	 FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_ADDR16_LO,  0, 2, 16, FALSE,  7, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_ADDR16_LO",
	 FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_ADDR18,     0, 2, 18, FALSE,  7, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_ADDR18",
	 FALSE, 0, 0x01ffff80, FALSE),
  HOWTO (R_SPU_ADDR32,     0, 2, 32, FALSE,  0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_ADDR32",
	 FALSE, 0, 0xffffffff, FALSE),
  HOWTO (R_SPU_REL16,      2, 2, 16,  TRUE,  7, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_REL16",
	 FALSE, 0, 0x007fff80, TRUE),
  HOWTO (R_SPU_ADDR7,      0, 2,  7, FALSE, 14, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_ADDR7",
	 FALSE, 0, 0x001fc000, FALSE),
  /* The two 9-bit pc-relative relocs need a special function because
     their bits are split across non-contiguous instruction fields.  */
  HOWTO (R_SPU_REL9,       2, 2,  9,  TRUE,  0, complain_overflow_signed,
	 spu_elf_rel9,          "SPU_REL9",
	 FALSE, 0, 0x0180007f, TRUE),
  HOWTO (R_SPU_REL9I,      2, 2,  9,  TRUE,  0, complain_overflow_signed,
	 spu_elf_rel9,          "SPU_REL9I",
	 FALSE, 0, 0x0000c07f, TRUE),
  HOWTO (R_SPU_ADDR10I,    0, 2, 10, FALSE, 14, complain_overflow_signed,
	 bfd_elf_generic_reloc, "SPU_ADDR10I",
	 FALSE, 0, 0x00ffc000, FALSE),
  HOWTO (R_SPU_ADDR16I,    0, 2, 16, FALSE,  7, complain_overflow_signed,
	 bfd_elf_generic_reloc, "SPU_ADDR16I",
	 FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_REL32,      0, 2, 32,  TRUE,  0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_REL32",
	 FALSE, 0, 0xffffffff, TRUE),
  HOWTO (R_SPU_ADDR16X,    0, 2, 16, FALSE,  7, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_ADDR16X",
	 FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_PPU32,      0, 2, 32, FALSE,  0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_PPU32",
	 FALSE, 0, 0xffffffff, FALSE),
  HOWTO (R_SPU_PPU64,      0, 4, 64, FALSE,  0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_PPU64",
	 FALSE, 0, -1, FALSE),
};
91
/* Sections given special treatment by the generic ELF code.  The
   .toe (table of effective addresses) section is NOBITS.  Fields:
   prefix, prefix length, suffix length, sh_type, sh_flags.  */

static struct bfd_elf_special_section const spu_elf_special_sections[] = {
  { ".toe", 4, 0, SHT_NOBITS, SHF_ALLOC },
  { NULL, 0, 0, 0, 0 }
};
96
97 static enum elf_spu_reloc_type
98 spu_elf_bfd_to_reloc_type (bfd_reloc_code_real_type code)
99 {
100 switch (code)
101 {
102 default:
103 return R_SPU_NONE;
104 case BFD_RELOC_SPU_IMM10W:
105 return R_SPU_ADDR10;
106 case BFD_RELOC_SPU_IMM16W:
107 return R_SPU_ADDR16;
108 case BFD_RELOC_SPU_LO16:
109 return R_SPU_ADDR16_LO;
110 case BFD_RELOC_SPU_HI16:
111 return R_SPU_ADDR16_HI;
112 case BFD_RELOC_SPU_IMM18:
113 return R_SPU_ADDR18;
114 case BFD_RELOC_SPU_PCREL16:
115 return R_SPU_REL16;
116 case BFD_RELOC_SPU_IMM7:
117 return R_SPU_ADDR7;
118 case BFD_RELOC_SPU_IMM8:
119 return R_SPU_NONE;
120 case BFD_RELOC_SPU_PCREL9a:
121 return R_SPU_REL9;
122 case BFD_RELOC_SPU_PCREL9b:
123 return R_SPU_REL9I;
124 case BFD_RELOC_SPU_IMM10:
125 return R_SPU_ADDR10I;
126 case BFD_RELOC_SPU_IMM16:
127 return R_SPU_ADDR16I;
128 case BFD_RELOC_32:
129 return R_SPU_ADDR32;
130 case BFD_RELOC_32_PCREL:
131 return R_SPU_REL32;
132 case BFD_RELOC_SPU_PPU32:
133 return R_SPU_PPU32;
134 case BFD_RELOC_SPU_PPU64:
135 return R_SPU_PPU64;
136 }
137 }
138
139 static void
140 spu_elf_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED,
141 arelent *cache_ptr,
142 Elf_Internal_Rela *dst)
143 {
144 enum elf_spu_reloc_type r_type;
145
146 r_type = (enum elf_spu_reloc_type) ELF32_R_TYPE (dst->r_info);
147 BFD_ASSERT (r_type < R_SPU_max);
148 cache_ptr->howto = &elf_howto_table[(int) r_type];
149 }
150
151 static reloc_howto_type *
152 spu_elf_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
153 bfd_reloc_code_real_type code)
154 {
155 enum elf_spu_reloc_type r_type = spu_elf_bfd_to_reloc_type (code);
156
157 if (r_type == R_SPU_NONE)
158 return NULL;
159
160 return elf_howto_table + r_type;
161 }
162
163 static reloc_howto_type *
164 spu_elf_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
165 const char *r_name)
166 {
167 unsigned int i;
168
169 for (i = 0; i < sizeof (elf_howto_table) / sizeof (elf_howto_table[0]); i++)
170 if (elf_howto_table[i].name != NULL
171 && strcasecmp (elf_howto_table[i].name, r_name) == 0)
172 return &elf_howto_table[i];
173
174 return NULL;
175 }
176
177 /* Apply R_SPU_REL9 and R_SPU_REL9I relocs. */
178
179 static bfd_reloc_status_type
180 spu_elf_rel9 (bfd *abfd, arelent *reloc_entry, asymbol *symbol,
181 void *data, asection *input_section,
182 bfd *output_bfd, char **error_message)
183 {
184 bfd_size_type octets;
185 bfd_vma val;
186 long insn;
187
188 /* If this is a relocatable link (output_bfd test tells us), just
189 call the generic function. Any adjustment will be done at final
190 link time. */
191 if (output_bfd != NULL)
192 return bfd_elf_generic_reloc (abfd, reloc_entry, symbol, data,
193 input_section, output_bfd, error_message);
194
195 if (reloc_entry->address > bfd_get_section_limit (abfd, input_section))
196 return bfd_reloc_outofrange;
197 octets = reloc_entry->address * bfd_octets_per_byte (abfd);
198
199 /* Get symbol value. */
200 val = 0;
201 if (!bfd_is_com_section (symbol->section))
202 val = symbol->value;
203 if (symbol->section->output_section)
204 val += symbol->section->output_section->vma;
205
206 val += reloc_entry->addend;
207
208 /* Make it pc-relative. */
209 val -= input_section->output_section->vma + input_section->output_offset;
210
211 val >>= 2;
212 if (val + 256 >= 512)
213 return bfd_reloc_overflow;
214
215 insn = bfd_get_32 (abfd, (bfd_byte *) data + octets);
216
217 /* Move two high bits of value to REL9I and REL9 position.
218 The mask will take care of selecting the right field. */
219 val = (val & 0x7f) | ((val & 0x180) << 7) | ((val & 0x180) << 16);
220 insn &= ~reloc_entry->howto->dst_mask;
221 insn |= val & reloc_entry->howto->dst_mask;
222 bfd_put_32 (abfd, insn, (bfd_byte *) data + octets);
223 return bfd_reloc_ok;
224 }
225
226 static bfd_boolean
227 spu_elf_new_section_hook (bfd *abfd, asection *sec)
228 {
229 if (!sec->used_by_bfd)
230 {
231 struct _spu_elf_section_data *sdata;
232
233 sdata = bfd_zalloc (abfd, sizeof (*sdata));
234 if (sdata == NULL)
235 return FALSE;
236 sec->used_by_bfd = sdata;
237 }
238
239 return _bfd_elf_new_section_hook (abfd, sec);
240 }
241
242 /* Specially mark defined symbols named _EAR_* with BSF_KEEP so that
243 strip --strip-unneeded will not remove them. */
244
245 static void
246 spu_elf_backend_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED, asymbol *sym)
247 {
248 if (sym->name != NULL
249 && sym->section != bfd_abs_section_ptr
250 && strncmp (sym->name, "_EAR_", 5) == 0)
251 sym->flags |= BSF_KEEP;
252 }
253
/* SPU ELF linker hash table.  Extends the generic ELF hash table with
   the state needed for overlay stub generation.  NOTE: the layout
   matters — spu_elf_link_hash_table_create memsets everything from
   "stubs" onward, so fields before "stubs" must not be moved.  */

struct spu_link_hash_table
{
  struct elf_link_hash_table elf;

  /* The stub hash table.  */
  struct bfd_hash_table stub_hash_table;

  /* Sorted array of stubs.  */
  struct {
    /* Array of pointers into stub_hash_table entries.  */
    struct spu_stub_hash_entry **sh;
    /* Number of entries in "sh".  */
    unsigned int count;
    /* Non-zero if stub creation hit an error (e.g. out of memory).  */
    int err;
  } stubs;

  /* Shortcuts to overlay sections.  */
  asection *stub;
  asection *ovtab;

  /* The overlay manager entry point, if the user supplied one.  */
  struct elf_link_hash_entry *ovly_load;

  /* An array of two output sections per overlay region, chosen such that
     the first section vma is the overlay buffer vma (ie. the section has
     the lowest vma in the group that occupy the region), and the second
     section vma+size specifies the end of the region.  We keep pointers
     to sections like this because section vmas may change when laying
     them out.  */
  asection **ovl_region;

  /* Number of overlay buffers.  */
  unsigned int num_buf;

  /* Total number of overlays.  */
  unsigned int num_overlays;

  /* Set if we should emit symbols for stubs.  */
  unsigned int emit_stub_syms:1;

  /* Set if we want stubs on calls out of overlay regions to
     non-overlay regions.  */
  unsigned int non_overlay_stubs : 1;

  /* Set on error.  */
  unsigned int stub_overflow : 1;

  /* Set if stack size analysis should be done.  */
  unsigned int stack_analysis : 1;

  /* Set if __stack_* syms will be emitted.  */
  unsigned int emit_stack_syms : 1;
};

/* Fetch the SPU hash table from a bfd_link_info.  */
#define spu_hash_table(p) \
  ((struct spu_link_hash_table *) ((p)->hash))
309
/* One entry per overlay stub, keyed by the name built in
   spu_stub_name.  A NULL target_section marks a freshly created
   (not yet populated) entry.  */

struct spu_stub_hash_entry
{
  struct bfd_hash_entry root;

  /* Destination of this stub.  */
  asection *target_section;
  bfd_vma target_off;

  /* Offset of entry in stub section.  */
  bfd_vma off;

  /* Offset from this stub to stub that loads the overlay index.  */
  bfd_vma delta;
};
324
325 /* Create an entry in a spu stub hash table. */
326
327 static struct bfd_hash_entry *
328 stub_hash_newfunc (struct bfd_hash_entry *entry,
329 struct bfd_hash_table *table,
330 const char *string)
331 {
332 /* Allocate the structure if it has not already been allocated by a
333 subclass. */
334 if (entry == NULL)
335 {
336 entry = bfd_hash_allocate (table, sizeof (struct spu_stub_hash_entry));
337 if (entry == NULL)
338 return entry;
339 }
340
341 /* Call the allocation method of the superclass. */
342 entry = bfd_hash_newfunc (entry, table, string);
343 if (entry != NULL)
344 {
345 struct spu_stub_hash_entry *sh = (struct spu_stub_hash_entry *) entry;
346
347 sh->target_section = NULL;
348 sh->target_off = 0;
349 sh->off = 0;
350 sh->delta = 0;
351 }
352
353 return entry;
354 }
355
356 /* Create a spu ELF linker hash table. */
357
358 static struct bfd_link_hash_table *
359 spu_elf_link_hash_table_create (bfd *abfd)
360 {
361 struct spu_link_hash_table *htab;
362
363 htab = bfd_malloc (sizeof (*htab));
364 if (htab == NULL)
365 return NULL;
366
367 if (!_bfd_elf_link_hash_table_init (&htab->elf, abfd,
368 _bfd_elf_link_hash_newfunc,
369 sizeof (struct elf_link_hash_entry)))
370 {
371 free (htab);
372 return NULL;
373 }
374
375 /* Init the stub hash table too. */
376 if (!bfd_hash_table_init (&htab->stub_hash_table, stub_hash_newfunc,
377 sizeof (struct spu_stub_hash_entry)))
378 return NULL;
379
380 memset (&htab->stubs, 0,
381 sizeof (*htab) - offsetof (struct spu_link_hash_table, stubs));
382
383 return &htab->elf.root;
384 }
385
386 /* Free the derived linker hash table. */
387
388 static void
389 spu_elf_link_hash_table_free (struct bfd_link_hash_table *hash)
390 {
391 struct spu_link_hash_table *ret = (struct spu_link_hash_table *) hash;
392
393 bfd_hash_table_free (&ret->stub_hash_table);
394 _bfd_generic_link_hash_table_free (hash);
395 }
396
/* Find the symbol for the given R_SYMNDX in IBFD and set *HP and *SYMP
   to (hash, NULL) for global symbols, and (NULL, sym) for locals.  Set
   *SYMSECP to the symbol's section.  *LOCSYMSP caches local syms.
   Any of HP, SYMP, SYMSECP may be NULL if the caller doesn't need that
   result.  Returns FALSE only when reading the symbol table fails.  */

static bfd_boolean
get_sym_h (struct elf_link_hash_entry **hp,
	   Elf_Internal_Sym **symp,
	   asection **symsecp,
	   Elf_Internal_Sym **locsymsp,
	   unsigned long r_symndx,
	   bfd *ibfd)
{
  Elf_Internal_Shdr *symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;

  if (r_symndx >= symtab_hdr->sh_info)
    {
      /* Indices at or above sh_info are global symbols, found via the
	 hash array.  */
      struct elf_link_hash_entry **sym_hashes = elf_sym_hashes (ibfd);
      struct elf_link_hash_entry *h;

      h = sym_hashes[r_symndx - symtab_hdr->sh_info];
      /* Follow indirect and warning links to the real symbol.  */
      while (h->root.type == bfd_link_hash_indirect
	     || h->root.type == bfd_link_hash_warning)
	h = (struct elf_link_hash_entry *) h->root.u.i.link;

      if (hp != NULL)
	*hp = h;

      if (symp != NULL)
	*symp = NULL;

      if (symsecp != NULL)
	{
	  /* Only defined symbols have a section; others get NULL.  */
	  asection *symsec = NULL;
	  if (h->root.type == bfd_link_hash_defined
	      || h->root.type == bfd_link_hash_defweak)
	    symsec = h->root.u.def.section;
	  *symsecp = symsec;
	}
    }
  else
    {
      /* Local symbol: read (and cache via *LOCSYMSP) the local part of
	 the symbol table on first use.  */
      Elf_Internal_Sym *sym;
      Elf_Internal_Sym *locsyms = *locsymsp;

      if (locsyms == NULL)
	{
	  locsyms = (Elf_Internal_Sym *) symtab_hdr->contents;
	  if (locsyms == NULL)
	    {
	      size_t symcount = symtab_hdr->sh_info;

	      /* If we are reading symbols into the contents, then
		 read the global syms too.  This is done to cache
		 syms for later stack analysis.  */
	      if ((unsigned char **) locsymsp == &symtab_hdr->contents)
		symcount = symtab_hdr->sh_size / symtab_hdr->sh_entsize;
	      locsyms = bfd_elf_get_elf_syms (ibfd, symtab_hdr, symcount, 0,
					      NULL, NULL, NULL);
	    }
	  if (locsyms == NULL)
	    return FALSE;
	  *locsymsp = locsyms;
	}
      sym = locsyms + r_symndx;

      if (hp != NULL)
	*hp = NULL;

      if (symp != NULL)
	*symp = sym;

      if (symsecp != NULL)
	{
	  /* Map the section index to a section, skipping undefined and
	     reserved indices (SHN_LORESERVE..SHN_HIRESERVE).  */
	  asection *symsec = NULL;
	  if ((sym->st_shndx != SHN_UNDEF
	       && sym->st_shndx < SHN_LORESERVE)
	      || sym->st_shndx > SHN_HIRESERVE)
	    symsec = bfd_section_from_elf_index (ibfd, sym->st_shndx);
	  *symsecp = symsec;
	}
    }

  return TRUE;
}
481
/* Build a name for an entry in the stub hash table.  We can't use a
   local symbol name because ld -r might generate duplicate local symbols.
   Globals are named "sym+addend", locals "secid:symndx+addend" (all in
   hex); a trailing "+0" is trimmed.  Returns a bfd_malloc'd string the
   caller must free, or NULL on allocation failure.  */

static char *
spu_stub_name (const asection *sym_sec,
	       const struct elf_link_hash_entry *h,
	       const Elf_Internal_Rela *rel)
{
  char *stub_name;
  bfd_size_type len;

  if (h)
    {
      /* Name, '+', up to 8 hex digits of addend, NUL.  */
      len = strlen (h->root.root.string) + 1 + 8 + 1;
      stub_name = bfd_malloc (len);
      if (stub_name == NULL)
	return stub_name;

      sprintf (stub_name, "%s+%x",
	       h->root.root.string,
	       (int) rel->r_addend & 0xffffffff);
      /* Point LEN at the position just past "name+0" so the trim
	 check below works for a zero addend.  */
      len -= 8;
    }
  else
    {
      /* "secid:symndx+addend", each field up to 8 hex digits.  */
      len = 8 + 1 + 8 + 1 + 8 + 1;
      stub_name = bfd_malloc (len);
      if (stub_name == NULL)
	return stub_name;

      sprintf (stub_name, "%x:%x+%x",
	       sym_sec->id & 0xffffffff,
	       (int) ELF32_R_SYM (rel->r_info) & 0xffffffff,
	       (int) rel->r_addend & 0xffffffff);
      len = strlen (stub_name);
    }

  /* Strip a trailing "+0" so a zero addend gets the plain name.  */
  if (stub_name[len - 2] == '+'
      && stub_name[len - 1] == '0'
      && stub_name[len] == 0)
    stub_name[len - 2] = 0;

  return stub_name;
}
526
/* Create the note section if not already present.  This is done early so
   that the linker maps the sections to the right place in the output.
   Also stashes the STACK_ANALYSIS and EMIT_STACK_SYMS options in the
   hash table.  Returns FALSE on allocation failure.  */

bfd_boolean
spu_elf_create_sections (bfd *output_bfd,
			 struct bfd_link_info *info,
			 int stack_analysis,
			 int emit_stack_syms)
{
  bfd *ibfd;
  struct spu_link_hash_table *htab = spu_hash_table (info);

  /* Stash some options away where we can get at them later.  */
  htab->stack_analysis = stack_analysis;
  htab->emit_stack_syms = emit_stack_syms;

  /* See whether any input already provides the note section.  */
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
    if (bfd_get_section_by_name (ibfd, SPU_PTNOTE_SPUNAME) != NULL)
      break;

  if (ibfd == NULL)
    {
      /* Make SPU_PTNOTE_SPUNAME section.  */
      asection *s;
      size_t name_len;
      size_t size;
      bfd_byte *data;
      flagword flags;

      /* Attach it to the first input bfd.  */
      ibfd = info->input_bfds;
      flags = SEC_LOAD | SEC_READONLY | SEC_HAS_CONTENTS | SEC_IN_MEMORY;
      s = bfd_make_section_anyway_with_flags (ibfd, SPU_PTNOTE_SPUNAME, flags);
      if (s == NULL
	  || !bfd_set_section_alignment (ibfd, s, 4))
	return FALSE;

      /* 12-byte note header plus the name and description, each
	 padded to a 4-byte boundary.  */
      name_len = strlen (bfd_get_filename (output_bfd)) + 1;
      size = 12 + ((sizeof (SPU_PLUGIN_NAME) + 3) & -4);
      size += (name_len + 3) & -4;

      if (!bfd_set_section_size (ibfd, s, size))
	return FALSE;

      data = bfd_zalloc (ibfd, size);
      if (data == NULL)
	return FALSE;

      /* ELF note header: namesz, descsz, type (= 1), then the
	 plugin name and the output filename as the descriptor.  */
      bfd_put_32 (ibfd, sizeof (SPU_PLUGIN_NAME), data + 0);
      bfd_put_32 (ibfd, name_len, data + 4);
      bfd_put_32 (ibfd, 1, data + 8);
      memcpy (data + 12, SPU_PLUGIN_NAME, sizeof (SPU_PLUGIN_NAME));
      memcpy (data + 12 + ((sizeof (SPU_PLUGIN_NAME) + 3) & -4),
	      bfd_get_filename (output_bfd), name_len);
      s->contents = data;
    }

  return TRUE;
}
585
586 /* qsort predicate to sort sections by vma. */
587
588 static int
589 sort_sections (const void *a, const void *b)
590 {
591 const asection *const *s1 = a;
592 const asection *const *s2 = b;
593 bfd_signed_vma delta = (*s1)->vma - (*s2)->vma;
594
595 if (delta != 0)
596 return delta < 0 ? -1 : 1;
597
598 return (*s1)->index - (*s2)->index;
599 }
600
601 /* Identify overlays in the output bfd, and number them. */
602
603 bfd_boolean
604 spu_elf_find_overlays (bfd *output_bfd, struct bfd_link_info *info)
605 {
606 struct spu_link_hash_table *htab = spu_hash_table (info);
607 asection **alloc_sec;
608 unsigned int i, n, ovl_index, num_buf;
609 asection *s;
610 bfd_vma ovl_end;
611
612 if (output_bfd->section_count < 2)
613 return FALSE;
614
615 alloc_sec = bfd_malloc (output_bfd->section_count * sizeof (*alloc_sec));
616 if (alloc_sec == NULL)
617 return FALSE;
618
619 /* Pick out all the alloced sections. */
620 for (n = 0, s = output_bfd->sections; s != NULL; s = s->next)
621 if ((s->flags & SEC_ALLOC) != 0
622 && (s->flags & (SEC_LOAD | SEC_THREAD_LOCAL)) != SEC_THREAD_LOCAL
623 && s->size != 0)
624 alloc_sec[n++] = s;
625
626 if (n == 0)
627 {
628 free (alloc_sec);
629 return FALSE;
630 }
631
632 /* Sort them by vma. */
633 qsort (alloc_sec, n, sizeof (*alloc_sec), sort_sections);
634
635 /* Look for overlapping vmas. Any with overlap must be overlays.
636 Count them. Also count the number of overlay regions and for
637 each region save a section from that region with the lowest vma
638 and another section with the highest end vma. */
639 ovl_end = alloc_sec[0]->vma + alloc_sec[0]->size;
640 for (ovl_index = 0, num_buf = 0, i = 1; i < n; i++)
641 {
642 s = alloc_sec[i];
643 if (s->vma < ovl_end)
644 {
645 asection *s0 = alloc_sec[i - 1];
646
647 if (spu_elf_section_data (s0)->ovl_index == 0)
648 {
649 spu_elf_section_data (s0)->ovl_index = ++ovl_index;
650 alloc_sec[num_buf * 2] = s0;
651 alloc_sec[num_buf * 2 + 1] = s0;
652 num_buf++;
653 }
654 spu_elf_section_data (s)->ovl_index = ++ovl_index;
655 if (ovl_end < s->vma + s->size)
656 {
657 ovl_end = s->vma + s->size;
658 alloc_sec[num_buf * 2 - 1] = s;
659 }
660 }
661 else
662 ovl_end = s->vma + s->size;
663 }
664
665 htab->num_overlays = ovl_index;
666 htab->num_buf = num_buf;
667 if (ovl_index == 0)
668 {
669 free (alloc_sec);
670 return FALSE;
671 }
672
673 alloc_sec = bfd_realloc (alloc_sec, num_buf * 2 * sizeof (*alloc_sec));
674 if (alloc_sec == NULL)
675 return FALSE;
676
677 htab->ovl_region = alloc_sec;
678 return TRUE;
679 }
680
/* Overlay stub building blocks.  Stub part 1 (one per stub) loads the
   target function address and branches to the per-overlay part 2,
   which loads the overlay number and branches to __ovly_load.  */

/* One of these per stub.  */
#define SIZEOF_STUB1 8
#define ILA_79	0x4200004f	/* ila $79,function_address */
#define BR	0x32000000	/* br stub2 */

/* One of these per overlay.  */
#define SIZEOF_STUB2 8
#define ILA_78	0x4200004e	/* ila $78,overlay_number */
				/* br __ovly_load */
#define NOP	0x40200000
691
692 /* Return true for all relative and absolute branch instructions.
693 bra 00110000 0..
694 brasl 00110001 0..
695 br 00110010 0..
696 brsl 00110011 0..
697 brz 00100000 0..
698 brnz 00100001 0..
699 brhz 00100010 0..
700 brhnz 00100011 0.. */
701
702 static bfd_boolean
703 is_branch (const unsigned char *insn)
704 {
705 return (insn[0] & 0xec) == 0x20 && (insn[1] & 0x80) == 0;
706 }
707
708 /* Return true for all indirect branch instructions.
709 bi 00110101 000
710 bisl 00110101 001
711 iret 00110101 010
712 bisled 00110101 011
713 biz 00100101 000
714 binz 00100101 001
715 bihz 00100101 010
716 bihnz 00100101 011 */
717
718 static bfd_boolean
719 is_indirect_branch (const unsigned char *insn)
720 {
721 return (insn[0] & 0xef) == 0x25 && (insn[1] & 0x80) == 0;
722 }
723
724 /* Return true for branch hint instructions.
725 hbra 0001000..
726 hbrr 0001001.. */
727
728 static bfd_boolean
729 is_hint (const unsigned char *insn)
730 {
731 return (insn[0] & 0xfc) == 0x10;
732 }
733
734 /* Return TRUE if this reloc symbol should possibly go via an overlay stub. */
735
736 static bfd_boolean
737 needs_ovl_stub (const char *sym_name,
738 asection *sym_sec,
739 asection *input_section,
740 struct spu_link_hash_table *htab,
741 bfd_boolean is_branch)
742 {
743 if (htab->num_overlays == 0)
744 return FALSE;
745
746 if (sym_sec == NULL
747 || sym_sec->output_section == NULL
748 || spu_elf_section_data (sym_sec->output_section) == NULL)
749 return FALSE;
750
751 /* setjmp always goes via an overlay stub, because then the return
752 and hence the longjmp goes via __ovly_return. That magically
753 makes setjmp/longjmp between overlays work. */
754 if (strncmp (sym_name, "setjmp", 6) == 0
755 && (sym_name[6] == '\0' || sym_name[6] == '@'))
756 return TRUE;
757
758 /* Usually, symbols in non-overlay sections don't need stubs. */
759 if (spu_elf_section_data (sym_sec->output_section)->ovl_index == 0
760 && !htab->non_overlay_stubs)
761 return FALSE;
762
763 /* A reference from some other section to a symbol in an overlay
764 section needs a stub. */
765 if (spu_elf_section_data (sym_sec->output_section)->ovl_index
766 != spu_elf_section_data (input_section->output_section)->ovl_index)
767 return TRUE;
768
769 /* If this insn isn't a branch then we are possibly taking the
770 address of a function and passing it out somehow. */
771 return !is_branch;
772 }
773
/* Called via elf_link_hash_traverse to allocate stubs for any _SPUEAR_
   symbols.  INF is the SPU hash table.  Returns FALSE (stopping the
   traversal) on allocation failure, recording the error in stubs.err.  */

static bfd_boolean
allocate_spuear_stubs (struct elf_link_hash_entry *h, void *inf)
{
  /* Symbols starting with _SPUEAR_ need a stub because they may be
     invoked by the PPU.  */
  if ((h->root.type == bfd_link_hash_defined
       || h->root.type == bfd_link_hash_defweak)
      && h->def_regular
      && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0)
    {
      struct spu_link_hash_table *htab = inf;
      /* Static and zero-initialized: these stubs have no addend.  */
      static Elf_Internal_Rela zero_rel;
      char *stub_name = spu_stub_name (h->root.u.def.section, h, &zero_rel);
      struct spu_stub_hash_entry *sh;

      if (stub_name == NULL)
	{
	  htab->stubs.err = 1;
	  return FALSE;
	}

      /* Create (or find) the stub hash entry; we own stub_name until
	 the hash table copies it.  */
      sh = (struct spu_stub_hash_entry *)
	bfd_hash_lookup (&htab->stub_hash_table, stub_name, TRUE, FALSE);
      if (sh == NULL)
	{
	  free (stub_name);
	  return FALSE;
	}

      /* If this entry isn't new, we already have a stub.  */
      if (sh->target_section != NULL)
	{
	  free (stub_name);
	  return TRUE;
	}

      sh->target_section = h->root.u.def.section;
      sh->target_off = h->root.u.def.value;
      htab->stubs.count += 1;
    }

  return TRUE;
}
820
821 /* Called via bfd_hash_traverse to set up pointers to all symbols
822 in the stub hash table. */
823
824 static bfd_boolean
825 populate_stubs (struct bfd_hash_entry *bh, void *inf)
826 {
827 struct spu_link_hash_table *htab = inf;
828
829 htab->stubs.sh[--htab->stubs.count] = (struct spu_stub_hash_entry *) bh;
830 return TRUE;
831 }
832
/* qsort predicate to sort stubs: primarily by the target's overlay
   number, then by target address, and finally by stub name so the
   order is fully deterministic.  */

static int
sort_stubs (const void *a, const void *b)
{
  const struct spu_stub_hash_entry *const *sa = a;
  const struct spu_stub_hash_entry *const *sb = b;
  int i;
  bfd_signed_vma d;

  /* Group stubs by the overlay their target lives in.  */
  i = spu_elf_section_data ((*sa)->target_section->output_section)->ovl_index;
  i -= spu_elf_section_data ((*sb)->target_section->output_section)->ovl_index;
  if (i != 0)
    return i;

  /* Within an overlay, order by the target's output address.  */
  d = ((*sa)->target_section->output_section->vma
       + (*sa)->target_section->output_offset
       + (*sa)->target_off
       - (*sb)->target_section->output_section->vma
       - (*sb)->target_section->output_offset
       - (*sb)->target_off);
  if (d != 0)
    return d < 0 ? -1 : 1;

  /* Two functions at the same address.  Aliases perhaps.  Hash keys
     are unique, so the names cannot compare equal.  Note the b/a
     order here, giving descending name order for same-address stubs.  */
  i = strcmp ((*sb)->root.string, (*sa)->root.string);
  BFD_ASSERT (i != 0);
  return i;
}
862
863 /* Allocate space for overlay call and return stubs. */
864
865 bfd_boolean
866 spu_elf_size_stubs (bfd *output_bfd,
867 struct bfd_link_info *info,
868 int non_overlay_stubs,
869 int stack_analysis,
870 asection **stub,
871 asection **ovtab,
872 asection **toe)
873 {
874 struct spu_link_hash_table *htab = spu_hash_table (info);
875 bfd *ibfd;
876 unsigned i, group;
877 flagword flags;
878
879 htab->non_overlay_stubs = non_overlay_stubs;
880 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
881 {
882 extern const bfd_target bfd_elf32_spu_vec;
883 Elf_Internal_Shdr *symtab_hdr;
884 asection *section;
885 Elf_Internal_Sym *local_syms = NULL;
886 void *psyms;
887
888 if (ibfd->xvec != &bfd_elf32_spu_vec)
889 continue;
890
891 /* We'll need the symbol table in a second. */
892 symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
893 if (symtab_hdr->sh_info == 0)
894 continue;
895
896 /* Arrange to read and keep global syms for later stack analysis. */
897 psyms = &local_syms;
898 if (stack_analysis)
899 psyms = &symtab_hdr->contents;
900
901 /* Walk over each section attached to the input bfd. */
902 for (section = ibfd->sections; section != NULL; section = section->next)
903 {
904 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
905
906 /* If there aren't any relocs, then there's nothing more to do. */
907 if ((section->flags & SEC_RELOC) == 0
908 || (section->flags & SEC_ALLOC) == 0
909 || (section->flags & SEC_LOAD) == 0
910 || section->reloc_count == 0)
911 continue;
912
913 /* If this section is a link-once section that will be
914 discarded, then don't create any stubs. */
915 if (section->output_section == NULL
916 || section->output_section->owner != output_bfd)
917 continue;
918
919 /* Get the relocs. */
920 internal_relocs
921 = _bfd_elf_link_read_relocs (ibfd, section, NULL, NULL,
922 info->keep_memory);
923 if (internal_relocs == NULL)
924 goto error_ret_free_local;
925
926 /* Now examine each relocation. */
927 irela = internal_relocs;
928 irelaend = irela + section->reloc_count;
929 for (; irela < irelaend; irela++)
930 {
931 enum elf_spu_reloc_type r_type;
932 unsigned int r_indx;
933 asection *sym_sec;
934 Elf_Internal_Sym *sym;
935 struct elf_link_hash_entry *h;
936 const char *sym_name;
937 char *stub_name;
938 struct spu_stub_hash_entry *sh;
939 unsigned int sym_type;
940 enum _insn_type { non_branch, branch, call } insn_type;
941
942 r_type = ELF32_R_TYPE (irela->r_info);
943 r_indx = ELF32_R_SYM (irela->r_info);
944
945 if (r_type >= R_SPU_max)
946 {
947 bfd_set_error (bfd_error_bad_value);
948 goto error_ret_free_internal;
949 }
950
951 /* Determine the reloc target section. */
952 if (!get_sym_h (&h, &sym, &sym_sec, psyms, r_indx, ibfd))
953 goto error_ret_free_internal;
954
955 if (sym_sec == NULL
956 || sym_sec->output_section == NULL
957 || sym_sec->output_section->owner != output_bfd)
958 continue;
959
960 /* Ensure no stubs for user supplied overlay manager syms. */
961 if (h != NULL
962 && (strcmp (h->root.root.string, "__ovly_load") == 0
963 || strcmp (h->root.root.string, "__ovly_return") == 0))
964 continue;
965
966 insn_type = non_branch;
967 if (r_type == R_SPU_REL16
968 || r_type == R_SPU_ADDR16)
969 {
970 unsigned char insn[4];
971
972 if (!bfd_get_section_contents (ibfd, section, insn,
973 irela->r_offset, 4))
974 goto error_ret_free_internal;
975
976 if (is_branch (insn) || is_hint (insn))
977 {
978 insn_type = branch;
979 if ((insn[0] & 0xfd) == 0x31)
980 insn_type = call;
981 }
982 }
983
984 /* We are only interested in function symbols. */
985 if (h != NULL)
986 {
987 sym_type = h->type;
988 sym_name = h->root.root.string;
989 }
990 else
991 {
992 sym_type = ELF_ST_TYPE (sym->st_info);
993 sym_name = bfd_elf_sym_name (sym_sec->owner,
994 symtab_hdr,
995 sym,
996 sym_sec);
997 }
998 if (sym_type != STT_FUNC)
999 {
1000 /* It's common for people to write assembly and forget
1001 to give function symbols the right type. Handle
1002 calls to such symbols, but warn so that (hopefully)
1003 people will fix their code. We need the symbol
1004 type to be correct to distinguish function pointer
1005 initialisation from other pointer initialisation. */
1006 if (insn_type == call)
1007 (*_bfd_error_handler) (_("warning: call to non-function"
1008 " symbol %s defined in %B"),
1009 sym_sec->owner, sym_name);
1010 else
1011 continue;
1012 }
1013
1014 if (!needs_ovl_stub (sym_name, sym_sec, section, htab,
1015 insn_type != non_branch))
1016 continue;
1017
1018 stub_name = spu_stub_name (sym_sec, h, irela);
1019 if (stub_name == NULL)
1020 goto error_ret_free_internal;
1021
1022 sh = (struct spu_stub_hash_entry *)
1023 bfd_hash_lookup (&htab->stub_hash_table, stub_name,
1024 TRUE, FALSE);
1025 if (sh == NULL)
1026 {
1027 free (stub_name);
1028 error_ret_free_internal:
1029 if (elf_section_data (section)->relocs != internal_relocs)
1030 free (internal_relocs);
1031 error_ret_free_local:
1032 if (local_syms != NULL
1033 && (symtab_hdr->contents
1034 != (unsigned char *) local_syms))
1035 free (local_syms);
1036 return FALSE;
1037 }
1038
1039 /* If this entry isn't new, we already have a stub. */
1040 if (sh->target_section != NULL)
1041 {
1042 free (stub_name);
1043 continue;
1044 }
1045
1046 sh->target_section = sym_sec;
1047 if (h != NULL)
1048 sh->target_off = h->root.u.def.value;
1049 else
1050 sh->target_off = sym->st_value;
1051 sh->target_off += irela->r_addend;
1052
1053 htab->stubs.count += 1;
1054 }
1055
1056 /* We're done with the internal relocs, free them. */
1057 if (elf_section_data (section)->relocs != internal_relocs)
1058 free (internal_relocs);
1059 }
1060
1061 if (local_syms != NULL
1062 && symtab_hdr->contents != (unsigned char *) local_syms)
1063 {
1064 if (!info->keep_memory)
1065 free (local_syms);
1066 else
1067 symtab_hdr->contents = (unsigned char *) local_syms;
1068 }
1069 }
1070
1071 elf_link_hash_traverse (&htab->elf, allocate_spuear_stubs, htab);
1072 if (htab->stubs.err)
1073 return FALSE;
1074
1075 *stub = NULL;
1076 if (htab->stubs.count == 0)
1077 return TRUE;
1078
1079 ibfd = info->input_bfds;
1080 flags = (SEC_ALLOC | SEC_LOAD | SEC_CODE | SEC_READONLY
1081 | SEC_HAS_CONTENTS | SEC_IN_MEMORY);
1082 htab->stub = bfd_make_section_anyway_with_flags (ibfd, ".stub", flags);
1083 *stub = htab->stub;
1084 if (htab->stub == NULL
1085 || !bfd_set_section_alignment (ibfd, htab->stub, 2))
1086 return FALSE;
1087
1088 flags = (SEC_ALLOC | SEC_LOAD
1089 | SEC_HAS_CONTENTS | SEC_IN_MEMORY);
1090 htab->ovtab = bfd_make_section_anyway_with_flags (ibfd, ".ovtab", flags);
1091 *ovtab = htab->ovtab;
1092 if (htab->ovtab == NULL
1093 || !bfd_set_section_alignment (ibfd, htab->stub, 4))
1094 return FALSE;
1095
1096 *toe = bfd_make_section_anyway_with_flags (ibfd, ".toe", SEC_ALLOC);
1097 if (*toe == NULL
1098 || !bfd_set_section_alignment (ibfd, *toe, 4))
1099 return FALSE;
1100 (*toe)->size = 16;
1101
1102 /* Retrieve all the stubs and sort. */
1103 htab->stubs.sh = bfd_malloc (htab->stubs.count * sizeof (*htab->stubs.sh));
1104 if (htab->stubs.sh == NULL)
1105 return FALSE;
1106 i = htab->stubs.count;
1107 bfd_hash_traverse (&htab->stub_hash_table, populate_stubs, htab);
1108 BFD_ASSERT (htab->stubs.count == 0);
1109
1110 htab->stubs.count = i;
1111 qsort (htab->stubs.sh, htab->stubs.count, sizeof (*htab->stubs.sh),
1112 sort_stubs);
1113
1114 /* Now that the stubs are sorted, place them in the stub section.
1115 Stubs are grouped per overlay
1116 . ila $79,func1
1117 . br 1f
1118 . ila $79,func2
1119 . br 1f
1120 .
1121 .
1122 . ila $79,funcn
1123 . nop
1124 . 1:
1125 . ila $78,ovl_index
1126 . br __ovly_load */
1127
1128 group = 0;
1129 for (i = 0; i < htab->stubs.count; i++)
1130 {
1131 if (spu_elf_section_data (htab->stubs.sh[group]->target_section
1132 ->output_section)->ovl_index
1133 != spu_elf_section_data (htab->stubs.sh[i]->target_section
1134 ->output_section)->ovl_index)
1135 {
1136 htab->stub->size += SIZEOF_STUB2;
1137 for (; group != i; group++)
1138 htab->stubs.sh[group]->delta
1139 = htab->stubs.sh[i - 1]->off - htab->stubs.sh[group]->off;
1140 }
1141 if (group == i
1142 || ((htab->stubs.sh[i - 1]->target_section->output_section->vma
1143 + htab->stubs.sh[i - 1]->target_section->output_offset
1144 + htab->stubs.sh[i - 1]->target_off)
1145 != (htab->stubs.sh[i]->target_section->output_section->vma
1146 + htab->stubs.sh[i]->target_section->output_offset
1147 + htab->stubs.sh[i]->target_off)))
1148 {
1149 htab->stubs.sh[i]->off = htab->stub->size;
1150 htab->stub->size += SIZEOF_STUB1;
1151 }
1152 else
1153 htab->stubs.sh[i]->off = htab->stubs.sh[i - 1]->off;
1154 }
1155 if (group != i)
1156 htab->stub->size += SIZEOF_STUB2;
1157 for (; group != i; group++)
1158 htab->stubs.sh[group]->delta
1159 = htab->stubs.sh[i - 1]->off - htab->stubs.sh[group]->off;
1160
1161 /* htab->ovtab consists of two arrays.
1162 . struct {
1163 . u32 vma;
1164 . u32 size;
1165 . u32 file_off;
1166 . u32 buf;
1167 . } _ovly_table[];
1168 .
1169 . struct {
1170 . u32 mapped;
1171 . } _ovly_buf_table[]; */
1172
1173 htab->ovtab->alignment_power = 4;
1174 htab->ovtab->size = htab->num_overlays * 16 + htab->num_buf * 4;
1175
1176 return TRUE;
1177 }
1178
1179 /* Functions to handle embedded spu_ovl.o object. */
1180
/* bfd_openr_iovec "open" callback: the stream cookie handed to
   bfd_openr_iovec is the _ovl_stream itself, so just return it.  */
static void *
ovl_mgr_open (struct bfd *nbfd ATTRIBUTE_UNUSED, void *stream)
{
  return stream;
}
1186
1187 static file_ptr
1188 ovl_mgr_pread (struct bfd *abfd ATTRIBUTE_UNUSED,
1189 void *stream,
1190 void *buf,
1191 file_ptr nbytes,
1192 file_ptr offset)
1193 {
1194 struct _ovl_stream *os;
1195 size_t count;
1196 size_t max;
1197
1198 os = (struct _ovl_stream *) stream;
1199 max = (const char *) os->end - (const char *) os->start;
1200
1201 if ((ufile_ptr) offset >= max)
1202 return 0;
1203
1204 count = nbytes;
1205 if (count > max - offset)
1206 count = max - offset;
1207
1208 memcpy (buf, (const char *) os->start + offset, count);
1209 return count;
1210 }
1211
/* Open a BFD on the built-in overlay manager image described by
   STREAM, using the iovec callbacks above so no file system access
   is needed.  Stores the new BFD in *OVL_BFD and returns FALSE if it
   could not be created.  */
bfd_boolean
spu_elf_open_builtin_lib (bfd **ovl_bfd, const struct _ovl_stream *stream)
{
  *ovl_bfd = bfd_openr_iovec ("builtin ovl_mgr",
			      "elf32-spu",
			      ovl_mgr_open,
			      (void *) stream,
			      ovl_mgr_pread,
			      NULL,
			      NULL);
  return *ovl_bfd != NULL;
}
1224
1225 /* Fill in the ila and br for a stub. On the last stub for a group,
1226 write the stub that sets the overlay number too. */
1227
static bfd_boolean
write_one_stub (struct spu_stub_hash_entry *ent, struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  asection *sec = htab->stub;
  asection *s = ent->target_section;
  unsigned int ovl;
  bfd_vma val;

  /* Stub1: "ila $79,target" then a branch to the group trailer.
     ent->delta is the byte distance from this stub to the last stub
     of the group, where the trailer lives.  */
  val = ent->target_off + s->output_offset + s->output_section->vma;
  bfd_put_32 (sec->owner, ILA_79 + ((val << 7) & 0x01ffff80),
	      sec->contents + ent->off);
  val = ent->delta + 4;
  bfd_put_32 (sec->owner, BR + ((val << 5) & 0x007fff80),
	      sec->contents + ent->off + 4);

  /* If this is the last stub of this group, write stub2.  */
  if (ent->delta == 0)
    {
      /* Overwrite the branch just written: the last stub falls
	 through a nop into the trailer instead.  */
      bfd_put_32 (sec->owner, NOP,
		  sec->contents + ent->off + 4);

      /* Trailer: "ila $78,ovl_index; br __ovly_load".  */
      ovl = spu_elf_section_data (s->output_section)->ovl_index;
      bfd_put_32 (sec->owner, ILA_78 + ((ovl << 7) & 0x01ffff80),
		  sec->contents + ent->off + 8);

      /* PC-relative displacement from the br to __ovly_load.  */
      val = (htab->ovly_load->root.u.def.section->output_section->vma
	     + htab->ovly_load->root.u.def.section->output_offset
	     + htab->ovly_load->root.u.def.value
	     - (sec->output_section->vma
		+ sec->output_offset
		+ ent->off + 12));

      /* A displacement outside [-0x20000, 0x1ffff] can't be encoded;
	 flag it so the caller can report the overflow once.  */
      if (val + 0x20000 >= 0x40000)
	htab->stub_overflow = TRUE;

      bfd_put_32 (sec->owner, BR + ((val << 5) & 0x007fff80),
		  sec->contents + ent->off + 12);
    }

  if (htab->emit_stub_syms)
    {
      /* Define an "xxxxxxxx.ovl_call.SYM" symbol on the stub so it is
	 visible in the link map and to debuggers.  */
      struct elf_link_hash_entry *h;
      size_t len1, len2;
      char *name;

      len1 = sizeof ("00000000.ovl_call.") - 1;
      len2 = strlen (ent->root.string);
      name = bfd_malloc (len1 + len2 + 1);
      if (name == NULL)
	return FALSE;
      memcpy (name, "00000000.ovl_call.", len1);
      memcpy (name + len1, ent->root.string, len2 + 1);
      h = elf_link_hash_lookup (&htab->elf, name, TRUE, TRUE, FALSE);
      free (name);
      if (h == NULL)
	return FALSE;
      if (h->root.type == bfd_link_hash_new)
	{
	  h->root.type = bfd_link_hash_defined;
	  h->root.u.def.section = sec;
	  h->root.u.def.value = ent->off;
	  /* The last stub of a group also owns the trailer.  */
	  h->size = (ent->delta == 0
		     ? SIZEOF_STUB1 + SIZEOF_STUB2 : SIZEOF_STUB1);
	  h->type = STT_FUNC;
	  h->ref_regular = 1;
	  h->def_regular = 1;
	  h->ref_regular_nonweak = 1;
	  h->forced_local = 1;
	  h->non_elf = 0;
	}
    }

  return TRUE;
}
1303
1304 /* Define an STT_OBJECT symbol. */
1305
1306 static struct elf_link_hash_entry *
1307 define_ovtab_symbol (struct spu_link_hash_table *htab, const char *name)
1308 {
1309 struct elf_link_hash_entry *h;
1310
1311 h = elf_link_hash_lookup (&htab->elf, name, TRUE, FALSE, FALSE);
1312 if (h == NULL)
1313 return NULL;
1314
1315 if (h->root.type != bfd_link_hash_defined
1316 || !h->def_regular)
1317 {
1318 h->root.type = bfd_link_hash_defined;
1319 h->root.u.def.section = htab->ovtab;
1320 h->type = STT_OBJECT;
1321 h->ref_regular = 1;
1322 h->def_regular = 1;
1323 h->ref_regular_nonweak = 1;
1324 h->non_elf = 0;
1325 }
1326 else
1327 {
1328 (*_bfd_error_handler) (_("%B is not allowed to define %s"),
1329 h->root.u.def.section->owner,
1330 h->root.root.string);
1331 bfd_set_error (bfd_error_bad_value);
1332 return NULL;
1333 }
1334
1335 return h;
1336 }
1337
1338 /* Fill in all stubs and the overlay tables. */
1339
1340 bfd_boolean
1341 spu_elf_build_stubs (struct bfd_link_info *info, int emit_syms, asection *toe)
1342 {
1343 struct spu_link_hash_table *htab = spu_hash_table (info);
1344 struct elf_link_hash_entry *h;
1345 bfd_byte *p;
1346 asection *s;
1347 bfd *obfd;
1348 unsigned int i;
1349
1350 htab->emit_stub_syms = emit_syms;
1351 htab->stub->contents = bfd_zalloc (htab->stub->owner, htab->stub->size);
1352 if (htab->stub->contents == NULL)
1353 return FALSE;
1354
1355 h = elf_link_hash_lookup (&htab->elf, "__ovly_load", FALSE, FALSE, FALSE);
1356 htab->ovly_load = h;
1357 BFD_ASSERT (h != NULL
1358 && (h->root.type == bfd_link_hash_defined
1359 || h->root.type == bfd_link_hash_defweak)
1360 && h->def_regular);
1361
1362 s = h->root.u.def.section->output_section;
1363 if (spu_elf_section_data (s)->ovl_index)
1364 {
1365 (*_bfd_error_handler) (_("%s in overlay section"),
1366 h->root.u.def.section->owner);
1367 bfd_set_error (bfd_error_bad_value);
1368 return FALSE;
1369 }
1370
1371 /* Write out all the stubs. */
1372 for (i = 0; i < htab->stubs.count; i++)
1373 write_one_stub (htab->stubs.sh[i], info);
1374
1375 if (htab->stub_overflow)
1376 {
1377 (*_bfd_error_handler) (_("overlay stub relocation overflow"));
1378 bfd_set_error (bfd_error_bad_value);
1379 return FALSE;
1380 }
1381
1382 htab->ovtab->contents = bfd_zalloc (htab->ovtab->owner, htab->ovtab->size);
1383 if (htab->ovtab->contents == NULL)
1384 return FALSE;
1385
1386 /* Write out _ovly_table. */
1387 p = htab->ovtab->contents;
1388 obfd = htab->ovtab->output_section->owner;
1389 for (s = obfd->sections; s != NULL; s = s->next)
1390 {
1391 unsigned int ovl_index = spu_elf_section_data (s)->ovl_index;
1392
1393 if (ovl_index != 0)
1394 {
1395 unsigned int lo, hi, mid;
1396 unsigned long off = (ovl_index - 1) * 16;
1397 bfd_put_32 (htab->ovtab->owner, s->vma, p + off);
1398 bfd_put_32 (htab->ovtab->owner, (s->size + 15) & -16, p + off + 4);
1399 /* file_off written later in spu_elf_modify_program_headers. */
1400
1401 lo = 0;
1402 hi = htab->num_buf;
1403 while (lo < hi)
1404 {
1405 mid = (lo + hi) >> 1;
1406 if (htab->ovl_region[2 * mid + 1]->vma
1407 + htab->ovl_region[2 * mid + 1]->size <= s->vma)
1408 lo = mid + 1;
1409 else if (htab->ovl_region[2 * mid]->vma > s->vma)
1410 hi = mid;
1411 else
1412 {
1413 bfd_put_32 (htab->ovtab->owner, mid + 1, p + off + 12);
1414 break;
1415 }
1416 }
1417 BFD_ASSERT (lo < hi);
1418 }
1419 }
1420
1421 /* Write out _ovly_buf_table. */
1422 p = htab->ovtab->contents + htab->num_overlays * 16;
1423 for (i = 0; i < htab->num_buf; i++)
1424 {
1425 bfd_put_32 (htab->ovtab->owner, 0, p);
1426 p += 4;
1427 }
1428
1429 h = define_ovtab_symbol (htab, "_ovly_table");
1430 if (h == NULL)
1431 return FALSE;
1432 h->root.u.def.value = 0;
1433 h->size = htab->num_overlays * 16;
1434
1435 h = define_ovtab_symbol (htab, "_ovly_table_end");
1436 if (h == NULL)
1437 return FALSE;
1438 h->root.u.def.value = htab->num_overlays * 16;
1439 h->size = 0;
1440
1441 h = define_ovtab_symbol (htab, "_ovly_buf_table");
1442 if (h == NULL)
1443 return FALSE;
1444 h->root.u.def.value = htab->num_overlays * 16;
1445 h->size = htab->num_buf * 4;
1446
1447 h = define_ovtab_symbol (htab, "_ovly_buf_table_end");
1448 if (h == NULL)
1449 return FALSE;
1450 h->root.u.def.value = htab->num_overlays * 16 + htab->num_buf * 4;
1451 h->size = 0;
1452
1453 h = define_ovtab_symbol (htab, "_EAR_");
1454 if (h == NULL)
1455 return FALSE;
1456 h->root.u.def.section = toe;
1457 h->root.u.def.value = 0;
1458 h->size = 16;
1459
1460 return TRUE;
1461 }
1462
1463 /* OFFSET in SEC (presumably) is the beginning of a function prologue.
1464 Search for stack adjusting insns, and return the sp delta. */
1465
static int
find_function_stack_adjust (asection *sec, bfd_vma offset)
{
  int unrecog;
  /* Tracked value of each of the 128 SPU registers, all assumed zero
     on entry; only the relative change to $sp (reg[1]) matters.  */
  int reg[128];

  memset (reg, 0, sizeof (reg));
  /* Scan forward an instruction at a time, giving up after 32
     instructions we cannot decode.  */
  for (unrecog = 0; offset + 4 <= sec->size && unrecog < 32; offset += 4)
    {
      unsigned char buf[4];
      int rt, ra;
      int imm;

      /* Assume no relocs on stack adjusting insns.  */
      if (!bfd_get_section_contents (sec->owner, sec, buf, offset, 4))
	break;

      if (buf[0] == 0x24 /* stqd */)
	continue;

      rt = buf[3] & 0x7f;
      ra = ((buf[2] & 0x3f) << 1) | (buf[3] >> 7);
      /* Partly decoded immediate field.  */
      imm = (buf[1] << 9) | (buf[2] << 1) | (buf[3] >> 7);

      if (buf[0] == 0x1c /* ai */)
	{
	  /* Sign-extend the 10-bit immediate.  */
	  imm >>= 7;
	  imm = (imm ^ 0x200) - 0x200;
	  reg[rt] = reg[ra] + imm;

	  if (rt == 1 /* sp */)
	    {
	      /* A positive sp adjustment means we have hit the
		 epilogue of some function; stop scanning.  */
	      if (imm > 0)
		break;
	      return reg[rt];
	    }
	}
      else if (buf[0] == 0x18 && (buf[1] & 0xe0) == 0 /* a */)
	{
	  int rb = ((buf[1] & 0x1f) << 2) | ((buf[2] & 0xc0) >> 6);

	  reg[rt] = reg[ra] + reg[rb];
	  if (rt == 1)
	    return reg[rt];
	}
      else if ((buf[0] & 0xfc) == 0x40 /* il, ilh, ilhu, ila */)
	{
	  if (buf[0] >= 0x42 /* ila */)
	    imm |= (buf[0] & 1) << 17;
	  else
	    {
	      imm &= 0xffff;

	      if (buf[0] == 0x40 /* il */)
		{
		  if ((buf[1] & 0x80) == 0)
		    goto unknown_insn;
		  /* Sign-extend the 16-bit immediate.  */
		  imm = (imm ^ 0x8000) - 0x8000;
		}
	      else if ((buf[1] & 0x80) == 0 /* ilhu */)
		imm <<= 16;
	    }
	  reg[rt] = imm;
	  continue;
	}
      else if (buf[0] == 0x60 && (buf[1] & 0x80) != 0 /* iohl */)
	{
	  reg[rt] |= imm & 0xffff;
	  continue;
	}
      else if (buf[0] == 0x04 /* ori */)
	{
	  imm >>= 7;
	  imm = (imm ^ 0x200) - 0x200;
	  reg[rt] = reg[ra] | imm;
	  continue;
	}
      else if ((buf[0] == 0x33 && imm == 1 /* brsl .+4 */)
	       || (buf[0] == 0x08 && (buf[1] & 0xe0) == 0 /* sf */))
	{
	  /* Used in pic reg load.  Say rt is trashed.  */
	  reg[rt] = 0;
	  continue;
	}
      else if (is_branch (buf) || is_indirect_branch (buf))
	/* If we hit a branch then we must be out of the prologue.  */
	break;
    unknown_insn:
      ++unrecog;
    }

  /* No stack adjustment found.  */
  return 0;
}
1560
/* qsort predicate to sort symbols by section and value.  */

/* Context for sort_syms, set up by the caller before the qsort call
   since qsort comparators take no user data argument.  */
static Elf_Internal_Sym *sort_syms_syms;
static asection **sort_syms_psecs;

/* Order symbol pointers by section index, then ascending st_value,
   then descending st_size (so zero-size aliases follow the covering
   symbol), with the pointer itself as a final stable tie-break.  */
static int
sort_syms (const void *a, const void *b)
{
  Elf_Internal_Sym *const *s1 = a;
  Elf_Internal_Sym *const *s2 = b;
  asection *sec1,*sec2;
  bfd_signed_vma delta;

  /* *s1 - sort_syms_syms is the symbol's table index, which also
     indexes the parallel per-symbol section array.  */
  sec1 = sort_syms_psecs[*s1 - sort_syms_syms];
  sec2 = sort_syms_psecs[*s2 - sort_syms_syms];

  if (sec1 != sec2)
    return sec1->index - sec2->index;

  delta = (*s1)->st_value - (*s2)->st_value;
  if (delta != 0)
    return delta < 0 ? -1 : 1;

  /* Note the swapped operands: bigger size sorts earlier.  */
  delta = (*s2)->st_size - (*s1)->st_size;
  if (delta != 0)
    return delta < 0 ? -1 : 1;

  return *s1 < *s2 ? -1 : 1;
}
1590
/* One edge of the call graph: a call (or tail call/branch) to FUN.
   Entries are chained on the caller's function_info.call_list.  */
struct call_info
{
  struct function_info *fun;
  struct call_info *next;
  /* Non-zero when this edge is a tail call rather than a true call.  */
  int is_tail;
};
1597
struct function_info
{
  /* List of functions called.  Also branches to hot/cold part of
     function.  */
  struct call_info *call_list;
  /* For hot/cold part of function, point to owner.  */
  struct function_info *start;
  /* Symbol at start of function: u.h when GLOBAL is set, u.sym
     otherwise.  */
  union {
    Elf_Internal_Sym *sym;
    struct elf_link_hash_entry *h;
  } u;
  /* Function section.  */
  asection *sec;
  /* Address range of (this part of) function.  */
  bfd_vma lo, hi;
  /* Stack usage.  */
  int stack;
  /* Set if global symbol.  */
  unsigned int global : 1;
  /* Set if known to be start of function (as distinct from a hunk
     in hot/cold section).  */
  unsigned int is_func : 1;
  /* Flags used during call tree traversal.  */
  unsigned int visit1 : 1;
  unsigned int non_root : 1;
  unsigned int visit2 : 1;
  unsigned int marking : 1;
  unsigned int visit3 : 1;
};
1628
/* Per-section function table, kept sorted by address by
   maybe_insert_function.  NUM_FUN of the MAX_FUN allocated entries
   are in use.  */
struct spu_elf_stack_info
{
  int num_fun;
  int max_fun;
  /* Variable size array describing functions, one per contiguous
     address range belonging to a function.  */
  struct function_info fun[1];
};
1637
1638 /* Allocate a struct spu_elf_stack_info with MAX_FUN struct function_info
1639 entries for section SEC. */
1640
1641 static struct spu_elf_stack_info *
1642 alloc_stack_info (asection *sec, int max_fun)
1643 {
1644 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
1645 bfd_size_type amt;
1646
1647 amt = sizeof (struct spu_elf_stack_info);
1648 amt += (max_fun - 1) * sizeof (struct function_info);
1649 sec_data->stack_info = bfd_zmalloc (amt);
1650 if (sec_data->stack_info != NULL)
1651 sec_data->stack_info->max_fun = max_fun;
1652 return sec_data->stack_info;
1653 }
1654
1655 /* Add a new struct function_info describing a (part of a) function
1656 starting at SYM_H. Keep the array sorted by address. */
1657
static struct function_info *
maybe_insert_function (asection *sec,
		       void *sym_h,
		       bfd_boolean global,
		       bfd_boolean is_func)
{
  struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
  struct spu_elf_stack_info *sinfo = sec_data->stack_info;
  int i;
  bfd_vma off, size;

  /* Lazily create the per-section info on first insertion.  */
  if (sinfo == NULL)
    {
      sinfo = alloc_stack_info (sec, 20);
      if (sinfo == NULL)
	return NULL;
    }

  /* SYM_H is an Elf_Internal_Sym * for locals, an
     elf_link_hash_entry * for globals.  */
  if (!global)
    {
      Elf_Internal_Sym *sym = sym_h;
      off = sym->st_value;
      size = sym->st_size;
    }
  else
    {
      struct elf_link_hash_entry *h = sym_h;
      off = h->root.u.def.value;
      size = h->size;
    }

  /* Find the last entry starting at or before OFF.  */
  for (i = sinfo->num_fun; --i >= 0; )
    if (sinfo->fun[i].lo <= off)
      break;

  if (i >= 0)
    {
      /* Don't add another entry for an alias, but do update some
	 info.  */
      if (sinfo->fun[i].lo == off)
	{
	  /* Prefer globals over local syms.  */
	  if (global && !sinfo->fun[i].global)
	    {
	      sinfo->fun[i].global = TRUE;
	      sinfo->fun[i].u.h = sym_h;
	    }
	  if (is_func)
	    sinfo->fun[i].is_func = TRUE;
	  return &sinfo->fun[i];
	}
      /* Ignore a zero-size symbol inside an existing function.  */
      else if (sinfo->fun[i].hi > off && size == 0)
	return &sinfo->fun[i];
    }

  /* Insert at position I + 1: shift later entries up, or grow the
     array when appending to a full one.  NOTE(review): the mid-array
     branch does not check capacity before the memmove — confirm the
     array can never be full when inserting in the middle.  */
  if (++i < sinfo->num_fun)
    memmove (&sinfo->fun[i + 1], &sinfo->fun[i],
	     (sinfo->num_fun - i) * sizeof (sinfo->fun[i]));
  else if (i >= sinfo->max_fun)
    {
      bfd_size_type amt = sizeof (struct spu_elf_stack_info);
      bfd_size_type old = amt;

      /* Grow by half again plus slack.  */
      old += (sinfo->max_fun - 1) * sizeof (struct function_info);
      sinfo->max_fun += 20 + (sinfo->max_fun >> 1);
      amt += (sinfo->max_fun - 1) * sizeof (struct function_info);
      sinfo = bfd_realloc (sinfo, amt);
      if (sinfo == NULL)
	return NULL;
      /* Zero the newly added tail of the array.  */
      memset ((char *) sinfo + old, 0, amt - old);
      sec_data->stack_info = sinfo;
    }
  sinfo->fun[i].is_func = is_func;
  sinfo->fun[i].global = global;
  sinfo->fun[i].sec = sec;
  if (global)
    sinfo->fun[i].u.h = sym_h;
  else
    sinfo->fun[i].u.sym = sym_h;
  sinfo->fun[i].lo = off;
  sinfo->fun[i].hi = off + size;
  /* find_function_stack_adjust returns a non-positive sp delta;
     record stack usage as a positive number.  */
  sinfo->fun[i].stack = -find_function_stack_adjust (sec, off);
  sinfo->num_fun += 1;
  return &sinfo->fun[i];
}
1744
1745 /* Return the name of FUN. */
1746
static const char *
func_name (struct function_info *fun)
{
  asection *sec;
  bfd *ibfd;
  Elf_Internal_Shdr *symtab_hdr;

  /* Name hot/cold hunks after the function that owns them.  */
  while (fun->start != NULL)
    fun = fun->start;

  if (fun->global)
    return fun->u.h->root.root.string;

  sec = fun->sec;
  if (fun->u.sym->st_name == 0)
    {
      /* Nameless local sym: fabricate "section+offset".  len + 10
	 covers '+', up to 8 hex digits and the NUL.  The string is
	 never freed; names are only built for diagnostics.  */
      size_t len = strlen (sec->name);
      char *name = bfd_malloc (len + 10);
      if (name == NULL)
	return "(null)";
      sprintf (name, "%s+%lx", sec->name,
	       (unsigned long) fun->u.sym->st_value & 0xffffffff);
      return name;
    }
  ibfd = sec->owner;
  symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
  return bfd_elf_sym_name (ibfd, symtab_hdr, fun->u.sym, sec);
}
1775
1776 /* Read the instruction at OFF in SEC. Return true iff the instruction
1777 is a nop, lnop, or stop 0 (all zero insn). */
1778
1779 static bfd_boolean
1780 is_nop (asection *sec, bfd_vma off)
1781 {
1782 unsigned char insn[4];
1783
1784 if (off + 4 > sec->size
1785 || !bfd_get_section_contents (sec->owner, sec, insn, off, 4))
1786 return FALSE;
1787 if ((insn[0] & 0xbf) == 0 && (insn[1] & 0xe0) == 0x20)
1788 return TRUE;
1789 if (insn[0] == 0 && insn[1] == 0 && insn[2] == 0 && insn[3] == 0)
1790 return TRUE;
1791 return FALSE;
1792 }
1793
1794 /* Extend the range of FUN to cover nop padding up to LIMIT.
1795 Return TRUE iff some instruction other than a NOP was found. */
1796
1797 static bfd_boolean
1798 insns_at_end (struct function_info *fun, bfd_vma limit)
1799 {
1800 bfd_vma off = (fun->hi + 3) & -4;
1801
1802 while (off < limit && is_nop (fun->sec, off))
1803 off += 4;
1804 if (off < limit)
1805 {
1806 fun->hi = off;
1807 return TRUE;
1808 }
1809 fun->hi = limit;
1810 return FALSE;
1811 }
1812
1813 /* Check and fix overlapping function ranges. Return TRUE iff there
1814 are gaps in the current info we have about functions in SEC. */
1815
static bfd_boolean
check_function_ranges (asection *sec, struct bfd_link_info *info)
{
  struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
  struct spu_elf_stack_info *sinfo = sec_data->stack_info;
  int i;
  bfd_boolean gaps = FALSE;

  /* Nothing recorded for this section.  */
  if (sinfo == NULL)
    return FALSE;

  for (i = 1; i < sinfo->num_fun; i++)
    if (sinfo->fun[i - 1].hi > sinfo->fun[i].lo)
      {
	/* Fix overlapping symbols.  */
	const char *f1 = func_name (&sinfo->fun[i - 1]);
	const char *f2 = func_name (&sinfo->fun[i]);

	info->callbacks->einfo (_("warning: %s overlaps %s\n"), f1, f2);
	/* Truncate the earlier function at the start of the later.  */
	sinfo->fun[i - 1].hi = sinfo->fun[i].lo;
      }
    else if (insns_at_end (&sinfo->fun[i - 1], sinfo->fun[i].lo))
      /* Real instructions between two known functions.  */
      gaps = TRUE;

  if (sinfo->num_fun == 0)
    gaps = TRUE;
  else
    {
      /* Code before the first known function is also a gap.  */
      if (sinfo->fun[0].lo != 0)
	gaps = TRUE;
      if (sinfo->fun[sinfo->num_fun - 1].hi > sec->size)
	{
	  const char *f1 = func_name (&sinfo->fun[sinfo->num_fun - 1]);

	  info->callbacks->einfo (_("warning: %s exceeds section size\n"), f1);
	  sinfo->fun[sinfo->num_fun - 1].hi = sec->size;
	}
      /* Likewise code after the last known function.  */
      else if (insns_at_end (&sinfo->fun[sinfo->num_fun - 1], sec->size))
	gaps = TRUE;
    }
  return gaps;
}
1858
1859 /* Search current function info for a function that contains address
1860 OFFSET in section SEC. */
1861
1862 static struct function_info *
1863 find_function (asection *sec, bfd_vma offset, struct bfd_link_info *info)
1864 {
1865 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
1866 struct spu_elf_stack_info *sinfo = sec_data->stack_info;
1867 int lo, hi, mid;
1868
1869 lo = 0;
1870 hi = sinfo->num_fun;
1871 while (lo < hi)
1872 {
1873 mid = (lo + hi) / 2;
1874 if (offset < sinfo->fun[mid].lo)
1875 hi = mid;
1876 else if (offset >= sinfo->fun[mid].hi)
1877 lo = mid + 1;
1878 else
1879 return &sinfo->fun[mid];
1880 }
1881 info->callbacks->einfo (_("%A:0x%v not found in function table\n"),
1882 sec, offset);
1883 return NULL;
1884 }
1885
1886 /* Add CALLEE to CALLER call list if not already present. */
1887
1888 static bfd_boolean
1889 insert_callee (struct function_info *caller, struct call_info *callee)
1890 {
1891 struct call_info *p;
1892 for (p = caller->call_list; p != NULL; p = p->next)
1893 if (p->fun == callee->fun)
1894 {
1895 /* Tail calls use less stack than normal calls. Retain entry
1896 for normal call over one for tail call. */
1897 if (p->is_tail > callee->is_tail)
1898 p->is_tail = callee->is_tail;
1899 return FALSE;
1900 }
1901 callee->next = caller->call_list;
1902 caller->call_list = callee;
1903 return TRUE;
1904 }
1905
1906 /* Rummage through the relocs for SEC, looking for function calls.
1907 If CALL_TREE is true, fill in call graph. If CALL_TREE is false,
1908 mark destination symbols on calls as being functions. Also
1909 look at branches, which may be tail calls or go to hot/cold
1910 section part of same function. */
1911
static bfd_boolean
mark_functions_via_relocs (asection *sec,
			   struct bfd_link_info *info,
			   int call_tree)
{
  Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
  Elf_Internal_Shdr *symtab_hdr = &elf_tdata (sec->owner)->symtab_hdr;
  Elf_Internal_Sym *syms;
  void *psyms;
  /* Static so the non-code-section warning given on the discovery
     pass suppresses repeats on the call-tree pass.  */
  static bfd_boolean warned;

  internal_relocs = _bfd_elf_link_read_relocs (sec->owner, sec, NULL, NULL,
					       info->keep_memory);
  if (internal_relocs == NULL)
    return FALSE;

  symtab_hdr = &elf_tdata (sec->owner)->symtab_hdr;
  psyms = &symtab_hdr->contents;
  syms = *(Elf_Internal_Sym **) psyms;
  irela = internal_relocs;
  irelaend = irela + sec->reloc_count;
  for (; irela < irelaend; irela++)
    {
      enum elf_spu_reloc_type r_type;
      unsigned int r_indx;
      asection *sym_sec;
      Elf_Internal_Sym *sym;
      struct elf_link_hash_entry *h;
      bfd_vma val;
      unsigned char insn[4];
      bfd_boolean is_call;
      struct function_info *caller;
      struct call_info *callee;

      /* Only these reloc types can appear on branch instructions.  */
      r_type = ELF32_R_TYPE (irela->r_info);
      if (r_type != R_SPU_REL16
	  && r_type != R_SPU_ADDR16)
	continue;

      r_indx = ELF32_R_SYM (irela->r_info);
      if (!get_sym_h (&h, &sym, &sym_sec, psyms, r_indx, sec->owner))
	return FALSE;

      /* Ignore targets that are not part of this output.  */
      if (sym_sec == NULL
	  || sym_sec->output_section == NULL
	  || sym_sec->output_section->owner != sec->output_section->owner)
	continue;

      /* The reloc must actually sit on a branch instruction.  */
      if (!bfd_get_section_contents (sec->owner, sec, insn,
				     irela->r_offset, 4))
	return FALSE;
      if (!is_branch (insn))
	continue;

      if ((sym_sec->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE))
	  != (SEC_ALLOC | SEC_LOAD | SEC_CODE))
	{
	  /* Warn on the discovery pass; on the call-tree pass only
	     if the discovery pass did not already warn.  */
	  if (!call_tree)
	    warned = TRUE;
	  if (!call_tree || !warned)
	    info->callbacks->einfo (_("%B(%A+0x%v): call to non-code section"
				      " %B(%A), stack analysis incomplete\n"),
				    sec->owner, sec, irela->r_offset,
				    sym_sec->owner, sym_sec);
	  continue;
	}

      /* Branch-with-link forms (cf. the brsl decode in
	 find_function_stack_adjust) are true calls; others may be
	 tail calls or hot/cold branches.  */
      is_call = (insn[0] & 0xfd) == 0x31;

      if (h)
	val = h->root.u.def.value;
      else
	val = sym->st_value;
      val += irela->r_addend;

      if (!call_tree)
	{
	  /* Discovery pass: record the branch target as a function
	     (or part of one).  */
	  struct function_info *fun;

	  if (irela->r_addend != 0)
	    {
	      /* Target isn't the symbol itself; fabricate a local
		 symbol at the adjusted address.  */
	      Elf_Internal_Sym *fake = bfd_zmalloc (sizeof (*fake));
	      if (fake == NULL)
		return FALSE;
	      fake->st_value = val;
	      fake->st_shndx
		= _bfd_elf_section_from_bfd_section (sym_sec->owner, sym_sec);
	      sym = fake;
	    }
	  if (sym)
	    fun = maybe_insert_function (sym_sec, sym, FALSE, is_call);
	  else
	    fun = maybe_insert_function (sym_sec, h, TRUE, is_call);
	  if (fun == NULL)
	    return FALSE;
	  /* Free the fake symbol unless maybe_insert_function kept
	     it in the new entry.  */
	  if (irela->r_addend != 0
	      && fun->u.sym != sym)
	    free (sym);
	  continue;
	}

      /* Call-tree pass: add an edge from the function containing the
	 branch to the one containing the target.  */
      caller = find_function (sec, irela->r_offset, info);
      if (caller == NULL)
	return FALSE;
      callee = bfd_malloc (sizeof *callee);
      if (callee == NULL)
	return FALSE;

      callee->fun = find_function (sym_sec, val, info);
      if (callee->fun == NULL)
	return FALSE;
      callee->is_tail = !is_call;
      if (!insert_callee (caller, callee))
	free (callee);
      else if (!is_call
	       && !callee->fun->is_func
	       && callee->fun->stack == 0)
	{
	  /* This is either a tail call or a branch from one part of
	     the function to another, ie. hot/cold section.  If the
	     destination has been called by some other function then
	     it is a separate function.  We also assume that functions
	     are not split across input files.  */
	  if (callee->fun->start != NULL
	      || sec->owner != sym_sec->owner)
	    {
	      callee->fun->start = NULL;
	      callee->fun->is_func = TRUE;
	    }
	  else
	    callee->fun->start = caller;
	}
    }

  return TRUE;
}
2048
2049 /* Handle something like .init or .fini, which has a piece of a function.
2050 These sections are pasted together to form a single function. */
2051
static bfd_boolean
pasted_function (asection *sec, struct bfd_link_info *info)
{
  struct bfd_link_order *l;
  struct _spu_elf_section_data *sec_data;
  struct spu_elf_stack_info *sinfo;
  Elf_Internal_Sym *fake;
  struct function_info *fun, *fun_start;

  /* Fabricate a local symbol covering the whole section so the piece
     gets a function_info entry of its own.  */
  fake = bfd_zmalloc (sizeof (*fake));
  if (fake == NULL)
    return FALSE;
  fake->st_value = 0;
  fake->st_size = sec->size;
  fake->st_shndx
    = _bfd_elf_section_from_bfd_section (sec->owner, sec);
  fun = maybe_insert_function (sec, fake, FALSE, FALSE);
  if (!fun)
    return FALSE;

  /* Find a function immediately preceding this section.  */
  fun_start = NULL;
  for (l = sec->output_section->map_head.link_order; l != NULL; l = l->next)
    {
      if (l->u.indirect.section == sec)
	{
	  if (fun_start != NULL)
	    {
	      /* Chain this piece to the start of the pasted-together
		 function, following an earlier piece's own start
		 link if it too is a continuation.  */
	      if (fun_start->start)
		fun_start = fun_start->start;
	      fun->start = fun_start;
	    }
	  return TRUE;
	}
      /* Remember the last function of the most recent preceding
	 section that has any.  */
      if (l->type == bfd_indirect_link_order
	  && (sec_data = spu_elf_section_data (l->u.indirect.section)) != NULL
	  && (sinfo = sec_data->stack_info) != NULL
	  && sinfo->num_fun != 0)
	fun_start = &sinfo->fun[sinfo->num_fun - 1];
    }

  /* SEC should always appear in its output section's link order.  */
  info->callbacks->einfo (_("%A link_order not found\n"), sec);
  return FALSE;
}
2096
2097 /* We're only interested in code sections. */
2098
2099 static bfd_boolean
2100 interesting_section (asection *s, bfd *obfd, struct spu_link_hash_table *htab)
2101 {
2102 return (s != htab->stub
2103 && s->output_section != NULL
2104 && s->output_section->owner == obfd
2105 && ((s->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE))
2106 == (SEC_ALLOC | SEC_LOAD | SEC_CODE))
2107 && s->size != 0);
2108 }
2109
2110 /* Map address ranges in code sections to functions. */
2111
2112 static bfd_boolean
2113 discover_functions (bfd *output_bfd, struct bfd_link_info *info)
2114 {
2115 struct spu_link_hash_table *htab = spu_hash_table (info);
2116 bfd *ibfd;
2117 int bfd_idx;
2118 Elf_Internal_Sym ***psym_arr;
2119 asection ***sec_arr;
2120 bfd_boolean gaps = FALSE;
2121
2122 bfd_idx = 0;
2123 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
2124 bfd_idx++;
2125
2126 psym_arr = bfd_zmalloc (bfd_idx * sizeof (*psym_arr));
2127 if (psym_arr == NULL)
2128 return FALSE;
2129 sec_arr = bfd_zmalloc (bfd_idx * sizeof (*sec_arr));
2130 if (sec_arr == NULL)
2131 return FALSE;
2132
2133
2134 for (ibfd = info->input_bfds, bfd_idx = 0;
2135 ibfd != NULL;
2136 ibfd = ibfd->link_next, bfd_idx++)
2137 {
2138 extern const bfd_target bfd_elf32_spu_vec;
2139 Elf_Internal_Shdr *symtab_hdr;
2140 asection *sec;
2141 size_t symcount;
2142 Elf_Internal_Sym *syms, *sy, **psyms, **psy;
2143 asection **psecs, **p;
2144
2145 if (ibfd->xvec != &bfd_elf32_spu_vec)
2146 continue;
2147
2148 /* Read all the symbols. */
2149 symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
2150 symcount = symtab_hdr->sh_size / symtab_hdr->sh_entsize;
2151 if (symcount == 0)
2152 continue;
2153
2154 syms = (Elf_Internal_Sym *) symtab_hdr->contents;
2155 if (syms == NULL)
2156 {
2157 syms = bfd_elf_get_elf_syms (ibfd, symtab_hdr, symcount, 0,
2158 NULL, NULL, NULL);
2159 symtab_hdr->contents = (void *) syms;
2160 if (syms == NULL)
2161 return FALSE;
2162 }
2163
2164 /* Select defined function symbols that are going to be output. */
2165 psyms = bfd_malloc ((symcount + 1) * sizeof (*psyms));
2166 if (psyms == NULL)
2167 return FALSE;
2168 psym_arr[bfd_idx] = psyms;
2169 psecs = bfd_malloc (symcount * sizeof (*psecs));
2170 if (psecs == NULL)
2171 return FALSE;
2172 sec_arr[bfd_idx] = psecs;
2173 for (psy = psyms, p = psecs, sy = syms; sy < syms + symcount; ++p, ++sy)
2174 if (ELF_ST_TYPE (sy->st_info) == STT_NOTYPE
2175 || ELF_ST_TYPE (sy->st_info) == STT_FUNC)
2176 {
2177 asection *s;
2178
2179 *p = s = bfd_section_from_elf_index (ibfd, sy->st_shndx);
2180 if (s != NULL && interesting_section (s, output_bfd, htab))
2181 *psy++ = sy;
2182 }
2183 symcount = psy - psyms;
2184 *psy = NULL;
2185
2186 /* Sort them by section and offset within section. */
2187 sort_syms_syms = syms;
2188 sort_syms_psecs = psecs;
2189 qsort (psyms, symcount, sizeof (*psyms), sort_syms);
2190
2191 /* Now inspect the function symbols. */
2192 for (psy = psyms; psy < psyms + symcount; )
2193 {
2194 asection *s = psecs[*psy - syms];
2195 Elf_Internal_Sym **psy2;
2196
2197 for (psy2 = psy; ++psy2 < psyms + symcount; )
2198 if (psecs[*psy2 - syms] != s)
2199 break;
2200
2201 if (!alloc_stack_info (s, psy2 - psy))
2202 return FALSE;
2203 psy = psy2;
2204 }
2205
2206 /* First install info about properly typed and sized functions.
2207 In an ideal world this will cover all code sections, except
2208 when partitioning functions into hot and cold sections,
2209 and the horrible pasted together .init and .fini functions. */
2210 for (psy = psyms; psy < psyms + symcount; ++psy)
2211 {
2212 sy = *psy;
2213 if (ELF_ST_TYPE (sy->st_info) == STT_FUNC)
2214 {
2215 asection *s = psecs[sy - syms];
2216 if (!maybe_insert_function (s, sy, FALSE, TRUE))
2217 return FALSE;
2218 }
2219 }
2220
2221 for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
2222 if (interesting_section (sec, output_bfd, htab))
2223 gaps |= check_function_ranges (sec, info);
2224 }
2225
2226 if (gaps)
2227 {
2228 /* See if we can discover more function symbols by looking at
2229 relocations. */
2230 for (ibfd = info->input_bfds, bfd_idx = 0;
2231 ibfd != NULL;
2232 ibfd = ibfd->link_next, bfd_idx++)
2233 {
2234 asection *sec;
2235
2236 if (psym_arr[bfd_idx] == NULL)
2237 continue;
2238
2239 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
2240 if (interesting_section (sec, output_bfd, htab)
2241 && sec->reloc_count != 0)
2242 {
2243 if (!mark_functions_via_relocs (sec, info, FALSE))
2244 return FALSE;
2245 }
2246 }
2247
2248 for (ibfd = info->input_bfds, bfd_idx = 0;
2249 ibfd != NULL;
2250 ibfd = ibfd->link_next, bfd_idx++)
2251 {
2252 Elf_Internal_Shdr *symtab_hdr;
2253 asection *sec;
2254 Elf_Internal_Sym *syms, *sy, **psyms, **psy;
2255 asection **psecs;
2256
2257 if ((psyms = psym_arr[bfd_idx]) == NULL)
2258 continue;
2259
2260 psecs = sec_arr[bfd_idx];
2261
2262 symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
2263 syms = (Elf_Internal_Sym *) symtab_hdr->contents;
2264
2265 gaps = FALSE;
2266 for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
2267 if (interesting_section (sec, output_bfd, htab))
2268 gaps |= check_function_ranges (sec, info);
2269 if (!gaps)
2270 continue;
2271
2272 /* Finally, install all globals. */
2273 for (psy = psyms; (sy = *psy) != NULL; ++psy)
2274 {
2275 asection *s;
2276
2277 s = psecs[sy - syms];
2278
2279 /* Global syms might be improperly typed functions. */
2280 if (ELF_ST_TYPE (sy->st_info) != STT_FUNC
2281 && ELF_ST_BIND (sy->st_info) == STB_GLOBAL)
2282 {
2283 if (!maybe_insert_function (s, sy, FALSE, FALSE))
2284 return FALSE;
2285 }
2286 }
2287
2288 /* Some of the symbols we've installed as marking the
2289 beginning of functions may have a size of zero. Extend
2290 the range of such functions to the beginning of the
2291 next symbol of interest. */
2292 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
2293 if (interesting_section (sec, output_bfd, htab))
2294 {
2295 struct _spu_elf_section_data *sec_data;
2296 struct spu_elf_stack_info *sinfo;
2297
2298 sec_data = spu_elf_section_data (sec);
2299 sinfo = sec_data->stack_info;
2300 if (sinfo != NULL)
2301 {
2302 int fun_idx;
2303 bfd_vma hi = sec->size;
2304
2305 for (fun_idx = sinfo->num_fun; --fun_idx >= 0; )
2306 {
2307 sinfo->fun[fun_idx].hi = hi;
2308 hi = sinfo->fun[fun_idx].lo;
2309 }
2310 }
2311 /* No symbols in this section. Must be .init or .fini
2312 or something similar. */
2313 else if (!pasted_function (sec, info))
2314 return FALSE;
2315 }
2316 }
2317 }
2318
2319 for (ibfd = info->input_bfds, bfd_idx = 0;
2320 ibfd != NULL;
2321 ibfd = ibfd->link_next, bfd_idx++)
2322 {
2323 if (psym_arr[bfd_idx] == NULL)
2324 continue;
2325
2326 free (psym_arr[bfd_idx]);
2327 free (sec_arr[bfd_idx]);
2328 }
2329
2330 free (psym_arr);
2331 free (sec_arr);
2332
2333 return TRUE;
2334 }
2335
2336 /* Mark nodes in the call graph that are called by some other node. */
2337
2338 static void
2339 mark_non_root (struct function_info *fun)
2340 {
2341 struct call_info *call;
2342
2343 fun->visit1 = TRUE;
2344 for (call = fun->call_list; call; call = call->next)
2345 {
2346 call->fun->non_root = TRUE;
2347 if (!call->fun->visit1)
2348 mark_non_root (call->fun);
2349 }
2350 }
2351
2352 /* Remove cycles from the call graph. */
2353
2354 static void
2355 call_graph_traverse (struct function_info *fun, struct bfd_link_info *info)
2356 {
2357 struct call_info **callp, *call;
2358
2359 fun->visit2 = TRUE;
2360 fun->marking = TRUE;
2361
2362 callp = &fun->call_list;
2363 while ((call = *callp) != NULL)
2364 {
2365 if (!call->fun->visit2)
2366 call_graph_traverse (call->fun, info);
2367 else if (call->fun->marking)
2368 {
2369 const char *f1 = func_name (fun);
2370 const char *f2 = func_name (call->fun);
2371
2372 info->callbacks->info (_("Stack analysis will ignore the call "
2373 "from %s to %s\n"),
2374 f1, f2);
2375 *callp = call->next;
2376 continue;
2377 }
2378 callp = &call->next;
2379 }
2380 fun->marking = FALSE;
2381 }
2382
2383 /* Populate call_list for each function. */
2384
static bfd_boolean
build_call_tree (bfd *output_bfd, struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  bfd *ibfd;

  /* Pass 1: record call edges by scanning branch relocations in every
     interesting SPU code section.  */
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
    {
      extern const bfd_target bfd_elf32_spu_vec;
      asection *sec;

      /* Skip input files that are not SPU ELF objects.  */
      if (ibfd->xvec != &bfd_elf32_spu_vec)
	continue;

      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	{
	  if (!interesting_section (sec, output_bfd, htab)
	      || sec->reloc_count == 0)
	    continue;

	  if (!mark_functions_via_relocs (sec, info, TRUE))
	    return FALSE;
	}

      /* Transfer call info from hot/cold section part of function
	 to main entry.  */
      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	{
	  struct _spu_elf_section_data *sec_data;
	  struct spu_elf_stack_info *sinfo;

	  if ((sec_data = spu_elf_section_data (sec)) != NULL
	      && (sinfo = sec_data->stack_info) != NULL)
	    {
	      int i;
	      for (i = 0; i < sinfo->num_fun; ++i)
		{
		  /* fun[i].start non-NULL means this entry is a
		     continuation part; move its call list onto the
		     main entry.  */
		  if (sinfo->fun[i].start != NULL)
		    {
		      struct call_info *call = sinfo->fun[i].call_list;

		      while (call != NULL)
			{
			  struct call_info *call_next = call->next;
			  /* insert_callee takes ownership on success;
			     on failure (duplicate edge) the node is
			     ours to free.  */
			  if (!insert_callee (sinfo->fun[i].start, call))
			    free (call);
			  call = call_next;
			}
		      sinfo->fun[i].call_list = NULL;
		      /* A continuation part is never a graph root.  */
		      sinfo->fun[i].non_root = TRUE;
		    }
		}
	    }
	}
    }

  /* Find the call graph root(s).  Anything reachable from some other
     function gets non_root set; what remains unmarked is a root.  */
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
    {
      extern const bfd_target bfd_elf32_spu_vec;
      asection *sec;

      if (ibfd->xvec != &bfd_elf32_spu_vec)
	continue;

      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	{
	  struct _spu_elf_section_data *sec_data;
	  struct spu_elf_stack_info *sinfo;

	  if ((sec_data = spu_elf_section_data (sec)) != NULL
	      && (sinfo = sec_data->stack_info) != NULL)
	    {
	      int i;
	      for (i = 0; i < sinfo->num_fun; ++i)
		if (!sinfo->fun[i].visit1)
		  mark_non_root (&sinfo->fun[i]);
	    }
	}
    }

  /* Remove cycles from the call graph.  We start from the root node(s)
     so that we break cycles in a reasonable place.  */
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
    {
      extern const bfd_target bfd_elf32_spu_vec;
      asection *sec;

      if (ibfd->xvec != &bfd_elf32_spu_vec)
	continue;

      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	{
	  struct _spu_elf_section_data *sec_data;
	  struct spu_elf_stack_info *sinfo;

	  if ((sec_data = spu_elf_section_data (sec)) != NULL
	      && (sinfo = sec_data->stack_info) != NULL)
	    {
	      int i;
	      for (i = 0; i < sinfo->num_fun; ++i)
		if (!sinfo->fun[i].non_root)
		  call_graph_traverse (&sinfo->fun[i], info);
	    }
	}
    }

  return TRUE;
}
2494
2495 /* Descend the call graph for FUN, accumulating total stack required. */
2496
static bfd_vma
sum_stack (struct function_info *fun,
	   struct bfd_link_info *info,
	   int emit_stack_syms)
{
  struct call_info *call;
  struct function_info *max = NULL;
  bfd_vma max_stack = fun->stack;
  bfd_vma stack;
  const char *f1;

  /* Once visit3 is set, fun->stack has already been replaced by the
     cumulative figure (see below), so just return the cached value.  */
  if (fun->visit3)
    return max_stack;

  for (call = fun->call_list; call; call = call->next)
    {
      stack = sum_stack (call->fun, info, emit_stack_syms);
      /* Include caller stack for normal calls, don't do so for
	 tail calls.  fun->stack here is local stack usage for
	 this function.  */
      if (!call->is_tail)
	stack += fun->stack;
      /* Track the deepest call chain and which callee produces it.  */
      if (max_stack < stack)
	{
	  max_stack = stack;
	  max = call->fun;
	}
    }

  /* Report local and cumulative stack for this function.  */
  f1 = func_name (fun);
  info->callbacks->minfo (_("%s: 0x%v 0x%v\n"),
			  f1, (bfd_vma) fun->stack, max_stack);

  if (fun->call_list)
    {
      info->callbacks->minfo (_(" calls:\n"));
      for (call = fun->call_list; call; call = call->next)
	{
	  /* '*' marks the callee on the max-stack path, 't' marks a
	     tail call.  */
	  const char *f2 = func_name (call->fun);
	  const char *ann1 = call->fun == max ? "*" : " ";
	  const char *ann2 = call->is_tail ? "t" : " ";

	  info->callbacks->minfo (_(" %s%s %s\n"), ann1, ann2, f2);
	}
    }

  /* Now fun->stack holds cumulative stack.  */
  fun->stack = max_stack;
  fun->visit3 = TRUE;

  if (emit_stack_syms)
    {
      struct spu_link_hash_table *htab = spu_hash_table (info);
      /* "__stack_" (8) + up to 8 hex digits + '_' + name + NUL
	 fits in 18 + strlen (f1).  */
      char *name = bfd_malloc (18 + strlen (f1));
      struct elf_link_hash_entry *h;

      if (name != NULL)
	{
	  /* Globals get "__stack_<name>"; locals are qualified with
	     the section id to keep the symbol unique.  */
	  if (fun->global || ELF_ST_BIND (fun->u.sym->st_info) == STB_GLOBAL)
	    sprintf (name, "__stack_%s", f1);
	  else
	    sprintf (name, "__stack_%x_%s", fun->sec->id & 0xffffffff, f1);

	  h = elf_link_hash_lookup (&htab->elf, name, TRUE, TRUE, FALSE);
	  free (name);
	  /* Only (re)define the symbol if the user has not already
	     defined it somewhere.  */
	  if (h != NULL
	      && (h->root.type == bfd_link_hash_new
		  || h->root.type == bfd_link_hash_undefined
		  || h->root.type == bfd_link_hash_undefweak))
	    {
	      /* Define an absolute symbol whose value is the
		 cumulative stack requirement.  */
	      h->root.type = bfd_link_hash_defined;
	      h->root.u.def.section = bfd_abs_section_ptr;
	      h->root.u.def.value = max_stack;
	      h->size = 0;
	      h->type = 0;
	      h->ref_regular = 1;
	      h->def_regular = 1;
	      h->ref_regular_nonweak = 1;
	      h->forced_local = 1;
	      h->non_elf = 0;
	    }
	}
    }

  return max_stack;
}
2583
2584 /* Provide an estimate of total stack required. */
2585
2586 static bfd_boolean
2587 spu_elf_stack_analysis (bfd *output_bfd,
2588 struct bfd_link_info *info,
2589 int emit_stack_syms)
2590 {
2591 bfd *ibfd;
2592 bfd_vma max_stack = 0;
2593
2594 if (!discover_functions (output_bfd, info))
2595 return FALSE;
2596
2597 if (!build_call_tree (output_bfd, info))
2598 return FALSE;
2599
2600 info->callbacks->info (_("Stack size for call graph root nodes.\n"));
2601 info->callbacks->minfo (_("\nStack size for functions. "
2602 "Annotations: '*' max stack, 't' tail call\n"));
2603 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
2604 {
2605 extern const bfd_target bfd_elf32_spu_vec;
2606 asection *sec;
2607
2608 if (ibfd->xvec != &bfd_elf32_spu_vec)
2609 continue;
2610
2611 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
2612 {
2613 struct _spu_elf_section_data *sec_data;
2614 struct spu_elf_stack_info *sinfo;
2615
2616 if ((sec_data = spu_elf_section_data (sec)) != NULL
2617 && (sinfo = sec_data->stack_info) != NULL)
2618 {
2619 int i;
2620 for (i = 0; i < sinfo->num_fun; ++i)
2621 {
2622 if (!sinfo->fun[i].non_root)
2623 {
2624 bfd_vma stack;
2625 const char *f1;
2626
2627 stack = sum_stack (&sinfo->fun[i], info,
2628 emit_stack_syms);
2629 f1 = func_name (&sinfo->fun[i]);
2630 info->callbacks->info (_(" %s: 0x%v\n"),
2631 f1, stack);
2632 if (max_stack < stack)
2633 max_stack = stack;
2634 }
2635 }
2636 }
2637 }
2638 }
2639
2640 info->callbacks->info (_("Maximum stack required is 0x%v\n"), max_stack);
2641 return TRUE;
2642 }
2643
2644 /* Perform a final link. */
2645
2646 static bfd_boolean
2647 spu_elf_final_link (bfd *output_bfd, struct bfd_link_info *info)
2648 {
2649 struct spu_link_hash_table *htab = spu_hash_table (info);
2650
2651 if (htab->stack_analysis
2652 && !spu_elf_stack_analysis (output_bfd, info, htab->emit_stack_syms))
2653 info->callbacks->einfo ("%X%P: stack analysis error: %E\n");
2654
2655 return bfd_elf_final_link (output_bfd, info);
2656 }
2657
2658 /* Called when not normally emitting relocs, ie. !info->relocatable
2659 and !info->emitrelocations. Returns a count of special relocs
2660 that need to be emitted. */
2661
2662 static unsigned int
2663 spu_elf_count_relocs (asection *sec, Elf_Internal_Rela *relocs)
2664 {
2665 unsigned int count = 0;
2666 Elf_Internal_Rela *relend = relocs + sec->reloc_count;
2667
2668 for (; relocs < relend; relocs++)
2669 {
2670 int r_type = ELF32_R_TYPE (relocs->r_info);
2671 if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
2672 ++count;
2673 }
2674
2675 return count;
2676 }
2677
2678 /* Apply RELOCS to CONTENTS of INPUT_SECTION from INPUT_BFD. */
2679
static bfd_boolean
spu_elf_relocate_section (bfd *output_bfd,
			  struct bfd_link_info *info,
			  bfd *input_bfd,
			  asection *input_section,
			  bfd_byte *contents,
			  Elf_Internal_Rela *relocs,
			  Elf_Internal_Sym *local_syms,
			  asection **local_sections)
{
  Elf_Internal_Shdr *symtab_hdr;
  struct elf_link_hash_entry **sym_hashes;
  Elf_Internal_Rela *rel, *relend;
  struct spu_link_hash_table *htab;
  bfd_boolean ret = TRUE;
  bfd_boolean emit_these_relocs = FALSE;

  htab = spu_hash_table (info);
  symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
  sym_hashes = (struct elf_link_hash_entry **) (elf_sym_hashes (input_bfd));

  rel = relocs;
  relend = relocs + input_section->reloc_count;
  for (; rel < relend; rel++)
    {
      int r_type;
      reloc_howto_type *howto;
      unsigned long r_symndx;
      Elf_Internal_Sym *sym;
      asection *sec;
      struct elf_link_hash_entry *h;
      const char *sym_name;
      bfd_vma relocation;
      bfd_vma addend;
      bfd_reloc_status_type r;
      bfd_boolean unresolved_reloc;
      bfd_boolean warned;
      bfd_boolean branch;

      r_symndx = ELF32_R_SYM (rel->r_info);
      r_type = ELF32_R_TYPE (rel->r_info);
      /* PPU relocs are not applied here; they are emitted into the
	 output for the PowerPC side (see spu_elf_count_relocs and the
	 reloc squeeze at the bottom of this function).  */
      if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
	{
	  emit_these_relocs = TRUE;
	  continue;
	}

      howto = elf_howto_table + r_type;
      unresolved_reloc = FALSE;
      warned = FALSE;
      h = NULL;
      sym = NULL;
      sec = NULL;
      /* Resolve the symbol: local symbols by index, globals via the
	 link hash table.  */
      if (r_symndx < symtab_hdr->sh_info)
	{
	  sym = local_syms + r_symndx;
	  sec = local_sections[r_symndx];
	  sym_name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym, sec);
	  relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
	}
      else
	{
	  RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
				   r_symndx, symtab_hdr, sym_hashes,
				   h, sec, relocation,
				   unresolved_reloc, warned);
	  sym_name = h->root.root.string;
	}

      if (sec != NULL && elf_discarded_section (sec))
	{
	  /* For relocs against symbols from removed linkonce sections,
	     or sections discarded by a linker script, we just want the
	     section contents zeroed.  Avoid any special processing.  */
	  _bfd_clear_contents (howto, input_bfd, contents + rel->r_offset);
	  rel->r_info = 0;
	  rel->r_addend = 0;
	  continue;
	}

      if (info->relocatable)
	continue;

      /* Report but keep going, so all unresolved relocs in this
	 section are diagnosed in one pass.  */
      if (unresolved_reloc)
	{
	  (*_bfd_error_handler)
	    (_("%B(%s+0x%lx): unresolvable %s relocation against symbol `%s'"),
	     input_bfd,
	     bfd_get_section_name (input_bfd, input_section),
	     (long) rel->r_offset,
	     howto->name,
	     sym_name);
	  ret = FALSE;
	}

      /* If this symbol is in an overlay area, we may need to relocate
	 to the overlay stub.  */
      addend = rel->r_addend;
      branch = (is_branch (contents + rel->r_offset)
		|| is_hint (contents + rel->r_offset));
      if (needs_ovl_stub (sym_name, sec, input_section, htab, branch))
	{
	  char *stub_name;
	  struct spu_stub_hash_entry *sh;

	  stub_name = spu_stub_name (sec, h, rel);
	  if (stub_name == NULL)
	    return FALSE;

	  sh = (struct spu_stub_hash_entry *)
	    bfd_hash_lookup (&htab->stub_hash_table, stub_name, FALSE, FALSE);
	  if (sh != NULL)
	    {
	      /* Redirect the reloc to the stub; the stub's own code
		 reaches the real target, so drop the addend (it is
		 already encoded in the stub chosen).  */
	      relocation = (htab->stub->output_section->vma
			    + htab->stub->output_offset
			    + sh->off);
	      addend = 0;
	    }
	  free (stub_name);
	}

      r = _bfd_final_link_relocate (howto,
				    input_bfd,
				    input_section,
				    contents,
				    rel->r_offset, relocation, addend);

      if (r != bfd_reloc_ok)
	{
	  const char *msg = (const char *) 0;

	  switch (r)
	    {
	    case bfd_reloc_overflow:
	      if (!((*info->callbacks->reloc_overflow)
		    (info, (h ? &h->root : NULL), sym_name, howto->name,
		     (bfd_vma) 0, input_bfd, input_section, rel->r_offset)))
		return FALSE;
	      break;

	    case bfd_reloc_undefined:
	      if (!((*info->callbacks->undefined_symbol)
		    (info, sym_name, input_bfd, input_section,
		     rel->r_offset, TRUE)))
		return FALSE;
	      break;

	    case bfd_reloc_outofrange:
	      msg = _("internal error: out of range error");
	      goto common_error;

	    case bfd_reloc_notsupported:
	      msg = _("internal error: unsupported relocation error");
	      goto common_error;

	    case bfd_reloc_dangerous:
	      msg = _("internal error: dangerous error");
	      goto common_error;

	    default:
	      msg = _("internal error: unknown error");
	      /* fall through */

	    common_error:
	      if (!((*info->callbacks->warning)
		    (info, msg, sym_name, input_bfd, input_section,
		     rel->r_offset)))
		return FALSE;
	      break;
	    }
	}
    }

  /* When not emitting relocs in general, squeeze the reloc array down
     to just the PPU relocs so the generic linker outputs only those.  */
  if (ret
      && emit_these_relocs
      && !info->relocatable
      && !info->emitrelocations)
    {
      Elf_Internal_Rela *wrel;
      Elf_Internal_Shdr *rel_hdr;

      wrel = rel = relocs;
      relend = relocs + input_section->reloc_count;
      for (; rel < relend; rel++)
	{
	  int r_type;

	  r_type = ELF32_R_TYPE (rel->r_info);
	  if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
	    *wrel++ = *rel;
	}
      input_section->reloc_count = wrel - relocs;
      /* Backflips for _bfd_elf_link_output_relocs.  */
      rel_hdr = &elf_section_data (input_section)->rel_hdr;
      rel_hdr->sh_size = input_section->reloc_count * rel_hdr->sh_entsize;
      /* NOTE(review): returning 2 (not just TRUE) appears to tell the
	 generic ELF linker that relocs must still be output despite
	 !info->emitrelocations — confirm against elf_link_input_bfd.  */
      ret = 2;
    }

  return ret;
}
2880
2881 /* Adjust _SPUEAR_ syms to point at their overlay stubs. */
2882
2883 static bfd_boolean
2884 spu_elf_output_symbol_hook (struct bfd_link_info *info,
2885 const char *sym_name ATTRIBUTE_UNUSED,
2886 Elf_Internal_Sym *sym,
2887 asection *sym_sec ATTRIBUTE_UNUSED,
2888 struct elf_link_hash_entry *h)
2889 {
2890 struct spu_link_hash_table *htab = spu_hash_table (info);
2891
2892 if (!info->relocatable
2893 && htab->num_overlays != 0
2894 && h != NULL
2895 && (h->root.type == bfd_link_hash_defined
2896 || h->root.type == bfd_link_hash_defweak)
2897 && h->def_regular
2898 && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0)
2899 {
2900 static Elf_Internal_Rela zero_rel;
2901 char *stub_name = spu_stub_name (h->root.u.def.section, h, &zero_rel);
2902 struct spu_stub_hash_entry *sh;
2903
2904 if (stub_name == NULL)
2905 return FALSE;
2906 sh = (struct spu_stub_hash_entry *)
2907 bfd_hash_lookup (&htab->stub_hash_table, stub_name, FALSE, FALSE);
2908 free (stub_name);
2909 if (sh == NULL)
2910 return TRUE;
2911 sym->st_shndx
2912 = _bfd_elf_section_from_bfd_section (htab->stub->output_section->owner,
2913 htab->stub->output_section);
2914 sym->st_value = (htab->stub->output_section->vma
2915 + htab->stub->output_offset
2916 + sh->off);
2917 }
2918
2919 return TRUE;
2920 }
2921
/* Non-zero when "plugin" output was requested; checked in
   spu_elf_post_process_headers to set e_type to ET_DYN.  */
static int spu_plugin = 0;

/* Record whether plugin output is wanted.  VAL non-zero enables it.  */
void
spu_elf_plugin (int val)
{
  spu_plugin = val;
}
2929
2930 /* Set ELF header e_type for plugins. */
2931
2932 static void
2933 spu_elf_post_process_headers (bfd *abfd,
2934 struct bfd_link_info *info ATTRIBUTE_UNUSED)
2935 {
2936 if (spu_plugin)
2937 {
2938 Elf_Internal_Ehdr *i_ehdrp = elf_elfheader (abfd);
2939
2940 i_ehdrp->e_type = ET_DYN;
2941 }
2942 }
2943
2944 /* We may add an extra PT_LOAD segment for .toe. We also need extra
2945 segments for overlays. */
2946
2947 static int
2948 spu_elf_additional_program_headers (bfd *abfd, struct bfd_link_info *info)
2949 {
2950 struct spu_link_hash_table *htab = spu_hash_table (info);
2951 int extra = htab->num_overlays;
2952 asection *sec;
2953
2954 if (extra)
2955 ++extra;
2956
2957 sec = bfd_get_section_by_name (abfd, ".toe");
2958 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
2959 ++extra;
2960
2961 return extra;
2962 }
2963
2964 /* Remove .toe section from other PT_LOAD segments and put it in
2965 a segment of its own. Put overlays in separate segments too. */
2966
static bfd_boolean
spu_elf_modify_segment_map (bfd *abfd, struct bfd_link_info *info)
{
  asection *toe, *s;
  struct elf_segment_map *m;
  unsigned int i;

  /* Nothing to do when called outside of a link.  */
  if (info == NULL)
    return TRUE;

  toe = bfd_get_section_by_name (abfd, ".toe");
  /* Look in each multi-section PT_LOAD for .toe or an overlay
     section; when found, split that PT_LOAD so the section sits in a
     segment of its own.  */
  for (m = elf_tdata (abfd)->segment_map; m != NULL; m = m->next)
    if (m->p_type == PT_LOAD && m->count > 1)
      for (i = 0; i < m->count; i++)
	if ((s = m->sections[i]) == toe
	    || spu_elf_section_data (s)->ovl_index != 0)
	  {
	    struct elf_segment_map *m2;
	    bfd_vma amt;

	    /* Sections after S move to a new PT_LOAD inserted after M.
	       elf_segment_map ends in a one-element sections array, so
	       the allocation below adds room for count - (i+1) entries
	       in total.  */
	    if (i + 1 < m->count)
	      {
		amt = sizeof (struct elf_segment_map);
		amt += (m->count - (i + 2)) * sizeof (m->sections[0]);
		m2 = bfd_zalloc (abfd, amt);
		if (m2 == NULL)
		  return FALSE;
		m2->count = m->count - (i + 1);
		memcpy (m2->sections, m->sections + i + 1,
			m2->count * sizeof (m->sections[0]));
		m2->p_type = PT_LOAD;
		m2->next = m->next;
		m->next = m2;
	      }
	    m->count = 1;
	    /* If S was not first, M keeps the sections before S and a
	       fresh single-section PT_LOAD holding S is inserted
	       between M and the segment added above.  Otherwise M
	       itself now holds only S.  */
	    if (i != 0)
	      {
		m->count = i;
		amt = sizeof (struct elf_segment_map);
		m2 = bfd_zalloc (abfd, amt);
		if (m2 == NULL)
		  return FALSE;
		m2->p_type = PT_LOAD;
		m2->count = 1;
		m2->sections[0] = s;
		m2->next = m->next;
		m->next = m2;
	      }
	    /* Restart with M's successor; the newly created segments
	       are processed by the outer loop in turn.  */
	    break;
	  }

  return TRUE;
}
3020
3021 /* Check that all loadable section VMAs lie in the range
3022 LO .. HI inclusive. */
3023
3024 asection *
3025 spu_elf_check_vma (bfd *abfd, bfd_vma lo, bfd_vma hi)
3026 {
3027 struct elf_segment_map *m;
3028 unsigned int i;
3029
3030 for (m = elf_tdata (abfd)->segment_map; m != NULL; m = m->next)
3031 if (m->p_type == PT_LOAD)
3032 for (i = 0; i < m->count; i++)
3033 if (m->sections[i]->size != 0
3034 && (m->sections[i]->vma < lo
3035 || m->sections[i]->vma > hi
3036 || m->sections[i]->vma + m->sections[i]->size - 1 > hi))
3037 return m->sections[i];
3038
3039 return NULL;
3040 }
3041
3042 /* Tweak the section type of .note.spu_name. */
3043
3044 static bfd_boolean
3045 spu_elf_fake_sections (bfd *obfd ATTRIBUTE_UNUSED,
3046 Elf_Internal_Shdr *hdr,
3047 asection *sec)
3048 {
3049 if (strcmp (sec->name, SPU_PTNOTE_SPUNAME) == 0)
3050 hdr->sh_type = SHT_NOTE;
3051 return TRUE;
3052 }
3053
3054 /* Tweak phdrs before writing them out. */
3055
static int
spu_elf_modify_program_headers (bfd *abfd, struct bfd_link_info *info)
{
  const struct elf_backend_data *bed;
  struct elf_obj_tdata *tdata;
  Elf_Internal_Phdr *phdr, *last;
  struct spu_link_hash_table *htab;
  unsigned int count;
  unsigned int i;

  /* Nothing to do when called outside of a link.  */
  if (info == NULL)
    return TRUE;

  bed = get_elf_backend_data (abfd);
  tdata = elf_tdata (abfd);
  phdr = tdata->phdr;
  count = tdata->program_header_size / bed->s->sizeof_phdr;
  htab = spu_hash_table (info);
  if (htab->num_overlays != 0)
    {
      struct elf_segment_map *m;
      unsigned int o;

      /* Find the segments that hold overlay sections.  The segment
	 map and the phdr array run in parallel, so I indexes both.  */
      for (i = 0, m = elf_tdata (abfd)->segment_map; m; ++i, m = m->next)
	if (m->count != 0
	    && (o = spu_elf_section_data (m->sections[0])->ovl_index) != 0)
	  {
	    /* Mark this as an overlay header.  */
	    phdr[i].p_flags |= PF_OVERLAY;

	    if (htab->ovtab != NULL && htab->ovtab->size != 0)
	      {
		bfd_byte *p = htab->ovtab->contents;
		/* Each _ovly_table entry is 16 bytes; the file offset
		   field sits at byte 8 of the entry for overlay O.  */
		unsigned int off = (o - 1) * 16 + 8;

		/* Write file_off into _ovly_table.  */
		bfd_put_32 (htab->ovtab->owner, phdr[i].p_offset, p + off);
	      }
	  }
    }

  /* Round up p_filesz and p_memsz of PT_LOAD segments to multiples
     of 16.  This should always be possible when using the standard
     linker scripts, but don't create overlapping segments if
     someone is playing games with linker scripts.  */
  last = NULL;
  /* First pass (highest address downwards): verify that rounding each
     PT_LOAD up cannot run into the following one.  Bail out of the
     loop early (leaving i != -1) if any overlap would result.  */
  for (i = count; i-- != 0; )
    if (phdr[i].p_type == PT_LOAD)
      {
	unsigned adjust;

	adjust = -phdr[i].p_filesz & 15;
	if (adjust != 0
	    && last != NULL
	    && phdr[i].p_offset + phdr[i].p_filesz > last->p_offset - adjust)
	  break;

	adjust = -phdr[i].p_memsz & 15;
	if (adjust != 0
	    && last != NULL
	    && phdr[i].p_filesz != 0
	    && phdr[i].p_vaddr + phdr[i].p_memsz > last->p_vaddr - adjust
	    && phdr[i].p_vaddr + phdr[i].p_memsz <= last->p_vaddr)
	  break;

	if (phdr[i].p_filesz != 0)
	  last = &phdr[i];
      }

  /* Second pass: only if the first pass completed (i wrapped to -1)
     is it safe to apply the rounding.  */
  if (i == (unsigned int) -1)
    for (i = count; i-- != 0; )
      if (phdr[i].p_type == PT_LOAD)
	{
	  unsigned adjust;

	  adjust = -phdr[i].p_filesz & 15;
	  phdr[i].p_filesz += adjust;

	  adjust = -phdr[i].p_memsz & 15;
	  phdr[i].p_memsz += adjust;
	}

  return TRUE;
}
3140
/* Target identification consumed by elf32-target.h.  */
#define TARGET_BIG_SYM bfd_elf32_spu_vec
#define TARGET_BIG_NAME "elf32-spu"
#define ELF_ARCH bfd_arch_spu
#define ELF_MACHINE_CODE EM_SPU
/* This matches the alignment need for DMA. */
#define ELF_MAXPAGESIZE 0x80
#define elf_backend_rela_normal 1
#define elf_backend_can_gc_sections 1

/* Hook the SPU-specific routines defined in this file into the
   generic ELF backend.  */
#define bfd_elf32_bfd_reloc_type_lookup spu_elf_reloc_type_lookup
#define bfd_elf32_bfd_reloc_name_lookup spu_elf_reloc_name_lookup
#define elf_info_to_howto spu_elf_info_to_howto
#define elf_backend_count_relocs spu_elf_count_relocs
#define elf_backend_relocate_section spu_elf_relocate_section
#define elf_backend_symbol_processing spu_elf_backend_symbol_processing
#define elf_backend_link_output_symbol_hook spu_elf_output_symbol_hook
#define bfd_elf32_new_section_hook spu_elf_new_section_hook
#define bfd_elf32_bfd_link_hash_table_create spu_elf_link_hash_table_create
#define bfd_elf32_bfd_link_hash_table_free spu_elf_link_hash_table_free

#define elf_backend_additional_program_headers spu_elf_additional_program_headers
#define elf_backend_modify_segment_map spu_elf_modify_segment_map
#define elf_backend_modify_program_headers spu_elf_modify_program_headers
#define elf_backend_post_process_headers spu_elf_post_process_headers
#define elf_backend_fake_sections spu_elf_fake_sections
#define elf_backend_special_sections spu_elf_special_sections
#define bfd_elf32_bfd_final_link spu_elf_final_link

/* Instantiate the target vector with the definitions above.  */
#include "elf32-target.h"