]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blame - bfd/elf32-spu.c
gdb
[thirdparty/binutils-gdb.git] / bfd / elf32-spu.c
CommitLineData
e9f53129
AM
1/* SPU specific support for 32-bit ELF
2
d16c7321 3 Copyright 2006, 2007, 2008 Free Software Foundation, Inc.
e9f53129
AM
4
5 This file is part of BFD, the Binary File Descriptor library.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
cd123cb7 9 the Free Software Foundation; either version 3 of the License, or
e9f53129
AM
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License along
18 with this program; if not, write to the Free Software Foundation, Inc.,
19 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */
20
e9f53129 21#include "sysdep.h"
9dcc4794 22#include "libiberty.h"
3db64b00 23#include "bfd.h"
e9f53129
AM
24#include "bfdlink.h"
25#include "libbfd.h"
26#include "elf-bfd.h"
27#include "elf/spu.h"
28#include "elf32-spu.h"
29
30/* We use RELA style relocs. Don't define USE_REL. */
31
32static bfd_reloc_status_type spu_elf_rel9 (bfd *, arelent *, asymbol *,
33 void *, asection *,
34 bfd *, char **);
35
36/* Values of type 'enum elf_spu_reloc_type' are used to index this
37 array, so it must be declared in the order of that type. */
38
39static reloc_howto_type elf_howto_table[] = {
40 HOWTO (R_SPU_NONE, 0, 0, 0, FALSE, 0, complain_overflow_dont,
41 bfd_elf_generic_reloc, "SPU_NONE",
42 FALSE, 0, 0x00000000, FALSE),
43 HOWTO (R_SPU_ADDR10, 4, 2, 10, FALSE, 14, complain_overflow_bitfield,
44 bfd_elf_generic_reloc, "SPU_ADDR10",
45 FALSE, 0, 0x00ffc000, FALSE),
46 HOWTO (R_SPU_ADDR16, 2, 2, 16, FALSE, 7, complain_overflow_bitfield,
47 bfd_elf_generic_reloc, "SPU_ADDR16",
48 FALSE, 0, 0x007fff80, FALSE),
49 HOWTO (R_SPU_ADDR16_HI, 16, 2, 16, FALSE, 7, complain_overflow_bitfield,
50 bfd_elf_generic_reloc, "SPU_ADDR16_HI",
51 FALSE, 0, 0x007fff80, FALSE),
52 HOWTO (R_SPU_ADDR16_LO, 0, 2, 16, FALSE, 7, complain_overflow_dont,
53 bfd_elf_generic_reloc, "SPU_ADDR16_LO",
54 FALSE, 0, 0x007fff80, FALSE),
55 HOWTO (R_SPU_ADDR18, 0, 2, 18, FALSE, 7, complain_overflow_bitfield,
56 bfd_elf_generic_reloc, "SPU_ADDR18",
57 FALSE, 0, 0x01ffff80, FALSE),
b427ea91 58 HOWTO (R_SPU_ADDR32, 0, 2, 32, FALSE, 0, complain_overflow_dont,
e9f53129
AM
59 bfd_elf_generic_reloc, "SPU_ADDR32",
60 FALSE, 0, 0xffffffff, FALSE),
61 HOWTO (R_SPU_REL16, 2, 2, 16, TRUE, 7, complain_overflow_bitfield,
62 bfd_elf_generic_reloc, "SPU_REL16",
63 FALSE, 0, 0x007fff80, TRUE),
64 HOWTO (R_SPU_ADDR7, 0, 2, 7, FALSE, 14, complain_overflow_dont,
65 bfd_elf_generic_reloc, "SPU_ADDR7",
66 FALSE, 0, 0x001fc000, FALSE),
67 HOWTO (R_SPU_REL9, 2, 2, 9, TRUE, 0, complain_overflow_signed,
68 spu_elf_rel9, "SPU_REL9",
69 FALSE, 0, 0x0180007f, TRUE),
70 HOWTO (R_SPU_REL9I, 2, 2, 9, TRUE, 0, complain_overflow_signed,
71 spu_elf_rel9, "SPU_REL9I",
72 FALSE, 0, 0x0000c07f, TRUE),
73 HOWTO (R_SPU_ADDR10I, 0, 2, 10, FALSE, 14, complain_overflow_signed,
74 bfd_elf_generic_reloc, "SPU_ADDR10I",
75 FALSE, 0, 0x00ffc000, FALSE),
76 HOWTO (R_SPU_ADDR16I, 0, 2, 16, FALSE, 7, complain_overflow_signed,
77 bfd_elf_generic_reloc, "SPU_ADDR16I",
78 FALSE, 0, 0x007fff80, FALSE),
b427ea91 79 HOWTO (R_SPU_REL32, 0, 2, 32, TRUE, 0, complain_overflow_dont,
e9f53129
AM
80 bfd_elf_generic_reloc, "SPU_REL32",
81 FALSE, 0, 0xffffffff, TRUE),
4f4416b5
AM
82 HOWTO (R_SPU_ADDR16X, 0, 2, 16, FALSE, 7, complain_overflow_bitfield,
83 bfd_elf_generic_reloc, "SPU_ADDR16X",
84 FALSE, 0, 0x007fff80, FALSE),
b427ea91 85 HOWTO (R_SPU_PPU32, 0, 2, 32, FALSE, 0, complain_overflow_dont,
ece5ef60
AM
86 bfd_elf_generic_reloc, "SPU_PPU32",
87 FALSE, 0, 0xffffffff, FALSE),
b427ea91 88 HOWTO (R_SPU_PPU64, 0, 4, 64, FALSE, 0, complain_overflow_dont,
ece5ef60
AM
89 bfd_elf_generic_reloc, "SPU_PPU64",
90 FALSE, 0, -1, FALSE),
e9f53129
AM
91};
92
93static struct bfd_elf_special_section const spu_elf_special_sections[] = {
8374f9d4 94 { "._ea", 4, 0, SHT_PROGBITS, SHF_WRITE },
e9f53129
AM
95 { ".toe", 4, 0, SHT_NOBITS, SHF_ALLOC },
96 { NULL, 0, 0, 0, 0 }
97};
98
99static enum elf_spu_reloc_type
100spu_elf_bfd_to_reloc_type (bfd_reloc_code_real_type code)
101{
102 switch (code)
103 {
104 default:
105 return R_SPU_NONE;
106 case BFD_RELOC_SPU_IMM10W:
107 return R_SPU_ADDR10;
108 case BFD_RELOC_SPU_IMM16W:
109 return R_SPU_ADDR16;
110 case BFD_RELOC_SPU_LO16:
111 return R_SPU_ADDR16_LO;
112 case BFD_RELOC_SPU_HI16:
113 return R_SPU_ADDR16_HI;
114 case BFD_RELOC_SPU_IMM18:
115 return R_SPU_ADDR18;
116 case BFD_RELOC_SPU_PCREL16:
117 return R_SPU_REL16;
118 case BFD_RELOC_SPU_IMM7:
119 return R_SPU_ADDR7;
120 case BFD_RELOC_SPU_IMM8:
121 return R_SPU_NONE;
122 case BFD_RELOC_SPU_PCREL9a:
123 return R_SPU_REL9;
124 case BFD_RELOC_SPU_PCREL9b:
125 return R_SPU_REL9I;
126 case BFD_RELOC_SPU_IMM10:
127 return R_SPU_ADDR10I;
128 case BFD_RELOC_SPU_IMM16:
129 return R_SPU_ADDR16I;
130 case BFD_RELOC_32:
131 return R_SPU_ADDR32;
132 case BFD_RELOC_32_PCREL:
133 return R_SPU_REL32;
ece5ef60
AM
134 case BFD_RELOC_SPU_PPU32:
135 return R_SPU_PPU32;
136 case BFD_RELOC_SPU_PPU64:
137 return R_SPU_PPU64;
e9f53129
AM
138 }
139}
140
141static void
142spu_elf_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED,
143 arelent *cache_ptr,
144 Elf_Internal_Rela *dst)
145{
146 enum elf_spu_reloc_type r_type;
147
148 r_type = (enum elf_spu_reloc_type) ELF32_R_TYPE (dst->r_info);
149 BFD_ASSERT (r_type < R_SPU_max);
150 cache_ptr->howto = &elf_howto_table[(int) r_type];
151}
152
153static reloc_howto_type *
154spu_elf_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
155 bfd_reloc_code_real_type code)
156{
b16f296e
AM
157 enum elf_spu_reloc_type r_type = spu_elf_bfd_to_reloc_type (code);
158
159 if (r_type == R_SPU_NONE)
160 return NULL;
161
162 return elf_howto_table + r_type;
e9f53129
AM
163}
164
157090f7
AM
165static reloc_howto_type *
166spu_elf_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
167 const char *r_name)
168{
169 unsigned int i;
170
171 for (i = 0; i < sizeof (elf_howto_table) / sizeof (elf_howto_table[0]); i++)
172 if (elf_howto_table[i].name != NULL
173 && strcasecmp (elf_howto_table[i].name, r_name) == 0)
174 return &elf_howto_table[i];
175
176 return NULL;
177}
178
e9f53129
AM
179/* Apply R_SPU_REL9 and R_SPU_REL9I relocs. */
180
181static bfd_reloc_status_type
182spu_elf_rel9 (bfd *abfd, arelent *reloc_entry, asymbol *symbol,
183 void *data, asection *input_section,
184 bfd *output_bfd, char **error_message)
185{
186 bfd_size_type octets;
187 bfd_vma val;
188 long insn;
189
190 /* If this is a relocatable link (output_bfd test tells us), just
191 call the generic function. Any adjustment will be done at final
192 link time. */
193 if (output_bfd != NULL)
194 return bfd_elf_generic_reloc (abfd, reloc_entry, symbol, data,
195 input_section, output_bfd, error_message);
196
197 if (reloc_entry->address > bfd_get_section_limit (abfd, input_section))
198 return bfd_reloc_outofrange;
199 octets = reloc_entry->address * bfd_octets_per_byte (abfd);
200
201 /* Get symbol value. */
202 val = 0;
203 if (!bfd_is_com_section (symbol->section))
204 val = symbol->value;
205 if (symbol->section->output_section)
206 val += symbol->section->output_section->vma;
207
208 val += reloc_entry->addend;
209
210 /* Make it pc-relative. */
211 val -= input_section->output_section->vma + input_section->output_offset;
212
213 val >>= 2;
214 if (val + 256 >= 512)
215 return bfd_reloc_overflow;
216
217 insn = bfd_get_32 (abfd, (bfd_byte *) data + octets);
218
219 /* Move two high bits of value to REL9I and REL9 position.
220 The mask will take care of selecting the right field. */
221 val = (val & 0x7f) | ((val & 0x180) << 7) | ((val & 0x180) << 16);
222 insn &= ~reloc_entry->howto->dst_mask;
223 insn |= val & reloc_entry->howto->dst_mask;
224 bfd_put_32 (abfd, insn, (bfd_byte *) data + octets);
225 return bfd_reloc_ok;
226}
227
228static bfd_boolean
229spu_elf_new_section_hook (bfd *abfd, asection *sec)
230{
231 if (!sec->used_by_bfd)
232 {
233 struct _spu_elf_section_data *sdata;
234
235 sdata = bfd_zalloc (abfd, sizeof (*sdata));
236 if (sdata == NULL)
237 return FALSE;
238 sec->used_by_bfd = sdata;
239 }
240
241 return _bfd_elf_new_section_hook (abfd, sec);
242}
243
124b52c6
AM
244/* Set up overlay info for executables. */
245
246static bfd_boolean
247spu_elf_object_p (bfd *abfd)
248{
249 if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
250 {
251 unsigned int i, num_ovl, num_buf;
252 Elf_Internal_Phdr *phdr = elf_tdata (abfd)->phdr;
253 Elf_Internal_Ehdr *ehdr = elf_elfheader (abfd);
254 Elf_Internal_Phdr *last_phdr = NULL;
255
256 for (num_buf = 0, num_ovl = 0, i = 0; i < ehdr->e_phnum; i++, phdr++)
257 if (phdr->p_type == PT_LOAD && (phdr->p_flags & PF_OVERLAY) != 0)
258 {
259 unsigned int j;
260
261 ++num_ovl;
262 if (last_phdr == NULL
263 || ((last_phdr->p_vaddr ^ phdr->p_vaddr) & 0x3ffff) != 0)
264 ++num_buf;
265 last_phdr = phdr;
266 for (j = 1; j < elf_numsections (abfd); j++)
267 {
268 Elf_Internal_Shdr *shdr = elf_elfsections (abfd)[j];
269
270 if (ELF_IS_SECTION_IN_SEGMENT_MEMORY (shdr, phdr))
271 {
272 asection *sec = shdr->bfd_section;
273 spu_elf_section_data (sec)->u.o.ovl_index = num_ovl;
274 spu_elf_section_data (sec)->u.o.ovl_buf = num_buf;
275 }
276 }
277 }
278 }
279 return TRUE;
280}
281
e9f53129
AM
282/* Specially mark defined symbols named _EAR_* with BSF_KEEP so that
283 strip --strip-unneeded will not remove them. */
284
285static void
286spu_elf_backend_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED, asymbol *sym)
287{
288 if (sym->name != NULL
289 && sym->section != bfd_abs_section_ptr
290 && strncmp (sym->name, "_EAR_", 5) == 0)
291 sym->flags |= BSF_KEEP;
292}
293
294/* SPU ELF linker hash table. */
295
296struct spu_link_hash_table
297{
298 struct elf_link_hash_table elf;
299
e9f53129 300 /* Shortcuts to overlay sections. */
e9f53129 301 asection *ovtab;
47f6dab9
AM
302 asection *toe;
303 asection **ovl_sec;
304
305 /* Count of stubs in each overlay section. */
306 unsigned int *stub_count;
307
308 /* The stub section for each overlay section. */
309 asection **stub_sec;
e9f53129
AM
310
311 struct elf_link_hash_entry *ovly_load;
47f6dab9 312 struct elf_link_hash_entry *ovly_return;
2cb5950e 313 unsigned long ovly_load_r_symndx;
e9f53129 314
e9f53129
AM
315 /* Number of overlay buffers. */
316 unsigned int num_buf;
317
318 /* Total number of overlays. */
319 unsigned int num_overlays;
320
9dcc4794
AM
321 /* How much memory we have. */
322 unsigned int local_store;
323 /* Local store --auto-overlay should reserve for non-overlay
324 functions and data. */
325 unsigned int overlay_fixed;
326 /* Local store --auto-overlay should reserve for stack and heap. */
327 unsigned int reserved;
99302af9
AM
328 /* If reserved is not specified, stack analysis will calculate a value
329 for the stack. This parameter adjusts that value to allow for
330 negative sp access (the ABI says 2000 bytes below sp are valid,
331 and the overlay manager uses some of this area). */
332 int extra_stack_space;
9dcc4794
AM
333 /* Count of overlay stubs needed in non-overlay area. */
334 unsigned int non_ovly_stub;
335
336 /* Stash various callbacks for --auto-overlay. */
337 void (*spu_elf_load_ovl_mgr) (void);
338 FILE *(*spu_elf_open_overlay_script) (void);
339 void (*spu_elf_relink) (void);
340
341 /* Bit 0 set if --auto-overlay.
342 Bit 1 set if --auto-relink.
343 Bit 2 set if --overlay-rodata. */
344 unsigned int auto_overlay : 3;
345#define AUTO_OVERLAY 1
346#define AUTO_RELINK 2
347#define OVERLAY_RODATA 4
348
e9f53129
AM
349 /* Set if we should emit symbols for stubs. */
350 unsigned int emit_stub_syms:1;
351
352 /* Set if we want stubs on calls out of overlay regions to
353 non-overlay regions. */
354 unsigned int non_overlay_stubs : 1;
355
356 /* Set on error. */
47f6dab9 357 unsigned int stub_err : 1;
49fa1e15
AM
358
359 /* Set if stack size analysis should be done. */
360 unsigned int stack_analysis : 1;
361
362 /* Set if __stack_* syms will be emitted. */
363 unsigned int emit_stack_syms : 1;
e9f53129
AM
364};
365
47f6dab9 366/* Hijack the generic got fields for overlay stub accounting. */
e9f53129 367
47f6dab9 368struct got_entry
e9f53129 369{
47f6dab9
AM
370 struct got_entry *next;
371 unsigned int ovl;
4a628337 372 bfd_vma addend;
47f6dab9 373 bfd_vma stub_addr;
e9f53129
AM
374};
375
47f6dab9
AM
/* Retrieve the SPU ELF linker hash table from a bfd_link_info.  */
#define spu_hash_table(p) \
  ((struct spu_link_hash_table *) ((p)->hash))
e9f53129
AM
378
379/* Create a spu ELF linker hash table. */
380
381static struct bfd_link_hash_table *
382spu_elf_link_hash_table_create (bfd *abfd)
383{
384 struct spu_link_hash_table *htab;
385
386 htab = bfd_malloc (sizeof (*htab));
387 if (htab == NULL)
388 return NULL;
389
390 if (!_bfd_elf_link_hash_table_init (&htab->elf, abfd,
391 _bfd_elf_link_hash_newfunc,
392 sizeof (struct elf_link_hash_entry)))
393 {
394 free (htab);
395 return NULL;
396 }
397
47f6dab9
AM
398 memset (&htab->ovtab, 0,
399 sizeof (*htab) - offsetof (struct spu_link_hash_table, ovtab));
e9f53129 400
47f6dab9
AM
401 htab->elf.init_got_refcount.refcount = 0;
402 htab->elf.init_got_refcount.glist = NULL;
403 htab->elf.init_got_offset.offset = 0;
404 htab->elf.init_got_offset.glist = NULL;
e9f53129
AM
405 return &htab->elf.root;
406}
407
e9f53129
AM
408/* Find the symbol for the given R_SYMNDX in IBFD and set *HP and *SYMP
409 to (hash, NULL) for global symbols, and (NULL, sym) for locals. Set
410 *SYMSECP to the symbol's section. *LOCSYMSP caches local syms. */
411
412static bfd_boolean
413get_sym_h (struct elf_link_hash_entry **hp,
414 Elf_Internal_Sym **symp,
415 asection **symsecp,
416 Elf_Internal_Sym **locsymsp,
417 unsigned long r_symndx,
418 bfd *ibfd)
419{
420 Elf_Internal_Shdr *symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
421
422 if (r_symndx >= symtab_hdr->sh_info)
423 {
424 struct elf_link_hash_entry **sym_hashes = elf_sym_hashes (ibfd);
425 struct elf_link_hash_entry *h;
426
427 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
428 while (h->root.type == bfd_link_hash_indirect
429 || h->root.type == bfd_link_hash_warning)
430 h = (struct elf_link_hash_entry *) h->root.u.i.link;
431
432 if (hp != NULL)
433 *hp = h;
434
435 if (symp != NULL)
436 *symp = NULL;
437
438 if (symsecp != NULL)
439 {
440 asection *symsec = NULL;
441 if (h->root.type == bfd_link_hash_defined
442 || h->root.type == bfd_link_hash_defweak)
443 symsec = h->root.u.def.section;
444 *symsecp = symsec;
445 }
446 }
447 else
448 {
449 Elf_Internal_Sym *sym;
450 Elf_Internal_Sym *locsyms = *locsymsp;
451
452 if (locsyms == NULL)
453 {
454 locsyms = (Elf_Internal_Sym *) symtab_hdr->contents;
455 if (locsyms == NULL)
1f27ab8d
AM
456 locsyms = bfd_elf_get_elf_syms (ibfd, symtab_hdr,
457 symtab_hdr->sh_info,
458 0, NULL, NULL, NULL);
e9f53129
AM
459 if (locsyms == NULL)
460 return FALSE;
461 *locsymsp = locsyms;
462 }
463 sym = locsyms + r_symndx;
464
465 if (hp != NULL)
466 *hp = NULL;
467
468 if (symp != NULL)
469 *symp = sym;
470
471 if (symsecp != NULL)
cb33740c 472 *symsecp = bfd_section_from_elf_index (ibfd, sym->st_shndx);
e9f53129 473 }
49fa1e15 474
e9f53129
AM
475 return TRUE;
476}
477
e9f53129
AM
478/* Create the note section if not already present. This is done early so
479 that the linker maps the sections to the right place in the output. */
480
481bfd_boolean
c65be8d7 482spu_elf_create_sections (struct bfd_link_info *info,
49fa1e15
AM
483 int stack_analysis,
484 int emit_stack_syms)
e9f53129
AM
485{
486 bfd *ibfd;
49fa1e15
AM
487 struct spu_link_hash_table *htab = spu_hash_table (info);
488
489 /* Stash some options away where we can get at them later. */
490 htab->stack_analysis = stack_analysis;
491 htab->emit_stack_syms = emit_stack_syms;
e9f53129 492
58eb693e 493 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
e9f53129
AM
494 if (bfd_get_section_by_name (ibfd, SPU_PTNOTE_SPUNAME) != NULL)
495 break;
496
497 if (ibfd == NULL)
498 {
499 /* Make SPU_PTNOTE_SPUNAME section. */
500 asection *s;
501 size_t name_len;
502 size_t size;
503 bfd_byte *data;
504 flagword flags;
505
506 ibfd = info->input_bfds;
507 flags = SEC_LOAD | SEC_READONLY | SEC_HAS_CONTENTS | SEC_IN_MEMORY;
508 s = bfd_make_section_anyway_with_flags (ibfd, SPU_PTNOTE_SPUNAME, flags);
509 if (s == NULL
510 || !bfd_set_section_alignment (ibfd, s, 4))
511 return FALSE;
512
c65be8d7 513 name_len = strlen (bfd_get_filename (info->output_bfd)) + 1;
e9f53129
AM
514 size = 12 + ((sizeof (SPU_PLUGIN_NAME) + 3) & -4);
515 size += (name_len + 3) & -4;
516
517 if (!bfd_set_section_size (ibfd, s, size))
518 return FALSE;
519
520 data = bfd_zalloc (ibfd, size);
521 if (data == NULL)
522 return FALSE;
523
524 bfd_put_32 (ibfd, sizeof (SPU_PLUGIN_NAME), data + 0);
525 bfd_put_32 (ibfd, name_len, data + 4);
526 bfd_put_32 (ibfd, 1, data + 8);
527 memcpy (data + 12, SPU_PLUGIN_NAME, sizeof (SPU_PLUGIN_NAME));
528 memcpy (data + 12 + ((sizeof (SPU_PLUGIN_NAME) + 3) & -4),
c65be8d7 529 bfd_get_filename (info->output_bfd), name_len);
e9f53129
AM
530 s->contents = data;
531 }
532
533 return TRUE;
534}
535
e9f53129
AM
536/* qsort predicate to sort sections by vma. */
537
538static int
539sort_sections (const void *a, const void *b)
540{
541 const asection *const *s1 = a;
542 const asection *const *s2 = b;
543 bfd_signed_vma delta = (*s1)->vma - (*s2)->vma;
544
545 if (delta != 0)
546 return delta < 0 ? -1 : 1;
547
548 return (*s1)->index - (*s2)->index;
549}
550
551/* Identify overlays in the output bfd, and number them. */
552
553bfd_boolean
c65be8d7 554spu_elf_find_overlays (struct bfd_link_info *info)
e9f53129
AM
555{
556 struct spu_link_hash_table *htab = spu_hash_table (info);
557 asection **alloc_sec;
558 unsigned int i, n, ovl_index, num_buf;
559 asection *s;
560 bfd_vma ovl_end;
561
c65be8d7 562 if (info->output_bfd->section_count < 2)
e9f53129
AM
563 return FALSE;
564
c65be8d7
AM
565 alloc_sec
566 = bfd_malloc (info->output_bfd->section_count * sizeof (*alloc_sec));
e9f53129
AM
567 if (alloc_sec == NULL)
568 return FALSE;
569
570 /* Pick out all the alloced sections. */
c65be8d7 571 for (n = 0, s = info->output_bfd->sections; s != NULL; s = s->next)
e9f53129
AM
572 if ((s->flags & SEC_ALLOC) != 0
573 && (s->flags & (SEC_LOAD | SEC_THREAD_LOCAL)) != SEC_THREAD_LOCAL
574 && s->size != 0)
575 alloc_sec[n++] = s;
576
577 if (n == 0)
578 {
579 free (alloc_sec);
580 return FALSE;
581 }
582
583 /* Sort them by vma. */
584 qsort (alloc_sec, n, sizeof (*alloc_sec), sort_sections);
585
586 /* Look for overlapping vmas. Any with overlap must be overlays.
47f6dab9 587 Count them. Also count the number of overlay regions. */
e9f53129
AM
588 ovl_end = alloc_sec[0]->vma + alloc_sec[0]->size;
589 for (ovl_index = 0, num_buf = 0, i = 1; i < n; i++)
590 {
591 s = alloc_sec[i];
592 if (s->vma < ovl_end)
593 {
594 asection *s0 = alloc_sec[i - 1];
595
47f6dab9 596 if (spu_elf_section_data (s0)->u.o.ovl_index == 0)
e9f53129 597 {
47f6dab9
AM
598 alloc_sec[ovl_index] = s0;
599 spu_elf_section_data (s0)->u.o.ovl_index = ++ovl_index;
600 spu_elf_section_data (s0)->u.o.ovl_buf = ++num_buf;
e9f53129 601 }
47f6dab9
AM
602 alloc_sec[ovl_index] = s;
603 spu_elf_section_data (s)->u.o.ovl_index = ++ovl_index;
604 spu_elf_section_data (s)->u.o.ovl_buf = num_buf;
605 if (s0->vma != s->vma)
e9f53129 606 {
47f6dab9
AM
607 info->callbacks->einfo (_("%X%P: overlay sections %A and %A "
608 "do not start at the same address.\n"),
609 s0, s);
610 return FALSE;
e9f53129 611 }
47f6dab9
AM
612 if (ovl_end < s->vma + s->size)
613 ovl_end = s->vma + s->size;
e9f53129
AM
614 }
615 else
616 ovl_end = s->vma + s->size;
617 }
618
619 htab->num_overlays = ovl_index;
620 htab->num_buf = num_buf;
47f6dab9 621 htab->ovl_sec = alloc_sec;
fdba2fcd
AM
622 htab->ovly_load = elf_link_hash_lookup (&htab->elf, "__ovly_load",
623 FALSE, FALSE, FALSE);
624 htab->ovly_return = elf_link_hash_lookup (&htab->elf, "__ovly_return",
625 FALSE, FALSE, FALSE);
47f6dab9 626 return ovl_index != 0;
e9f53129
AM
627}
628
47f6dab9
AM
629/* Support two sizes of overlay stubs, a slower more compact stub of two
630 intructions, and a faster stub of four instructions. */
631#ifndef OVL_STUB_SIZE
632/* Default to faster. */
633#define OVL_STUB_SIZE 16
634/* #define OVL_STUB_SIZE 8 */
635#endif
636#define BRSL 0x33000000
637#define BR 0x32000000
e9f53129 638#define NOP 0x40200000
47f6dab9
AM
639#define LNOP 0x00200000
640#define ILA 0x42000000
e9f53129 641
49fa1e15 642/* Return true for all relative and absolute branch instructions.
e9f53129
AM
643 bra 00110000 0..
644 brasl 00110001 0..
645 br 00110010 0..
646 brsl 00110011 0..
647 brz 00100000 0..
648 brnz 00100001 0..
649 brhz 00100010 0..
49fa1e15
AM
650 brhnz 00100011 0.. */
651
652static bfd_boolean
653is_branch (const unsigned char *insn)
654{
655 return (insn[0] & 0xec) == 0x20 && (insn[1] & 0x80) == 0;
656}
657
fad9eaf0
AM
658/* Return true for all indirect branch instructions.
659 bi 00110101 000
660 bisl 00110101 001
661 iret 00110101 010
662 bisled 00110101 011
663 biz 00100101 000
664 binz 00100101 001
665 bihz 00100101 010
666 bihnz 00100101 011 */
667
668static bfd_boolean
669is_indirect_branch (const unsigned char *insn)
670{
671 return (insn[0] & 0xef) == 0x25 && (insn[1] & 0x80) == 0;
672}
673
49fa1e15 674/* Return true for branch hint instructions.
e9f53129
AM
675 hbra 0001000..
676 hbrr 0001001.. */
677
678static bfd_boolean
49fa1e15 679is_hint (const unsigned char *insn)
e9f53129 680{
49fa1e15 681 return (insn[0] & 0xfc) == 0x10;
e9f53129
AM
682}
683
fdba2fcd 684/* True if INPUT_SECTION might need overlay stubs. */
aa7a0635
AM
685
686static bfd_boolean
fdba2fcd
AM
687maybe_needs_stubs (asection *input_section, bfd *output_bfd)
688{
689 /* No stubs for debug sections and suchlike. */
690 if ((input_section->flags & SEC_ALLOC) == 0)
691 return FALSE;
692
693 /* No stubs for link-once sections that will be discarded. */
694 if (input_section->output_section == NULL
695 || input_section->output_section->owner != output_bfd)
696 return FALSE;
697
698 /* Don't create stubs for .eh_frame references. */
699 if (strcmp (input_section->name, ".eh_frame") == 0)
700 return FALSE;
701
702 return TRUE;
703}
704
/* Result of deciding whether a reloc needs an overlay stub.  */
enum _stub_type
{
  no_stub,	/* No stub needed.  */
  ovl_stub,	/* Stub goes in the overlay area.  */
  nonovl_stub,	/* Stub must go in the non-overlay area.  */
  stub_error	/* Failed to read the insn to classify.  */
};
712
713/* Return non-zero if this reloc symbol should go via an overlay stub.
714 Return 2 if the stub must be in non-overlay area. */
715
716static enum _stub_type
717needs_ovl_stub (struct elf_link_hash_entry *h,
718 Elf_Internal_Sym *sym,
aa7a0635
AM
719 asection *sym_sec,
720 asection *input_section,
fdba2fcd
AM
721 Elf_Internal_Rela *irela,
722 bfd_byte *contents,
723 struct bfd_link_info *info)
aa7a0635 724{
fdba2fcd
AM
725 struct spu_link_hash_table *htab = spu_hash_table (info);
726 enum elf_spu_reloc_type r_type;
727 unsigned int sym_type;
728 bfd_boolean branch;
729 enum _stub_type ret = no_stub;
aa7a0635
AM
730
731 if (sym_sec == NULL
2c67c5f3 732 || sym_sec->output_section == NULL
fdba2fcd 733 || sym_sec->output_section->owner != info->output_bfd
2c67c5f3 734 || spu_elf_section_data (sym_sec->output_section) == NULL)
fdba2fcd 735 return ret;
aa7a0635 736
fdba2fcd
AM
737 if (h != NULL)
738 {
739 /* Ensure no stubs for user supplied overlay manager syms. */
740 if (h == htab->ovly_load || h == htab->ovly_return)
741 return ret;
742
743 /* setjmp always goes via an overlay stub, because then the return
744 and hence the longjmp goes via __ovly_return. That magically
745 makes setjmp/longjmp between overlays work. */
746 if (strncmp (h->root.root.string, "setjmp", 6) == 0
747 && (h->root.root.string[6] == '\0' || h->root.root.string[6] == '@'))
748 ret = ovl_stub;
749 }
aa7a0635
AM
750
751 /* Usually, symbols in non-overlay sections don't need stubs. */
47f6dab9 752 if (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index == 0
aa7a0635 753 && !htab->non_overlay_stubs)
fdba2fcd
AM
754 return ret;
755
756 if (h != NULL)
757 sym_type = h->type;
758 else
759 sym_type = ELF_ST_TYPE (sym->st_info);
760
761 r_type = ELF32_R_TYPE (irela->r_info);
762 branch = FALSE;
763 if (r_type == R_SPU_REL16 || r_type == R_SPU_ADDR16)
764 {
765 bfd_byte insn[4];
766
767 if (contents == NULL)
768 {
769 contents = insn;
770 if (!bfd_get_section_contents (input_section->owner,
771 input_section,
772 contents,
773 irela->r_offset, 4))
774 return stub_error;
775 }
776 else
777 contents += irela->r_offset;
778
779 if (is_branch (contents) || is_hint (contents))
780 {
781 branch = TRUE;
782 if ((contents[0] & 0xfd) == 0x31
783 && sym_type != STT_FUNC
9dcc4794 784 && contents != insn)
fdba2fcd
AM
785 {
786 /* It's common for people to write assembly and forget
787 to give function symbols the right type. Handle
788 calls to such symbols, but warn so that (hopefully)
789 people will fix their code. We need the symbol
790 type to be correct to distinguish function pointer
791 initialisation from other pointer initialisations. */
792 const char *sym_name;
793
794 if (h != NULL)
795 sym_name = h->root.root.string;
796 else
797 {
798 Elf_Internal_Shdr *symtab_hdr;
799 symtab_hdr = &elf_tdata (input_section->owner)->symtab_hdr;
800 sym_name = bfd_elf_sym_name (input_section->owner,
801 symtab_hdr,
802 sym,
803 sym_sec);
804 }
805 (*_bfd_error_handler) (_("warning: call to non-function"
806 " symbol %s defined in %B"),
807 sym_sec->owner, sym_name);
808
809 }
810 }
811 }
812
813 if (sym_type != STT_FUNC
814 && !branch
815 && (sym_sec->flags & SEC_CODE) == 0)
816 return ret;
aa7a0635
AM
817
818 /* A reference from some other section to a symbol in an overlay
819 section needs a stub. */
47f6dab9
AM
820 if (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index
821 != spu_elf_section_data (input_section->output_section)->u.o.ovl_index)
7c39fb20 822 ret = ovl_stub;
aa7a0635
AM
823
824 /* If this insn isn't a branch then we are possibly taking the
825 address of a function and passing it out somehow. */
fdba2fcd 826 return !branch && sym_type == STT_FUNC ? nonovl_stub : ret;
aa7a0635
AM
827}
828
47f6dab9
AM
829static bfd_boolean
830count_stub (struct spu_link_hash_table *htab,
831 bfd *ibfd,
832 asection *isec,
fdba2fcd 833 enum _stub_type stub_type,
47f6dab9
AM
834 struct elf_link_hash_entry *h,
835 const Elf_Internal_Rela *irela)
836{
837 unsigned int ovl = 0;
838 struct got_entry *g, **head;
4a628337 839 bfd_vma addend;
47f6dab9
AM
840
841 /* If this instruction is a branch or call, we need a stub
842 for it. One stub per function per overlay.
843 If it isn't a branch, then we are taking the address of
844 this function so need a stub in the non-overlay area
845 for it. One stub per function. */
fdba2fcd 846 if (stub_type != nonovl_stub)
47f6dab9
AM
847 ovl = spu_elf_section_data (isec->output_section)->u.o.ovl_index;
848
849 if (h != NULL)
850 head = &h->got.glist;
851 else
852 {
853 if (elf_local_got_ents (ibfd) == NULL)
854 {
855 bfd_size_type amt = (elf_tdata (ibfd)->symtab_hdr.sh_info
856 * sizeof (*elf_local_got_ents (ibfd)));
857 elf_local_got_ents (ibfd) = bfd_zmalloc (amt);
858 if (elf_local_got_ents (ibfd) == NULL)
859 return FALSE;
860 }
861 head = elf_local_got_ents (ibfd) + ELF32_R_SYM (irela->r_info);
862 }
863
4a628337
AM
864 addend = 0;
865 if (irela != NULL)
866 addend = irela->r_addend;
47f6dab9
AM
867
868 if (ovl == 0)
869 {
870 struct got_entry *gnext;
871
4a628337
AM
872 for (g = *head; g != NULL; g = g->next)
873 if (g->addend == addend && g->ovl == 0)
874 break;
875
876 if (g == NULL)
47f6dab9 877 {
4a628337
AM
878 /* Need a new non-overlay area stub. Zap other stubs. */
879 for (g = *head; g != NULL; g = gnext)
880 {
881 gnext = g->next;
882 if (g->addend == addend)
883 {
884 htab->stub_count[g->ovl] -= 1;
885 free (g);
886 }
887 }
47f6dab9
AM
888 }
889 }
890 else
891 {
4a628337
AM
892 for (g = *head; g != NULL; g = g->next)
893 if (g->addend == addend && (g->ovl == ovl || g->ovl == 0))
47f6dab9
AM
894 break;
895 }
896
897 if (g == NULL)
898 {
899 g = bfd_malloc (sizeof *g);
900 if (g == NULL)
901 return FALSE;
902 g->ovl = ovl;
4a628337 903 g->addend = addend;
47f6dab9
AM
904 g->stub_addr = (bfd_vma) -1;
905 g->next = *head;
906 *head = g;
907
908 htab->stub_count[ovl] += 1;
909 }
910
911 return TRUE;
912}
913
914/* Two instruction overlay stubs look like:
915
916 brsl $75,__ovly_load
917 .word target_ovl_and_address
918
919 ovl_and_address is a word with the overlay number in the top 14 bits
920 and local store address in the bottom 18 bits.
921
922 Four instruction overlay stubs look like:
923
924 ila $78,ovl_number
925 lnop
926 ila $79,target_address
927 br __ovly_load */
928
929static bfd_boolean
930build_stub (struct spu_link_hash_table *htab,
931 bfd *ibfd,
932 asection *isec,
fdba2fcd 933 enum _stub_type stub_type,
47f6dab9
AM
934 struct elf_link_hash_entry *h,
935 const Elf_Internal_Rela *irela,
936 bfd_vma dest,
937 asection *dest_sec)
938{
939 unsigned int ovl;
940 struct got_entry *g, **head;
941 asection *sec;
4a628337 942 bfd_vma addend, val, from, to;
47f6dab9
AM
943
944 ovl = 0;
fdba2fcd 945 if (stub_type != nonovl_stub)
47f6dab9
AM
946 ovl = spu_elf_section_data (isec->output_section)->u.o.ovl_index;
947
948 if (h != NULL)
949 head = &h->got.glist;
950 else
951 head = elf_local_got_ents (ibfd) + ELF32_R_SYM (irela->r_info);
952
4a628337
AM
953 addend = 0;
954 if (irela != NULL)
955 addend = irela->r_addend;
47f6dab9 956
4a628337
AM
957 for (g = *head; g != NULL; g = g->next)
958 if (g->addend == addend && (g->ovl == ovl || g->ovl == 0))
47f6dab9
AM
959 break;
960 if (g == NULL)
961 abort ();
962
4a628337
AM
963 if (g->ovl == 0 && ovl != 0)
964 return TRUE;
965
47f6dab9
AM
966 if (g->stub_addr != (bfd_vma) -1)
967 return TRUE;
968
969 sec = htab->stub_sec[ovl];
970 dest += dest_sec->output_offset + dest_sec->output_section->vma;
971 from = sec->size + sec->output_offset + sec->output_section->vma;
972 g->stub_addr = from;
973 to = (htab->ovly_load->root.u.def.value
974 + htab->ovly_load->root.u.def.section->output_offset
975 + htab->ovly_load->root.u.def.section->output_section->vma);
976 val = to - from;
977 if (OVL_STUB_SIZE == 16)
978 val -= 12;
979 if (((dest | to | from) & 3) != 0
f3c29e8a 980 || val + 0x40000 >= 0x80000)
47f6dab9
AM
981 {
982 htab->stub_err = 1;
983 return FALSE;
984 }
985 ovl = spu_elf_section_data (dest_sec->output_section)->u.o.ovl_index;
986
987 if (OVL_STUB_SIZE == 16)
988 {
989 bfd_put_32 (sec->owner, ILA + ((ovl << 7) & 0x01ffff80) + 78,
990 sec->contents + sec->size);
991 bfd_put_32 (sec->owner, LNOP,
992 sec->contents + sec->size + 4);
993 bfd_put_32 (sec->owner, ILA + ((dest << 7) & 0x01ffff80) + 79,
994 sec->contents + sec->size + 8);
995 bfd_put_32 (sec->owner, BR + ((val << 5) & 0x007fff80),
996 sec->contents + sec->size + 12);
997 }
998 else if (OVL_STUB_SIZE == 8)
999 {
1000 bfd_put_32 (sec->owner, BRSL + ((val << 5) & 0x007fff80) + 75,
1001 sec->contents + sec->size);
1002
124b52c6 1003 val = (dest & 0x3ffff) | (ovl << 18);
47f6dab9
AM
1004 bfd_put_32 (sec->owner, val,
1005 sec->contents + sec->size + 4);
1006 }
1007 else
1008 abort ();
1009 sec->size += OVL_STUB_SIZE;
1010
1011 if (htab->emit_stub_syms)
1012 {
1013 size_t len;
1014 char *name;
1015 int add;
1016
1017 len = 8 + sizeof (".ovl_call.") - 1;
1018 if (h != NULL)
1019 len += strlen (h->root.root.string);
1020 else
1021 len += 8 + 1 + 8;
1022 add = 0;
1023 if (irela != NULL)
1024 add = (int) irela->r_addend & 0xffffffff;
1025 if (add != 0)
1026 len += 1 + 8;
1027 name = bfd_malloc (len);
1028 if (name == NULL)
1029 return FALSE;
1030
1031 sprintf (name, "%08x.ovl_call.", g->ovl);
1032 if (h != NULL)
1033 strcpy (name + 8 + sizeof (".ovl_call.") - 1, h->root.root.string);
1034 else
1035 sprintf (name + 8 + sizeof (".ovl_call.") - 1, "%x:%x",
1036 dest_sec->id & 0xffffffff,
1037 (int) ELF32_R_SYM (irela->r_info) & 0xffffffff);
1038 if (add != 0)
1039 sprintf (name + len - 9, "+%x", add);
1040
1041 h = elf_link_hash_lookup (&htab->elf, name, TRUE, TRUE, FALSE);
1042 free (name);
1043 if (h == NULL)
1044 return FALSE;
1045 if (h->root.type == bfd_link_hash_new)
1046 {
1047 h->root.type = bfd_link_hash_defined;
1048 h->root.u.def.section = sec;
1049 h->root.u.def.value = sec->size - OVL_STUB_SIZE;
1050 h->size = OVL_STUB_SIZE;
1051 h->type = STT_FUNC;
1052 h->ref_regular = 1;
1053 h->def_regular = 1;
1054 h->ref_regular_nonweak = 1;
1055 h->forced_local = 1;
1056 h->non_elf = 0;
1057 }
1058 }
1059
1060 return TRUE;
1061}
1062
f4b39977
AM
1063/* Called via elf_link_hash_traverse to allocate stubs for any _SPUEAR_
1064 symbols. */
1065
1066static bfd_boolean
1067allocate_spuear_stubs (struct elf_link_hash_entry *h, void *inf)
1068{
1069 /* Symbols starting with _SPUEAR_ need a stub because they may be
1070 invoked by the PPU. */
380814a6
AM
1071 struct bfd_link_info *info = inf;
1072 struct spu_link_hash_table *htab = spu_hash_table (info);
1073 asection *sym_sec;
1074
f4b39977
AM
1075 if ((h->root.type == bfd_link_hash_defined
1076 || h->root.type == bfd_link_hash_defweak)
1077 && h->def_regular
380814a6
AM
1078 && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0
1079 && (sym_sec = h->root.u.def.section) != NULL
1080 && sym_sec->output_section != NULL
1081 && sym_sec->output_section->owner == info->output_bfd
1082 && spu_elf_section_data (sym_sec->output_section) != NULL
1083 && (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index != 0
1084 || htab->non_overlay_stubs))
f4b39977 1085 {
f3c29e8a 1086 return count_stub (htab, NULL, NULL, nonovl_stub, h, NULL);
f4b39977
AM
1087 }
1088
1089 return TRUE;
1090}
1091
e9f53129 1092static bfd_boolean
47f6dab9 1093build_spuear_stubs (struct elf_link_hash_entry *h, void *inf)
e9f53129 1094{
47f6dab9
AM
1095 /* Symbols starting with _SPUEAR_ need a stub because they may be
1096 invoked by the PPU. */
380814a6
AM
1097 struct bfd_link_info *info = inf;
1098 struct spu_link_hash_table *htab = spu_hash_table (info);
1099 asection *sym_sec;
1100
47f6dab9
AM
1101 if ((h->root.type == bfd_link_hash_defined
1102 || h->root.type == bfd_link_hash_defweak)
1103 && h->def_regular
380814a6
AM
1104 && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0
1105 && (sym_sec = h->root.u.def.section) != NULL
1106 && sym_sec->output_section != NULL
1107 && sym_sec->output_section->owner == info->output_bfd
1108 && spu_elf_section_data (sym_sec->output_section) != NULL
1109 && (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index != 0
1110 || htab->non_overlay_stubs))
47f6dab9 1111 {
f3c29e8a
AM
1112 return build_stub (htab, NULL, NULL, nonovl_stub, h, NULL,
1113 h->root.u.def.value, sym_sec);
47f6dab9
AM
1114 }
1115
e9f53129
AM
1116 return TRUE;
1117}
1118
47f6dab9 1119/* Size or build stubs. */
e9f53129 1120
47f6dab9 1121static bfd_boolean
c65be8d7 1122process_stubs (struct bfd_link_info *info, bfd_boolean build)
e9f53129
AM
1123{
1124 struct spu_link_hash_table *htab = spu_hash_table (info);
1125 bfd *ibfd;
e9f53129 1126
e9f53129
AM
1127 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
1128 {
1129 extern const bfd_target bfd_elf32_spu_vec;
1130 Elf_Internal_Shdr *symtab_hdr;
47f6dab9 1131 asection *isec;
e9f53129
AM
1132 Elf_Internal_Sym *local_syms = NULL;
1133
1134 if (ibfd->xvec != &bfd_elf32_spu_vec)
1135 continue;
1136
1137 /* We'll need the symbol table in a second. */
1138 symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
1139 if (symtab_hdr->sh_info == 0)
1140 continue;
1141
1142 /* Walk over each section attached to the input bfd. */
47f6dab9 1143 for (isec = ibfd->sections; isec != NULL; isec = isec->next)
e9f53129
AM
1144 {
1145 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
1146
1147 /* If there aren't any relocs, then there's nothing more to do. */
47f6dab9 1148 if ((isec->flags & SEC_RELOC) == 0
47f6dab9 1149 || isec->reloc_count == 0)
e9f53129
AM
1150 continue;
1151
c65be8d7 1152 if (!maybe_needs_stubs (isec, info->output_bfd))
e9f53129
AM
1153 continue;
1154
1155 /* Get the relocs. */
47f6dab9
AM
1156 internal_relocs = _bfd_elf_link_read_relocs (ibfd, isec, NULL, NULL,
1157 info->keep_memory);
e9f53129
AM
1158 if (internal_relocs == NULL)
1159 goto error_ret_free_local;
1160
1161 /* Now examine each relocation. */
1162 irela = internal_relocs;
47f6dab9 1163 irelaend = irela + isec->reloc_count;
e9f53129
AM
1164 for (; irela < irelaend; irela++)
1165 {
1166 enum elf_spu_reloc_type r_type;
1167 unsigned int r_indx;
1168 asection *sym_sec;
1169 Elf_Internal_Sym *sym;
1170 struct elf_link_hash_entry *h;
fdba2fcd 1171 enum _stub_type stub_type;
e9f53129
AM
1172
1173 r_type = ELF32_R_TYPE (irela->r_info);
1174 r_indx = ELF32_R_SYM (irela->r_info);
1175
1176 if (r_type >= R_SPU_max)
1177 {
1178 bfd_set_error (bfd_error_bad_value);
47f6dab9
AM
1179 error_ret_free_internal:
1180 if (elf_section_data (isec)->relocs != internal_relocs)
1181 free (internal_relocs);
1182 error_ret_free_local:
1183 if (local_syms != NULL
1184 && (symtab_hdr->contents
1185 != (unsigned char *) local_syms))
1186 free (local_syms);
1187 return FALSE;
e9f53129
AM
1188 }
1189
1190 /* Determine the reloc target section. */
1f27ab8d 1191 if (!get_sym_h (&h, &sym, &sym_sec, &local_syms, r_indx, ibfd))
e9f53129
AM
1192 goto error_ret_free_internal;
1193
fdba2fcd
AM
1194 stub_type = needs_ovl_stub (h, sym, sym_sec, isec, irela,
1195 NULL, info);
1196 if (stub_type == no_stub)
e9f53129 1197 continue;
fdba2fcd
AM
1198 else if (stub_type == stub_error)
1199 goto error_ret_free_internal;
e9f53129 1200
47f6dab9 1201 if (htab->stub_count == NULL)
e9f53129 1202 {
47f6dab9
AM
1203 bfd_size_type amt;
1204 amt = (htab->num_overlays + 1) * sizeof (*htab->stub_count);
1205 htab->stub_count = bfd_zmalloc (amt);
1206 if (htab->stub_count == NULL)
1207 goto error_ret_free_internal;
e9f53129
AM
1208 }
1209
47f6dab9 1210 if (!build)
e9f53129 1211 {
fdba2fcd 1212 if (!count_stub (htab, ibfd, isec, stub_type, h, irela))
47f6dab9 1213 goto error_ret_free_internal;
e9f53129 1214 }
e9f53129 1215 else
47f6dab9
AM
1216 {
1217 bfd_vma dest;
1218
1219 if (h != NULL)
1220 dest = h->root.u.def.value;
1221 else
1222 dest = sym->st_value;
4a628337 1223 dest += irela->r_addend;
fdba2fcd 1224 if (!build_stub (htab, ibfd, isec, stub_type, h, irela,
47f6dab9
AM
1225 dest, sym_sec))
1226 goto error_ret_free_internal;
1227 }
e9f53129
AM
1228 }
1229
1230 /* We're done with the internal relocs, free them. */
47f6dab9 1231 if (elf_section_data (isec)->relocs != internal_relocs)
e9f53129
AM
1232 free (internal_relocs);
1233 }
1234
1235 if (local_syms != NULL
1236 && symtab_hdr->contents != (unsigned char *) local_syms)
1237 {
1238 if (!info->keep_memory)
1239 free (local_syms);
1240 else
1241 symtab_hdr->contents = (unsigned char *) local_syms;
1242 }
1243 }
1244
47f6dab9
AM
1245 return TRUE;
1246}
1247
1248/* Allocate space for overlay call and return stubs. */
1249
1250int
c65be8d7 1251spu_elf_size_stubs (struct bfd_link_info *info,
47f6dab9
AM
1252 void (*place_spu_section) (asection *, asection *,
1253 const char *),
1254 int non_overlay_stubs)
1255{
1256 struct spu_link_hash_table *htab = spu_hash_table (info);
1257 bfd *ibfd;
1258 bfd_size_type amt;
1259 flagword flags;
1260 unsigned int i;
1261 asection *stub;
1262
1263 htab->non_overlay_stubs = non_overlay_stubs;
c65be8d7 1264 if (!process_stubs (info, FALSE))
47f6dab9
AM
1265 return 0;
1266
380814a6 1267 elf_link_hash_traverse (&htab->elf, allocate_spuear_stubs, info);
47f6dab9
AM
1268 if (htab->stub_err)
1269 return 0;
f4b39977 1270
47f6dab9
AM
1271 if (htab->stub_count == NULL)
1272 return 1;
e9f53129
AM
1273
1274 ibfd = info->input_bfds;
47f6dab9
AM
1275 amt = (htab->num_overlays + 1) * sizeof (*htab->stub_sec);
1276 htab->stub_sec = bfd_zmalloc (amt);
1277 if (htab->stub_sec == NULL)
1278 return 0;
e9f53129 1279
47f6dab9 1280 flags = (SEC_ALLOC | SEC_LOAD | SEC_CODE | SEC_READONLY
e9f53129 1281 | SEC_HAS_CONTENTS | SEC_IN_MEMORY);
47f6dab9
AM
1282 stub = bfd_make_section_anyway_with_flags (ibfd, ".stub", flags);
1283 htab->stub_sec[0] = stub;
1284 if (stub == NULL
1285 || !bfd_set_section_alignment (ibfd, stub, 3 + (OVL_STUB_SIZE > 8)))
1286 return 0;
1287 stub->size = htab->stub_count[0] * OVL_STUB_SIZE;
1288 (*place_spu_section) (stub, NULL, ".text");
e9f53129 1289
47f6dab9 1290 for (i = 0; i < htab->num_overlays; ++i)
e9f53129 1291 {
47f6dab9
AM
1292 asection *osec = htab->ovl_sec[i];
1293 unsigned int ovl = spu_elf_section_data (osec)->u.o.ovl_index;
1294 stub = bfd_make_section_anyway_with_flags (ibfd, ".stub", flags);
1295 htab->stub_sec[ovl] = stub;
1296 if (stub == NULL
1297 || !bfd_set_section_alignment (ibfd, stub, 3 + (OVL_STUB_SIZE > 8)))
1298 return 0;
1299 stub->size = htab->stub_count[ovl] * OVL_STUB_SIZE;
1300 (*place_spu_section) (stub, osec, NULL);
e9f53129 1301 }
e9f53129
AM
1302
1303 /* htab->ovtab consists of two arrays.
1304 . struct {
1305 . u32 vma;
1306 . u32 size;
1307 . u32 file_off;
1308 . u32 buf;
1309 . } _ovly_table[];
1310 .
1311 . struct {
1312 . u32 mapped;
47f6dab9
AM
1313 . } _ovly_buf_table[];
1314 . */
e9f53129 1315
47f6dab9
AM
1316 flags = (SEC_ALLOC | SEC_LOAD
1317 | SEC_HAS_CONTENTS | SEC_IN_MEMORY);
1318 htab->ovtab = bfd_make_section_anyway_with_flags (ibfd, ".ovtab", flags);
1319 if (htab->ovtab == NULL
1320 || !bfd_set_section_alignment (ibfd, htab->ovtab, 4))
1321 return 0;
e9f53129 1322
2e444bea 1323 htab->ovtab->size = htab->num_overlays * 16 + 16 + htab->num_buf * 4;
47f6dab9
AM
1324 (*place_spu_section) (htab->ovtab, NULL, ".data");
1325
1326 htab->toe = bfd_make_section_anyway_with_flags (ibfd, ".toe", SEC_ALLOC);
1327 if (htab->toe == NULL
1328 || !bfd_set_section_alignment (ibfd, htab->toe, 4))
1329 return 0;
1330 htab->toe->size = 16;
1331 (*place_spu_section) (htab->toe, NULL, ".toe");
1332
1333 return 2;
e9f53129
AM
1334}
1335
1336/* Functions to handle embedded spu_ovl.o object. */
1337
1338static void *
1339ovl_mgr_open (struct bfd *nbfd ATTRIBUTE_UNUSED, void *stream)
1340{
1341 return stream;
1342}
1343
1344static file_ptr
1345ovl_mgr_pread (struct bfd *abfd ATTRIBUTE_UNUSED,
1346 void *stream,
1347 void *buf,
1348 file_ptr nbytes,
1349 file_ptr offset)
1350{
1351 struct _ovl_stream *os;
1352 size_t count;
1353 size_t max;
1354
1355 os = (struct _ovl_stream *) stream;
7a8757b3 1356 max = (const char *) os->end - (const char *) os->start;
e9f53129
AM
1357
1358 if ((ufile_ptr) offset >= max)
1359 return 0;
1360
1361 count = nbytes;
1362 if (count > max - offset)
1363 count = max - offset;
1364
7a8757b3 1365 memcpy (buf, (const char *) os->start + offset, count);
e9f53129
AM
1366 return count;
1367}
1368
1369bfd_boolean
1370spu_elf_open_builtin_lib (bfd **ovl_bfd, const struct _ovl_stream *stream)
1371{
1372 *ovl_bfd = bfd_openr_iovec ("builtin ovl_mgr",
1373 "elf32-spu",
1374 ovl_mgr_open,
1375 (void *) stream,
1376 ovl_mgr_pread,
f6cf9273 1377 NULL,
e9f53129
AM
1378 NULL);
1379 return *ovl_bfd != NULL;
1380}
1381
e9f53129
AM
1382/* Define an STT_OBJECT symbol. */
1383
1384static struct elf_link_hash_entry *
1385define_ovtab_symbol (struct spu_link_hash_table *htab, const char *name)
1386{
1387 struct elf_link_hash_entry *h;
1388
1389 h = elf_link_hash_lookup (&htab->elf, name, TRUE, FALSE, FALSE);
1390 if (h == NULL)
1391 return NULL;
1392
1393 if (h->root.type != bfd_link_hash_defined
1394 || !h->def_regular)
1395 {
1396 h->root.type = bfd_link_hash_defined;
1397 h->root.u.def.section = htab->ovtab;
1398 h->type = STT_OBJECT;
1399 h->ref_regular = 1;
1400 h->def_regular = 1;
1401 h->ref_regular_nonweak = 1;
1402 h->non_elf = 0;
1403 }
1404 else
1405 {
1406 (*_bfd_error_handler) (_("%B is not allowed to define %s"),
1407 h->root.u.def.section->owner,
1408 h->root.root.string);
1409 bfd_set_error (bfd_error_bad_value);
1410 return NULL;
1411 }
1412
1413 return h;
1414}
1415
1416/* Fill in all stubs and the overlay tables. */
1417
1418bfd_boolean
47f6dab9 1419spu_elf_build_stubs (struct bfd_link_info *info, int emit_syms)
e9f53129
AM
1420{
1421 struct spu_link_hash_table *htab = spu_hash_table (info);
1422 struct elf_link_hash_entry *h;
1423 bfd_byte *p;
1424 asection *s;
1425 bfd *obfd;
1426 unsigned int i;
1427
1428 htab->emit_stub_syms = emit_syms;
47f6dab9
AM
1429 if (htab->stub_count == NULL)
1430 return TRUE;
1431
1432 for (i = 0; i <= htab->num_overlays; i++)
1433 if (htab->stub_sec[i]->size != 0)
1434 {
1435 htab->stub_sec[i]->contents = bfd_zalloc (htab->stub_sec[i]->owner,
1436 htab->stub_sec[i]->size);
1437 if (htab->stub_sec[i]->contents == NULL)
1438 return FALSE;
1439 htab->stub_sec[i]->rawsize = htab->stub_sec[i]->size;
1440 htab->stub_sec[i]->size = 0;
1441 }
e9f53129
AM
1442
1443 h = elf_link_hash_lookup (&htab->elf, "__ovly_load", FALSE, FALSE, FALSE);
1444 htab->ovly_load = h;
1445 BFD_ASSERT (h != NULL
1446 && (h->root.type == bfd_link_hash_defined
1447 || h->root.type == bfd_link_hash_defweak)
1448 && h->def_regular);
1449
1450 s = h->root.u.def.section->output_section;
47f6dab9 1451 if (spu_elf_section_data (s)->u.o.ovl_index)
e9f53129
AM
1452 {
1453 (*_bfd_error_handler) (_("%s in overlay section"),
2ec9638b 1454 h->root.root.string);
e9f53129
AM
1455 bfd_set_error (bfd_error_bad_value);
1456 return FALSE;
1457 }
1458
47f6dab9
AM
1459 h = elf_link_hash_lookup (&htab->elf, "__ovly_return", FALSE, FALSE, FALSE);
1460 htab->ovly_return = h;
1461
c65be8d7
AM
1462 /* Fill in all the stubs. */
1463 process_stubs (info, TRUE);
f3c29e8a
AM
1464 if (!htab->stub_err)
1465 elf_link_hash_traverse (&htab->elf, build_spuear_stubs, info);
47f6dab9 1466
47f6dab9 1467 if (htab->stub_err)
f3c29e8a
AM
1468 {
1469 (*_bfd_error_handler) (_("overlay stub relocation overflow"));
1470 bfd_set_error (bfd_error_bad_value);
1471 return FALSE;
1472 }
e9f53129 1473
47f6dab9
AM
1474 for (i = 0; i <= htab->num_overlays; i++)
1475 {
1476 if (htab->stub_sec[i]->size != htab->stub_sec[i]->rawsize)
1477 {
1478 (*_bfd_error_handler) (_("stubs don't match calculated size"));
1479 bfd_set_error (bfd_error_bad_value);
1480 return FALSE;
1481 }
1482 htab->stub_sec[i]->rawsize = 0;
1483 }
1484
e9f53129
AM
1485 htab->ovtab->contents = bfd_zalloc (htab->ovtab->owner, htab->ovtab->size);
1486 if (htab->ovtab->contents == NULL)
1487 return FALSE;
1488
1489 /* Write out _ovly_table. */
1490 p = htab->ovtab->contents;
2e444bea
AM
1491 /* set low bit of .size to mark non-overlay area as present. */
1492 p[7] = 1;
c65be8d7 1493 obfd = htab->ovtab->output_section->owner;
e9f53129
AM
1494 for (s = obfd->sections; s != NULL; s = s->next)
1495 {
47f6dab9 1496 unsigned int ovl_index = spu_elf_section_data (s)->u.o.ovl_index;
e9f53129
AM
1497
1498 if (ovl_index != 0)
1499 {
47f6dab9
AM
1500 unsigned long off = ovl_index * 16;
1501 unsigned int ovl_buf = spu_elf_section_data (s)->u.o.ovl_buf;
1502
e9f53129
AM
1503 bfd_put_32 (htab->ovtab->owner, s->vma, p + off);
1504 bfd_put_32 (htab->ovtab->owner, (s->size + 15) & -16, p + off + 4);
1505 /* file_off written later in spu_elf_modify_program_headers. */
2e444bea 1506 bfd_put_32 (htab->ovtab->owner, ovl_buf, p + off + 12);
e9f53129
AM
1507 }
1508 }
1509
e9f53129
AM
1510 h = define_ovtab_symbol (htab, "_ovly_table");
1511 if (h == NULL)
1512 return FALSE;
47f6dab9 1513 h->root.u.def.value = 16;
e9f53129
AM
1514 h->size = htab->num_overlays * 16;
1515
1516 h = define_ovtab_symbol (htab, "_ovly_table_end");
1517 if (h == NULL)
1518 return FALSE;
47f6dab9 1519 h->root.u.def.value = htab->num_overlays * 16 + 16;
e9f53129
AM
1520 h->size = 0;
1521
1522 h = define_ovtab_symbol (htab, "_ovly_buf_table");
1523 if (h == NULL)
1524 return FALSE;
47f6dab9 1525 h->root.u.def.value = htab->num_overlays * 16 + 16;
2e444bea 1526 h->size = htab->num_buf * 4;
e9f53129
AM
1527
1528 h = define_ovtab_symbol (htab, "_ovly_buf_table_end");
1529 if (h == NULL)
1530 return FALSE;
2e444bea 1531 h->root.u.def.value = htab->num_overlays * 16 + 16 + htab->num_buf * 4;
e9f53129
AM
1532 h->size = 0;
1533
1534 h = define_ovtab_symbol (htab, "_EAR_");
1535 if (h == NULL)
1536 return FALSE;
47f6dab9 1537 h->root.u.def.section = htab->toe;
e9f53129
AM
1538 h->root.u.def.value = 0;
1539 h->size = 16;
1540
1541 return TRUE;
1542}
1543
c65be8d7 1544/* Check that all loadable section VMAs lie in the range
9dcc4794 1545 LO .. HI inclusive, and stash some parameters for --auto-overlay. */
c65be8d7
AM
1546
1547asection *
9dcc4794
AM
1548spu_elf_check_vma (struct bfd_link_info *info,
1549 int auto_overlay,
1550 unsigned int lo,
1551 unsigned int hi,
1552 unsigned int overlay_fixed,
1553 unsigned int reserved,
99302af9 1554 int extra_stack_space,
9dcc4794
AM
1555 void (*spu_elf_load_ovl_mgr) (void),
1556 FILE *(*spu_elf_open_overlay_script) (void),
1557 void (*spu_elf_relink) (void))
c65be8d7
AM
1558{
1559 struct elf_segment_map *m;
1560 unsigned int i;
9dcc4794 1561 struct spu_link_hash_table *htab = spu_hash_table (info);
c65be8d7
AM
1562 bfd *abfd = info->output_bfd;
1563
9dcc4794
AM
1564 if (auto_overlay & AUTO_OVERLAY)
1565 htab->auto_overlay = auto_overlay;
1566 htab->local_store = hi + 1 - lo;
1567 htab->overlay_fixed = overlay_fixed;
1568 htab->reserved = reserved;
99302af9 1569 htab->extra_stack_space = extra_stack_space;
9dcc4794
AM
1570 htab->spu_elf_load_ovl_mgr = spu_elf_load_ovl_mgr;
1571 htab->spu_elf_open_overlay_script = spu_elf_open_overlay_script;
1572 htab->spu_elf_relink = spu_elf_relink;
1573
c65be8d7
AM
1574 for (m = elf_tdata (abfd)->segment_map; m != NULL; m = m->next)
1575 if (m->p_type == PT_LOAD)
1576 for (i = 0; i < m->count; i++)
1577 if (m->sections[i]->size != 0
1578 && (m->sections[i]->vma < lo
1579 || m->sections[i]->vma > hi
1580 || m->sections[i]->vma + m->sections[i]->size - 1 > hi))
1581 return m->sections[i];
1582
9dcc4794
AM
1583 /* No need for overlays if it all fits. */
1584 htab->auto_overlay = 0;
c65be8d7
AM
1585 return NULL;
1586}
1587
49fa1e15
AM
1588/* OFFSET in SEC (presumably) is the beginning of a function prologue.
1589 Search for stack adjusting insns, and return the sp delta. */
1590
1591static int
1592find_function_stack_adjust (asection *sec, bfd_vma offset)
1593{
49fa1e15
AM
1594 int reg[128];
1595
1596 memset (reg, 0, sizeof (reg));
667f3338 1597 for ( ; offset + 4 <= sec->size; offset += 4)
49fa1e15
AM
1598 {
1599 unsigned char buf[4];
1600 int rt, ra;
1601 int imm;
1602
1603 /* Assume no relocs on stack adjusing insns. */
1604 if (!bfd_get_section_contents (sec->owner, sec, buf, offset, 4))
1605 break;
1606
1607 if (buf[0] == 0x24 /* stqd */)
1608 continue;
1609
1610 rt = buf[3] & 0x7f;
1611 ra = ((buf[2] & 0x3f) << 1) | (buf[3] >> 7);
1612 /* Partly decoded immediate field. */
1613 imm = (buf[1] << 9) | (buf[2] << 1) | (buf[3] >> 7);
1614
1615 if (buf[0] == 0x1c /* ai */)
1616 {
1617 imm >>= 7;
1618 imm = (imm ^ 0x200) - 0x200;
1619 reg[rt] = reg[ra] + imm;
1620
1621 if (rt == 1 /* sp */)
1622 {
667f3338 1623 if (reg[rt] > 0)
49fa1e15
AM
1624 break;
1625 return reg[rt];
1626 }
1627 }
1628 else if (buf[0] == 0x18 && (buf[1] & 0xe0) == 0 /* a */)
1629 {
1630 int rb = ((buf[1] & 0x1f) << 2) | ((buf[2] & 0xc0) >> 6);
1631
1632 reg[rt] = reg[ra] + reg[rb];
1633 if (rt == 1)
667f3338
AM
1634 {
1635 if (reg[rt] > 0)
1636 break;
1637 return reg[rt];
1638 }
49fa1e15
AM
1639 }
1640 else if ((buf[0] & 0xfc) == 0x40 /* il, ilh, ilhu, ila */)
1641 {
1642 if (buf[0] >= 0x42 /* ila */)
1643 imm |= (buf[0] & 1) << 17;
1644 else
1645 {
1646 imm &= 0xffff;
1647
1648 if (buf[0] == 0x40 /* il */)
1649 {
1650 if ((buf[1] & 0x80) == 0)
667f3338 1651 continue;
49fa1e15
AM
1652 imm = (imm ^ 0x8000) - 0x8000;
1653 }
1654 else if ((buf[1] & 0x80) == 0 /* ilhu */)
1655 imm <<= 16;
1656 }
1657 reg[rt] = imm;
1658 continue;
1659 }
1660 else if (buf[0] == 0x60 && (buf[1] & 0x80) != 0 /* iohl */)
1661 {
1662 reg[rt] |= imm & 0xffff;
1663 continue;
1664 }
1665 else if (buf[0] == 0x04 /* ori */)
1666 {
1667 imm >>= 7;
1668 imm = (imm ^ 0x200) - 0x200;
1669 reg[rt] = reg[ra] | imm;
1670 continue;
1671 }
667f3338
AM
1672 else if (buf[0] == 0x32 && (buf[1] & 0x80) != 0 /* fsmbi */)
1673 {
1674 reg[rt] = ( ((imm & 0x8000) ? 0xff000000 : 0)
1675 | ((imm & 0x4000) ? 0x00ff0000 : 0)
1676 | ((imm & 0x2000) ? 0x0000ff00 : 0)
1677 | ((imm & 0x1000) ? 0x000000ff : 0));
1678 continue;
1679 }
1680 else if (buf[0] == 0x16 /* andbi */)
49fa1e15 1681 {
667f3338
AM
1682 imm >>= 7;
1683 imm &= 0xff;
1684 imm |= imm << 8;
1685 imm |= imm << 16;
1686 reg[rt] = reg[ra] & imm;
1687 continue;
1688 }
1689 else if (buf[0] == 0x33 && imm == 1 /* brsl .+4 */)
1690 {
1691 /* Used in pic reg load. Say rt is trashed. Won't be used
1692 in stack adjust, but we need to continue past this branch. */
49fa1e15
AM
1693 reg[rt] = 0;
1694 continue;
1695 }
fad9eaf0 1696 else if (is_branch (buf) || is_indirect_branch (buf))
49fa1e15
AM
1697 /* If we hit a branch then we must be out of the prologue. */
1698 break;
49fa1e15
AM
1699 }
1700
1701 return 0;
1702}
1703
1704/* qsort predicate to sort symbols by section and value. */
1705
1706static Elf_Internal_Sym *sort_syms_syms;
1707static asection **sort_syms_psecs;
1708
1709static int
1710sort_syms (const void *a, const void *b)
1711{
1712 Elf_Internal_Sym *const *s1 = a;
1713 Elf_Internal_Sym *const *s2 = b;
1714 asection *sec1,*sec2;
1715 bfd_signed_vma delta;
1716
1717 sec1 = sort_syms_psecs[*s1 - sort_syms_syms];
1718 sec2 = sort_syms_psecs[*s2 - sort_syms_syms];
1719
1720 if (sec1 != sec2)
1721 return sec1->index - sec2->index;
1722
1723 delta = (*s1)->st_value - (*s2)->st_value;
1724 if (delta != 0)
1725 return delta < 0 ? -1 : 1;
1726
1727 delta = (*s2)->st_size - (*s1)->st_size;
1728 if (delta != 0)
1729 return delta < 0 ? -1 : 1;
1730
1731 return *s1 < *s2 ? -1 : 1;
1732}
1733
1734struct call_info
1735{
1736 struct function_info *fun;
1737 struct call_info *next;
9dcc4794
AM
1738 unsigned int count;
1739 unsigned int max_depth;
c65be8d7 1740 unsigned int is_tail : 1;
9dcc4794 1741 unsigned int is_pasted : 1;
49fa1e15
AM
1742};
1743
1744struct function_info
1745{
1746 /* List of functions called. Also branches to hot/cold part of
1747 function. */
1748 struct call_info *call_list;
1749 /* For hot/cold part of function, point to owner. */
1750 struct function_info *start;
1751 /* Symbol at start of function. */
1752 union {
1753 Elf_Internal_Sym *sym;
1754 struct elf_link_hash_entry *h;
1755 } u;
1756 /* Function section. */
1757 asection *sec;
9dcc4794
AM
1758 asection *rodata;
1759 /* Where last called from, and number of sections called from. */
1760 asection *last_caller;
1761 unsigned int call_count;
49fa1e15
AM
1762 /* Address range of (this part of) function. */
1763 bfd_vma lo, hi;
1764 /* Stack usage. */
1765 int stack;
9dcc4794
AM
1766 /* Distance from root of call tree. Tail and hot/cold branches
1767 count as one deeper. We aren't counting stack frames here. */
1768 unsigned int depth;
49fa1e15
AM
1769 /* Set if global symbol. */
1770 unsigned int global : 1;
1771 /* Set if known to be start of function (as distinct from a hunk
1772 in hot/cold section. */
1773 unsigned int is_func : 1;
9dcc4794 1774 /* Set if not a root node. */
49fa1e15 1775 unsigned int non_root : 1;
9dcc4794
AM
1776 /* Flags used during call tree traversal. It's cheaper to replicate
1777 the visit flags than have one which needs clearing after a traversal. */
1778 unsigned int visit1 : 1;
49fa1e15
AM
1779 unsigned int visit2 : 1;
1780 unsigned int marking : 1;
1781 unsigned int visit3 : 1;
9dcc4794
AM
1782 unsigned int visit4 : 1;
1783 unsigned int visit5 : 1;
1784 unsigned int visit6 : 1;
1785 unsigned int visit7 : 1;
49fa1e15
AM
1786};
1787
1788struct spu_elf_stack_info
1789{
1790 int num_fun;
1791 int max_fun;
1792 /* Variable size array describing functions, one per contiguous
1793 address range belonging to a function. */
1794 struct function_info fun[1];
1795};
1796
1797/* Allocate a struct spu_elf_stack_info with MAX_FUN struct function_info
1798 entries for section SEC. */
1799
1800static struct spu_elf_stack_info *
1801alloc_stack_info (asection *sec, int max_fun)
1802{
1803 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
1804 bfd_size_type amt;
1805
1806 amt = sizeof (struct spu_elf_stack_info);
1807 amt += (max_fun - 1) * sizeof (struct function_info);
47f6dab9
AM
1808 sec_data->u.i.stack_info = bfd_zmalloc (amt);
1809 if (sec_data->u.i.stack_info != NULL)
1810 sec_data->u.i.stack_info->max_fun = max_fun;
1811 return sec_data->u.i.stack_info;
49fa1e15
AM
1812}
1813
1814/* Add a new struct function_info describing a (part of a) function
1815 starting at SYM_H. Keep the array sorted by address. */
1816
1817static struct function_info *
1818maybe_insert_function (asection *sec,
1819 void *sym_h,
1820 bfd_boolean global,
1821 bfd_boolean is_func)
1822{
1823 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
47f6dab9 1824 struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
49fa1e15
AM
1825 int i;
1826 bfd_vma off, size;
1827
1828 if (sinfo == NULL)
1829 {
1830 sinfo = alloc_stack_info (sec, 20);
1831 if (sinfo == NULL)
1832 return NULL;
1833 }
1834
1835 if (!global)
1836 {
1837 Elf_Internal_Sym *sym = sym_h;
1838 off = sym->st_value;
1839 size = sym->st_size;
1840 }
1841 else
1842 {
1843 struct elf_link_hash_entry *h = sym_h;
1844 off = h->root.u.def.value;
1845 size = h->size;
1846 }
1847
1848 for (i = sinfo->num_fun; --i >= 0; )
1849 if (sinfo->fun[i].lo <= off)
1850 break;
1851
1852 if (i >= 0)
1853 {
1854 /* Don't add another entry for an alias, but do update some
1855 info. */
1856 if (sinfo->fun[i].lo == off)
1857 {
1858 /* Prefer globals over local syms. */
1859 if (global && !sinfo->fun[i].global)
1860 {
1861 sinfo->fun[i].global = TRUE;
1862 sinfo->fun[i].u.h = sym_h;
1863 }
1864 if (is_func)
1865 sinfo->fun[i].is_func = TRUE;
1866 return &sinfo->fun[i];
1867 }
1868 /* Ignore a zero-size symbol inside an existing function. */
1869 else if (sinfo->fun[i].hi > off && size == 0)
1870 return &sinfo->fun[i];
1871 }
1872
1f27ab8d 1873 if (sinfo->num_fun >= sinfo->max_fun)
49fa1e15
AM
1874 {
1875 bfd_size_type amt = sizeof (struct spu_elf_stack_info);
1876 bfd_size_type old = amt;
1877
1878 old += (sinfo->max_fun - 1) * sizeof (struct function_info);
1879 sinfo->max_fun += 20 + (sinfo->max_fun >> 1);
1880 amt += (sinfo->max_fun - 1) * sizeof (struct function_info);
1881 sinfo = bfd_realloc (sinfo, amt);
1882 if (sinfo == NULL)
1883 return NULL;
1884 memset ((char *) sinfo + old, 0, amt - old);
47f6dab9 1885 sec_data->u.i.stack_info = sinfo;
49fa1e15 1886 }
1f27ab8d
AM
1887
1888 if (++i < sinfo->num_fun)
1889 memmove (&sinfo->fun[i + 1], &sinfo->fun[i],
1890 (sinfo->num_fun - i) * sizeof (sinfo->fun[i]));
49fa1e15
AM
1891 sinfo->fun[i].is_func = is_func;
1892 sinfo->fun[i].global = global;
1893 sinfo->fun[i].sec = sec;
1894 if (global)
1895 sinfo->fun[i].u.h = sym_h;
1896 else
1897 sinfo->fun[i].u.sym = sym_h;
1898 sinfo->fun[i].lo = off;
1899 sinfo->fun[i].hi = off + size;
1900 sinfo->fun[i].stack = -find_function_stack_adjust (sec, off);
1901 sinfo->num_fun += 1;
1902 return &sinfo->fun[i];
1903}
1904
1905/* Return the name of FUN. */
1906
1907static const char *
1908func_name (struct function_info *fun)
1909{
1910 asection *sec;
1911 bfd *ibfd;
1912 Elf_Internal_Shdr *symtab_hdr;
1913
1914 while (fun->start != NULL)
1915 fun = fun->start;
1916
1917 if (fun->global)
1918 return fun->u.h->root.root.string;
1919
1920 sec = fun->sec;
1921 if (fun->u.sym->st_name == 0)
1922 {
1923 size_t len = strlen (sec->name);
1924 char *name = bfd_malloc (len + 10);
1925 if (name == NULL)
1926 return "(null)";
1927 sprintf (name, "%s+%lx", sec->name,
1928 (unsigned long) fun->u.sym->st_value & 0xffffffff);
1929 return name;
1930 }
1931 ibfd = sec->owner;
1932 symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
1933 return bfd_elf_sym_name (ibfd, symtab_hdr, fun->u.sym, sec);
1934}
1935
1936/* Read the instruction at OFF in SEC. Return true iff the instruction
1937 is a nop, lnop, or stop 0 (all zero insn). */
1938
1939static bfd_boolean
1940is_nop (asection *sec, bfd_vma off)
1941{
1942 unsigned char insn[4];
1943
1944 if (off + 4 > sec->size
1945 || !bfd_get_section_contents (sec->owner, sec, insn, off, 4))
1946 return FALSE;
1947 if ((insn[0] & 0xbf) == 0 && (insn[1] & 0xe0) == 0x20)
1948 return TRUE;
1949 if (insn[0] == 0 && insn[1] == 0 && insn[2] == 0 && insn[3] == 0)
1950 return TRUE;
1951 return FALSE;
1952}
1953
1954/* Extend the range of FUN to cover nop padding up to LIMIT.
1955 Return TRUE iff some instruction other than a NOP was found. */
1956
1957static bfd_boolean
1958insns_at_end (struct function_info *fun, bfd_vma limit)
1959{
1960 bfd_vma off = (fun->hi + 3) & -4;
1961
1962 while (off < limit && is_nop (fun->sec, off))
1963 off += 4;
1964 if (off < limit)
1965 {
1966 fun->hi = off;
1967 return TRUE;
1968 }
1969 fun->hi = limit;
1970 return FALSE;
1971}
1972
1973/* Check and fix overlapping function ranges. Return TRUE iff there
1974 are gaps in the current info we have about functions in SEC. */
1975
1976static bfd_boolean
1977check_function_ranges (asection *sec, struct bfd_link_info *info)
1978{
1979 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
47f6dab9 1980 struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
49fa1e15
AM
1981 int i;
1982 bfd_boolean gaps = FALSE;
1983
1984 if (sinfo == NULL)
1985 return FALSE;
1986
1987 for (i = 1; i < sinfo->num_fun; i++)
1988 if (sinfo->fun[i - 1].hi > sinfo->fun[i].lo)
1989 {
1990 /* Fix overlapping symbols. */
1991 const char *f1 = func_name (&sinfo->fun[i - 1]);
1992 const char *f2 = func_name (&sinfo->fun[i]);
1993
1994 info->callbacks->einfo (_("warning: %s overlaps %s\n"), f1, f2);
1995 sinfo->fun[i - 1].hi = sinfo->fun[i].lo;
1996 }
1997 else if (insns_at_end (&sinfo->fun[i - 1], sinfo->fun[i].lo))
1998 gaps = TRUE;
1999
2000 if (sinfo->num_fun == 0)
2001 gaps = TRUE;
2002 else
2003 {
2004 if (sinfo->fun[0].lo != 0)
2005 gaps = TRUE;
2006 if (sinfo->fun[sinfo->num_fun - 1].hi > sec->size)
2007 {
2008 const char *f1 = func_name (&sinfo->fun[sinfo->num_fun - 1]);
2009
2010 info->callbacks->einfo (_("warning: %s exceeds section size\n"), f1);
2011 sinfo->fun[sinfo->num_fun - 1].hi = sec->size;
2012 }
2013 else if (insns_at_end (&sinfo->fun[sinfo->num_fun - 1], sec->size))
2014 gaps = TRUE;
2015 }
2016 return gaps;
2017}
2018
2019/* Search current function info for a function that contains address
2020 OFFSET in section SEC. */
2021
2022static struct function_info *
2023find_function (asection *sec, bfd_vma offset, struct bfd_link_info *info)
2024{
2025 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
47f6dab9 2026 struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
49fa1e15
AM
2027 int lo, hi, mid;
2028
2029 lo = 0;
2030 hi = sinfo->num_fun;
2031 while (lo < hi)
2032 {
2033 mid = (lo + hi) / 2;
2034 if (offset < sinfo->fun[mid].lo)
2035 hi = mid;
2036 else if (offset >= sinfo->fun[mid].hi)
2037 lo = mid + 1;
2038 else
2039 return &sinfo->fun[mid];
2040 }
2041 info->callbacks->einfo (_("%A:0x%v not found in function table\n"),
2042 sec, offset);
2043 return NULL;
2044}
2045
9dcc4794
AM
2046/* Add CALLEE to CALLER call list if not already present. Return TRUE
2047 if CALLEE was new. If this function return FALSE, CALLEE should
2048 be freed. */
49fa1e15
AM
2049
2050static bfd_boolean
2051insert_callee (struct function_info *caller, struct call_info *callee)
2052{
055ed83b
AM
2053 struct call_info **pp, *p;
2054
2055 for (pp = &caller->call_list; (p = *pp) != NULL; pp = &p->next)
49fa1e15
AM
2056 if (p->fun == callee->fun)
2057 {
2058 /* Tail calls use less stack than normal calls. Retain entry
2059 for normal call over one for tail call. */
c65be8d7
AM
2060 p->is_tail &= callee->is_tail;
2061 if (!p->is_tail)
2062 {
2063 p->fun->start = NULL;
2064 p->fun->is_func = TRUE;
2065 }
9dcc4794 2066 p->count += 1;
055ed83b
AM
2067 /* Reorder list so most recent call is first. */
2068 *pp = p->next;
2069 p->next = caller->call_list;
2070 caller->call_list = p;
49fa1e15
AM
2071 return FALSE;
2072 }
2073 callee->next = caller->call_list;
9dcc4794 2074 callee->count += 1;
49fa1e15
AM
2075 caller->call_list = callee;
2076 return TRUE;
2077}
2078
9dcc4794
AM
2079/* Copy CALL and insert the copy into CALLER. */
2080
2081static bfd_boolean
2082copy_callee (struct function_info *caller, const struct call_info *call)
2083{
2084 struct call_info *callee;
2085 callee = bfd_malloc (sizeof (*callee));
2086 if (callee == NULL)
2087 return FALSE;
2088 *callee = *call;
2089 if (!insert_callee (caller, callee))
2090 free (callee);
2091 return TRUE;
2092}
2093
055ed83b
AM
2094/* We're only interested in code sections. Testing SEC_IN_MEMORY excludes
2095 overlay stub sections. */
2096
2097static bfd_boolean
2098interesting_section (asection *s, bfd *obfd)
2099{
2100 return (s->output_section != NULL
2101 && s->output_section->owner == obfd
2102 && ((s->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE | SEC_IN_MEMORY))
2103 == (SEC_ALLOC | SEC_LOAD | SEC_CODE))
2104 && s->size != 0);
2105}
2106
49fa1e15
AM
2107/* Rummage through the relocs for SEC, looking for function calls.
2108 If CALL_TREE is true, fill in call graph. If CALL_TREE is false,
2109 mark destination symbols on calls as being functions. Also
2110 look at branches, which may be tail calls or go to hot/cold
2111 section part of same function. */
2112
static bfd_boolean
mark_functions_via_relocs (asection *sec,
			   struct bfd_link_info *info,
			   int call_tree)
{
  Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
  Elf_Internal_Shdr *symtab_hdr;
  void *psyms;
  /* Persists across calls: the non-code-section diagnostic below is
     only emitted once per link.  */
  static bfd_boolean warned;

  /* Only code sections with relocs are of interest.  */
  if (!interesting_section (sec, info->output_bfd)
      || sec->reloc_count == 0)
    return TRUE;

  internal_relocs = _bfd_elf_link_read_relocs (sec->owner, sec, NULL, NULL,
					       info->keep_memory);
  if (internal_relocs == NULL)
    return FALSE;

  symtab_hdr = &elf_tdata (sec->owner)->symtab_hdr;
  psyms = &symtab_hdr->contents;
  irela = internal_relocs;
  irelaend = irela + sec->reloc_count;
  for (; irela < irelaend; irela++)
    {
      enum elf_spu_reloc_type r_type;
      unsigned int r_indx;
      asection *sym_sec;
      Elf_Internal_Sym *sym;
      struct elf_link_hash_entry *h;
      bfd_vma val;
      bfd_boolean reject, is_call;
      struct function_info *caller;
      struct call_info *callee;

      /* REJECT means this reloc is not a branch/call; such relocs are
	 still examined under --auto-overlay to count function-pointer
	 references that will need non-overlay stubs.  */
      reject = FALSE;
      r_type = ELF32_R_TYPE (irela->r_info);
      if (r_type != R_SPU_REL16
	  && r_type != R_SPU_ADDR16)
	{
	  reject = TRUE;
	  if (!(call_tree && spu_hash_table (info)->auto_overlay))
	    continue;
	}

      r_indx = ELF32_R_SYM (irela->r_info);
      if (!get_sym_h (&h, &sym, &sym_sec, psyms, r_indx, sec->owner))
	return FALSE;

      /* Ignore targets that won't appear in the output.  */
      if (sym_sec == NULL
	  || sym_sec->output_section == NULL
	  || sym_sec->output_section->owner != info->output_bfd)
	continue;

      is_call = FALSE;
      if (!reject)
	{
	  unsigned char insn[4];

	  /* Fetch the instruction the reloc applies to, and decide
	     whether it is a call, plain branch, or neither.  */
	  if (!bfd_get_section_contents (sec->owner, sec, insn,
					 irela->r_offset, 4))
	    return FALSE;
	  if (is_branch (insn))
	    {
	      /* brsl/brasl opcodes set the link register.  */
	      is_call = (insn[0] & 0xfd) == 0x31;
	      if ((sym_sec->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE))
		  != (SEC_ALLOC | SEC_LOAD | SEC_CODE))
		{
		  if (!warned)
		    info->callbacks->einfo
		      (_("%B(%A+0x%v): call to non-code section"
			 " %B(%A), analysis incomplete\n"),
		       sec->owner, sec, irela->r_offset,
		       sym_sec->owner, sym_sec);
		  warned = TRUE;
		  continue;
		}
	    }
	  else
	    {
	      reject = TRUE;
	      if (!(call_tree && spu_hash_table (info)->auto_overlay)
		  || is_hint (insn))
		continue;
	    }
	}

      if (reject)
	{
	  /* For --auto-overlay, count possible stubs we need for
	     function pointer references.  */
	  unsigned int sym_type;
	  if (h)
	    sym_type = h->type;
	  else
	    sym_type = ELF_ST_TYPE (sym->st_info);
	  if (sym_type == STT_FUNC)
	    spu_hash_table (info)->non_ovly_stub += 1;
	  continue;
	}

      /* Resolve the branch destination address within SYM_SEC.  */
      if (h)
	val = h->root.u.def.value;
      else
	val = sym->st_value;
      val += irela->r_addend;

      if (!call_tree)
	{
	  /* First pass: just record the destination as a function
	     start.  A non-zero addend means the branch does not land
	     on the symbol itself, so fabricate a symbol at the
	     destination address.  */
	  struct function_info *fun;

	  if (irela->r_addend != 0)
	    {
	      Elf_Internal_Sym *fake = bfd_zmalloc (sizeof (*fake));
	      if (fake == NULL)
		return FALSE;
	      fake->st_value = val;
	      fake->st_shndx
		= _bfd_elf_section_from_bfd_section (sym_sec->owner, sym_sec);
	      sym = fake;
	    }
	  if (sym)
	    fun = maybe_insert_function (sym_sec, sym, FALSE, is_call);
	  else
	    fun = maybe_insert_function (sym_sec, h, TRUE, is_call);
	  if (fun == NULL)
	    return FALSE;
	  /* Free the fake symbol if maybe_insert_function did not
	     keep it.  */
	  if (irela->r_addend != 0
	      && fun->u.sym != sym)
	    free (sym);
	  continue;
	}

      /* Second pass: build the call graph edge caller -> callee.  */
      caller = find_function (sec, irela->r_offset, info);
      if (caller == NULL)
	return FALSE;
      callee = bfd_malloc (sizeof *callee);
      if (callee == NULL)
	return FALSE;

      callee->fun = find_function (sym_sec, val, info);
      if (callee->fun == NULL)
	return FALSE;
      callee->is_tail = !is_call;
      callee->is_pasted = FALSE;
      callee->count = 0;
      /* Count distinct calling sections for the lib-section heuristic.  */
      if (callee->fun->last_caller != sec)
	{
	  callee->fun->last_caller = sec;
	  callee->fun->call_count += 1;
	}
      if (!insert_callee (caller, callee))
	free (callee);
      else if (!is_call
	       && !callee->fun->is_func
	       && callee->fun->stack == 0)
	{
	  /* This is either a tail call or a branch from one part of
	     the function to another, ie. hot/cold section.  If the
	     destination has been called by some other function then
	     it is a separate function.  We also assume that functions
	     are not split across input files.  */
	  if (sec->owner != sym_sec->owner)
	    {
	      callee->fun->start = NULL;
	      callee->fun->is_func = TRUE;
	    }
	  else if (callee->fun->start == NULL)
	    callee->fun->start = caller;
	  else
	    {
	      /* Both already belong to chains; if their chains have
		 different roots the destination must be a real
		 function, not a hot/cold split.  */
	      struct function_info *callee_start;
	      struct function_info *caller_start;
	      callee_start = callee->fun;
	      while (callee_start->start)
		callee_start = callee_start->start;
	      caller_start = caller;
	      while (caller_start->start)
		caller_start = caller_start->start;
	      if (caller_start != callee_start)
		{
		  callee->fun->start = NULL;
		  callee->fun->is_func = TRUE;
		}
	    }
	}
    }

  return TRUE;
}
2303
2304/* Handle something like .init or .fini, which has a piece of a function.
2305 These sections are pasted together to form a single function. */
2306
static bfd_boolean
pasted_function (asection *sec, struct bfd_link_info *info)
{
  struct bfd_link_order *l;
  struct _spu_elf_section_data *sec_data;
  struct spu_elf_stack_info *sinfo;
  Elf_Internal_Sym *fake;
  struct function_info *fun, *fun_start;

  /* Fabricate a symbol covering the whole section so the piece gets a
     function_info entry of its own.  */
  fake = bfd_zmalloc (sizeof (*fake));
  if (fake == NULL)
    return FALSE;
  fake->st_value = 0;
  fake->st_size = sec->size;
  fake->st_shndx
    = _bfd_elf_section_from_bfd_section (sec->owner, sec);
  fun = maybe_insert_function (sec, fake, FALSE, FALSE);
  if (!fun)
    return FALSE;

  /* Find a function immediately preceding this section.  */
  fun_start = NULL;
  for (l = sec->output_section->map_head.link_order; l != NULL; l = l->next)
    {
      if (l->u.indirect.section == sec)
	{
	  if (fun_start != NULL)
	    {
	      /* Record the preceding piece as this one's start, and
		 link the two with a synthetic "pasted" tail call so
		 stack analysis follows the flow between pieces.  */
	      struct call_info *callee = bfd_malloc (sizeof *callee);
	      if (callee == NULL)
		return FALSE;

	      fun->start = fun_start;
	      callee->fun = fun;
	      callee->is_tail = TRUE;
	      callee->is_pasted = TRUE;
	      callee->count = 0;
	      if (!insert_callee (fun_start, callee))
		free (callee);
	      return TRUE;
	    }
	  /* SEC is the first piece: nothing precedes it.  */
	  break;
	}
      /* Remember the last function seen before SEC in output order.  */
      if (l->type == bfd_indirect_link_order
	  && (sec_data = spu_elf_section_data (l->u.indirect.section)) != NULL
	  && (sinfo = sec_data->u.i.stack_info) != NULL
	  && sinfo->num_fun != 0)
	fun_start = &sinfo->fun[sinfo->num_fun - 1];
    }

  /* Reached only when SEC was the first piece (break above) or was
     missing from the link_order list entirely.  */
  info->callbacks->einfo (_("%A link_order not found\n"), sec);
  return FALSE;
}
2360
49fa1e15
AM
2361/* Map address ranges in code sections to functions. */
2362
static bfd_boolean
discover_functions (struct bfd_link_info *info)
{
  bfd *ibfd;
  int bfd_idx;
  /* Per-input-bfd arrays of selected symbol pointers and their
     sections, kept alive across the passes below.  */
  Elf_Internal_Sym ***psym_arr;
  asection ***sec_arr;
  /* Set when some code is not covered by function symbols.  */
  bfd_boolean gaps = FALSE;

  /* Count input bfds to size the per-bfd arrays.  */
  bfd_idx = 0;
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
    bfd_idx++;

  psym_arr = bfd_zmalloc (bfd_idx * sizeof (*psym_arr));
  if (psym_arr == NULL)
    return FALSE;
  sec_arr = bfd_zmalloc (bfd_idx * sizeof (*sec_arr));
  if (sec_arr == NULL)
    return FALSE;

  /* Pass 1: per input bfd, read symbols and install properly typed
     function symbols into the per-section stack_info tables.  */
  for (ibfd = info->input_bfds, bfd_idx = 0;
       ibfd != NULL;
       ibfd = ibfd->link_next, bfd_idx++)
    {
      extern const bfd_target bfd_elf32_spu_vec;
      Elf_Internal_Shdr *symtab_hdr;
      asection *sec;
      size_t symcount;
      Elf_Internal_Sym *syms, *sy, **psyms, **psy;
      asection **psecs, **p;

      if (ibfd->xvec != &bfd_elf32_spu_vec)
	continue;

      /* Read all the symbols.  */
      symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
      symcount = symtab_hdr->sh_size / symtab_hdr->sh_entsize;
      if (symcount == 0)
	{
	  /* No symbols at all: any interesting code section here is
	     by definition a gap.  */
	  if (!gaps)
	    for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
	      if (interesting_section (sec, info->output_bfd))
		{
		  gaps = TRUE;
		  break;
		}
	  continue;
	}

      if (symtab_hdr->contents != NULL)
	{
	  /* Don't use cached symbols since the generic ELF linker
	     code only reads local symbols, and we need globals too.  */
	  free (symtab_hdr->contents);
	  symtab_hdr->contents = NULL;
	}
      syms = bfd_elf_get_elf_syms (ibfd, symtab_hdr, symcount, 0,
				   NULL, NULL, NULL);
      symtab_hdr->contents = (void *) syms;
      if (syms == NULL)
	return FALSE;

      /* Select defined function symbols that are going to be output.  */
      psyms = bfd_malloc ((symcount + 1) * sizeof (*psyms));
      if (psyms == NULL)
	return FALSE;
      psym_arr[bfd_idx] = psyms;
      psecs = bfd_malloc (symcount * sizeof (*psecs));
      if (psecs == NULL)
	return FALSE;
      sec_arr[bfd_idx] = psecs;
      for (psy = psyms, p = psecs, sy = syms; sy < syms + symcount; ++p, ++sy)
	if (ELF_ST_TYPE (sy->st_info) == STT_NOTYPE
	    || ELF_ST_TYPE (sy->st_info) == STT_FUNC)
	  {
	    asection *s;

	    *p = s = bfd_section_from_elf_index (ibfd, sy->st_shndx);
	    if (s != NULL && interesting_section (s, info->output_bfd))
	      *psy++ = sy;
	  }
      symcount = psy - psyms;
      /* NULL-terminate so the globals pass below can walk the list.  */
      *psy = NULL;

      /* Sort them by section and offset within section.  */
      sort_syms_syms = syms;
      sort_syms_psecs = psecs;
      qsort (psyms, symcount, sizeof (*psyms), sort_syms);

      /* Now inspect the function symbols.  Allocate a stack_info
	 table per section, sized by the number of symbols it has.  */
      for (psy = psyms; psy < psyms + symcount; )
	{
	  asection *s = psecs[*psy - syms];
	  Elf_Internal_Sym **psy2;

	  for (psy2 = psy; ++psy2 < psyms + symcount; )
	    if (psecs[*psy2 - syms] != s)
	      break;

	  if (!alloc_stack_info (s, psy2 - psy))
	    return FALSE;
	  psy = psy2;
	}

      /* First install info about properly typed and sized functions.
	 In an ideal world this will cover all code sections, except
	 when partitioning functions into hot and cold sections,
	 and the horrible pasted together .init and .fini functions.  */
      for (psy = psyms; psy < psyms + symcount; ++psy)
	{
	  sy = *psy;
	  if (ELF_ST_TYPE (sy->st_info) == STT_FUNC)
	    {
	      asection *s = psecs[sy - syms];
	      if (!maybe_insert_function (s, sy, FALSE, TRUE))
		return FALSE;
	    }
	}

      for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
	if (interesting_section (sec, info->output_bfd))
	  gaps |= check_function_ranges (sec, info);
    }

  if (gaps)
    {
      /* See if we can discover more function symbols by looking at
	 relocations.  */
      for (ibfd = info->input_bfds, bfd_idx = 0;
	   ibfd != NULL;
	   ibfd = ibfd->link_next, bfd_idx++)
	{
	  asection *sec;

	  if (psym_arr[bfd_idx] == NULL)
	    continue;

	  for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	    if (!mark_functions_via_relocs (sec, info, FALSE))
	      return FALSE;
	}

      /* Pass 2: sections still with gaps get untyped global symbols
	 installed as function starts.  */
      for (ibfd = info->input_bfds, bfd_idx = 0;
	   ibfd != NULL;
	   ibfd = ibfd->link_next, bfd_idx++)
	{
	  Elf_Internal_Shdr *symtab_hdr;
	  asection *sec;
	  Elf_Internal_Sym *syms, *sy, **psyms, **psy;
	  asection **psecs;

	  if ((psyms = psym_arr[bfd_idx]) == NULL)
	    continue;

	  psecs = sec_arr[bfd_idx];

	  symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
	  syms = (Elf_Internal_Sym *) symtab_hdr->contents;

	  gaps = FALSE;
	  for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
	    if (interesting_section (sec, info->output_bfd))
	      gaps |= check_function_ranges (sec, info);
	  if (!gaps)
	    continue;

	  /* Finally, install all globals.  */
	  for (psy = psyms; (sy = *psy) != NULL; ++psy)
	    {
	      asection *s;

	      s = psecs[sy - syms];

	      /* Global syms might be improperly typed functions.  */
	      if (ELF_ST_TYPE (sy->st_info) != STT_FUNC
		  && ELF_ST_BIND (sy->st_info) == STB_GLOBAL)
		{
		  if (!maybe_insert_function (s, sy, FALSE, FALSE))
		    return FALSE;
		}
	    }
	}

      for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
	{
	  extern const bfd_target bfd_elf32_spu_vec;
	  asection *sec;

	  if (ibfd->xvec != &bfd_elf32_spu_vec)
	    continue;

	  /* Some of the symbols we've installed as marking the
	     beginning of functions may have a size of zero.  Extend
	     the range of such functions to the beginning of the
	     next symbol of interest.  */
	  for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	    if (interesting_section (sec, info->output_bfd))
	      {
		struct _spu_elf_section_data *sec_data;
		struct spu_elf_stack_info *sinfo;

		sec_data = spu_elf_section_data (sec);
		sinfo = sec_data->u.i.stack_info;
		if (sinfo != NULL)
		  {
		    int fun_idx;
		    bfd_vma hi = sec->size;

		    /* Walk backwards so each entry's hi becomes the
		       following entry's lo.  */
		    for (fun_idx = sinfo->num_fun; --fun_idx >= 0; )
		      {
			sinfo->fun[fun_idx].hi = hi;
			hi = sinfo->fun[fun_idx].lo;
		      }
		  }
		/* No symbols in this section.  Must be .init or .fini
		   or something similar.  */
		else if (!pasted_function (sec, info))
		  return FALSE;
	      }
	}
    }

  /* Release the per-bfd scratch arrays.  */
  for (ibfd = info->input_bfds, bfd_idx = 0;
       ibfd != NULL;
       ibfd = ibfd->link_next, bfd_idx++)
    {
      if (psym_arr[bfd_idx] == NULL)
	continue;

      free (psym_arr[bfd_idx]);
      free (sec_arr[bfd_idx]);
    }

  free (psym_arr);
  free (sec_arr);

  return TRUE;
}
2602
055ed83b
AM
2603/* Iterate over all function_info we have collected, calling DOIT on
2604 each node if ROOT_ONLY is false. Only call DOIT on root nodes
2605 if ROOT_ONLY. */
2606
2607static bfd_boolean
2608for_each_node (bfd_boolean (*doit) (struct function_info *,
2609 struct bfd_link_info *,
2610 void *),
2611 struct bfd_link_info *info,
2612 void *param,
2613 int root_only)
2614{
2615 bfd *ibfd;
2616
2617 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
2618 {
2619 extern const bfd_target bfd_elf32_spu_vec;
2620 asection *sec;
2621
2622 if (ibfd->xvec != &bfd_elf32_spu_vec)
2623 continue;
2624
2625 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
2626 {
2627 struct _spu_elf_section_data *sec_data;
2628 struct spu_elf_stack_info *sinfo;
2629
2630 if ((sec_data = spu_elf_section_data (sec)) != NULL
2631 && (sinfo = sec_data->u.i.stack_info) != NULL)
2632 {
2633 int i;
2634 for (i = 0; i < sinfo->num_fun; ++i)
2635 if (!root_only || !sinfo->fun[i].non_root)
2636 if (!doit (&sinfo->fun[i], info, param))
2637 return FALSE;
2638 }
2639 }
2640 }
2641 return TRUE;
2642}
2643
2644/* Transfer call info attached to struct function_info entries for
2645 all of a given function's sections to the first entry. */
2646
2647static bfd_boolean
2648transfer_calls (struct function_info *fun,
2649 struct bfd_link_info *info ATTRIBUTE_UNUSED,
2650 void *param ATTRIBUTE_UNUSED)
2651{
2652 struct function_info *start = fun->start;
2653
2654 if (start != NULL)
2655 {
2656 struct call_info *call, *call_next;
2657
2658 while (start->start != NULL)
2659 start = start->start;
2660 for (call = fun->call_list; call != NULL; call = call_next)
2661 {
2662 call_next = call->next;
2663 if (!insert_callee (start, call))
2664 free (call);
2665 }
2666 fun->call_list = NULL;
2667 }
2668 return TRUE;
2669}
2670
49fa1e15
AM
2671/* Mark nodes in the call graph that are called by some other node. */
2672
055ed83b
AM
2673static bfd_boolean
2674mark_non_root (struct function_info *fun,
2675 struct bfd_link_info *info ATTRIBUTE_UNUSED,
2676 void *param ATTRIBUTE_UNUSED)
49fa1e15
AM
2677{
2678 struct call_info *call;
2679
055ed83b
AM
2680 if (fun->visit1)
2681 return TRUE;
49fa1e15
AM
2682 fun->visit1 = TRUE;
2683 for (call = fun->call_list; call; call = call->next)
2684 {
2685 call->fun->non_root = TRUE;
055ed83b 2686 mark_non_root (call->fun, 0, 0);
49fa1e15 2687 }
055ed83b 2688 return TRUE;
49fa1e15
AM
2689}
2690
9dcc4794 2691/* Remove cycles from the call graph. Set depth of nodes. */
49fa1e15 2692
055ed83b
AM
static bfd_boolean
remove_cycles (struct function_info *fun,
	       struct bfd_link_info *info,
	       void *param)
{
  struct call_info **callp, *call;
  /* PARAM carries the depth in and the subtree's max depth out.  */
  unsigned int depth = *(unsigned int *) param;
  unsigned int max_depth = depth;

  fun->depth = depth;
  /* visit2 marks "ever visited"; marking marks "on the current DFS
     path", which is how back edges (cycles) are detected.  */
  fun->visit2 = TRUE;
  fun->marking = TRUE;

  callp = &fun->call_list;
  while ((call = *callp) != NULL)
    {
      if (!call->fun->visit2)
	{
	  /* Pasted pieces are the same function, so they do not add
	     a level of call depth.  */
	  call->max_depth = depth + !call->is_pasted;
	  if (!remove_cycles (call->fun, info, &call->max_depth))
	    return FALSE;
	  if (max_depth < call->max_depth)
	    max_depth = call->max_depth;
	}
      else if (call->fun->marking)
	{
	  /* Back edge: drop this call to break the cycle.  */
	  if (!spu_hash_table (info)->auto_overlay)
	    {
	      const char *f1 = func_name (fun);
	      const char *f2 = func_name (call->fun);

	      info->callbacks->info (_("Stack analysis will ignore the call "
				       "from %s to %s\n"),
				     f1, f2);
	    }
	  *callp = call->next;
	  free (call);
	  continue;
	}
      callp = &call->next;
    }
  fun->marking = FALSE;
  *(unsigned int *) param = max_depth;
  return TRUE;
}
2738
667f3338
AM
2739/* Check that we actually visited all nodes in remove_cycles. If we
2740 didn't, then there is some cycle in the call graph not attached to
2741 any root node. Arbitrarily choose a node in the cycle as a new
2742 root and break the cycle. */
2743
2744static bfd_boolean
2745mark_detached_root (struct function_info *fun,
2746 struct bfd_link_info *info,
2747 void *param)
2748{
2749 if (fun->visit2)
2750 return TRUE;
2751 fun->non_root = FALSE;
2752 *(unsigned int *) param = 0;
2753 return remove_cycles (fun, info, param);
2754}
2755
49fa1e15
AM
2756/* Populate call_list for each function. */
2757
static bfd_boolean
build_call_tree (struct bfd_link_info *info)
{
  bfd *ibfd;
  unsigned int depth;

  /* Scan relocs in all SPU code sections, adding call graph edges.  */
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
    {
      extern const bfd_target bfd_elf32_spu_vec;
      asection *sec;

      if (ibfd->xvec != &bfd_elf32_spu_vec)
	continue;

      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	if (!mark_functions_via_relocs (sec, info, TRUE))
	  return FALSE;
    }

  /* Transfer call info from hot/cold section part of function
     to main entry.  */
  if (!spu_hash_table (info)->auto_overlay
      && !for_each_node (transfer_calls, info, 0, FALSE))
    return FALSE;

  /* Find the call graph root(s).  */
  if (!for_each_node (mark_non_root, info, 0, FALSE))
    return FALSE;

  /* Remove cycles from the call graph.  We start from the root node(s)
     so that we break cycles in a reasonable place.  */
  depth = 0;
  if (!for_each_node (remove_cycles, info, &depth, TRUE))
    return FALSE;

  /* Handle any cycle not attached to a root (see mark_detached_root).  */
  return for_each_node (mark_detached_root, info, &depth, FALSE);
}
2795
2796/* qsort predicate to sort calls by max_depth then count. */
2797
2798static int
2799sort_calls (const void *a, const void *b)
2800{
2801 struct call_info *const *c1 = a;
2802 struct call_info *const *c2 = b;
2803 int delta;
2804
2805 delta = (*c2)->max_depth - (*c1)->max_depth;
2806 if (delta != 0)
2807 return delta;
2808
2809 delta = (*c2)->count - (*c1)->count;
2810 if (delta != 0)
2811 return delta;
2812
667f3338 2813 return (char *) c1 - (char *) c2;
9dcc4794
AM
2814}
2815
/* Parameter block for mark_overlay_section.  */
struct _mos_param {
  /* Largest text-plus-rodata size, in bytes, seen for any single
     function marked for overlay placement.  */
  unsigned int max_overlay_size;
};
2819
/* Set linker_mark and gc_mark on any sections that we will put in
   overlays.  These flags are used by the generic ELF linker, but we
   won't be continuing on to bfd_elf_final_link so it is OK to use
   them.  linker_mark is clear before we get here.  Set segment_mark
   on sections that are part of a pasted function (excluding the last
   section).

   Set up function rodata section if --overlay-rodata.  We don't
   currently include merged string constant rodata sections since
   they may be shared by functions outside the overlay.

   Sort the call graph so that the deepest nodes will be visited
   first.  */

static bfd_boolean
mark_overlay_section (struct function_info *fun,
		      struct bfd_link_info *info,
		      void *param)
{
  struct call_info *call;
  unsigned int count;
  struct _mos_param *mos_param = param;

  if (fun->visit4)
    return TRUE;

  fun->visit4 = TRUE;
  if (!fun->sec->linker_mark)
    {
      unsigned int size;

      fun->sec->linker_mark = 1;
      fun->sec->gc_mark = 1;
      fun->sec->segment_mark = 0;
      /* Ensure SEC_CODE is set on this text section (it ought to
	 be!), and SEC_CODE is clear on rodata sections.  We use
	 this flag to differentiate the two overlay section types.  */
      fun->sec->flags |= SEC_CODE;

      if (spu_hash_table (info)->auto_overlay & OVERLAY_RODATA)
	{
	  char *name = NULL;

	  /* Find the rodata section corresponding to this function's
	     text section.  */
	  if (strcmp (fun->sec->name, ".text") == 0)
	    {
	      /* .text -> .rodata  */
	      name = bfd_malloc (sizeof (".rodata"));
	      if (name == NULL)
		return FALSE;
	      memcpy (name, ".rodata", sizeof (".rodata"));
	    }
	  else if (strncmp (fun->sec->name, ".text.", 6) == 0)
	    {
	      /* .text.foo -> .rodata.foo; the new name is two chars
		 longer plus the terminating NUL, hence len + 3.  */
	      size_t len = strlen (fun->sec->name);
	      name = bfd_malloc (len + 3);
	      if (name == NULL)
		return FALSE;
	      memcpy (name, ".rodata", sizeof (".rodata"));
	      memcpy (name + 7, fun->sec->name + 5, len - 4);
	    }
	  else if (strncmp (fun->sec->name, ".gnu.linkonce.t.", 16) == 0)
	    {
	      /* .gnu.linkonce.t.foo -> .gnu.linkonce.r.foo (same
		 length; just flip the 't' at index 14 to 'r').  */
	      size_t len = strlen (fun->sec->name) + 1;
	      name = bfd_malloc (len);
	      if (name == NULL)
		return FALSE;
	      memcpy (name, fun->sec->name, len);
	      name[14] = 'r';
	    }

	  if (name != NULL)
	    {
	      asection *rodata = NULL;
	      asection *group_sec = elf_section_data (fun->sec)->next_in_group;
	      /* For grouped sections, search the group; otherwise look
		 the name up in the owning bfd.  */
	      if (group_sec == NULL)
		rodata = bfd_get_section_by_name (fun->sec->owner, name);
	      else
		while (group_sec != NULL && group_sec != fun->sec)
		  {
		    if (strcmp (group_sec->name, name) == 0)
		      {
			rodata = group_sec;
			break;
		      }
		    group_sec = elf_section_data (group_sec)->next_in_group;
		  }
	      fun->rodata = rodata;
	      if (fun->rodata)
		{
		  fun->rodata->linker_mark = 1;
		  fun->rodata->gc_mark = 1;
		  fun->rodata->flags &= ~SEC_CODE;
		}
	      free (name);
	    }
	}
      /* Track the largest overlay candidate (text + rodata).  */
      size = fun->sec->size;
      if (fun->rodata)
	size += fun->rodata->size;
      if (mos_param->max_overlay_size < size)
	mos_param->max_overlay_size = size;
    }

  for (count = 0, call = fun->call_list; call != NULL; call = call->next)
    count += 1;

  if (count > 1)
    {
      /* Sort the call list (deepest/most-frequent first) via a
	 temporary array, then relink in sorted order.  */
      struct call_info **calls = bfd_malloc (count * sizeof (*calls));
      if (calls == NULL)
	return FALSE;

      for (count = 0, call = fun->call_list; call != NULL; call = call->next)
	calls[count++] = call;

      qsort (calls, count, sizeof (*calls), sort_calls);

      fun->call_list = NULL;
      while (count != 0)
	{
	  --count;
	  calls[count]->next = fun->call_list;
	  fun->call_list = calls[count];
	}
      free (calls);
    }

  for (call = fun->call_list; call != NULL; call = call->next)
    {
      if (call->is_pasted)
	{
	  /* There can only be one is_pasted call per function_info.  */
	  BFD_ASSERT (!fun->sec->segment_mark);
	  fun->sec->segment_mark = 1;
	}
      if (!mark_overlay_section (call->fun, info, param))
	return FALSE;
    }

  /* Don't put entry code into an overlay.  The overlay manager needs
     a stack!  */
  if (fun->lo + fun->sec->output_offset + fun->sec->output_section->vma
      == info->output_bfd->start_address)
    {
      fun->sec->linker_mark = 0;
      if (fun->rodata != NULL)
	fun->rodata->linker_mark = 0;
    }
  return TRUE;
}
2970
99302af9
AM
2971/* If non-zero then unmark functions called from those within sections
2972 that we need to unmark. Unfortunately this isn't reliable since the
2973 call graph cannot know the destination of function pointer calls. */
2974#define RECURSE_UNMARK 0
2975
9dcc4794
AM
/* Parameter block for unmark_overlay_section.  */
struct _uos_param {
  /* Input section to keep out of overlays (e.g. the section holding
     the overlay manager).  */
  asection *exclude_input_section;
  /* Output section likewise kept out of overlays.  */
  asection *exclude_output_section;
  /* Nesting counter, only meaningful when RECURSE_UNMARK is non-zero.  */
  unsigned long clearing;
};
2981
/* Undo some of mark_overlay_section's work.  Clear linker_mark on the
   excluded sections and, when RECURSE_UNMARK, on everything they call
   as well.  */

static bfd_boolean
unmark_overlay_section (struct function_info *fun,
			struct bfd_link_info *info,
			void *param)
{
  struct call_info *call;
  struct _uos_param *uos_param = param;
  unsigned int excluded = 0;

  if (fun->visit5)
    return TRUE;

  fun->visit5 = TRUE;

  excluded = 0;
  if (fun->sec == uos_param->exclude_input_section
      || fun->sec->output_section == uos_param->exclude_output_section)
    excluded = 1;

  /* With RECURSE_UNMARK, "clearing" stays non-zero for the whole
     subtree below an excluded node; without it, only the excluded
     node itself is unmarked.  */
  if (RECURSE_UNMARK)
    uos_param->clearing += excluded;

  if (RECURSE_UNMARK ? uos_param->clearing : excluded)
    {
      fun->sec->linker_mark = 0;
      if (fun->rodata)
	fun->rodata->linker_mark = 0;
    }

  for (call = fun->call_list; call != NULL; call = call->next)
    if (!unmark_overlay_section (call->fun, info, param))
      return FALSE;

  /* Restore the nesting count on the way back up.  */
  if (RECURSE_UNMARK)
    uos_param->clearing -= excluded;
  return TRUE;
}
3021
/* Parameter block for collect_lib_sections.  */
struct _cl_param {
  /* Space available for non-overlay "library" code, in bytes.  */
  unsigned int lib_size;
  /* Output cursor: pairs of (text, rodata-or-NULL) sections.  */
  asection **lib_sections;
};
3026
/* Add sections we have marked as belonging to overlays to an array
   for consideration as non-overlay sections.  The array consist of
   pairs of sections, (text,rodata), for functions in the call graph.  */

static bfd_boolean
collect_lib_sections (struct function_info *fun,
		      struct bfd_link_info *info,
		      void *param)
{
  struct _cl_param *lib_param = param;
  struct call_info *call;
  unsigned int size;

  if (fun->visit6)
    return TRUE;

  /* Only overlay-marked, live, non-pasted sections qualify.  */
  fun->visit6 = TRUE;
  if (!fun->sec->linker_mark || !fun->sec->gc_mark || fun->sec->segment_mark)
    return TRUE;

  /* Skip anything too big to ever fit in the library area.  */
  size = fun->sec->size;
  if (fun->rodata)
    size += fun->rodata->size;
  if (size > lib_param->lib_size)
    return TRUE;

  /* Emit the (text, rodata-or-NULL) pair; clearing gc_mark prevents
     the same section being collected twice.  */
  *lib_param->lib_sections++ = fun->sec;
  fun->sec->gc_mark = 0;
  if (fun->rodata && fun->rodata->linker_mark && fun->rodata->gc_mark)
    {
      *lib_param->lib_sections++ = fun->rodata;
      fun->rodata->gc_mark = 0;
    }
  else
    *lib_param->lib_sections++ = NULL;

  /* Recurse over callees.  Ignoring the return value is safe: this
     function returns TRUE on every path.  */
  for (call = fun->call_list; call != NULL; call = call->next)
    collect_lib_sections (call->fun, info, param);

  return TRUE;
}
3068
3069/* qsort predicate to sort sections by call count. */
3070
3071static int
3072sort_lib (const void *a, const void *b)
3073{
3074 asection *const *s1 = a;
3075 asection *const *s2 = b;
3076 struct _spu_elf_section_data *sec_data;
3077 struct spu_elf_stack_info *sinfo;
3078 int delta;
3079
3080 delta = 0;
3081 if ((sec_data = spu_elf_section_data (*s1)) != NULL
3082 && (sinfo = sec_data->u.i.stack_info) != NULL)
3083 {
3084 int i;
3085 for (i = 0; i < sinfo->num_fun; ++i)
3086 delta -= sinfo->fun[i].call_count;
3087 }
3088
3089 if ((sec_data = spu_elf_section_data (*s2)) != NULL
3090 && (sinfo = sec_data->u.i.stack_info) != NULL)
3091 {
3092 int i;
3093 for (i = 0; i < sinfo->num_fun; ++i)
3094 delta += sinfo->fun[i].call_count;
3095 }
3096
3097 if (delta != 0)
3098 return delta;
3099
3100 return s1 - s2;
3101}
3102
3103/* Remove some sections from those marked to be in overlays. Choose
3104 those that are called from many places, likely library functions. */
3105
3106static unsigned int
3107auto_ovl_lib_functions (struct bfd_link_info *info, unsigned int lib_size)
3108{
3109 bfd *ibfd;
3110 asection **lib_sections;
3111 unsigned int i, lib_count;
3112 struct _cl_param collect_lib_param;
3113 struct function_info dummy_caller;
3114
3115 memset (&dummy_caller, 0, sizeof (dummy_caller));
3116 lib_count = 0;
3117 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
3118 {
3119 extern const bfd_target bfd_elf32_spu_vec;
3120 asection *sec;
3121
3122 if (ibfd->xvec != &bfd_elf32_spu_vec)
3123 continue;
3124
3125 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
3126 if (sec->linker_mark
3127 && sec->size < lib_size
3128 && (sec->flags & SEC_CODE) != 0)
3129 lib_count += 1;
3130 }
3131 lib_sections = bfd_malloc (lib_count * 2 * sizeof (*lib_sections));
3132 if (lib_sections == NULL)
3133 return (unsigned int) -1;
3134 collect_lib_param.lib_size = lib_size;
3135 collect_lib_param.lib_sections = lib_sections;
3136 if (!for_each_node (collect_lib_sections, info, &collect_lib_param,
3137 TRUE))
3138 return (unsigned int) -1;
3139 lib_count = (collect_lib_param.lib_sections - lib_sections) / 2;
3140
3141 /* Sort sections so that those with the most calls are first. */
3142 if (lib_count > 1)
3143 qsort (lib_sections, lib_count, 2 * sizeof (*lib_sections), sort_lib);
3144
3145 for (i = 0; i < lib_count; i++)
3146 {
3147 unsigned int tmp, stub_size;
3148 asection *sec;
3149 struct _spu_elf_section_data *sec_data;
3150 struct spu_elf_stack_info *sinfo;
3151
3152 sec = lib_sections[2 * i];
3153 /* If this section is OK, its size must be less than lib_size. */
3154 tmp = sec->size;
3155 /* If it has a rodata section, then add that too. */
3156 if (lib_sections[2 * i + 1])
3157 tmp += lib_sections[2 * i + 1]->size;
3158 /* Add any new overlay call stubs needed by the section. */
3159 stub_size = 0;
3160 if (tmp < lib_size
3161 && (sec_data = spu_elf_section_data (sec)) != NULL
3162 && (sinfo = sec_data->u.i.stack_info) != NULL)
3163 {
3164 int k;
3165 struct call_info *call;
3166
3167 for (k = 0; k < sinfo->num_fun; ++k)
3168 for (call = sinfo->fun[k].call_list; call; call = call->next)
3169 if (call->fun->sec->linker_mark)
3170 {
3171 struct call_info *p;
3172 for (p = dummy_caller.call_list; p; p = p->next)
3173 if (p->fun == call->fun)
3174 break;
3175 if (!p)
3176 stub_size += OVL_STUB_SIZE;
3177 }
3178 }
3179 if (tmp + stub_size < lib_size)
3180 {
3181 struct call_info **pp, *p;
3182
3183 /* This section fits. Mark it as non-overlay. */
3184 lib_sections[2 * i]->linker_mark = 0;
3185 if (lib_sections[2 * i + 1])
3186 lib_sections[2 * i + 1]->linker_mark = 0;
3187 lib_size -= tmp + stub_size;
3188 /* Call stubs to the section we just added are no longer
3189 needed. */
3190 pp = &dummy_caller.call_list;
3191 while ((p = *pp) != NULL)
3192 if (!p->fun->sec->linker_mark)
3193 {
3194 lib_size += OVL_STUB_SIZE;
3195 *pp = p->next;
3196 free (p);
3197 }
3198 else
3199 pp = &p->next;
3200 /* Add new call stubs to dummy_caller. */
3201 if ((sec_data = spu_elf_section_data (sec)) != NULL
3202 && (sinfo = sec_data->u.i.stack_info) != NULL)
3203 {
3204 int k;
3205 struct call_info *call;
3206
3207 for (k = 0; k < sinfo->num_fun; ++k)
3208 for (call = sinfo->fun[k].call_list;
3209 call;
3210 call = call->next)
3211 if (call->fun->sec->linker_mark)
3212 {
3213 struct call_info *callee;
3214 callee = bfd_malloc (sizeof (*callee));
3215 if (callee == NULL)
3216 return (unsigned int) -1;
3217 *callee = *call;
3218 if (!insert_callee (&dummy_caller, callee))
3219 free (callee);
3220 }
3221 }
3222 }
3223 }
3224 while (dummy_caller.call_list != NULL)
3225 {
3226 struct call_info *call = dummy_caller.call_list;
3227 dummy_caller.call_list = call->next;
3228 free (call);
3229 }
3230 for (i = 0; i < 2 * lib_count; i++)
3231 if (lib_sections[i])
3232 lib_sections[i]->gc_mark = 1;
3233 free (lib_sections);
3234 return lib_size;
3235}
3236
/* Build an array of overlay sections.  The deepest node's section is
   added first, then its parent node's section, then everything called
   from the parent section.  The idea being to group sections to
   minimise calls between different overlays.  */

static bfd_boolean
collect_overlays (struct function_info *fun,
                  struct bfd_link_info *info,
                  void *param)
{
  struct call_info *call;
  bfd_boolean added_fun;
  /* PARAM is a pointer to the output cursor into the sections array;
     sections are appended by advancing *ovly_sections.  */
  asection ***ovly_sections = param;

  /* visit7 guards against visiting a node twice when the call graph
     reaches it by more than one path.  */
  if (fun->visit7)
    return TRUE;

  fun->visit7 = TRUE;
  /* Descend first via the initial non-pasted call, so the deepest
     node's section is emitted before its callers'.  */
  for (call = fun->call_list; call != NULL; call = call->next)
    if (!call->is_pasted)
      {
        if (!collect_overlays (call->fun, info, ovly_sections))
          return FALSE;
        break;
      }

  added_fun = FALSE;
  if (fun->sec->linker_mark && fun->sec->gc_mark)
    {
      /* Clearing gc_mark records that the section has been placed.  */
      fun->sec->gc_mark = 0;
      *(*ovly_sections)++ = fun->sec;
      if (fun->rodata && fun->rodata->linker_mark && fun->rodata->gc_mark)
        {
          fun->rodata->gc_mark = 0;
          *(*ovly_sections)++ = fun->rodata;
        }
      else
        /* Keep the array in (code, rodata) pairs; NULL means no rodata.  */
        *(*ovly_sections)++ = NULL;
      added_fun = TRUE;

      /* Pasted sections must stay with the first section.  We don't
         put pasted sections in the array, just the first section.
         Mark subsequent sections as already considered.  */
      if (fun->sec->segment_mark)
        {
          struct function_info *call_fun = fun;
          do
            {
              for (call = call_fun->call_list; call != NULL; call = call->next)
                if (call->is_pasted)
                  {
                    call_fun = call->fun;
                    call_fun->sec->gc_mark = 0;
                    if (call_fun->rodata)
                      call_fun->rodata->gc_mark = 0;
                    break;
                  }
              if (call == NULL)
                abort ();
            }
          while (call_fun->sec->segment_mark);
        }
    }

  /* Now visit everything else this function calls.  */
  for (call = fun->call_list; call != NULL; call = call->next)
    if (!collect_overlays (call->fun, info, ovly_sections))
      return FALSE;

  if (added_fun)
    {
      struct _spu_elf_section_data *sec_data;
      struct spu_elf_stack_info *sinfo;

      /* Also visit the other functions that live in the section we
         just added.  */
      if ((sec_data = spu_elf_section_data (fun->sec)) != NULL
          && (sinfo = sec_data->u.i.stack_info) != NULL)
        {
          int i;
          for (i = 0; i < sinfo->num_fun; ++i)
            if (!collect_overlays (&sinfo->fun[i], info, ovly_sections))
              return FALSE;
        }
    }

  return TRUE;
}
3322
/* Parameter block passed to sum_stack via for_each_node.  */

struct _sum_stack_param {
  /* Cumulative stack of the function just visited (output).  */
  size_t cum_stack;
  /* Largest cumulative stack seen over all call graph roots.  */
  size_t overall_stack;
  /* Non-zero to define a __stack_* absolute symbol per function.  */
  bfd_boolean emit_stack_syms;
};
3328
/* Descend the call graph for FUN, accumulating total stack required.  */

static bfd_boolean
sum_stack (struct function_info *fun,
           struct bfd_link_info *info,
           void *param)
{
  struct call_info *call;
  struct function_info *max;
  size_t stack, cum_stack;
  const char *f1;
  bfd_boolean has_call;
  struct _sum_stack_param *sum_stack_param = param;
  struct spu_link_hash_table *htab;

  cum_stack = fun->stack;
  sum_stack_param->cum_stack = cum_stack;
  /* visit3 means this node was already summed; fun->stack then already
     holds its cumulative stack (see below).  */
  if (fun->visit3)
    return TRUE;

  has_call = FALSE;
  /* max tracks the callee on the deepest stack path, for annotation.  */
  max = NULL;
  for (call = fun->call_list; call; call = call->next)
    {
      if (!call->is_pasted)
        has_call = TRUE;
      if (!sum_stack (call->fun, info, sum_stack_param))
        return FALSE;
      stack = sum_stack_param->cum_stack;
      /* Include caller stack for normal calls, don't do so for
         tail calls.  fun->stack here is local stack usage for
         this function.  */
      if (!call->is_tail || call->is_pasted || call->fun->start != NULL)
        stack += fun->stack;
      if (cum_stack < stack)
        {
          cum_stack = stack;
          max = call->fun;
        }
    }

  sum_stack_param->cum_stack = cum_stack;
  stack = fun->stack;
  /* Now fun->stack holds cumulative stack.  */
  fun->stack = cum_stack;
  fun->visit3 = TRUE;

  if (!fun->non_root
      && sum_stack_param->overall_stack < cum_stack)
    sum_stack_param->overall_stack = cum_stack;

  htab = spu_hash_table (info);
  /* When auto-overlaying we only want the numbers, not the report or
     the __stack_* symbols.  */
  if (htab->auto_overlay)
    return TRUE;

  f1 = func_name (fun);
  if (!fun->non_root)
    info->callbacks->info (_(" %s: 0x%v\n"), f1, (bfd_vma) cum_stack);
  info->callbacks->minfo (_("%s: 0x%v 0x%v\n"),
                          f1, (bfd_vma) stack, (bfd_vma) cum_stack);

  if (has_call)
    {
      info->callbacks->minfo (_(" calls:\n"));
      for (call = fun->call_list; call; call = call->next)
        if (!call->is_pasted)
          {
            const char *f2 = func_name (call->fun);
            /* '*' marks the max-stack path, 't' marks tail calls.  */
            const char *ann1 = call->fun == max ? "*" : " ";
            const char *ann2 = call->is_tail ? "t" : " ";

            info->callbacks->minfo (_(" %s%s %s\n"), ann1, ann2, f2);
          }
    }

  if (sum_stack_param->emit_stack_syms)
    {
      /* Define an absolute symbol holding the cumulative stack usage,
         named __stack_<fn> for globals or __stack_<secid>_<fn> for
         locals.  18 covers "__stack_" plus 8 hex digits, '_', NUL.  */
      char *name = bfd_malloc (18 + strlen (f1));
      struct elf_link_hash_entry *h;

      if (name == NULL)
        return FALSE;

      if (fun->global || ELF_ST_BIND (fun->u.sym->st_info) == STB_GLOBAL)
        sprintf (name, "__stack_%s", f1);
      else
        sprintf (name, "__stack_%x_%s", fun->sec->id & 0xffffffff, f1);

      h = elf_link_hash_lookup (&htab->elf, name, TRUE, TRUE, FALSE);
      free (name);
      /* Only define the symbol if the user hasn't already done so.  */
      if (h != NULL
          && (h->root.type == bfd_link_hash_new
              || h->root.type == bfd_link_hash_undefined
              || h->root.type == bfd_link_hash_undefweak))
        {
          h->root.type = bfd_link_hash_defined;
          h->root.u.def.section = bfd_abs_section_ptr;
          h->root.u.def.value = cum_stack;
          h->size = 0;
          h->type = 0;
          h->ref_regular = 1;
          h->def_regular = 1;
          h->ref_regular_nonweak = 1;
          h->forced_local = 1;
          h->non_elf = 0;
        }
    }

  return TRUE;
}
3439
9dcc4794
AM
3440/* SEC is part of a pasted function. Return the call_info for the
3441 next section of this function. */
3442
3443static struct call_info *
3444find_pasted_call (asection *sec)
3445{
3446 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
3447 struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
3448 struct call_info *call;
3449 int k;
3450
3451 for (k = 0; k < sinfo->num_fun; ++k)
3452 for (call = sinfo->fun[k].call_list; call != NULL; call = call->next)
3453 if (call->is_pasted)
3454 return call;
3455 abort ();
3456 return 0;
3457}
3458
3459/* qsort predicate to sort bfds by file name. */
3460
3461static int
3462sort_bfds (const void *a, const void *b)
3463{
3464 bfd *const *abfd1 = a;
3465 bfd *const *abfd2 = b;
3466
3467 return strcmp ((*abfd1)->filename, (*abfd2)->filename);
3468}
3469
/* Handle --auto-overlay.  Partition the marked overlay sections into
   numbered overlay regions, write a linker script describing them,
   and either re-run the link or exit so the user can.  Never returns:
   exits 0 on success, 1 on error.  */

static void spu_elf_auto_overlay (struct bfd_link_info *, void (*) (void))
     ATTRIBUTE_NORETURN;

static void
spu_elf_auto_overlay (struct bfd_link_info *info,
                      void (*spu_elf_load_ovl_mgr) (void))
{
  bfd *ibfd;
  bfd **bfd_arr;
  struct elf_segment_map *m;
  unsigned int fixed_size, lo, hi;
  struct spu_link_hash_table *htab;
  unsigned int base, i, count, bfd_count;
  int ovlynum;
  asection **ovly_sections, **ovly_p;
  FILE *script;
  unsigned int total_overlay_size, overlay_size;
  struct elf_link_hash_entry *h;
  struct _mos_param mos_param;
  struct _uos_param uos_param;
  struct function_info dummy_caller;

  /* Find the extents of our loadable image.  */
  lo = (unsigned int) -1;
  hi = 0;
  for (m = elf_tdata (info->output_bfd)->segment_map; m != NULL; m = m->next)
    if (m->p_type == PT_LOAD)
      for (i = 0; i < m->count; i++)
        if (m->sections[i]->size != 0)
          {
            if (m->sections[i]->vma < lo)
              lo = m->sections[i]->vma;
            if (m->sections[i]->vma + m->sections[i]->size - 1 > hi)
              hi = m->sections[i]->vma + m->sections[i]->size - 1;
          }
  fixed_size = hi + 1 - lo;

  if (!discover_functions (info))
    goto err_exit;

  if (!build_call_tree (info))
    goto err_exit;

  uos_param.exclude_input_section = 0;
  uos_param.exclude_output_section
    = bfd_get_section_by_name (info->output_bfd, ".interrupt");

  htab = spu_hash_table (info);
  h = elf_link_hash_lookup (&htab->elf, "__ovly_load",
                            FALSE, FALSE, FALSE);
  if (h != NULL
      && (h->root.type == bfd_link_hash_defined
          || h->root.type == bfd_link_hash_defweak)
      && h->def_regular)
    {
      /* We have a user supplied overlay manager.  */
      uos_param.exclude_input_section = h->root.u.def.section;
    }
  else
    {
      /* If no user overlay manager, spu_elf_load_ovl_mgr will add our
         builtin version to .text, and will adjust .text size.  */
      asection *text = bfd_get_section_by_name (info->output_bfd, ".text");
      if (text != NULL)
        fixed_size -= text->size;
      spu_elf_load_ovl_mgr ();
      text = bfd_get_section_by_name (info->output_bfd, ".text");
      if (text != NULL)
        fixed_size += text->size;
    }

  /* Mark overlay sections, and find max overlay section size.  */
  mos_param.max_overlay_size = 0;
  if (!for_each_node (mark_overlay_section, info, &mos_param, TRUE))
    goto err_exit;

  /* We can't put the overlay manager or interrupt routines in
     overlays.  */
  uos_param.clearing = 0;
  if ((uos_param.exclude_input_section
       || uos_param.exclude_output_section)
      && !for_each_node (unmark_overlay_section, info, &uos_param, TRUE))
    goto err_exit;

  bfd_count = 0;
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
    ++bfd_count;
  bfd_arr = bfd_malloc (bfd_count * sizeof (*bfd_arr));
  if (bfd_arr == NULL)
    goto err_exit;

  /* Count overlay sections, and subtract their sizes from "fixed_size".  */
  count = 0;
  bfd_count = 0;
  total_overlay_size = 0;
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
    {
      extern const bfd_target bfd_elf32_spu_vec;
      asection *sec;
      unsigned int old_count;

      if (ibfd->xvec != &bfd_elf32_spu_vec)
        continue;

      old_count = count;
      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
        if (sec->linker_mark)
          {
            if ((sec->flags & SEC_CODE) != 0)
              count += 1;
            fixed_size -= sec->size;
            total_overlay_size += sec->size;
          }
      /* Only remember bfds that contribute overlay sections.  */
      if (count != old_count)
        bfd_arr[bfd_count++] = ibfd;
    }

  /* Since the overlay link script selects sections by file name and
     section name, ensure that file names are unique.  */
  if (bfd_count > 1)
    {
      bfd_boolean ok = TRUE;

      qsort (bfd_arr, bfd_count, sizeof (*bfd_arr), sort_bfds);
      for (i = 1; i < bfd_count; ++i)
        if (strcmp (bfd_arr[i - 1]->filename, bfd_arr[i]->filename) == 0)
          {
            /* Same name is OK when the archive differs, since the
               script can qualify by archive.  */
            if (bfd_arr[i - 1]->my_archive == bfd_arr[i]->my_archive)
              {
                if (bfd_arr[i - 1]->my_archive && bfd_arr[i]->my_archive)
                  info->callbacks->einfo (_("%s duplicated in %s\n"),
                                          bfd_arr[i]->filename,
                                          bfd_arr[i]->my_archive->filename);
                else
                  info->callbacks->einfo (_("%s duplicated\n"),
                                          bfd_arr[i]->filename);
                ok = FALSE;
              }
          }
      if (!ok)
        {
          info->callbacks->einfo (_("sorry, no support for duplicate "
                                    "object files in auto-overlay script\n"));
          bfd_set_error (bfd_error_bad_value);
          goto err_exit;
        }
    }
  free (bfd_arr);

  /* If the user didn't reserve stack space, estimate it from the call
     graph.  */
  if (htab->reserved == 0)
    {
      struct _sum_stack_param sum_stack_param;

      sum_stack_param.emit_stack_syms = 0;
      sum_stack_param.overall_stack = 0;
      if (!for_each_node (sum_stack, info, &sum_stack_param, TRUE))
        goto err_exit;
      htab->reserved = sum_stack_param.overall_stack + htab->extra_stack_space;
    }
  fixed_size += htab->reserved;
  fixed_size += htab->non_ovly_stub * OVL_STUB_SIZE;
  if (fixed_size + mos_param.max_overlay_size <= htab->local_store)
    {
      /* Guess number of overlays.  Assuming overlay buffer is on
         average only half full should be conservative.  */
      ovlynum = total_overlay_size * 2 / (htab->local_store - fixed_size);
      /* Space for _ovly_table[], _ovly_buf_table[] and toe.  */
      fixed_size += ovlynum * 16 + 16 + 4 + 16;
    }

  if (fixed_size + mos_param.max_overlay_size > htab->local_store)
    info->callbacks->einfo (_("non-overlay size of 0x%v plus maximum overlay "
                              "size of 0x%v exceeds local store\n"),
                            (bfd_vma) fixed_size,
                            (bfd_vma) mos_param.max_overlay_size);

  /* Now see if we should put some functions in the non-overlay area.  */
  else if (fixed_size < htab->overlay_fixed)
    {
      unsigned int max_fixed, lib_size;

      max_fixed = htab->local_store - mos_param.max_overlay_size;
      if (max_fixed > htab->overlay_fixed)
        max_fixed = htab->overlay_fixed;
      lib_size = max_fixed - fixed_size;
      lib_size = auto_ovl_lib_functions (info, lib_size);
      if (lib_size == (unsigned int) -1)
        goto err_exit;
      fixed_size = max_fixed - lib_size;
    }

  /* Build an array of sections, suitably sorted to place into
     overlays.  */
  ovly_sections = bfd_malloc (2 * count * sizeof (*ovly_sections));
  if (ovly_sections == NULL)
    goto err_exit;
  ovly_p = ovly_sections;
  if (!for_each_node (collect_overlays, info, &ovly_p, TRUE))
    goto err_exit;
  count = (size_t) (ovly_p - ovly_sections) / 2;

  script = htab->spu_elf_open_overlay_script ();

  if (fprintf (script, "SECTIONS\n{\n OVERLAY :\n {\n") <= 0)
    goto file_err;

  memset (&dummy_caller, 0, sizeof (dummy_caller));
  overlay_size = htab->local_store - fixed_size;
  base = 0;
  ovlynum = 0;
  /* Greedily pack sections [base, i) into successive overlay regions.  */
  while (base < count)
    {
      unsigned int size = 0;
      unsigned int j;

      for (i = base; i < count; i++)
        {
          asection *sec;
          unsigned int tmp;
          unsigned int stub_size;
          struct call_info *call, *pasty;
          struct _spu_elf_section_data *sec_data;
          struct spu_elf_stack_info *sinfo;
          int k;

          /* See whether we can add this section to the current
             overlay without overflowing our overlay buffer.  */
          sec = ovly_sections[2 * i];
          tmp = size + sec->size;
          if (ovly_sections[2 * i + 1])
            tmp += ovly_sections[2 * i + 1]->size;
          if (tmp > overlay_size)
            break;
          if (sec->segment_mark)
            {
              /* Pasted sections must stay together, so add their
                 sizes too.  */
              struct call_info *pasty = find_pasted_call (sec);
              while (pasty != NULL)
                {
                  struct function_info *call_fun = pasty->fun;
                  tmp += call_fun->sec->size;
                  if (call_fun->rodata)
                    tmp += call_fun->rodata->size;
                  for (pasty = call_fun->call_list; pasty; pasty = pasty->next)
                    if (pasty->is_pasted)
                      break;
                }
            }
          if (tmp > overlay_size)
            break;

          /* If we add this section, we might need new overlay call
             stubs.  Add any overlay section calls to dummy_call.  */
          pasty = NULL;
          sec_data = spu_elf_section_data (sec);
          sinfo = sec_data->u.i.stack_info;
          for (k = 0; k < sinfo->num_fun; ++k)
            for (call = sinfo->fun[k].call_list; call; call = call->next)
              if (call->is_pasted)
                {
                  BFD_ASSERT (pasty == NULL);
                  pasty = call;
                }
              else if (call->fun->sec->linker_mark)
                {
                  if (!copy_callee (&dummy_caller, call))
                    goto err_exit;
                }
          /* Walk the pasted chain, collecting its callees too.  */
          while (pasty != NULL)
            {
              struct function_info *call_fun = pasty->fun;
              pasty = NULL;
              for (call = call_fun->call_list; call; call = call->next)
                if (call->is_pasted)
                  {
                    BFD_ASSERT (pasty == NULL);
                    pasty = call;
                  }
                else if (!copy_callee (&dummy_caller, call))
                  goto err_exit;
            }

          /* Calculate call stub size.  */
          stub_size = 0;
          for (call = dummy_caller.call_list; call; call = call->next)
            {
              unsigned int k;

              stub_size += OVL_STUB_SIZE;
              /* If the call is within this overlay, we won't need a
                 stub.  */
              for (k = base; k < i + 1; k++)
                if (call->fun->sec == ovly_sections[2 * k])
                  {
                    stub_size -= OVL_STUB_SIZE;
                    break;
                  }
            }
          if (tmp + stub_size > overlay_size)
            break;

          size = tmp;
        }

      /* Not even one section fitted: it is too large for any overlay.  */
      if (i == base)
        {
          info->callbacks->einfo (_("%B:%A%s exceeds overlay size\n"),
                                  ovly_sections[2 * i]->owner,
                                  ovly_sections[2 * i],
                                  ovly_sections[2 * i + 1] ? " + rodata" : "");
          bfd_set_error (bfd_error_bad_value);
          goto err_exit;
        }

      /* Emit the script entries for this overlay region: first the
         code sections (and their pasted chains)...  */
      if (fprintf (script, " .ovly%d {\n", ++ovlynum) <= 0)
        goto file_err;
      for (j = base; j < i; j++)
        {
          asection *sec = ovly_sections[2 * j];

          if (fprintf (script, " %s%c%s (%s)\n",
                       (sec->owner->my_archive != NULL
                        ? sec->owner->my_archive->filename : ""),
                       info->path_separator,
                       sec->owner->filename,
                       sec->name) <= 0)
            goto file_err;
          if (sec->segment_mark)
            {
              struct call_info *call = find_pasted_call (sec);
              while (call != NULL)
                {
                  struct function_info *call_fun = call->fun;
                  sec = call_fun->sec;
                  if (fprintf (script, " %s%c%s (%s)\n",
                               (sec->owner->my_archive != NULL
                                ? sec->owner->my_archive->filename : ""),
                               info->path_separator,
                               sec->owner->filename,
                               sec->name) <= 0)
                    goto file_err;
                  for (call = call_fun->call_list; call; call = call->next)
                    if (call->is_pasted)
                      break;
                }
            }
        }

      /* ...then the corresponding rodata sections.  */
      for (j = base; j < i; j++)
        {
          asection *sec = ovly_sections[2 * j + 1];
          if (sec != NULL
              && fprintf (script, " %s%c%s (%s)\n",
                          (sec->owner->my_archive != NULL
                           ? sec->owner->my_archive->filename : ""),
                          info->path_separator,
                          sec->owner->filename,
                          sec->name) <= 0)
            goto file_err;

          sec = ovly_sections[2 * j];
          if (sec->segment_mark)
            {
              struct call_info *call = find_pasted_call (sec);
              while (call != NULL)
                {
                  struct function_info *call_fun = call->fun;
                  sec = call_fun->rodata;
                  if (sec != NULL
                      && fprintf (script, " %s%c%s (%s)\n",
                                  (sec->owner->my_archive != NULL
                                   ? sec->owner->my_archive->filename : ""),
                                  info->path_separator,
                                  sec->owner->filename,
                                  sec->name) <= 0)
                    goto file_err;
                  for (call = call_fun->call_list; call; call = call->next)
                    if (call->is_pasted)
                      break;
                }
            }
        }

      if (fprintf (script, " }\n") <= 0)
        goto file_err;

      /* Reset the stub bookkeeping for the next overlay region.  */
      while (dummy_caller.call_list != NULL)
        {
          struct call_info *call = dummy_caller.call_list;
          dummy_caller.call_list = call->next;
          free (call);
        }

      base = i;
    }
  free (ovly_sections);

  if (fprintf (script, " }\n}\nINSERT AFTER .text;\n") <= 0)
    goto file_err;
  if (fclose (script) != 0)
    goto file_err;

  if (htab->auto_overlay & AUTO_RELINK)
    htab->spu_elf_relink ();

  xexit (0);

 file_err:
  bfd_set_error (bfd_error_system_call);
 err_exit:
  info->callbacks->einfo ("%F%P: auto overlay error: %E\n");
  xexit (1);
}
3886
/* Provide an estimate of total stack required.  Builds the call graph,
   sums stack usage over it, and reports per-function and overall
   figures through the linker callbacks.  EMIT_STACK_SYMS non-zero
   additionally defines __stack_* symbols (see sum_stack).  */

static bfd_boolean
spu_elf_stack_analysis (struct bfd_link_info *info, int emit_stack_syms)
{
  struct _sum_stack_param sum_stack_param;

  if (!discover_functions (info))
    return FALSE;

  if (!build_call_tree (info))
    return FALSE;

  info->callbacks->info (_("Stack size for call graph root nodes.\n"));
  info->callbacks->minfo (_("\nStack size for functions. "
                            "Annotations: '*' max stack, 't' tail call\n"));

  sum_stack_param.emit_stack_syms = emit_stack_syms;
  sum_stack_param.overall_stack = 0;
  if (!for_each_node (sum_stack, info, &sum_stack_param, TRUE))
    return FALSE;

  info->callbacks->info (_("Maximum stack required is 0x%v\n"),
                         (bfd_vma) sum_stack_param.overall_stack);
  return TRUE;
}
3913
/* Perform a final link.  Runs the SPU-specific pre-link passes
   (auto-overlay generation and/or stack analysis) before handing off
   to the generic ELF final link.  */

static bfd_boolean
spu_elf_final_link (bfd *output_bfd, struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);

  /* Note: spu_elf_auto_overlay never returns (it exits).  */
  if (htab->auto_overlay)
    spu_elf_auto_overlay (info, htab->spu_elf_load_ovl_mgr);

  /* A stack analysis failure is reported but does not stop the link.  */
  if (htab->stack_analysis
      && !spu_elf_stack_analysis (info, htab->emit_stack_syms))
    info->callbacks->einfo ("%X%P: stack analysis error: %E\n");

  return bfd_elf_final_link (output_bfd, info);
}
3930
ece5ef60
AM
3931/* Called when not normally emitting relocs, ie. !info->relocatable
3932 and !info->emitrelocations. Returns a count of special relocs
3933 that need to be emitted. */
3934
3935static unsigned int
58217f29 3936spu_elf_count_relocs (struct bfd_link_info *info, asection *sec)
ece5ef60 3937{
58217f29 3938 Elf_Internal_Rela *relocs;
ece5ef60 3939 unsigned int count = 0;
ece5ef60 3940
58217f29
AM
3941 relocs = _bfd_elf_link_read_relocs (sec->owner, sec, NULL, NULL,
3942 info->keep_memory);
3943 if (relocs != NULL)
ece5ef60 3944 {
58217f29
AM
3945 Elf_Internal_Rela *rel;
3946 Elf_Internal_Rela *relend = relocs + sec->reloc_count;
3947
3948 for (rel = relocs; rel < relend; rel++)
3949 {
3950 int r_type = ELF32_R_TYPE (rel->r_info);
3951 if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
3952 ++count;
3953 }
3954
3955 if (elf_section_data (sec)->relocs != relocs)
3956 free (relocs);
ece5ef60
AM
3957 }
3958
3959 return count;
3960}
3961
/* Apply RELOCS to CONTENTS of INPUT_SECTION from INPUT_BFD.
   Returns FALSE on hard error, TRUE on success, and 2 when the reloc
   array was rewritten in place so that only R_SPU_PPU32/64 relocs
   remain for emission.  */

static int
spu_elf_relocate_section (bfd *output_bfd,
                          struct bfd_link_info *info,
                          bfd *input_bfd,
                          asection *input_section,
                          bfd_byte *contents,
                          Elf_Internal_Rela *relocs,
                          Elf_Internal_Sym *local_syms,
                          asection **local_sections)
{
  Elf_Internal_Shdr *symtab_hdr;
  struct elf_link_hash_entry **sym_hashes;
  Elf_Internal_Rela *rel, *relend;
  struct spu_link_hash_table *htab;
  asection *ea = bfd_get_section_by_name (output_bfd, "._ea");
  int ret = TRUE;
  bfd_boolean emit_these_relocs = FALSE;
  bfd_boolean is_ea_sym;
  bfd_boolean stubs;

  htab = spu_hash_table (info);
  /* Only look for overlay stubs when stubs exist and this section can
     contain calls needing them.  */
  stubs = (htab->stub_sec != NULL
           && maybe_needs_stubs (input_section, output_bfd));
  symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
  sym_hashes = (struct elf_link_hash_entry **) (elf_sym_hashes (input_bfd));

  rel = relocs;
  relend = relocs + input_section->reloc_count;
  for (; rel < relend; rel++)
    {
      int r_type;
      reloc_howto_type *howto;
      unsigned int r_symndx;
      Elf_Internal_Sym *sym;
      asection *sec;
      struct elf_link_hash_entry *h;
      const char *sym_name;
      bfd_vma relocation;
      bfd_vma addend;
      bfd_reloc_status_type r;
      bfd_boolean unresolved_reloc;
      /* NOTE(review): warned is set but never read below — looks
         vestigial; confirm before removing.  */
      bfd_boolean warned;
      enum _stub_type stub_type;

      r_symndx = ELF32_R_SYM (rel->r_info);
      r_type = ELF32_R_TYPE (rel->r_info);
      howto = elf_howto_table + r_type;
      unresolved_reloc = FALSE;
      warned = FALSE;
      h = NULL;
      sym = NULL;
      sec = NULL;
      if (r_symndx < symtab_hdr->sh_info)
        {
          /* Local symbol.  */
          sym = local_syms + r_symndx;
          sec = local_sections[r_symndx];
          sym_name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym, sec);
          relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
        }
      else
        {
          /* Global symbol: resolve through the hash table.  */
          if (sym_hashes == NULL)
            return FALSE;

          h = sym_hashes[r_symndx - symtab_hdr->sh_info];

          while (h->root.type == bfd_link_hash_indirect
                 || h->root.type == bfd_link_hash_warning)
            h = (struct elf_link_hash_entry *) h->root.u.i.link;

          relocation = 0;
          if (h->root.type == bfd_link_hash_defined
              || h->root.type == bfd_link_hash_defweak)
            {
              sec = h->root.u.def.section;
              if (sec == NULL
                  || sec->output_section == NULL)
                /* Set a flag that will be cleared later if we find a
                   relocation value for this symbol.  output_section
                   is typically NULL for symbols satisfied by a shared
                   library.  */
                unresolved_reloc = TRUE;
              else
                relocation = (h->root.u.def.value
                              + sec->output_section->vma
                              + sec->output_offset);
            }
          else if (h->root.type == bfd_link_hash_undefweak)
            ;
          else if (info->unresolved_syms_in_objects == RM_IGNORE
                   && ELF_ST_VISIBILITY (h->other) == STV_DEFAULT)
            ;
          else if (!info->relocatable
                   && !(r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64))
            {
              bfd_boolean err;
              err = (info->unresolved_syms_in_objects == RM_GENERATE_ERROR
                     || ELF_ST_VISIBILITY (h->other) != STV_DEFAULT);
              if (!info->callbacks->undefined_symbol (info,
                                                      h->root.root.string,
                                                      input_bfd,
                                                      input_section,
                                                      rel->r_offset, err))
                return FALSE;
              warned = TRUE;
            }
          sym_name = h->root.root.string;
        }

      if (sec != NULL && elf_discarded_section (sec))
        {
          /* For relocs against symbols from removed linkonce sections,
             or sections discarded by a linker script, we just want the
             section contents zeroed.  Avoid any special processing.  */
          _bfd_clear_contents (howto, input_bfd, contents + rel->r_offset);
          rel->r_info = 0;
          rel->r_addend = 0;
          continue;
        }

      if (info->relocatable)
        continue;

      is_ea_sym = (ea != NULL
                   && sec != NULL
                   && sec->output_section == ea);

      if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
        {
          if (is_ea_sym)
            {
              /* ._ea is a special section that isn't allocated in SPU
                 memory, but rather occupies space in PPU memory as
                 part of an embedded ELF image.  If this reloc is
                 against a symbol defined in ._ea, then transform the
                 reloc into an equivalent one without a symbol
                 relative to the start of the ELF image.  */
              rel->r_addend += (relocation
                                - ea->vma
                                + elf_section_data (ea)->this_hdr.sh_offset);
              rel->r_info = ELF32_R_INFO (0, r_type);
            }
          /* PPU relocs are applied by the PPU linker; just emit them.  */
          emit_these_relocs = TRUE;
          continue;
        }

      /* SPU relocs against ._ea symbols cannot be resolved here.  */
      if (is_ea_sym)
        unresolved_reloc = TRUE;

      if (unresolved_reloc)
        {
          (*_bfd_error_handler)
            (_("%B(%s+0x%lx): unresolvable %s relocation against symbol `%s'"),
             input_bfd,
             bfd_get_section_name (input_bfd, input_section),
             (long) rel->r_offset,
             howto->name,
             sym_name);
          ret = FALSE;
        }

      /* If this symbol is in an overlay area, we may need to relocate
         to the overlay stub.  */
      addend = rel->r_addend;
      if (stubs
          && (stub_type = needs_ovl_stub (h, sym, sec, input_section, rel,
                                          contents, info)) != no_stub)
        {
          unsigned int ovl = 0;
          struct got_entry *g, **head;

          if (stub_type != nonovl_stub)
            ovl = (spu_elf_section_data (input_section->output_section)
                   ->u.o.ovl_index);

          if (h != NULL)
            head = &h->got.glist;
          else
            head = elf_local_got_ents (input_bfd) + r_symndx;

          /* ovl == 0 entries are usable from any overlay.  */
          for (g = *head; g != NULL; g = g->next)
            if (g->addend == addend && (g->ovl == ovl || g->ovl == 0))
              break;
          if (g == NULL)
            abort ();

          /* Redirect the reloc to the stub; its addend was consumed
             when the stub entry was created.  */
          relocation = g->stub_addr;
          addend = 0;
        }

      r = _bfd_final_link_relocate (howto,
                                    input_bfd,
                                    input_section,
                                    contents,
                                    rel->r_offset, relocation, addend);

      if (r != bfd_reloc_ok)
        {
          const char *msg = (const char *) 0;

          switch (r)
            {
            case bfd_reloc_overflow:
              if (!((*info->callbacks->reloc_overflow)
                    (info, (h ? &h->root : NULL), sym_name, howto->name,
                     (bfd_vma) 0, input_bfd, input_section, rel->r_offset)))
                return FALSE;
              break;

            case bfd_reloc_undefined:
              if (!((*info->callbacks->undefined_symbol)
                    (info, sym_name, input_bfd, input_section,
                     rel->r_offset, TRUE)))
                return FALSE;
              break;

            case bfd_reloc_outofrange:
              msg = _("internal error: out of range error");
              goto common_error;

            case bfd_reloc_notsupported:
              msg = _("internal error: unsupported relocation error");
              goto common_error;

            case bfd_reloc_dangerous:
              msg = _("internal error: dangerous error");
              goto common_error;

            default:
              msg = _("internal error: unknown error");
              /* fall through */

            common_error:
              ret = FALSE;
              if (!((*info->callbacks->warning)
                    (info, msg, sym_name, input_bfd, input_section,
                     rel->r_offset)))
                return FALSE;
              break;
            }
        }
    }

  if (ret
      && emit_these_relocs
      && !info->emitrelocations)
    {
      Elf_Internal_Rela *wrel;
      Elf_Internal_Shdr *rel_hdr;

      /* Compact the reloc array in place so that only the PPU relocs
         remain to be output.  */
      wrel = rel = relocs;
      relend = relocs + input_section->reloc_count;
      for (; rel < relend; rel++)
        {
          int r_type;

          r_type = ELF32_R_TYPE (rel->r_info);
          if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
            *wrel++ = *rel;
        }
      input_section->reloc_count = wrel - relocs;
      /* Backflips for _bfd_elf_link_output_relocs.  */
      rel_hdr = &elf_section_data (input_section)->rel_hdr;
      rel_hdr->sh_size = input_section->reloc_count * rel_hdr->sh_entsize;
      ret = 2;
    }

  return ret;
}
4233
/* Adjust _SPUEAR_ syms to point at their overlay stubs.  */

static bfd_boolean
spu_elf_output_symbol_hook (struct bfd_link_info *info,
			    const char *sym_name ATTRIBUTE_UNUSED,
			    Elf_Internal_Sym *sym,
			    asection *sym_sec ATTRIBUTE_UNUSED,
			    struct elf_link_hash_entry *h)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);

  /* Only rewrite the symbol for a final link that built stub
     sections, and only for regularly defined global symbols whose
     names carry the "_SPUEAR_" entry-point prefix.  */
  if (!info->relocatable
      && htab->stub_sec != NULL
      && h != NULL
      && (h->root.type == bfd_link_hash_defined
	  || h->root.type == bfd_link_hash_defweak)
      && h->def_regular
      && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0)
    {
      struct got_entry *g;

      /* Find the non-overlay (ovl == 0), zero-addend stub entry for
	 this symbol, and redirect the output symbol to the stub's
	 section and address so external callers enter via the stub.  */
      for (g = h->got.glist; g != NULL; g = g->next)
	if (g->addend == 0 && g->ovl == 0)
	  {
	    sym->st_shndx = (_bfd_elf_section_from_bfd_section
			     (htab->stub_sec[0]->output_section->owner,
			      htab->stub_sec[0]->output_section));
	    sym->st_value = g->stub_addr;
	    break;
	  }
    }

  return TRUE;
}
4268
/* Non-zero when linking an SPU "plugin" image; set from the linker
   front end via spu_elf_plugin and tested when writing the ELF
   header.  */
static int spu_plugin = 0;

/* Record whether a plugin image is being produced.  VAL is simply
   stored for later use by spu_elf_post_process_headers.  */
void
spu_elf_plugin (int val)
{
  spu_plugin = val;
}
4276
4277/* Set ELF header e_type for plugins. */
4278
4279static void
4280spu_elf_post_process_headers (bfd *abfd,
4281 struct bfd_link_info *info ATTRIBUTE_UNUSED)
4282{
4283 if (spu_plugin)
4284 {
4285 Elf_Internal_Ehdr *i_ehdrp = elf_elfheader (abfd);
4286
4287 i_ehdrp->e_type = ET_DYN;
4288 }
4289}
4290
4291/* We may add an extra PT_LOAD segment for .toe. We also need extra
4292 segments for overlays. */
4293
4294static int
4295spu_elf_additional_program_headers (bfd *abfd, struct bfd_link_info *info)
4296{
ceae84aa 4297 int extra = 0;
e9f53129
AM
4298 asection *sec;
4299
ceae84aa
AM
4300 if (info != NULL)
4301 {
4302 struct spu_link_hash_table *htab = spu_hash_table (info);
4303 extra = htab->num_overlays;
4304 }
4305
e9f53129
AM
4306 if (extra)
4307 ++extra;
4308
4309 sec = bfd_get_section_by_name (abfd, ".toe");
4310 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
4311 ++extra;
4312
4313 return extra;
4314}
4315
/* Remove .toe section from other PT_LOAD segments and put it in
   a segment of its own.  Put overlays in separate segments too.  */

static bfd_boolean
spu_elf_modify_segment_map (bfd *abfd, struct bfd_link_info *info)
{
  asection *toe, *s;
  struct elf_segment_map *m;
  unsigned int i;

  /* Only meaningful when linking.  */
  if (info == NULL)
    return TRUE;

  toe = bfd_get_section_by_name (abfd, ".toe");
  /* Scan each multi-section PT_LOAD segment for the first section
     that must be isolated (.toe, or any overlay section).  */
  for (m = elf_tdata (abfd)->segment_map; m != NULL; m = m->next)
    if (m->p_type == PT_LOAD && m->count > 1)
      for (i = 0; i < m->count; i++)
	if ((s = m->sections[i]) == toe
	    || spu_elf_section_data (s)->u.o.ovl_index != 0)
	  {
	    struct elf_segment_map *m2;
	    bfd_vma amt;

	    /* Split the sections following S into a new PT_LOAD
	       segment M2, inserted after M.  The segment_map struct
	       has one sections[] slot built in, hence the
	       "i + 2" in the size computation.  */
	    if (i + 1 < m->count)
	      {
		amt = sizeof (struct elf_segment_map);
		amt += (m->count - (i + 2)) * sizeof (m->sections[0]);
		m2 = bfd_zalloc (abfd, amt);
		if (m2 == NULL)
		  return FALSE;
		m2->count = m->count - (i + 1);
		memcpy (m2->sections, m->sections + i + 1,
			m2->count * sizeof (m->sections[0]));
		m2->p_type = PT_LOAD;
		m2->next = m->next;
		m->next = m2;
	      }
	    /* Now M holds at most the sections before S, and S itself
	       goes into a fresh single-section segment.  */
	    m->count = 1;
	    if (i != 0)
	      {
		/* S was not first: keep the leading sections in M and
		   insert a new segment for S alone after it.  */
		m->count = i;
		amt = sizeof (struct elf_segment_map);
		m2 = bfd_zalloc (abfd, amt);
		if (m2 == NULL)
		  return FALSE;
		m2->p_type = PT_LOAD;
		m2->count = 1;
		m2->sections[0] = s;
		m2->next = m->next;
		m->next = m2;
	      }
	    /* Only the first offending section per segment is handled
	       here; the newly created segments are themselves visited
	       by the outer loop via m->next.  */
	    break;
	  }

  return TRUE;
}
4372
7d3287cb
AM
4373/* Tweak the section type of .note.spu_name. */
4374
4375static bfd_boolean
4376spu_elf_fake_sections (bfd *obfd ATTRIBUTE_UNUSED,
4377 Elf_Internal_Shdr *hdr,
4378 asection *sec)
4379{
4380 if (strcmp (sec->name, SPU_PTNOTE_SPUNAME) == 0)
4381 hdr->sh_type = SHT_NOTE;
4382 return TRUE;
4383}
4384
/* Tweak phdrs before writing them out.  */

static int
spu_elf_modify_program_headers (bfd *abfd, struct bfd_link_info *info)
{
  const struct elf_backend_data *bed;
  struct elf_obj_tdata *tdata;
  Elf_Internal_Phdr *phdr, *last;
  struct spu_link_hash_table *htab;
  unsigned int count;
  unsigned int i;

  /* Only meaningful when linking.  */
  if (info == NULL)
    return TRUE;

  bed = get_elf_backend_data (abfd);
  tdata = elf_tdata (abfd);
  phdr = tdata->phdr;
  count = tdata->program_header_size / bed->s->sizeof_phdr;
  htab = spu_hash_table (info);
  if (htab->num_overlays != 0)
    {
      struct elf_segment_map *m;
      unsigned int o;

      /* Walk segment_map in step with the phdr array (index I) to
	 find segments whose first section is an overlay.  */
      for (i = 0, m = elf_tdata (abfd)->segment_map; m; ++i, m = m->next)
	if (m->count != 0
	    && (o = spu_elf_section_data (m->sections[0])->u.o.ovl_index) != 0)
	  {
	    /* Mark this as an overlay header.  */
	    phdr[i].p_flags |= PF_OVERLAY;

	    if (htab->ovtab != NULL && htab->ovtab->size != 0)
	      {
		bfd_byte *p = htab->ovtab->contents;
		/* Each _ovly_table entry is 16 bytes; the file offset
		   field lives at byte 8 of entry O.  */
		unsigned int off = o * 16 + 8;

		/* Write file_off into _ovly_table.  */
		bfd_put_32 (htab->ovtab->owner, phdr[i].p_offset, p + off);
	      }
	  }
    }

  /* Round up p_filesz and p_memsz of PT_LOAD segments to multiples
     of 16.  This should always be possible when using the standard
     linker scripts, but don't create overlapping segments if
     someone is playing games with linker scripts.  */
  last = NULL;
  /* First pass: scan PT_LOAD phdrs from last to first, checking that
     rounding each one up would not run into the next-higher segment
     (tracked in LAST).  Break out early if any rounding is unsafe,
     leaving I != -1.  */
  for (i = count; i-- != 0; )
    if (phdr[i].p_type == PT_LOAD)
      {
	unsigned adjust;

	/* Bytes needed to pad p_filesz up to a multiple of 16.  */
	adjust = -phdr[i].p_filesz & 15;
	if (adjust != 0
	    && last != NULL
	    && phdr[i].p_offset + phdr[i].p_filesz > last->p_offset - adjust)
	  break;

	/* Likewise for p_memsz, checked against LAST's virtual
	   address range.  */
	adjust = -phdr[i].p_memsz & 15;
	if (adjust != 0
	    && last != NULL
	    && phdr[i].p_filesz != 0
	    && phdr[i].p_vaddr + phdr[i].p_memsz > last->p_vaddr - adjust
	    && phdr[i].p_vaddr + phdr[i].p_memsz <= last->p_vaddr)
	  break;

	if (phdr[i].p_filesz != 0)
	  last = &phdr[i];
      }

  /* Second pass: only if the first pass ran to completion (I wrapped
     to -1), actually apply the rounding to every PT_LOAD phdr.  */
  if (i == (unsigned int) -1)
    for (i = count; i-- != 0; )
      if (phdr[i].p_type == PT_LOAD)
	{
	  unsigned adjust;

	  adjust = -phdr[i].p_filesz & 15;
	  phdr[i].p_filesz += adjust;

	  adjust = -phdr[i].p_memsz & 15;
	  phdr[i].p_memsz += adjust;
	}

  return TRUE;
}
4471
/* Target vector definitions for elf32-spu, consumed by
   "elf32-target.h" below.  */

#define TARGET_BIG_SYM bfd_elf32_spu_vec
#define TARGET_BIG_NAME "elf32-spu"
#define ELF_ARCH bfd_arch_spu
#define ELF_MACHINE_CODE EM_SPU
/* This matches the alignment need for DMA.  */
#define ELF_MAXPAGESIZE 0x80
#define elf_backend_rela_normal 1
#define elf_backend_can_gc_sections 1

/* Relocation handling hooks.  */
#define bfd_elf32_bfd_reloc_type_lookup spu_elf_reloc_type_lookup
#define bfd_elf32_bfd_reloc_name_lookup spu_elf_reloc_name_lookup
#define elf_info_to_howto spu_elf_info_to_howto
#define elf_backend_count_relocs spu_elf_count_relocs
#define elf_backend_relocate_section spu_elf_relocate_section
#define elf_backend_symbol_processing spu_elf_backend_symbol_processing
#define elf_backend_link_output_symbol_hook spu_elf_output_symbol_hook
#define elf_backend_object_p spu_elf_object_p
#define bfd_elf32_new_section_hook spu_elf_new_section_hook
#define bfd_elf32_bfd_link_hash_table_create spu_elf_link_hash_table_create

/* Segment/header layout hooks (overlay and .toe handling above).  */
#define elf_backend_additional_program_headers spu_elf_additional_program_headers
#define elf_backend_modify_segment_map spu_elf_modify_segment_map
#define elf_backend_modify_program_headers spu_elf_modify_program_headers
#define elf_backend_post_process_headers spu_elf_post_process_headers
#define elf_backend_fake_sections spu_elf_fake_sections
#define elf_backend_special_sections spu_elf_special_sections
#define bfd_elf32_bfd_final_link spu_elf_final_link

#include "elf32-target.h"