/* SPU specific support for 32-bit ELF

   Copyright (C) 2006-2023 Free Software Foundation, Inc.

   This file is part of BFD, the Binary File Descriptor library.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License along
   with this program; if not, write to the Free Software Foundation, Inc.,
   51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA.  */

#include "sysdep.h"
#include "libiberty.h"
#include "bfd.h"
#include "bfdlink.h"
#include "libbfd.h"
#include "elf-bfd.h"
#include "elf/spu.h"
#include "elf32-spu.h"

/* All users of this file have bfd_octets_per_byte (abfd, sec) == 1.  */
#define OCTETS_PER_BYTE(ABFD, SEC) 1

/* We use RELA style relocs.  Don't define USE_REL.  */

static bfd_reloc_status_type spu_elf_rel9 (bfd *, arelent *, asymbol *,
                                           void *, asection *,
                                           bfd *, char **);

/* Values of type 'enum elf_spu_reloc_type' are used to index this
   array, so it must be declared in the order of that type.  */

static reloc_howto_type elf_howto_table[] = {
  HOWTO (R_SPU_NONE, 0, 0, 0, false, 0, complain_overflow_dont,
         bfd_elf_generic_reloc, "SPU_NONE",
         false, 0, 0x00000000, false),
  HOWTO (R_SPU_ADDR10, 4, 4, 10, false, 14, complain_overflow_bitfield,
         bfd_elf_generic_reloc, "SPU_ADDR10",
         false, 0, 0x00ffc000, false),
  HOWTO (R_SPU_ADDR16, 2, 4, 16, false, 7, complain_overflow_bitfield,
         bfd_elf_generic_reloc, "SPU_ADDR16",
         false, 0, 0x007fff80, false),
  HOWTO (R_SPU_ADDR16_HI, 16, 4, 16, false, 7, complain_overflow_bitfield,
         bfd_elf_generic_reloc, "SPU_ADDR16_HI",
         false, 0, 0x007fff80, false),
  HOWTO (R_SPU_ADDR16_LO, 0, 4, 16, false, 7, complain_overflow_dont,
         bfd_elf_generic_reloc, "SPU_ADDR16_LO",
         false, 0, 0x007fff80, false),
  HOWTO (R_SPU_ADDR18, 0, 4, 18, false, 7, complain_overflow_bitfield,
         bfd_elf_generic_reloc, "SPU_ADDR18",
         false, 0, 0x01ffff80, false),
  HOWTO (R_SPU_ADDR32, 0, 4, 32, false, 0, complain_overflow_dont,
         bfd_elf_generic_reloc, "SPU_ADDR32",
         false, 0, 0xffffffff, false),
  HOWTO (R_SPU_REL16, 2, 4, 16, true, 7, complain_overflow_bitfield,
         bfd_elf_generic_reloc, "SPU_REL16",
         false, 0, 0x007fff80, true),
  HOWTO (R_SPU_ADDR7, 0, 4, 7, false, 14, complain_overflow_dont,
         bfd_elf_generic_reloc, "SPU_ADDR7",
         false, 0, 0x001fc000, false),
  HOWTO (R_SPU_REL9, 2, 4, 9, true, 0, complain_overflow_signed,
         spu_elf_rel9, "SPU_REL9",
         false, 0, 0x0180007f, true),
  HOWTO (R_SPU_REL9I, 2, 4, 9, true, 0, complain_overflow_signed,
         spu_elf_rel9, "SPU_REL9I",
         false, 0, 0x0000c07f, true),
  HOWTO (R_SPU_ADDR10I, 0, 4, 10, false, 14, complain_overflow_signed,
         bfd_elf_generic_reloc, "SPU_ADDR10I",
         false, 0, 0x00ffc000, false),
  HOWTO (R_SPU_ADDR16I, 0, 4, 16, false, 7, complain_overflow_signed,
         bfd_elf_generic_reloc, "SPU_ADDR16I",
         false, 0, 0x007fff80, false),
  HOWTO (R_SPU_REL32, 0, 4, 32, true, 0, complain_overflow_dont,
         bfd_elf_generic_reloc, "SPU_REL32",
         false, 0, 0xffffffff, true),
  HOWTO (R_SPU_ADDR16X, 0, 4, 16, false, 7, complain_overflow_bitfield,
         bfd_elf_generic_reloc, "SPU_ADDR16X",
         false, 0, 0x007fff80, false),
  HOWTO (R_SPU_PPU32, 0, 4, 32, false, 0, complain_overflow_dont,
         bfd_elf_generic_reloc, "SPU_PPU32",
         false, 0, 0xffffffff, false),
  HOWTO (R_SPU_PPU64, 0, 8, 64, false, 0, complain_overflow_dont,
         bfd_elf_generic_reloc, "SPU_PPU64",
         false, 0, -1, false),
  HOWTO (R_SPU_ADD_PIC, 0, 0, 0, false, 0, complain_overflow_dont,
         bfd_elf_generic_reloc, "SPU_ADD_PIC",
         false, 0, 0x00000000, false),
};
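
/* For example (reading the table entries above): R_SPU_ADDR16 shifts
   the relocation value right by two and places the result in bits
   7-22 (dst_mask 0x007fff80), the 16-bit immediate field of
   RI16-format SPU instructions.  */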

static struct bfd_elf_special_section const spu_elf_special_sections[] = {
  { "._ea", 4, 0, SHT_PROGBITS, SHF_WRITE },
  { ".toe", 4, 0, SHT_NOBITS, SHF_ALLOC },
  { NULL, 0, 0, 0, 0 }
};

static enum elf_spu_reloc_type
spu_elf_bfd_to_reloc_type (bfd_reloc_code_real_type code)
{
  switch (code)
    {
    default:
      return (enum elf_spu_reloc_type) -1;
    case BFD_RELOC_NONE:
      return R_SPU_NONE;
    case BFD_RELOC_SPU_IMM10W:
      return R_SPU_ADDR10;
    case BFD_RELOC_SPU_IMM16W:
      return R_SPU_ADDR16;
    case BFD_RELOC_SPU_LO16:
      return R_SPU_ADDR16_LO;
    case BFD_RELOC_SPU_HI16:
      return R_SPU_ADDR16_HI;
    case BFD_RELOC_SPU_IMM18:
      return R_SPU_ADDR18;
    case BFD_RELOC_SPU_PCREL16:
      return R_SPU_REL16;
    case BFD_RELOC_SPU_IMM7:
      return R_SPU_ADDR7;
    case BFD_RELOC_SPU_IMM8:
      return R_SPU_NONE;
    case BFD_RELOC_SPU_PCREL9a:
      return R_SPU_REL9;
    case BFD_RELOC_SPU_PCREL9b:
      return R_SPU_REL9I;
    case BFD_RELOC_SPU_IMM10:
      return R_SPU_ADDR10I;
    case BFD_RELOC_SPU_IMM16:
      return R_SPU_ADDR16I;
    case BFD_RELOC_32:
      return R_SPU_ADDR32;
    case BFD_RELOC_32_PCREL:
      return R_SPU_REL32;
    case BFD_RELOC_SPU_PPU32:
      return R_SPU_PPU32;
    case BFD_RELOC_SPU_PPU64:
      return R_SPU_PPU64;
    case BFD_RELOC_SPU_ADD_PIC:
      return R_SPU_ADD_PIC;
    }
}

static bool
spu_elf_info_to_howto (bfd *abfd,
                       arelent *cache_ptr,
                       Elf_Internal_Rela *dst)
{
  enum elf_spu_reloc_type r_type;

  r_type = (enum elf_spu_reloc_type) ELF32_R_TYPE (dst->r_info);
  /* PR 17512: file: 90c2a92e.  */
  if (r_type >= R_SPU_max)
    {
      /* xgettext:c-format */
      _bfd_error_handler (_("%pB: unsupported relocation type %#x"),
                          abfd, r_type);
      bfd_set_error (bfd_error_bad_value);
      return false;
    }
  cache_ptr->howto = &elf_howto_table[(int) r_type];
  return true;
}

static reloc_howto_type *
spu_elf_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
                           bfd_reloc_code_real_type code)
{
  enum elf_spu_reloc_type r_type = spu_elf_bfd_to_reloc_type (code);

  if (r_type == (enum elf_spu_reloc_type) -1)
    return NULL;

  return elf_howto_table + r_type;
}

static reloc_howto_type *
spu_elf_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
                           const char *r_name)
{
  unsigned int i;

  for (i = 0; i < sizeof (elf_howto_table) / sizeof (elf_howto_table[0]); i++)
    if (elf_howto_table[i].name != NULL
        && strcasecmp (elf_howto_table[i].name, r_name) == 0)
      return &elf_howto_table[i];

  return NULL;
}

/* Apply R_SPU_REL9 and R_SPU_REL9I relocs.  */

static bfd_reloc_status_type
spu_elf_rel9 (bfd *abfd, arelent *reloc_entry, asymbol *symbol,
              void *data, asection *input_section,
              bfd *output_bfd, char **error_message)
{
  bfd_size_type octets;
  bfd_vma val;
  long insn;

  /* If this is a relocatable link (output_bfd test tells us), just
     call the generic function.  Any adjustment will be done at final
     link time.  */
  if (output_bfd != NULL)
    return bfd_elf_generic_reloc (abfd, reloc_entry, symbol, data,
                                  input_section, output_bfd, error_message);

  if (reloc_entry->address > bfd_get_section_limit (abfd, input_section))
    return bfd_reloc_outofrange;
  octets = reloc_entry->address * OCTETS_PER_BYTE (abfd, input_section);

  /* Get symbol value.  */
  val = 0;
  if (!bfd_is_com_section (symbol->section))
    val = symbol->value;
  if (symbol->section->output_section)
    val += symbol->section->output_section->vma;

  val += reloc_entry->addend;

  /* Make it pc-relative.  */
  val -= input_section->output_section->vma + input_section->output_offset;

  val >>= 2;
  if (val + 256 >= 512)
    return bfd_reloc_overflow;

  insn = bfd_get_32 (abfd, (bfd_byte *) data + octets);

  /* Move two high bits of value to REL9I and REL9 position.
     The mask will take care of selecting the right field.  */
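  /* For example, if val is 0x1ff (a nine-bit -1), bits 0-6 stay put,
     (val & 0x180) << 16 fills bits 23-24 selected by the REL9 mask
     0x0180007f, and (val & 0x180) << 7 fills bits 14-15 selected by
     the REL9I mask 0x0000c07f.  */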
  val = (val & 0x7f) | ((val & 0x180) << 7) | ((val & 0x180) << 16);
  insn &= ~reloc_entry->howto->dst_mask;
  insn |= val & reloc_entry->howto->dst_mask;
  bfd_put_32 (abfd, insn, (bfd_byte *) data + octets);
  return bfd_reloc_ok;
}

static bool
spu_elf_new_section_hook (bfd *abfd, asection *sec)
{
  if (!sec->used_by_bfd)
    {
      struct _spu_elf_section_data *sdata;

      sdata = bfd_zalloc (abfd, sizeof (*sdata));
      if (sdata == NULL)
        return false;
      sec->used_by_bfd = sdata;
    }

  return _bfd_elf_new_section_hook (abfd, sec);
}

/* Set up overlay info for executables.  */

static bool
spu_elf_object_p (bfd *abfd)
{
  if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
    {
      unsigned int i, num_ovl, num_buf;
      Elf_Internal_Phdr *phdr = elf_tdata (abfd)->phdr;
      Elf_Internal_Ehdr *ehdr = elf_elfheader (abfd);
      Elf_Internal_Phdr *last_phdr = NULL;

      for (num_buf = 0, num_ovl = 0, i = 0; i < ehdr->e_phnum; i++, phdr++)
        if (phdr->p_type == PT_LOAD && (phdr->p_flags & PF_OVERLAY) != 0)
          {
            unsigned int j;

            ++num_ovl;
            if (last_phdr == NULL
                || ((last_phdr->p_vaddr ^ phdr->p_vaddr) & 0x3ffff) != 0)
              ++num_buf;
            last_phdr = phdr;
            for (j = 1; j < elf_numsections (abfd); j++)
              {
                Elf_Internal_Shdr *shdr = elf_elfsections (abfd)[j];

                if (shdr->bfd_section != NULL
                    && ELF_SECTION_SIZE (shdr, phdr) != 0
                    && ELF_SECTION_IN_SEGMENT (shdr, phdr))
                  {
                    asection *sec = shdr->bfd_section;
                    spu_elf_section_data (sec)->u.o.ovl_index = num_ovl;
                    spu_elf_section_data (sec)->u.o.ovl_buf = num_buf;
                  }
              }
          }
    }
  return true;
}

/* Specially mark defined symbols named _EAR_* with BSF_KEEP so that
   strip --strip-unneeded will not remove them.  */

static void
spu_elf_backend_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED, asymbol *sym)
{
  if (sym->name != NULL
      && sym->section != bfd_abs_section_ptr
      && startswith (sym->name, "_EAR_"))
    sym->flags |= BSF_KEEP;
}

/* SPU ELF linker hash table.  */

struct spu_link_hash_table
{
  struct elf_link_hash_table elf;

  struct spu_elf_params *params;

  /* Shortcuts to overlay sections.  */
  asection *ovtab;
  asection *init;
  asection *toe;
  asection **ovl_sec;

  /* Count of stubs in each overlay section.  */
  unsigned int *stub_count;

  /* The stub section for each overlay section.  */
  asection **stub_sec;

  struct elf_link_hash_entry *ovly_entry[2];

  /* Number of overlay buffers.  */
  unsigned int num_buf;

  /* Total number of overlays.  */
  unsigned int num_overlays;

  /* For soft icache.  */
  unsigned int line_size_log2;
  unsigned int num_lines_log2;
  unsigned int fromelem_size_log2;

  /* How much memory we have.  */
  unsigned int local_store;

  /* Count of overlay stubs needed in non-overlay area.  */
  unsigned int non_ovly_stub;

  /* Pointer to the fixup section.  */
  asection *sfixup;

  /* Set on error.  */
  unsigned int stub_err : 1;
};

/* Hijack the generic got fields for overlay stub accounting.  */

struct got_entry
{
  struct got_entry *next;
  unsigned int ovl;
  union {
    bfd_vma addend;
    bfd_vma br_addr;
  };
  bfd_vma stub_addr;
};
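
/* A got_entry with ovl == 0 describes a stub in the non-overlay
   area, usable from any overlay; count_stub below discards
   per-overlay entries for an address once such an entry exists.  */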

#define spu_hash_table(p) \
  ((is_elf_hash_table ((p)->hash) \
    && elf_hash_table_id (elf_hash_table (p)) == SPU_ELF_DATA) \
   ? (struct spu_link_hash_table *) (p)->hash : NULL)

struct call_info
{
  struct function_info *fun;
  struct call_info *next;
  unsigned int count;
  unsigned int max_depth;
  unsigned int is_tail : 1;
  unsigned int is_pasted : 1;
  unsigned int broken_cycle : 1;
  unsigned int priority : 13;
};

struct function_info
{
  /* List of functions called.  Also branches to hot/cold part of
     function.  */
  struct call_info *call_list;
  /* For hot/cold part of function, point to owner.  */
  struct function_info *start;
  /* Symbol at start of function.  */
  union {
    Elf_Internal_Sym *sym;
    struct elf_link_hash_entry *h;
  } u;
  /* Function section.  */
  asection *sec;
  asection *rodata;
  /* Where last called from, and number of sections called from.  */
  asection *last_caller;
  unsigned int call_count;
  /* Address range of (this part of) function.  */
  bfd_vma lo, hi;
  /* Offset where we found a store of lr, or -1 if none found.  */
  bfd_vma lr_store;
  /* Offset where we found the stack adjustment insn.  */
  bfd_vma sp_adjust;
  /* Stack usage.  */
  int stack;
  /* Distance from root of call tree.  Tail and hot/cold branches
     count as one deeper.  We aren't counting stack frames here.  */
  unsigned int depth;
  /* Set if global symbol.  */
  unsigned int global : 1;
  /* Set if known to be start of function (as distinct from a hunk
     in hot/cold section).  */
  unsigned int is_func : 1;
  /* Set if not a root node.  */
  unsigned int non_root : 1;
  /* Flags used during call tree traversal.  It's cheaper to replicate
     the visit flags than have one which needs clearing after a traversal.  */
  unsigned int visit1 : 1;
  unsigned int visit2 : 1;
  unsigned int marking : 1;
  unsigned int visit3 : 1;
  unsigned int visit4 : 1;
  unsigned int visit5 : 1;
  unsigned int visit6 : 1;
  unsigned int visit7 : 1;
};

struct spu_elf_stack_info
{
  int num_fun;
  int max_fun;
  /* Variable size array describing functions, one per contiguous
     address range belonging to a function.  */
  struct function_info fun[1];
};

static struct function_info *find_function (asection *, bfd_vma,
                                            struct bfd_link_info *);

/* Create a spu ELF linker hash table.  */

static struct bfd_link_hash_table *
spu_elf_link_hash_table_create (bfd *abfd)
{
  struct spu_link_hash_table *htab;

  htab = bfd_zmalloc (sizeof (*htab));
  if (htab == NULL)
    return NULL;

  if (!_bfd_elf_link_hash_table_init (&htab->elf, abfd,
                                      _bfd_elf_link_hash_newfunc,
                                      sizeof (struct elf_link_hash_entry),
                                      SPU_ELF_DATA))
    {
      free (htab);
      return NULL;
    }

  htab->elf.init_got_refcount.refcount = 0;
  htab->elf.init_got_refcount.glist = NULL;
  htab->elf.init_got_offset.offset = 0;
  htab->elf.init_got_offset.glist = NULL;
  return &htab->elf.root;
}

void
spu_elf_setup (struct bfd_link_info *info, struct spu_elf_params *params)
{
  bfd_vma max_branch_log2;

  struct spu_link_hash_table *htab = spu_hash_table (info);
  htab->params = params;
  htab->line_size_log2 = bfd_log2 (htab->params->line_size);
  htab->num_lines_log2 = bfd_log2 (htab->params->num_lines);

  /* For the software i-cache, we provide a "from" list whose size
     is a power-of-two number of quadwords, big enough to hold one
     byte per outgoing branch.  Compute this number here.  */
  max_branch_log2 = bfd_log2 (htab->params->max_branch);
  htab->fromelem_size_log2 = max_branch_log2 > 4 ? max_branch_log2 - 4 : 0;
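  /* For example, max_branch == 256 would give max_branch_log2 == 8
     and fromelem_size_log2 == 4: sixteen quadwords, i.e. room for 256
     one-byte entries, per cache line.  */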
}

/* Find the symbol for the given R_SYMNDX in IBFD and set *HP and *SYMP
   to (hash, NULL) for global symbols, and (NULL, sym) for locals.  Set
   *SYMSECP to the symbol's section.  *LOCSYMSP caches local syms.  */

static bool
get_sym_h (struct elf_link_hash_entry **hp,
           Elf_Internal_Sym **symp,
           asection **symsecp,
           Elf_Internal_Sym **locsymsp,
           unsigned long r_symndx,
           bfd *ibfd)
{
  Elf_Internal_Shdr *symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;

  if (r_symndx >= symtab_hdr->sh_info)
    {
      struct elf_link_hash_entry **sym_hashes = elf_sym_hashes (ibfd);
      struct elf_link_hash_entry *h;

      h = sym_hashes[r_symndx - symtab_hdr->sh_info];
      while (h->root.type == bfd_link_hash_indirect
             || h->root.type == bfd_link_hash_warning)
        h = (struct elf_link_hash_entry *) h->root.u.i.link;

      if (hp != NULL)
        *hp = h;

      if (symp != NULL)
        *symp = NULL;

      if (symsecp != NULL)
        {
          asection *symsec = NULL;
          if (h->root.type == bfd_link_hash_defined
              || h->root.type == bfd_link_hash_defweak)
            symsec = h->root.u.def.section;
          *symsecp = symsec;
        }
    }
  else
    {
      Elf_Internal_Sym *sym;
      Elf_Internal_Sym *locsyms = *locsymsp;

      if (locsyms == NULL)
        {
          locsyms = (Elf_Internal_Sym *) symtab_hdr->contents;
          if (locsyms == NULL)
            locsyms = bfd_elf_get_elf_syms (ibfd, symtab_hdr,
                                            symtab_hdr->sh_info,
                                            0, NULL, NULL, NULL);
          if (locsyms == NULL)
            return false;
          *locsymsp = locsyms;
        }
      sym = locsyms + r_symndx;

      if (hp != NULL)
        *hp = NULL;

      if (symp != NULL)
        *symp = sym;

      if (symsecp != NULL)
        *symsecp = bfd_section_from_elf_index (ibfd, sym->st_shndx);
    }

  return true;
}

/* Create the note section if not already present.  This is done early so
   that the linker maps the sections to the right place in the output.  */

bool
spu_elf_create_sections (struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  bfd *ibfd;

  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
    if (bfd_get_section_by_name (ibfd, SPU_PTNOTE_SPUNAME) != NULL)
      break;

  if (ibfd == NULL)
    {
      /* Make SPU_PTNOTE_SPUNAME section.  */
      asection *s;
      size_t name_len;
      size_t size;
      bfd_byte *data;
      flagword flags;

      ibfd = info->input_bfds;
      /* This should really be SEC_LINKER_CREATED, but then we'd need
         to write out the section ourselves.  */
      flags = SEC_LOAD | SEC_READONLY | SEC_HAS_CONTENTS | SEC_IN_MEMORY;
      s = bfd_make_section_anyway_with_flags (ibfd, SPU_PTNOTE_SPUNAME, flags);
      if (s == NULL
          || !bfd_set_section_alignment (s, 4))
        return false;
      /* Because we didn't set SEC_LINKER_CREATED we need to set the
         proper section type.  */
      elf_section_type (s) = SHT_NOTE;

      name_len = strlen (bfd_get_filename (info->output_bfd)) + 1;
      size = 12 + ((sizeof (SPU_PLUGIN_NAME) + 3) & -4);
      size += (name_len + 3) & -4;
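      /* That is, a 12-byte ELF note header (namesz, descsz, type)
         followed by the note name and descriptor, each padded to a
         multiple of four bytes.  */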

      if (!bfd_set_section_size (s, size))
        return false;

      data = bfd_zalloc (ibfd, size);
      if (data == NULL)
        return false;

      bfd_put_32 (ibfd, sizeof (SPU_PLUGIN_NAME), data + 0);
      bfd_put_32 (ibfd, name_len, data + 4);
      bfd_put_32 (ibfd, 1, data + 8);
      memcpy (data + 12, SPU_PLUGIN_NAME, sizeof (SPU_PLUGIN_NAME));
      memcpy (data + 12 + ((sizeof (SPU_PLUGIN_NAME) + 3) & -4),
              bfd_get_filename (info->output_bfd), name_len);
      s->contents = data;
    }

  if (htab->params->emit_fixups)
    {
      asection *s;
      flagword flags;

      if (htab->elf.dynobj == NULL)
        htab->elf.dynobj = ibfd;
      ibfd = htab->elf.dynobj;
      flags = (SEC_LOAD | SEC_ALLOC | SEC_READONLY | SEC_HAS_CONTENTS
               | SEC_IN_MEMORY | SEC_LINKER_CREATED);
      s = bfd_make_section_anyway_with_flags (ibfd, ".fixup", flags);
      if (s == NULL || !bfd_set_section_alignment (s, 2))
        return false;
      htab->sfixup = s;
    }

  return true;
}

/* qsort predicate to sort sections by vma.  */

static int
sort_sections (const void *a, const void *b)
{
  const asection *const *s1 = a;
  const asection *const *s2 = b;
  bfd_signed_vma delta = (*s1)->vma - (*s2)->vma;

  if (delta != 0)
    return delta < 0 ? -1 : 1;

  return (*s1)->index - (*s2)->index;
}

/* Identify overlays in the output bfd, and number them.
   Returns 0 on error, 1 if no overlays, 2 if overlays.  */

int
spu_elf_find_overlays (struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  asection **alloc_sec;
  unsigned int i, n, ovl_index, num_buf;
  asection *s;
  bfd_vma ovl_end;
  static const char *const entry_names[2][2] = {
    { "__ovly_load", "__icache_br_handler" },
    { "__ovly_return", "__icache_call_handler" }
  };

  if (info->output_bfd->section_count < 2)
    return 1;

  alloc_sec
    = bfd_malloc (info->output_bfd->section_count * sizeof (*alloc_sec));
  if (alloc_sec == NULL)
    return 0;

  /* Pick out all the alloced sections.  */
  for (n = 0, s = info->output_bfd->sections; s != NULL; s = s->next)
    if ((s->flags & SEC_ALLOC) != 0
        && (s->flags & (SEC_LOAD | SEC_THREAD_LOCAL)) != SEC_THREAD_LOCAL
        && s->size != 0)
      alloc_sec[n++] = s;

  if (n == 0)
    {
      free (alloc_sec);
      return 1;
    }

  /* Sort them by vma.  */
  qsort (alloc_sec, n, sizeof (*alloc_sec), sort_sections);

  ovl_end = alloc_sec[0]->vma + alloc_sec[0]->size;
  if (htab->params->ovly_flavour == ovly_soft_icache)
    {
      unsigned int prev_buf = 0, set_id = 0;

      /* Look for an overlapping vma to find the first overlay section.  */
      bfd_vma vma_start = 0;

      for (i = 1; i < n; i++)
        {
          s = alloc_sec[i];
          if (s->vma < ovl_end)
            {
              asection *s0 = alloc_sec[i - 1];
              vma_start = s0->vma;
              ovl_end = (s0->vma
                         + ((bfd_vma) 1
                            << (htab->num_lines_log2 + htab->line_size_log2)));
              --i;
              break;
            }
          else
            ovl_end = s->vma + s->size;
        }

      /* Now find any sections within the cache area.  */
      for (ovl_index = 0, num_buf = 0; i < n; i++)
        {
          s = alloc_sec[i];
          if (s->vma >= ovl_end)
            break;

          /* A section in an overlay area called .ovl.init is not an
             overlay in the sense of something loaded in by the
             overlay manager; rather it is the initial section
             contents for the overlay buffer.  */
          if (!startswith (s->name, ".ovl.init"))
            {
              num_buf = ((s->vma - vma_start) >> htab->line_size_log2) + 1;
              set_id = (num_buf == prev_buf) ? set_id + 1 : 0;
              prev_buf = num_buf;

              if ((s->vma - vma_start) & (htab->params->line_size - 1))
                {
                  info->callbacks->einfo (_("%X%P: overlay section %pA "
                                            "does not start on a cache line\n"),
                                          s);
                  bfd_set_error (bfd_error_bad_value);
                  return 0;
                }
              else if (s->size > htab->params->line_size)
                {
                  info->callbacks->einfo (_("%X%P: overlay section %pA "
                                            "is larger than a cache line\n"),
                                          s);
                  bfd_set_error (bfd_error_bad_value);
                  return 0;
                }

              alloc_sec[ovl_index++] = s;
              spu_elf_section_data (s)->u.o.ovl_index
                = (set_id << htab->num_lines_log2) + num_buf;
              spu_elf_section_data (s)->u.o.ovl_buf = num_buf;
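              /* Sections stacked on the same cache line share
                 num_buf but get successive set_ids, so each overlay
                 ends up with a distinct non-zero ovl_index.  */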
            }
        }

      /* Ensure there are no more overlay sections.  */
      for ( ; i < n; i++)
        {
          s = alloc_sec[i];
          if (s->vma < ovl_end)
            {
              info->callbacks->einfo (_("%X%P: overlay section %pA "
                                        "is not in cache area\n"),
                                      alloc_sec[i-1]);
              bfd_set_error (bfd_error_bad_value);
              return 0;
            }
          else
            ovl_end = s->vma + s->size;
        }
    }
  else
    {
      /* Look for overlapping vmas.  Any with overlap must be overlays.
         Count them.  Also count the number of overlay regions.  */
      for (ovl_index = 0, num_buf = 0, i = 1; i < n; i++)
        {
          s = alloc_sec[i];
          if (s->vma < ovl_end)
            {
              asection *s0 = alloc_sec[i - 1];

              if (spu_elf_section_data (s0)->u.o.ovl_index == 0)
                {
                  ++num_buf;
                  if (!startswith (s0->name, ".ovl.init"))
                    {
                      alloc_sec[ovl_index] = s0;
                      spu_elf_section_data (s0)->u.o.ovl_index = ++ovl_index;
                      spu_elf_section_data (s0)->u.o.ovl_buf = num_buf;
                    }
                  else
                    ovl_end = s->vma + s->size;
                }
              if (!startswith (s->name, ".ovl.init"))
                {
                  alloc_sec[ovl_index] = s;
                  spu_elf_section_data (s)->u.o.ovl_index = ++ovl_index;
                  spu_elf_section_data (s)->u.o.ovl_buf = num_buf;
                  if (s0->vma != s->vma)
                    {
                      /* xgettext:c-format */
                      info->callbacks->einfo (_("%X%P: overlay sections %pA "
                                                "and %pA do not start at the "
                                                "same address\n"),
                                              s0, s);
                      bfd_set_error (bfd_error_bad_value);
                      return 0;
                    }
                  if (ovl_end < s->vma + s->size)
                    ovl_end = s->vma + s->size;
                }
            }
          else
            ovl_end = s->vma + s->size;
        }
    }

  htab->num_overlays = ovl_index;
  htab->num_buf = num_buf;
  htab->ovl_sec = alloc_sec;

  if (ovl_index == 0)
    return 1;

  for (i = 0; i < 2; i++)
    {
      const char *name;
      struct elf_link_hash_entry *h;

      name = entry_names[i][htab->params->ovly_flavour];
      h = elf_link_hash_lookup (&htab->elf, name, true, false, false);
      if (h == NULL)
        return 0;

      if (h->root.type == bfd_link_hash_new)
        {
          h->root.type = bfd_link_hash_undefined;
          h->ref_regular = 1;
          h->ref_regular_nonweak = 1;
          h->non_elf = 0;
        }
      htab->ovly_entry[i] = h;
    }

  return 2;
}

/* Non-zero to use bra in overlay stubs rather than br.  */
#define BRA_STUBS 0

#define BRA	0x30000000
#define BRASL	0x31000000
#define BR	0x32000000
#define BRSL	0x33000000
#define NOP	0x40200000
#define LNOP	0x00200000
#define ILA	0x42000000

/* Return true for all relative and absolute branch instructions.
   bra   00110000 0..
   brasl 00110001 0..
   br    00110010 0..
   brsl  00110011 0..
   brz   00100000 0..
   brnz  00100001 0..
   brhz  00100010 0..
   brhnz 00100011 0..  */

static bool
is_branch (const unsigned char *insn)
{
  return (insn[0] & 0xec) == 0x20 && (insn[1] & 0x80) == 0;
}
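
/* For example, bra has major opcode 0x30 and 0x30 & 0xec == 0x20;
   the insn[1] test checks the ninth opcode bit, which is zero for
   all eight branch forms listed above.  */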

/* Return true for all indirect branch instructions.
   bi     00110101 000
   bisl   00110101 001
   iret   00110101 010
   bisled 00110101 011
   biz    00100101 000
   binz   00100101 001
   bihz   00100101 010
   bihnz  00100101 011  */

static bool
is_indirect_branch (const unsigned char *insn)
{
  return (insn[0] & 0xef) == 0x25 && (insn[1] & 0x80) == 0;
}

/* Return true for branch hint instructions.
   hbra 0001000..
   hbrr 0001001..  */

static bool
is_hint (const unsigned char *insn)
{
  return (insn[0] & 0xfc) == 0x10;
}

/* True if INPUT_SECTION might need overlay stubs.  */

static bool
maybe_needs_stubs (asection *input_section)
{
  /* No stubs for debug sections and suchlike.  */
  if ((input_section->flags & SEC_ALLOC) == 0)
    return false;

  /* No stubs for link-once sections that will be discarded.  */
  if (input_section->output_section == bfd_abs_section_ptr)
    return false;

  /* Don't create stubs for .eh_frame references.  */
  if (strcmp (input_section->name, ".eh_frame") == 0)
    return false;

  return true;
}

enum _stub_type
{
  no_stub,
  call_ovl_stub,
  br000_ovl_stub,
  br001_ovl_stub,
  br010_ovl_stub,
  br011_ovl_stub,
  br100_ovl_stub,
  br101_ovl_stub,
  br110_ovl_stub,
  br111_ovl_stub,
  nonovl_stub,
  stub_error
};
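
/* The br000..br111 stub types carry the branch's three lrlive bits
   (bits 4-6 of the second instruction byte, as read in
   needs_ovl_stub); build_stub recovers the value as
   stub_type - br000_ovl_stub.  */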

/* Return the overlay stub type needed for this reloc symbol, or
   no_stub if none is needed.  A result of nonovl_stub means the stub
   must be in the non-overlay area.  */

static enum _stub_type
needs_ovl_stub (struct elf_link_hash_entry *h,
                Elf_Internal_Sym *sym,
                asection *sym_sec,
                asection *input_section,
                Elf_Internal_Rela *irela,
                bfd_byte *contents,
                struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  enum elf_spu_reloc_type r_type;
  unsigned int sym_type;
  bool branch, hint, call;
  enum _stub_type ret = no_stub;
  bfd_byte insn[4];

  if (sym_sec == NULL
      || sym_sec->output_section == bfd_abs_section_ptr
      || spu_elf_section_data (sym_sec->output_section) == NULL)
    return ret;

  if (h != NULL)
    {
      /* Ensure no stubs for user supplied overlay manager syms.  */
      if (h == htab->ovly_entry[0] || h == htab->ovly_entry[1])
        return ret;

      /* setjmp always goes via an overlay stub, because then the return
         and hence the longjmp goes via __ovly_return.  That magically
         makes setjmp/longjmp between overlays work.  */
      if (startswith (h->root.root.string, "setjmp")
          && (h->root.root.string[6] == '\0' || h->root.root.string[6] == '@'))
        ret = call_ovl_stub;
    }

  if (h != NULL)
    sym_type = h->type;
  else
    sym_type = ELF_ST_TYPE (sym->st_info);

  r_type = ELF32_R_TYPE (irela->r_info);
  branch = false;
  hint = false;
  call = false;
  if (r_type == R_SPU_REL16 || r_type == R_SPU_ADDR16)
    {
      if (contents == NULL)
        {
          contents = insn;
          if (!bfd_get_section_contents (input_section->owner,
                                         input_section,
                                         contents,
                                         irela->r_offset, 4))
            return stub_error;
        }
      else
        contents += irela->r_offset;

      branch = is_branch (contents);
      hint = is_hint (contents);
      if (branch || hint)
        {
          call = (contents[0] & 0xfd) == 0x31;
          if (call
              && sym_type != STT_FUNC
              && contents != insn)
            {
              /* It's common for people to write assembly and forget
                 to give function symbols the right type.  Handle
                 calls to such symbols, but warn so that (hopefully)
                 people will fix their code.  We need the symbol
                 type to be correct to distinguish function pointer
                 initialisation from other pointer initialisations.  */
              const char *sym_name;

              if (h != NULL)
                sym_name = h->root.root.string;
              else
                {
                  Elf_Internal_Shdr *symtab_hdr;
                  symtab_hdr = &elf_tdata (input_section->owner)->symtab_hdr;
                  sym_name = bfd_elf_sym_name (input_section->owner,
                                               symtab_hdr,
                                               sym,
                                               sym_sec);
                }
              _bfd_error_handler
                /* xgettext:c-format */
                (_("warning: call to non-function symbol %s defined in %pB"),
                 sym_name, sym_sec->owner);
            }
        }
    }

  if ((!branch && htab->params->ovly_flavour == ovly_soft_icache)
      || (sym_type != STT_FUNC
          && !(branch || hint)
          && (sym_sec->flags & SEC_CODE) == 0))
    return no_stub;

  /* Usually, symbols in non-overlay sections don't need stubs.  */
  if (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index == 0
      && !htab->params->non_overlay_stubs)
    return ret;

  /* A reference from some other section to a symbol in an overlay
     section needs a stub.  */
  if (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index
      != spu_elf_section_data (input_section->output_section)->u.o.ovl_index)
    {
      unsigned int lrlive = 0;
      if (branch)
        lrlive = (contents[1] & 0x70) >> 4;

      if (!lrlive && (call || sym_type == STT_FUNC))
        ret = call_ovl_stub;
      else
        ret = br000_ovl_stub + lrlive;
    }

  /* If this insn isn't a branch then we are possibly taking the
     address of a function and passing it out somehow.  Soft-icache code
     always generates inline code to do indirect branches.  */
  if (!(branch || hint)
      && sym_type == STT_FUNC
      && htab->params->ovly_flavour != ovly_soft_icache)
    ret = nonovl_stub;

  return ret;
}

static bool
count_stub (struct spu_link_hash_table *htab,
            bfd *ibfd,
            asection *isec,
            enum _stub_type stub_type,
            struct elf_link_hash_entry *h,
            const Elf_Internal_Rela *irela)
{
  unsigned int ovl = 0;
  struct got_entry *g, **head;
  bfd_vma addend;

  /* If this instruction is a branch or call, we need a stub
     for it.  One stub per function per overlay.
     If it isn't a branch, then we are taking the address of
     this function so need a stub in the non-overlay area
     for it.  One stub per function.  */
  if (stub_type != nonovl_stub)
    ovl = spu_elf_section_data (isec->output_section)->u.o.ovl_index;

  if (h != NULL)
    head = &h->got.glist;
  else
    {
      if (elf_local_got_ents (ibfd) == NULL)
        {
          bfd_size_type amt = (elf_tdata (ibfd)->symtab_hdr.sh_info
                               * sizeof (*elf_local_got_ents (ibfd)));
          elf_local_got_ents (ibfd) = bfd_zmalloc (amt);
          if (elf_local_got_ents (ibfd) == NULL)
            return false;
        }
      head = elf_local_got_ents (ibfd) + ELF32_R_SYM (irela->r_info);
    }

  if (htab->params->ovly_flavour == ovly_soft_icache)
    {
      htab->stub_count[ovl] += 1;
      return true;
    }

  addend = 0;
  if (irela != NULL)
    addend = irela->r_addend;

  if (ovl == 0)
    {
      struct got_entry *gnext;

      for (g = *head; g != NULL; g = g->next)
        if (g->addend == addend && g->ovl == 0)
          break;

      if (g == NULL)
        {
          /* Need a new non-overlay area stub.  Zap other stubs.  */
          for (g = *head; g != NULL; g = gnext)
            {
              gnext = g->next;
              if (g->addend == addend)
                {
                  htab->stub_count[g->ovl] -= 1;
                  free (g);
                }
            }
        }
    }
  else
    {
      for (g = *head; g != NULL; g = g->next)
        if (g->addend == addend && (g->ovl == ovl || g->ovl == 0))
          break;
    }

  if (g == NULL)
    {
      g = bfd_malloc (sizeof *g);
      if (g == NULL)
        return false;
      g->ovl = ovl;
      g->addend = addend;
      g->stub_addr = (bfd_vma) -1;
      g->next = *head;
      *head = g;

      htab->stub_count[ovl] += 1;
    }

  return true;
}

/* Support two sizes of overlay stubs, a slower more compact stub of two
   instructions, and a faster stub of four instructions.
   Soft-icache stubs are four or eight words.  */

static unsigned int
ovl_stub_size (struct spu_elf_params *params)
{
  return 16 << params->ovly_flavour >> params->compact_stub;
}

static unsigned int
ovl_stub_size_log2 (struct spu_elf_params *params)
{
  return 4 + params->ovly_flavour - params->compact_stub;
}
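
/* That is: a normal four-instruction stub is 16 bytes, the compact
   two-instruction form is 8 bytes, and soft-icache stubs are 32 or
   16 bytes (ovly_soft_icache doubles the size, compact_stub halves
   it).  */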

/* Two instruction overlay stubs look like:

   brsl $75,__ovly_load
   .word target_ovl_and_address

   ovl_and_address is a word with the overlay number in the top 14 bits
   and local store address in the bottom 18 bits.

   Four instruction overlay stubs look like:

   ila $78,ovl_number
   lnop
   ila $79,target_address
   br __ovly_load

   Software icache stubs are:

   .word target_index
   .word target_ia;
   .word lrlive_branchlocalstoreaddr;
   brasl $75,__icache_br_handler
   .quad xor_pattern
*/

static bool
build_stub (struct bfd_link_info *info,
            bfd *ibfd,
            asection *isec,
            enum _stub_type stub_type,
            struct elf_link_hash_entry *h,
            const Elf_Internal_Rela *irela,
            bfd_vma dest,
            asection *dest_sec)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  unsigned int ovl, dest_ovl, set_id;
  struct got_entry *g, **head;
  asection *sec;
  bfd_vma addend, from, to, br_dest, patt;
  unsigned int lrlive;

  ovl = 0;
  if (stub_type != nonovl_stub)
    ovl = spu_elf_section_data (isec->output_section)->u.o.ovl_index;

  if (h != NULL)
    head = &h->got.glist;
  else
    head = elf_local_got_ents (ibfd) + ELF32_R_SYM (irela->r_info);

  addend = 0;
  if (irela != NULL)
    addend = irela->r_addend;

  if (htab->params->ovly_flavour == ovly_soft_icache)
    {
      g = bfd_malloc (sizeof *g);
      if (g == NULL)
        return false;
      g->ovl = ovl;
      g->br_addr = 0;
      if (irela != NULL)
        g->br_addr = (irela->r_offset
                      + isec->output_offset
                      + isec->output_section->vma);
      g->next = *head;
      *head = g;
    }
  else
    {
      for (g = *head; g != NULL; g = g->next)
        if (g->addend == addend && (g->ovl == ovl || g->ovl == 0))
          break;
      if (g == NULL)
        abort ();

      if (g->ovl == 0 && ovl != 0)
        return true;

      if (g->stub_addr != (bfd_vma) -1)
        return true;
    }

  sec = htab->stub_sec[ovl];
  dest += dest_sec->output_offset + dest_sec->output_section->vma;
  from = sec->size + sec->output_offset + sec->output_section->vma;
  g->stub_addr = from;
  to = (htab->ovly_entry[0]->root.u.def.value
        + htab->ovly_entry[0]->root.u.def.section->output_offset
        + htab->ovly_entry[0]->root.u.def.section->output_section->vma);

  if (((dest | to | from) & 3) != 0)
    {
      htab->stub_err = 1;
      return false;
    }
  dest_ovl = spu_elf_section_data (dest_sec->output_section)->u.o.ovl_index;

  if (htab->params->ovly_flavour == ovly_normal
      && !htab->params->compact_stub)
    {
      bfd_put_32 (sec->owner, ILA + ((dest_ovl << 7) & 0x01ffff80) + 78,
                  sec->contents + sec->size);
      bfd_put_32 (sec->owner, LNOP,
                  sec->contents + sec->size + 4);
      bfd_put_32 (sec->owner, ILA + ((dest << 7) & 0x01ffff80) + 79,
                  sec->contents + sec->size + 8);
      if (!BRA_STUBS)
        bfd_put_32 (sec->owner, BR + (((to - (from + 12)) << 5) & 0x007fff80),
                    sec->contents + sec->size + 12);
      else
        bfd_put_32 (sec->owner, BRA + ((to << 5) & 0x007fff80),
                    sec->contents + sec->size + 12);
    }
  else if (htab->params->ovly_flavour == ovly_normal
           && htab->params->compact_stub)
    {
      if (!BRA_STUBS)
        bfd_put_32 (sec->owner, BRSL + (((to - from) << 5) & 0x007fff80) + 75,
                    sec->contents + sec->size);
      else
        bfd_put_32 (sec->owner, BRASL + ((to << 5) & 0x007fff80) + 75,
                    sec->contents + sec->size);
      bfd_put_32 (sec->owner, (dest & 0x3ffff) | (dest_ovl << 18),
                  sec->contents + sec->size + 4);
    }
  else if (htab->params->ovly_flavour == ovly_soft_icache
           && htab->params->compact_stub)
    {
      lrlive = 0;
      if (stub_type == nonovl_stub)
        ;
      else if (stub_type == call_ovl_stub)
        /* A brsl makes lr live and *(*sp+16) is live.
           Tail calls have the same liveness.  */
        lrlive = 5;
      else if (!htab->params->lrlive_analysis)
        /* Assume stack frame and lr save.  */
        lrlive = 1;
      else if (irela != NULL)
        {
          /* Analyse branch instructions.  */
          struct function_info *caller;
          bfd_vma off;

          caller = find_function (isec, irela->r_offset, info);
          if (caller->start == NULL)
            off = irela->r_offset;
          else
            {
              struct function_info *found = NULL;

              /* Find the earliest piece of this function that
                 has frame adjusting instructions.  We might
                 see dynamic frame adjustment (eg. for alloca)
                 in some later piece, but functions using
                 alloca always set up a frame earlier.  Frame
                 setup instructions are always in one piece.  */
              if (caller->lr_store != (bfd_vma) -1
                  || caller->sp_adjust != (bfd_vma) -1)
                found = caller;
              while (caller->start != NULL)
                {
                  caller = caller->start;
                  if (caller->lr_store != (bfd_vma) -1
                      || caller->sp_adjust != (bfd_vma) -1)
                    found = caller;
                }
              if (found != NULL)
                caller = found;
              off = (bfd_vma) -1;
            }

          if (off > caller->sp_adjust)
            {
              if (off > caller->lr_store)
                /* Only *(*sp+16) is live.  */
                lrlive = 1;
              else
                /* If no lr save, then we must be in a
                   leaf function with a frame.
                   lr is still live.  */
                lrlive = 4;
            }
          else if (off > caller->lr_store)
            {
              /* Between lr save and stack adjust.  */
              lrlive = 3;
              /* This should never happen since prologues won't
                 be split here.  */
              BFD_ASSERT (0);
            }
          else
            /* On entry to function.  */
            lrlive = 5;

          if (stub_type != br000_ovl_stub
              && lrlive != stub_type - br000_ovl_stub)
            /* xgettext:c-format */
            info->callbacks->einfo (_("%pA:0x%v lrlive .brinfo (%u) differs "
                                      "from analysis (%u)\n"),
                                    isec, irela->r_offset, lrlive,
                                    stub_type - br000_ovl_stub);
        }

      /* If given lrlive info via .brinfo, use it.  */
      if (stub_type > br000_ovl_stub)
        lrlive = stub_type - br000_ovl_stub;

      if (ovl == 0)
        to = (htab->ovly_entry[1]->root.u.def.value
              + htab->ovly_entry[1]->root.u.def.section->output_offset
              + htab->ovly_entry[1]->root.u.def.section->output_section->vma);

      /* The branch that uses this stub goes to stub_addr + 4.  We'll
         set up an xor pattern that can be used by the icache manager
         to modify this branch to go directly to its destination.  */
      g->stub_addr += 4;
      br_dest = g->stub_addr;
      if (irela == NULL)
        {
          /* Except in the case of _SPUEAR_ stubs, the branch in
             question is the one in the stub itself.  */
          BFD_ASSERT (stub_type == nonovl_stub);
          g->br_addr = g->stub_addr;
          br_dest = to;
        }

      set_id = ((dest_ovl - 1) >> htab->num_lines_log2) + 1;
      bfd_put_32 (sec->owner, (set_id << 18) | (dest & 0x3ffff),
                  sec->contents + sec->size);
      bfd_put_32 (sec->owner, BRASL + ((to << 5) & 0x007fff80) + 75,
                  sec->contents + sec->size + 4);
      bfd_put_32 (sec->owner, (lrlive << 29) | (g->br_addr & 0x3ffff),
                  sec->contents + sec->size + 8);
      patt = dest ^ br_dest;
      if (irela != NULL && ELF32_R_TYPE (irela->r_info) == R_SPU_REL16)
        patt = (dest - g->br_addr) ^ (br_dest - g->br_addr);
      bfd_put_32 (sec->owner, (patt << 5) & 0x007fff80,
                  sec->contents + sec->size + 12);

      if (ovl == 0)
        /* Extra space for linked list entries.  */
        sec->size += 16;
    }
  else
    abort ();

  sec->size += ovl_stub_size (htab->params);

  if (htab->params->emit_stub_syms)
    {
      size_t len;
      char *name;
      int add;

      len = 8 + sizeof (".ovl_call.") - 1;
      if (h != NULL)
        len += strlen (h->root.root.string);
      else
        len += 8 + 1 + 8;
      add = 0;
      if (irela != NULL)
        add = (int) irela->r_addend & 0xffffffff;
      if (add != 0)
        len += 1 + 8;
      name = bfd_malloc (len + 1);
      if (name == NULL)
        return false;

      sprintf (name, "%08x.ovl_call.", g->ovl);
      if (h != NULL)
        strcpy (name + 8 + sizeof (".ovl_call.") - 1, h->root.root.string);
      else
        sprintf (name + 8 + sizeof (".ovl_call.") - 1, "%x:%x",
                 dest_sec->id & 0xffffffff,
                 (int) ELF32_R_SYM (irela->r_info) & 0xffffffff);
      if (add != 0)
        sprintf (name + len - 9, "+%x", add);
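      /* For example, a call to foo+0x1c from overlay 2 yields the
         stub symbol "00000002.ovl_call.foo+1c".  */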

      h = elf_link_hash_lookup (&htab->elf, name, true, true, false);
      free (name);
      if (h == NULL)
        return false;
      if (h->root.type == bfd_link_hash_new)
        {
          h->root.type = bfd_link_hash_defined;
          h->root.u.def.section = sec;
          h->size = ovl_stub_size (htab->params);
          h->root.u.def.value = sec->size - h->size;
          h->type = STT_FUNC;
          h->ref_regular = 1;
          h->def_regular = 1;
          h->ref_regular_nonweak = 1;
          h->forced_local = 1;
          h->non_elf = 0;
        }
    }

  return true;
}

/* Called via elf_link_hash_traverse to allocate stubs for any _SPUEAR_
   symbols.  */

static bool
allocate_spuear_stubs (struct elf_link_hash_entry *h, void *inf)
{
  /* Symbols starting with _SPUEAR_ need a stub because they may be
     invoked by the PPU.  */
  struct bfd_link_info *info = inf;
  struct spu_link_hash_table *htab = spu_hash_table (info);
  asection *sym_sec;

  if ((h->root.type == bfd_link_hash_defined
       || h->root.type == bfd_link_hash_defweak)
      && h->def_regular
      && startswith (h->root.root.string, "_SPUEAR_")
      && (sym_sec = h->root.u.def.section) != NULL
      && sym_sec->output_section != bfd_abs_section_ptr
      && spu_elf_section_data (sym_sec->output_section) != NULL
      && (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index != 0
          || htab->params->non_overlay_stubs))
    {
      return count_stub (htab, NULL, NULL, nonovl_stub, h, NULL);
    }

  return true;
}

static bool
build_spuear_stubs (struct elf_link_hash_entry *h, void *inf)
{
  /* Symbols starting with _SPUEAR_ need a stub because they may be
     invoked by the PPU.  */
  struct bfd_link_info *info = inf;
  struct spu_link_hash_table *htab = spu_hash_table (info);
  asection *sym_sec;

  if ((h->root.type == bfd_link_hash_defined
       || h->root.type == bfd_link_hash_defweak)
      && h->def_regular
      && startswith (h->root.root.string, "_SPUEAR_")
      && (sym_sec = h->root.u.def.section) != NULL
      && sym_sec->output_section != bfd_abs_section_ptr
      && spu_elf_section_data (sym_sec->output_section) != NULL
      && (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index != 0
          || htab->params->non_overlay_stubs))
    {
      return build_stub (info, NULL, NULL, nonovl_stub, h, NULL,
                         h->root.u.def.value, sym_sec);
    }

  return true;
}

/* Size or build stubs.  */

static bool
process_stubs (struct bfd_link_info *info, bool build)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  bfd *ibfd;

  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
    {
      extern const bfd_target spu_elf32_vec;
      Elf_Internal_Shdr *symtab_hdr;
      asection *isec;
      Elf_Internal_Sym *local_syms = NULL;

      if (ibfd->xvec != &spu_elf32_vec)
        continue;

      /* We'll need the symbol table in a second.  */
      symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
      if (symtab_hdr->sh_info == 0)
        continue;

      /* Walk over each section attached to the input bfd.  */
      for (isec = ibfd->sections; isec != NULL; isec = isec->next)
        {
          Elf_Internal_Rela *internal_relocs, *irelaend, *irela;

          /* If there aren't any relocs, then there's nothing more to do.  */
          if ((isec->flags & SEC_RELOC) == 0
              || isec->reloc_count == 0)
            continue;

          if (!maybe_needs_stubs (isec))
            continue;

          /* Get the relocs.  */
          internal_relocs = _bfd_elf_link_read_relocs (ibfd, isec, NULL, NULL,
                                                       info->keep_memory);
          if (internal_relocs == NULL)
            goto error_ret_free_local;

          /* Now examine each relocation.  */
          irela = internal_relocs;
          irelaend = irela + isec->reloc_count;
          for (; irela < irelaend; irela++)
            {
              enum elf_spu_reloc_type r_type;
              unsigned int r_indx;
              asection *sym_sec;
              Elf_Internal_Sym *sym;
              struct elf_link_hash_entry *h;
              enum _stub_type stub_type;

              r_type = ELF32_R_TYPE (irela->r_info);
              r_indx = ELF32_R_SYM (irela->r_info);

              if (r_type >= R_SPU_max)
                {
                  bfd_set_error (bfd_error_bad_value);
                error_ret_free_internal:
                  if (elf_section_data (isec)->relocs != internal_relocs)
                    free (internal_relocs);
                error_ret_free_local:
                  if (symtab_hdr->contents != (unsigned char *) local_syms)
                    free (local_syms);
                  return false;
                }

              /* Determine the reloc target section.  */
              if (!get_sym_h (&h, &sym, &sym_sec, &local_syms, r_indx, ibfd))
                goto error_ret_free_internal;

              stub_type = needs_ovl_stub (h, sym, sym_sec, isec, irela,
                                          NULL, info);
              if (stub_type == no_stub)
                continue;
              else if (stub_type == stub_error)
                goto error_ret_free_internal;

              if (htab->stub_count == NULL)
                {
                  bfd_size_type amt;
                  amt = (htab->num_overlays + 1) * sizeof (*htab->stub_count);
                  htab->stub_count = bfd_zmalloc (amt);
                  if (htab->stub_count == NULL)
                    goto error_ret_free_internal;
                }

              if (!build)
                {
                  if (!count_stub (htab, ibfd, isec, stub_type, h, irela))
                    goto error_ret_free_internal;
                }
              else
                {
                  bfd_vma dest;

                  if (h != NULL)
                    dest = h->root.u.def.value;
                  else
                    dest = sym->st_value;
                  dest += irela->r_addend;
                  if (!build_stub (info, ibfd, isec, stub_type, h, irela,
                                   dest, sym_sec))
                    goto error_ret_free_internal;
                }
            }

          /* We're done with the internal relocs, free them.  */
          if (elf_section_data (isec)->relocs != internal_relocs)
            free (internal_relocs);
        }

      if (local_syms != NULL
          && symtab_hdr->contents != (unsigned char *) local_syms)
        {
          if (!info->keep_memory)
            free (local_syms);
          else
            symtab_hdr->contents = (unsigned char *) local_syms;
        }
    }

  return true;
}

/* Allocate space for overlay call and return stubs.
   Return 0 on error, 1 if no overlays, 2 otherwise.  */

int
spu_elf_size_stubs (struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab;
  bfd *ibfd;
  bfd_size_type amt;
  flagword flags;
  unsigned int i;
  asection *stub;

  if (!process_stubs (info, false))
    return 0;

  htab = spu_hash_table (info);
  elf_link_hash_traverse (&htab->elf, allocate_spuear_stubs, info);
  if (htab->stub_err)
    return 0;

  ibfd = info->input_bfds;
  if (htab->stub_count != NULL)
    {
      amt = (htab->num_overlays + 1) * sizeof (*htab->stub_sec);
      htab->stub_sec = bfd_zmalloc (amt);
      if (htab->stub_sec == NULL)
        return 0;

      flags = (SEC_ALLOC | SEC_LOAD | SEC_CODE | SEC_READONLY
               | SEC_HAS_CONTENTS | SEC_IN_MEMORY);
      stub = bfd_make_section_anyway_with_flags (ibfd, ".stub", flags);
      htab->stub_sec[0] = stub;
      if (stub == NULL
          || !bfd_set_section_alignment (stub,
                                         ovl_stub_size_log2 (htab->params)))
        return 0;
      stub->size = htab->stub_count[0] * ovl_stub_size (htab->params);
      if (htab->params->ovly_flavour == ovly_soft_icache)
        /* Extra space for linked list entries.  */
        stub->size += htab->stub_count[0] * 16;

      for (i = 0; i < htab->num_overlays; ++i)
        {
          asection *osec = htab->ovl_sec[i];
          unsigned int ovl = spu_elf_section_data (osec)->u.o.ovl_index;
          stub = bfd_make_section_anyway_with_flags (ibfd, ".stub", flags);
          htab->stub_sec[ovl] = stub;
          if (stub == NULL
              || !bfd_set_section_alignment (stub,
                                             ovl_stub_size_log2 (htab->params)))
            return 0;
          stub->size = htab->stub_count[ovl] * ovl_stub_size (htab->params);
        }
    }

  if (htab->params->ovly_flavour == ovly_soft_icache)
    {
      /* Space for icache manager tables.
         a) Tag array, one quadword per cache line.
         b) Rewrite "to" list, one quadword per cache line.
         c) Rewrite "from" list, one byte per outgoing branch (rounded up to
            a power-of-two number of full quadwords) per cache line.  */

      flags = SEC_ALLOC;
      htab->ovtab = bfd_make_section_anyway_with_flags (ibfd, ".ovtab", flags);
      if (htab->ovtab == NULL
          || !bfd_set_section_alignment (htab->ovtab, 4))
        return 0;

      htab->ovtab->size = (16 + 16 + (16 << htab->fromelem_size_log2))
                          << htab->num_lines_log2;
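      /* For example, with 16 cache lines (num_lines_log2 == 4) and
         fromelem_size_log2 == 0, this is (16 + 16 + 16) * 16
         = 768 bytes of tables.  */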
1738
1739 flags = SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY;
1740 htab->init = bfd_make_section_anyway_with_flags (ibfd, ".ovini", flags);
1741 if (htab->init == NULL
1742 || !bfd_set_section_alignment (htab->init, 4))
1743 return 0;
1744
1745 htab->init->size = 16;
1746 }
1747 else if (htab->stub_count == NULL)
1748 return 1;
1749 else
1750 {
1751 /* htab->ovtab consists of two arrays.
1752 . struct {
1753 . u32 vma;
1754 . u32 size;
1755 . u32 file_off;
1756 . u32 buf;
1757 . } _ovly_table[];
1758 .
1759 . struct {
1760 . u32 mapped;
1761 . } _ovly_buf_table[];
1762 . */
1763
1764 flags = SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY;
1765 htab->ovtab = bfd_make_section_anyway_with_flags (ibfd, ".ovtab", flags);
1766 if (htab->ovtab == NULL
1767 || !bfd_set_section_alignment (htab->ovtab, 4))
1768 return 0;
1769
1770 htab->ovtab->size = htab->num_overlays * 16 + 16 + htab->num_buf * 4;
1771 }
1772
1773 htab->toe = bfd_make_section_anyway_with_flags (ibfd, ".toe", SEC_ALLOC);
1774 if (htab->toe == NULL
1775 || !bfd_set_section_alignment (htab->toe, 4))
1776 return 0;
1777 htab->toe->size = 16;
1778
1779 return 2;
1780 }
1781
1782 /* Called from ld to place overlay manager data sections. This is done
1783 after the overlay manager itself is loaded, mainly so that the
1784 linker's htab->init section is placed after any other .ovl.init
1785 sections. */
1786
1787 void
1788 spu_elf_place_overlay_data (struct bfd_link_info *info)
1789 {
1790 struct spu_link_hash_table *htab = spu_hash_table (info);
1791 unsigned int i;
1792
1793 if (htab->stub_sec != NULL)
1794 {
1795 (*htab->params->place_spu_section) (htab->stub_sec[0], NULL, ".text");
1796
1797 for (i = 0; i < htab->num_overlays; ++i)
1798 {
1799 asection *osec = htab->ovl_sec[i];
1800 unsigned int ovl = spu_elf_section_data (osec)->u.o.ovl_index;
1801 (*htab->params->place_spu_section) (htab->stub_sec[ovl], osec, NULL);
1802 }
1803 }
1804
1805 if (htab->params->ovly_flavour == ovly_soft_icache)
1806 (*htab->params->place_spu_section) (htab->init, NULL, ".ovl.init");
1807
1808 if (htab->ovtab != NULL)
1809 {
1810 const char *ovout = ".data";
1811 if (htab->params->ovly_flavour == ovly_soft_icache)
1812 ovout = ".bss";
1813 (*htab->params->place_spu_section) (htab->ovtab, NULL, ovout);
1814 }
1815
1816 if (htab->toe != NULL)
1817 (*htab->params->place_spu_section) (htab->toe, NULL, ".toe");
1818 }
1819
1820 /* Functions to handle embedded spu_ovl.o object. */
1821
1822 static void *
1823 ovl_mgr_open (struct bfd *nbfd ATTRIBUTE_UNUSED, void *stream)
1824 {
1825 return stream;
1826 }
1827
1828 static file_ptr
1829 ovl_mgr_pread (struct bfd *abfd ATTRIBUTE_UNUSED,
1830 void *stream,
1831 void *buf,
1832 file_ptr nbytes,
1833 file_ptr offset)
1834 {
1835 struct _ovl_stream *os;
1836 size_t count;
1837 size_t max;
1838
1839 os = (struct _ovl_stream *) stream;
1840 max = (const char *) os->end - (const char *) os->start;
1841
1842 if ((ufile_ptr) offset >= max)
1843 return 0;
1844
1845 count = nbytes;
1846 if (count > max - offset)
1847 count = max - offset;
1848
1849 memcpy (buf, (const char *) os->start + offset, count);
1850 return count;
1851 }
1852
1853 static int
1854 ovl_mgr_stat (struct bfd *abfd ATTRIBUTE_UNUSED,
1855 void *stream,
1856 struct stat *sb)
1857 {
1858 struct _ovl_stream *os = (struct _ovl_stream *) stream;
1859
1860 memset (sb, 0, sizeof (*sb));
1861 sb->st_size = (const char *) os->end - (const char *) os->start;
1862 return 0;
1863 }
1864
1865 bool
1866 spu_elf_open_builtin_lib (bfd **ovl_bfd, const struct _ovl_stream *stream)
1867 {
1868 *ovl_bfd = bfd_openr_iovec ("builtin ovl_mgr",
1869 "elf32-spu",
1870 ovl_mgr_open,
1871 (void *) stream,
1872 ovl_mgr_pread,
1873 NULL,
1874 ovl_mgr_stat);
1875 return *ovl_bfd != NULL;
1876 }
1877
1878 static unsigned int
1879 overlay_index (asection *sec)
1880 {
1881 if (sec == NULL
1882 || sec->output_section == bfd_abs_section_ptr)
1883 return 0;
1884 return spu_elf_section_data (sec->output_section)->u.o.ovl_index;
1885 }
1886
1887 /* Define an STT_OBJECT symbol. */
1888
1889 static struct elf_link_hash_entry *
1890 define_ovtab_symbol (struct spu_link_hash_table *htab, const char *name)
1891 {
1892 struct elf_link_hash_entry *h;
1893
1894 h = elf_link_hash_lookup (&htab->elf, name, true, false, false);
1895 if (h == NULL)
1896 return NULL;
1897
1898 if (h->root.type != bfd_link_hash_defined
1899 || !h->def_regular)
1900 {
1901 h->root.type = bfd_link_hash_defined;
1902 h->root.u.def.section = htab->ovtab;
1903 h->type = STT_OBJECT;
1904 h->ref_regular = 1;
1905 h->def_regular = 1;
1906 h->ref_regular_nonweak = 1;
1907 h->non_elf = 0;
1908 }
1909 else if (h->root.u.def.section->owner != NULL)
1910 {
1911 /* xgettext:c-format */
1912 _bfd_error_handler (_("%pB is not allowed to define %s"),
1913 h->root.u.def.section->owner,
1914 h->root.root.string);
1915 bfd_set_error (bfd_error_bad_value);
1916 return NULL;
1917 }
1918 else
1919 {
1920 _bfd_error_handler (_("you are not allowed to define %s in a script"),
1921 h->root.root.string);
1922 bfd_set_error (bfd_error_bad_value);
1923 return NULL;
1924 }
1925
1926 return h;
1927 }
1928
1929 /* Fill in all stubs and the overlay tables. */
1930
1931 static bool
1932 spu_elf_build_stubs (struct bfd_link_info *info)
1933 {
1934 struct spu_link_hash_table *htab = spu_hash_table (info);
1935 struct elf_link_hash_entry *h;
1936 bfd_byte *p;
1937 asection *s;
1938 bfd *obfd;
1939 unsigned int i;
1940
1941 if (htab->num_overlays != 0)
1942 {
1943 for (i = 0; i < 2; i++)
1944 {
1945 h = htab->ovly_entry[i];
1946 if (h != NULL
1947 && (h->root.type == bfd_link_hash_defined
1948 || h->root.type == bfd_link_hash_defweak)
1949 && h->def_regular)
1950 {
1951 s = h->root.u.def.section->output_section;
1952 if (spu_elf_section_data (s)->u.o.ovl_index)
1953 {
1954 _bfd_error_handler (_("%s in overlay section"),
1955 h->root.root.string);
1956 bfd_set_error (bfd_error_bad_value);
1957 return false;
1958 }
1959 }
1960 }
1961 }
1962
1963 if (htab->stub_sec != NULL)
1964 {
1965 for (i = 0; i <= htab->num_overlays; i++)
1966 if (htab->stub_sec[i]->size != 0)
1967 {
1968 htab->stub_sec[i]->contents = bfd_zalloc (htab->stub_sec[i]->owner,
1969 htab->stub_sec[i]->size);
1970 if (htab->stub_sec[i]->contents == NULL)
1971 return false;
1972 htab->stub_sec[i]->rawsize = htab->stub_sec[i]->size;
1973 htab->stub_sec[i]->size = 0;
1974 }
1975
1976 /* Fill in all the stubs. */
1977 process_stubs (info, true);
1978 if (!htab->stub_err)
1979 elf_link_hash_traverse (&htab->elf, build_spuear_stubs, info);
1980
1981 if (htab->stub_err)
1982 {
1983 _bfd_error_handler (_("overlay stub relocation overflow"));
1984 bfd_set_error (bfd_error_bad_value);
1985 return false;
1986 }
1987
1988 for (i = 0; i <= htab->num_overlays; i++)
1989 {
1990 if (htab->stub_sec[i]->size != htab->stub_sec[i]->rawsize)
1991 {
1992 _bfd_error_handler (_("stubs don't match calculated size"));
1993 bfd_set_error (bfd_error_bad_value);
1994 return false;
1995 }
1996 htab->stub_sec[i]->rawsize = 0;
1997 }
1998 }
1999
2000 if (htab->ovtab == NULL || htab->ovtab->size == 0)
2001 return true;
2002
2003 htab->ovtab->contents = bfd_zalloc (htab->ovtab->owner, htab->ovtab->size);
2004 if (htab->ovtab->contents == NULL)
2005 return false;
2006
2007 p = htab->ovtab->contents;
2008 if (htab->params->ovly_flavour == ovly_soft_icache)
2009 {
2010 bfd_vma off;
2011
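/* Layout sketch of the soft-icache ovtab defined below (derived
   from the offsets used in this block; sizes are in bytes):

     offset 0:  __icache_tag_array     16 << num_lines_log2
     then:      __icache_rewrite_to    16 << num_lines_log2
     then:      __icache_rewrite_from  16 << (fromelem_size_log2
                                              + num_lines_log2)

   Most of the remaining __icache_* symbols are absolute constants
   describing the cache geometry; __icache_fileoff (defined further
   down) lives in htab->init instead.  */
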
2012 h = define_ovtab_symbol (htab, "__icache_tag_array");
2013 if (h == NULL)
2014 return false;
2015 h->root.u.def.value = 0;
2016 h->size = 16 << htab->num_lines_log2;
2017 off = h->size;
2018
2019 h = define_ovtab_symbol (htab, "__icache_tag_array_size");
2020 if (h == NULL)
2021 return false;
2022 h->root.u.def.value = 16 << htab->num_lines_log2;
2023 h->root.u.def.section = bfd_abs_section_ptr;
2024
2025 h = define_ovtab_symbol (htab, "__icache_rewrite_to");
2026 if (h == NULL)
2027 return false;
2028 h->root.u.def.value = off;
2029 h->size = 16 << htab->num_lines_log2;
2030 off += h->size;
2031
2032 h = define_ovtab_symbol (htab, "__icache_rewrite_to_size");
2033 if (h == NULL)
2034 return false;
2035 h->root.u.def.value = 16 << htab->num_lines_log2;
2036 h->root.u.def.section = bfd_abs_section_ptr;
2037
2038 h = define_ovtab_symbol (htab, "__icache_rewrite_from");
2039 if (h == NULL)
2040 return false;
2041 h->root.u.def.value = off;
2042 h->size = 16 << (htab->fromelem_size_log2 + htab->num_lines_log2);
2043 off += h->size;
2044
2045 h = define_ovtab_symbol (htab, "__icache_rewrite_from_size");
2046 if (h == NULL)
2047 return false;
2048 h->root.u.def.value = 16 << (htab->fromelem_size_log2
2049 + htab->num_lines_log2);
2050 h->root.u.def.section = bfd_abs_section_ptr;
2051
2052 h = define_ovtab_symbol (htab, "__icache_log2_fromelemsize");
2053 if (h == NULL)
2054 return false;
2055 h->root.u.def.value = htab->fromelem_size_log2;
2056 h->root.u.def.section = bfd_abs_section_ptr;
2057
2058 h = define_ovtab_symbol (htab, "__icache_base");
2059 if (h == NULL)
2060 return false;
2061 h->root.u.def.value = htab->ovl_sec[0]->vma;
2062 h->root.u.def.section = bfd_abs_section_ptr;
2063 h->size = htab->num_buf << htab->line_size_log2;
2064
2065 h = define_ovtab_symbol (htab, "__icache_linesize");
2066 if (h == NULL)
2067 return false;
2068 h->root.u.def.value = 1 << htab->line_size_log2;
2069 h->root.u.def.section = bfd_abs_section_ptr;
2070
2071 h = define_ovtab_symbol (htab, "__icache_log2_linesize");
2072 if (h == NULL)
2073 return false;
2074 h->root.u.def.value = htab->line_size_log2;
2075 h->root.u.def.section = bfd_abs_section_ptr;
2076
2077 h = define_ovtab_symbol (htab, "__icache_neg_log2_linesize");
2078 if (h == NULL)
2079 return false;
2080 h->root.u.def.value = -htab->line_size_log2;
2081 h->root.u.def.section = bfd_abs_section_ptr;
2082
2083 h = define_ovtab_symbol (htab, "__icache_cachesize");
2084 if (h == NULL)
2085 return false;
2086 h->root.u.def.value = 1 << (htab->num_lines_log2 + htab->line_size_log2);
2087 h->root.u.def.section = bfd_abs_section_ptr;
2088
2089 h = define_ovtab_symbol (htab, "__icache_log2_cachesize");
2090 if (h == NULL)
2091 return false;
2092 h->root.u.def.value = htab->num_lines_log2 + htab->line_size_log2;
2093 h->root.u.def.section = bfd_abs_section_ptr;
2094
2095 h = define_ovtab_symbol (htab, "__icache_neg_log2_cachesize");
2096 if (h == NULL)
2097 return false;
2098 h->root.u.def.value = -(htab->num_lines_log2 + htab->line_size_log2);
2099 h->root.u.def.section = bfd_abs_section_ptr;
2100
2101 if (htab->init != NULL && htab->init->size != 0)
2102 {
2103 htab->init->contents = bfd_zalloc (htab->init->owner,
2104 htab->init->size);
2105 if (htab->init->contents == NULL)
2106 return false;
2107
2108 h = define_ovtab_symbol (htab, "__icache_fileoff");
2109 if (h == NULL)
2110 return false;
2111 h->root.u.def.value = 0;
2112 h->root.u.def.section = htab->init;
2113 h->size = 8;
2114 }
2115 }
2116 else
2117 {
2118 /* Write out _ovly_table. */
2119 /* Set low bit of .size to mark non-overlay area as present. */
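/* Each table entry is 16 bytes, as if declared (illustrative
   sketch only, not a type used elsewhere):

     struct ovly_entry
     {
       unsigned int vma;       -- overlay load address
       unsigned int size;      -- rounded up to 16
       unsigned int file_off;  -- filled in by spu_elf_modify_headers
       unsigned int buf;       -- 1-based overlay buffer number
     };

   Entry 0 covers the non-overlay area (the p[7] = 1 below sets the
   low bit of its size word); real overlays start at offset 16,
   which is why _ovly_table is given the value 16 further down.  */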
2120 p[7] = 1;
2121 obfd = htab->ovtab->output_section->owner;
2122 for (s = obfd->sections; s != NULL; s = s->next)
2123 {
2124 unsigned int ovl_index = spu_elf_section_data (s)->u.o.ovl_index;
2125
2126 if (ovl_index != 0)
2127 {
2128 unsigned long off = ovl_index * 16;
2129 unsigned int ovl_buf = spu_elf_section_data (s)->u.o.ovl_buf;
2130
2131 bfd_put_32 (htab->ovtab->owner, s->vma, p + off);
2132 bfd_put_32 (htab->ovtab->owner, (s->size + 15) & -16,
2133 p + off + 4);
2134 /* file_off written later in spu_elf_modify_headers. */
2135 bfd_put_32 (htab->ovtab->owner, ovl_buf, p + off + 12);
2136 }
2137 }
2138
2139 h = define_ovtab_symbol (htab, "_ovly_table");
2140 if (h == NULL)
2141 return false;
2142 h->root.u.def.value = 16;
2143 h->size = htab->num_overlays * 16;
2144
2145 h = define_ovtab_symbol (htab, "_ovly_table_end");
2146 if (h == NULL)
2147 return false;
2148 h->root.u.def.value = htab->num_overlays * 16 + 16;
2149 h->size = 0;
2150
2151 h = define_ovtab_symbol (htab, "_ovly_buf_table");
2152 if (h == NULL)
2153 return false;
2154 h->root.u.def.value = htab->num_overlays * 16 + 16;
2155 h->size = htab->num_buf * 4;
2156
2157 h = define_ovtab_symbol (htab, "_ovly_buf_table_end");
2158 if (h == NULL)
2159 return false;
2160 h->root.u.def.value = htab->num_overlays * 16 + 16 + htab->num_buf * 4;
2161 h->size = 0;
2162 }
2163
2164 h = define_ovtab_symbol (htab, "_EAR_");
2165 if (h == NULL)
2166 return false;
2167 h->root.u.def.section = htab->toe;
2168 h->root.u.def.value = 0;
2169 h->size = 16;
2170
2171 return true;
2172 }
2173
2174 /* Check that all loadable section VMAs lie in the range
2175 LO .. HI inclusive, and stash some parameters for --auto-overlay. */
2176
2177 asection *
2178 spu_elf_check_vma (struct bfd_link_info *info)
2179 {
2180 struct elf_segment_map *m;
2181 unsigned int i;
2182 struct spu_link_hash_table *htab = spu_hash_table (info);
2183 bfd *abfd = info->output_bfd;
2184 bfd_vma hi = htab->params->local_store_hi;
2185 bfd_vma lo = htab->params->local_store_lo;
2186
2187 htab->local_store = hi + 1 - lo;
2188
2189 for (m = elf_seg_map (abfd); m != NULL; m = m->next)
2190 if (m->p_type == PT_LOAD)
2191 for (i = 0; i < m->count; i++)
2192 if (m->sections[i]->size != 0
2193 && (m->sections[i]->vma < lo
2194 || m->sections[i]->vma > hi
2195 || m->sections[i]->vma + m->sections[i]->size - 1 > hi))
2196 return m->sections[i];
2197
2198 return NULL;
2199 }
2200
2201 /* OFFSET in SEC (presumably) is the beginning of a function prologue.
2202 Search for stack adjusting insns, and return the sp delta.
2203 If a store of lr is found save the instruction offset to *LR_STORE.
2204 If a stack adjusting instruction is found, save that offset to
2205 *SP_ADJUST. */
2206
2207 static int
2208 find_function_stack_adjust (asection *sec,
2209 bfd_vma offset,
2210 bfd_vma *lr_store,
2211 bfd_vma *sp_adjust)
2212 {
2213 int32_t reg[128];
2214
2215 memset (reg, 0, sizeof (reg));
2216 for ( ; offset + 4 <= sec->size; offset += 4)
2217 {
2218 unsigned char buf[4];
2219 int rt, ra;
2220 uint32_t imm;
2221
2222 /* Assume no relocs on stack adjusting insns. */
2223 if (!bfd_get_section_contents (sec->owner, sec, buf, offset, 4))
2224 break;
2225
2226 rt = buf[3] & 0x7f;
2227 ra = ((buf[2] & 0x3f) << 1) | (buf[3] >> 7);
2228
2229 if (buf[0] == 0x24 /* stqd */)
2230 {
2231 if (rt == 0 /* lr */ && ra == 1 /* sp */)
2232 *lr_store = offset;
2233 continue;
2234 }
2235
2236 /* Partly decoded immediate field. */
2237 imm = (buf[1] << 9) | (buf[2] << 1) | (buf[3] >> 7);
2238
2239 if (buf[0] == 0x1c /* ai */)
2240 {
2241 imm >>= 7;
2242 imm = (imm ^ 0x200) - 0x200;
2243 reg[rt] = reg[ra] + imm;
2244
2245 if (rt == 1 /* sp */)
2246 {
2247 if (reg[rt] > 0)
2248 break;
2249 *sp_adjust = offset;
2250 return reg[rt];
2251 }
2252 }
2253 else if (buf[0] == 0x18 && (buf[1] & 0xe0) == 0 /* a */)
2254 {
2255 int rb = ((buf[1] & 0x1f) << 2) | ((buf[2] & 0xc0) >> 6);
2256
2257 reg[rt] = reg[ra] + reg[rb];
2258 if (rt == 1)
2259 {
2260 if (reg[rt] > 0)
2261 break;
2262 *sp_adjust = offset;
2263 return reg[rt];
2264 }
2265 }
2266 else if (buf[0] == 0x08 && (buf[1] & 0xe0) == 0 /* sf */)
2267 {
2268 int rb = ((buf[1] & 0x1f) << 2) | ((buf[2] & 0xc0) >> 6);
2269
2270 reg[rt] = reg[rb] - reg[ra];
2271 if (rt == 1)
2272 {
2273 if (reg[rt] > 0)
2274 break;
2275 *sp_adjust = offset;
2276 return reg[rt];
2277 }
2278 }
2279 else if ((buf[0] & 0xfc) == 0x40 /* il, ilh, ilhu, ila */)
2280 {
2281 if (buf[0] >= 0x42 /* ila */)
2282 imm |= (buf[0] & 1) << 17;
2283 else
2284 {
2285 imm &= 0xffff;
2286
2287 if (buf[0] == 0x40 /* il */)
2288 {
2289 if ((buf[1] & 0x80) == 0)
2290 continue;
2291 imm = (imm ^ 0x8000) - 0x8000;
2292 }
2293 else if ((buf[1] & 0x80) == 0 /* ilhu */)
2294 imm <<= 16;
2295 }
2296 reg[rt] = imm;
2297 continue;
2298 }
2299 else if (buf[0] == 0x60 && (buf[1] & 0x80) != 0 /* iohl */)
2300 {
2301 reg[rt] |= imm & 0xffff;
2302 continue;
2303 }
2304 else if (buf[0] == 0x04 /* ori */)
2305 {
2306 imm >>= 7;
2307 imm = (imm ^ 0x200) - 0x200;
2308 reg[rt] = reg[ra] | imm;
2309 continue;
2310 }
2311 else if (buf[0] == 0x32 && (buf[1] & 0x80) != 0 /* fsmbi */)
2312 {
2313 reg[rt] = ( ((imm & 0x8000) ? 0xff000000 : 0)
2314 | ((imm & 0x4000) ? 0x00ff0000 : 0)
2315 | ((imm & 0x2000) ? 0x0000ff00 : 0)
2316 | ((imm & 0x1000) ? 0x000000ff : 0));
2317 continue;
2318 }
2319 else if (buf[0] == 0x16 /* andbi */)
2320 {
2321 imm >>= 7;
2322 imm &= 0xff;
2323 imm |= imm << 8;
2324 imm |= imm << 16;
2325 reg[rt] = reg[ra] & imm;
2326 continue;
2327 }
2328 else if (buf[0] == 0x33 && imm == 1 /* brsl .+4 */)
2329 {
2330 /* Used in pic reg load. Say rt is trashed. Won't be used
2331 in stack adjust, but we need to continue past this branch. */
2332 reg[rt] = 0;
2333 continue;
2334 }
2335 else if (is_branch (buf) || is_indirect_branch (buf))
2336 /* If we hit a branch then we must be out of the prologue. */
2337 break;
2338 }
2339
2340 return 0;
2341 }
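
/* Worked example (mine, not from the sources): the RI10 insn
   "ai $sp,$sp,-32" assembles to the big-endian bytes 1c f8 00 81.
   The decode above then gives
     rt  = 0x81 & 0x7f                        = 1 (sp)
     ra  = ((0x00 & 0x3f) << 1) | (0x81 >> 7) = 1 (sp)
     imm = (0xf8 << 9) | (0x00 << 1) | 1      = 0x1f001
   and the "ai" case computes (0x1f001 >> 7) = 0x3e0, sign-extends
   it via (0x3e0 ^ 0x200) - 0x200 = -32, sets reg[1] = -32, records
   *SP_ADJUST, and returns -32.  */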
2342
2343 /* qsort predicate to sort symbols by section and value. */
2344
2345 static Elf_Internal_Sym *sort_syms_syms;
2346 static asection **sort_syms_psecs;
2347
2348 static int
2349 sort_syms (const void *a, const void *b)
2350 {
2351 Elf_Internal_Sym *const *s1 = a;
2352 Elf_Internal_Sym *const *s2 = b;
2353 asection *sec1, *sec2;
2354 bfd_signed_vma delta;
2355
2356 sec1 = sort_syms_psecs[*s1 - sort_syms_syms];
2357 sec2 = sort_syms_psecs[*s2 - sort_syms_syms];
2358
2359 if (sec1 != sec2)
2360 return sec1->index - sec2->index;
2361
2362 delta = (*s1)->st_value - (*s2)->st_value;
2363 if (delta != 0)
2364 return delta < 0 ? -1 : 1;
2365
2366 delta = (*s2)->st_size - (*s1)->st_size;
2367 if (delta != 0)
2368 return delta < 0 ? -1 : 1;
2369
2370 return *s1 < *s2 ? -1 : 1;
2371 }
2372
2373 /* Allocate a struct spu_elf_stack_info with MAX_FUN struct function_info
2374 entries for section SEC. */
2375
2376 static struct spu_elf_stack_info *
2377 alloc_stack_info (asection *sec, int max_fun)
2378 {
2379 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
2380 bfd_size_type amt;
2381
2382 amt = sizeof (struct spu_elf_stack_info);
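/* The "- 1" below allows for the one-element fun[] array that ends
   struct spu_elf_stack_info (the pre-C99 flexible array idiom);
   maybe_insert_function grows the array using the same sum.  */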
2383 amt += (max_fun - 1) * sizeof (struct function_info);
2384 sec_data->u.i.stack_info = bfd_zmalloc (amt);
2385 if (sec_data->u.i.stack_info != NULL)
2386 sec_data->u.i.stack_info->max_fun = max_fun;
2387 return sec_data->u.i.stack_info;
2388 }
2389
2390 /* Add a new struct function_info describing a (part of a) function
2391 starting at SYM_H. Keep the array sorted by address. */
2392
2393 static struct function_info *
2394 maybe_insert_function (asection *sec,
2395 void *sym_h,
2396 bool global,
2397 bool is_func)
2398 {
2399 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
2400 struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
2401 int i;
2402 bfd_vma off, size;
2403
2404 if (sinfo == NULL)
2405 {
2406 sinfo = alloc_stack_info (sec, 20);
2407 if (sinfo == NULL)
2408 return NULL;
2409 }
2410
2411 if (!global)
2412 {
2413 Elf_Internal_Sym *sym = sym_h;
2414 off = sym->st_value;
2415 size = sym->st_size;
2416 }
2417 else
2418 {
2419 struct elf_link_hash_entry *h = sym_h;
2420 off = h->root.u.def.value;
2421 size = h->size;
2422 }
2423
2424 for (i = sinfo->num_fun; --i >= 0; )
2425 if (sinfo->fun[i].lo <= off)
2426 break;
2427
2428 if (i >= 0)
2429 {
2430 /* Don't add another entry for an alias, but do update some
2431 info. */
2432 if (sinfo->fun[i].lo == off)
2433 {
2434 /* Prefer globals over local syms. */
2435 if (global && !sinfo->fun[i].global)
2436 {
2437 sinfo->fun[i].global = true;
2438 sinfo->fun[i].u.h = sym_h;
2439 }
2440 if (is_func)
2441 sinfo->fun[i].is_func = true;
2442 return &sinfo->fun[i];
2443 }
2444 /* Ignore a zero-size symbol inside an existing function. */
2445 else if (sinfo->fun[i].hi > off && size == 0)
2446 return &sinfo->fun[i];
2447 }
2448
2449 if (sinfo->num_fun >= sinfo->max_fun)
2450 {
2451 bfd_size_type amt = sizeof (struct spu_elf_stack_info);
2452 bfd_size_type old = amt;
2453
2454 old += (sinfo->max_fun - 1) * sizeof (struct function_info);
2455 sinfo->max_fun += 20 + (sinfo->max_fun >> 1);
2456 amt += (sinfo->max_fun - 1) * sizeof (struct function_info);
2457 sinfo = bfd_realloc (sinfo, amt);
2458 if (sinfo == NULL)
2459 return NULL;
2460 memset ((char *) sinfo + old, 0, amt - old);
2461 sec_data->u.i.stack_info = sinfo;
2462 }
2463
2464 if (++i < sinfo->num_fun)
2465 memmove (&sinfo->fun[i + 1], &sinfo->fun[i],
2466 (sinfo->num_fun - i) * sizeof (sinfo->fun[i]));
2467 sinfo->fun[i].is_func = is_func;
2468 sinfo->fun[i].global = global;
2469 sinfo->fun[i].sec = sec;
2470 if (global)
2471 sinfo->fun[i].u.h = sym_h;
2472 else
2473 sinfo->fun[i].u.sym = sym_h;
2474 sinfo->fun[i].lo = off;
2475 sinfo->fun[i].hi = off + size;
2476 sinfo->fun[i].lr_store = -1;
2477 sinfo->fun[i].sp_adjust = -1;
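/* find_function_stack_adjust returns a non-positive sp delta;
   negate it so the stack field counts bytes of stack used.  */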
2478 sinfo->fun[i].stack = -find_function_stack_adjust (sec, off,
2479 &sinfo->fun[i].lr_store,
2480 &sinfo->fun[i].sp_adjust);
2481 sinfo->num_fun += 1;
2482 return &sinfo->fun[i];
2483 }
2484
2485 /* Return the name of FUN. */
2486
2487 static const char *
2488 func_name (struct function_info *fun)
2489 {
2490 asection *sec;
2491 bfd *ibfd;
2492 Elf_Internal_Shdr *symtab_hdr;
2493
2494 while (fun->start != NULL)
2495 fun = fun->start;
2496
2497 if (fun->global)
2498 return fun->u.h->root.root.string;
2499
2500 sec = fun->sec;
2501 if (fun->u.sym->st_name == 0)
2502 {
2503 size_t len = strlen (sec->name);
2504 char *name = bfd_malloc (len + 10);
2505 if (name == NULL)
2506 return "(null)";
2507 sprintf (name, "%s+%lx", sec->name,
2508 (unsigned long) fun->u.sym->st_value & 0xffffffff);
2509 return name;
2510 }
2511 ibfd = sec->owner;
2512 symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
2513 return bfd_elf_sym_name (ibfd, symtab_hdr, fun->u.sym, sec);
2514 }
2515
2516 /* Read the instruction at OFF in SEC. Return true iff the instruction
2517 is a nop, lnop, or stop 0 (all zero insn). */
2518
2519 static bool
2520 is_nop (asection *sec, bfd_vma off)
2521 {
2522 unsigned char insn[4];
2523
2524 if (off + 4 > sec->size
2525 || !bfd_get_section_contents (sec->owner, sec, insn, off, 4))
2526 return false;
2527 if ((insn[0] & 0xbf) == 0 && (insn[1] & 0xe0) == 0x20)
2528 return true;
2529 if (insn[0] == 0 && insn[1] == 0 && insn[2] == 0 && insn[3] == 0)
2530 return true;
2531 return false;
2532 }
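
/* For reference: "nop" encodes as 0x40200000 and "lnop" as
   0x00200000, both matched by the mask test above, while the
   all-zero word is "stop 0".  */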
2533
2534 /* Extend the range of FUN to cover nop padding up to LIMIT.
2535 Return TRUE iff some instruction other than a NOP was found. */
2536
2537 static bool
2538 insns_at_end (struct function_info *fun, bfd_vma limit)
2539 {
2540 bfd_vma off = (fun->hi + 3) & -4;
2541
2542 while (off < limit && is_nop (fun->sec, off))
2543 off += 4;
2544 if (off < limit)
2545 {
2546 fun->hi = off;
2547 return true;
2548 }
2549 fun->hi = limit;
2550 return false;
2551 }
2552
2553 /* Check and fix overlapping function ranges. Return TRUE iff there
2554 are gaps in the current info we have about functions in SEC. */
2555
2556 static bool
2557 check_function_ranges (asection *sec, struct bfd_link_info *info)
2558 {
2559 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
2560 struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
2561 int i;
2562 bool gaps = false;
2563
2564 if (sinfo == NULL)
2565 return false;
2566
2567 for (i = 1; i < sinfo->num_fun; i++)
2568 if (sinfo->fun[i - 1].hi > sinfo->fun[i].lo)
2569 {
2570 /* Fix overlapping symbols. */
2571 const char *f1 = func_name (&sinfo->fun[i - 1]);
2572 const char *f2 = func_name (&sinfo->fun[i]);
2573
2574 /* xgettext:c-format */
2575 info->callbacks->einfo (_("warning: %s overlaps %s\n"), f1, f2);
2576 sinfo->fun[i - 1].hi = sinfo->fun[i].lo;
2577 }
2578 else if (insns_at_end (&sinfo->fun[i - 1], sinfo->fun[i].lo))
2579 gaps = true;
2580
2581 if (sinfo->num_fun == 0)
2582 gaps = true;
2583 else
2584 {
2585 if (sinfo->fun[0].lo != 0)
2586 gaps = true;
2587 if (sinfo->fun[sinfo->num_fun - 1].hi > sec->size)
2588 {
2589 const char *f1 = func_name (&sinfo->fun[sinfo->num_fun - 1]);
2590
2591 info->callbacks->einfo (_("warning: %s exceeds section size\n"), f1);
2592 sinfo->fun[sinfo->num_fun - 1].hi = sec->size;
2593 }
2594 else if (insns_at_end (&sinfo->fun[sinfo->num_fun - 1], sec->size))
2595 gaps = true;
2596 }
2597 return gaps;
2598 }
2599
2600 /* Search current function info for a function that contains address
2601 OFFSET in section SEC. */
2602
2603 static struct function_info *
2604 find_function (asection *sec, bfd_vma offset, struct bfd_link_info *info)
2605 {
2606 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
2607 struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
2608 int lo, hi, mid;
2609
2610 lo = 0;
2611 hi = sinfo->num_fun;
2612 while (lo < hi)
2613 {
2614 mid = (lo + hi) / 2;
2615 if (offset < sinfo->fun[mid].lo)
2616 hi = mid;
2617 else if (offset >= sinfo->fun[mid].hi)
2618 lo = mid + 1;
2619 else
2620 return &sinfo->fun[mid];
2621 }
2622 /* xgettext:c-format */
2623 info->callbacks->einfo (_("%pA:0x%v not found in function table\n"),
2624 sec, offset);
2625 bfd_set_error (bfd_error_bad_value);
2626 return NULL;
2627 }
2628
2629 /* Add CALLEE to CALLER's call list if not already present. Return
2630 TRUE if CALLEE was new. If this function returns FALSE, CALLEE should
2631 be freed. */
2632
2633 static bool
2634 insert_callee (struct function_info *caller, struct call_info *callee)
2635 {
2636 struct call_info **pp, *p;
2637
2638 for (pp = &caller->call_list; (p = *pp) != NULL; pp = &p->next)
2639 if (p->fun == callee->fun)
2640 {
2641 /* Tail calls use less stack than normal calls. Retain entry
2642 for normal call over one for tail call. */
2643 p->is_tail &= callee->is_tail;
2644 if (!p->is_tail)
2645 {
2646 p->fun->start = NULL;
2647 p->fun->is_func = true;
2648 }
2649 p->count += callee->count;
2650 /* Reorder list so most recent call is first. */
2651 *pp = p->next;
2652 p->next = caller->call_list;
2653 caller->call_list = p;
2654 return false;
2655 }
2656 callee->next = caller->call_list;
2657 caller->call_list = callee;
2658 return true;
2659 }
2660
2661 /* Copy CALL and insert the copy into CALLER. */
2662
2663 static bool
2664 copy_callee (struct function_info *caller, const struct call_info *call)
2665 {
2666 struct call_info *callee;
2667 callee = bfd_malloc (sizeof (*callee));
2668 if (callee == NULL)
2669 return false;
2670 *callee = *call;
2671 if (!insert_callee (caller, callee))
2672 free (callee);
2673 return true;
2674 }
2675
2676 /* We're only interested in code sections. Testing SEC_IN_MEMORY excludes
2677 overlay stub sections. */
2678
2679 static bool
2680 interesting_section (asection *s)
2681 {
2682 return (s->output_section != bfd_abs_section_ptr
2683 && ((s->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE | SEC_IN_MEMORY))
2684 == (SEC_ALLOC | SEC_LOAD | SEC_CODE))
2685 && s->size != 0);
2686 }
2687
2688 /* Rummage through the relocs for SEC, looking for function calls.
2689 If CALL_TREE is true, fill in the call graph. If CALL_TREE is false,
2690 mark destination symbols on calls as being functions. Also
2691 look at branches, which may be tail calls or branches to the
2692 hot/cold part of the same function. */
2693
2694 static bool
2695 mark_functions_via_relocs (asection *sec,
2696 struct bfd_link_info *info,
2697 int call_tree)
2698 {
2699 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
2700 Elf_Internal_Shdr *symtab_hdr;
2701 void *psyms;
2702 unsigned int priority = 0;
2703 static bool warned;
2704
2705 if (!interesting_section (sec)
2706 || sec->reloc_count == 0)
2707 return true;
2708
2709 internal_relocs = _bfd_elf_link_read_relocs (sec->owner, sec, NULL, NULL,
2710 info->keep_memory);
2711 if (internal_relocs == NULL)
2712 return false;
2713
2714 symtab_hdr = &elf_tdata (sec->owner)->symtab_hdr;
2715 psyms = &symtab_hdr->contents;
2716 irela = internal_relocs;
2717 irelaend = irela + sec->reloc_count;
2718 for (; irela < irelaend; irela++)
2719 {
2720 enum elf_spu_reloc_type r_type;
2721 unsigned int r_indx;
2722 asection *sym_sec;
2723 Elf_Internal_Sym *sym;
2724 struct elf_link_hash_entry *h;
2725 bfd_vma val;
2726 bool nonbranch, is_call;
2727 struct function_info *caller;
2728 struct call_info *callee;
2729
2730 r_type = ELF32_R_TYPE (irela->r_info);
2731 nonbranch = r_type != R_SPU_REL16 && r_type != R_SPU_ADDR16;
2732
2733 r_indx = ELF32_R_SYM (irela->r_info);
2734 if (!get_sym_h (&h, &sym, &sym_sec, psyms, r_indx, sec->owner))
2735 return false;
2736
2737 if (sym_sec == NULL
2738 || sym_sec->output_section == bfd_abs_section_ptr)
2739 continue;
2740
2741 is_call = false;
2742 if (!nonbranch)
2743 {
2744 unsigned char insn[4];
2745
2746 if (!bfd_get_section_contents (sec->owner, sec, insn,
2747 irela->r_offset, 4))
2748 return false;
2749 if (is_branch (insn))
2750 {
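/* brsl (0x33) and brasl (0x31) are the link-setting branches,
   i.e. true calls; plain br/bra do not match this test.  */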
2751 is_call = (insn[0] & 0xfd) == 0x31;
2752 priority = insn[1] & 0x0f;
2753 priority <<= 8;
2754 priority |= insn[2];
2755 priority <<= 8;
2756 priority |= insn[3];
2757 priority >>= 7;
2758 if ((sym_sec->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE))
2759 != (SEC_ALLOC | SEC_LOAD | SEC_CODE))
2760 {
2761 if (!warned)
2762 info->callbacks->einfo
2763 /* xgettext:c-format */
2764 (_("%pB(%pA+0x%v): call to non-code section"
2765 " %pB(%pA), analysis incomplete\n"),
2766 sec->owner, sec, irela->r_offset,
2767 sym_sec->owner, sym_sec);
2768 warned = true;
2769 continue;
2770 }
2771 }
2772 else
2773 {
2774 nonbranch = true;
2775 if (is_hint (insn))
2776 continue;
2777 }
2778 }
2779
2780 if (nonbranch)
2781 {
2782 /* For --auto-overlay, count possible stubs we need for
2783 function pointer references. */
2784 unsigned int sym_type;
2785 if (h)
2786 sym_type = h->type;
2787 else
2788 sym_type = ELF_ST_TYPE (sym->st_info);
2789 if (sym_type == STT_FUNC)
2790 {
2791 if (call_tree && spu_hash_table (info)->params->auto_overlay)
2792 spu_hash_table (info)->non_ovly_stub += 1;
2793 /* If the symbol type is STT_FUNC then this must be a
2794 function pointer initialisation. */
2795 continue;
2796 }
2797 /* Ignore data references. */
2798 if ((sym_sec->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE))
2799 != (SEC_ALLOC | SEC_LOAD | SEC_CODE))
2800 continue;
2801 /* Otherwise we probably have a jump table reloc for
2802 a switch statement or some other reference to a
2803 code label. */
2804 }
2805
2806 if (h)
2807 val = h->root.u.def.value;
2808 else
2809 val = sym->st_value;
2810 val += irela->r_addend;
2811
2812 if (!call_tree)
2813 {
2814 struct function_info *fun;
2815
2816 if (irela->r_addend != 0)
2817 {
2818 Elf_Internal_Sym *fake = bfd_zmalloc (sizeof (*fake));
2819 if (fake == NULL)
2820 return false;
2821 fake->st_value = val;
2822 fake->st_shndx
2823 = _bfd_elf_section_from_bfd_section (sym_sec->owner, sym_sec);
2824 sym = fake;
2825 }
2826 if (sym)
2827 fun = maybe_insert_function (sym_sec, sym, false, is_call);
2828 else
2829 fun = maybe_insert_function (sym_sec, h, true, is_call);
2830 if (fun == NULL)
2831 return false;
2832 if (irela->r_addend != 0
2833 && fun->u.sym != sym)
2834 free (sym);
2835 continue;
2836 }
2837
2838 caller = find_function (sec, irela->r_offset, info);
2839 if (caller == NULL)
2840 return false;
2841 callee = bfd_malloc (sizeof *callee);
2842 if (callee == NULL)
2843 return false;
2844
2845 callee->fun = find_function (sym_sec, val, info);
2846 if (callee->fun == NULL)
2847 return false;
2848 callee->is_tail = !is_call;
2849 callee->is_pasted = false;
2850 callee->broken_cycle = false;
2851 callee->priority = priority;
2852 callee->count = nonbranch ? 0 : 1;
2853 if (callee->fun->last_caller != sec)
2854 {
2855 callee->fun->last_caller = sec;
2856 callee->fun->call_count += 1;
2857 }
2858 if (!insert_callee (caller, callee))
2859 free (callee);
2860 else if (!is_call
2861 && !callee->fun->is_func
2862 && callee->fun->stack == 0)
2863 {
2864 /* This is either a tail call or a branch from one part of
2865 the function to another, i.e. hot/cold section. If the
2866 destination has been called by some other function then
2867 it is a separate function. We also assume that functions
2868 are not split across input files. */
2869 if (sec->owner != sym_sec->owner)
2870 {
2871 callee->fun->start = NULL;
2872 callee->fun->is_func = true;
2873 }
2874 else if (callee->fun->start == NULL)
2875 {
2876 struct function_info *caller_start = caller;
2877 while (caller_start->start)
2878 caller_start = caller_start->start;
2879
2880 if (caller_start != callee->fun)
2881 callee->fun->start = caller_start;
2882 }
2883 else
2884 {
2885 struct function_info *callee_start;
2886 struct function_info *caller_start;
2887 callee_start = callee->fun;
2888 while (callee_start->start)
2889 callee_start = callee_start->start;
2890 caller_start = caller;
2891 while (caller_start->start)
2892 caller_start = caller_start->start;
2893 if (caller_start != callee_start)
2894 {
2895 callee->fun->start = NULL;
2896 callee->fun->is_func = true;
2897 }
2898 }
2899 }
2900 }
2901
2902 return true;
2903 }
2904
2905 /* Handle something like .init or .fini, which has a piece of a function.
2906 These sections are pasted together to form a single function. */
2907
2908 static bool
2909 pasted_function (asection *sec)
2910 {
2911 struct bfd_link_order *l;
2912 struct _spu_elf_section_data *sec_data;
2913 struct spu_elf_stack_info *sinfo;
2914 Elf_Internal_Sym *fake;
2915 struct function_info *fun, *fun_start;
2916
2917 fake = bfd_zmalloc (sizeof (*fake));
2918 if (fake == NULL)
2919 return false;
2920 fake->st_value = 0;
2921 fake->st_size = sec->size;
2922 fake->st_shndx
2923 = _bfd_elf_section_from_bfd_section (sec->owner, sec);
2924 fun = maybe_insert_function (sec, fake, false, false);
2925 if (!fun)
2926 return false;
2927
2928 /* Find a function immediately preceding this section. */
2929 fun_start = NULL;
2930 for (l = sec->output_section->map_head.link_order; l != NULL; l = l->next)
2931 {
2932 if (l->u.indirect.section == sec)
2933 {
2934 if (fun_start != NULL)
2935 {
2936 struct call_info *callee = bfd_malloc (sizeof *callee);
2937 if (callee == NULL)
2938 return false;
2939
2940 fun->start = fun_start;
2941 callee->fun = fun;
2942 callee->is_tail = true;
2943 callee->is_pasted = true;
2944 callee->broken_cycle = false;
2945 callee->priority = 0;
2946 callee->count = 1;
2947 if (!insert_callee (fun_start, callee))
2948 free (callee);
2949 return true;
2950 }
2951 break;
2952 }
2953 if (l->type == bfd_indirect_link_order
2954 && (sec_data = spu_elf_section_data (l->u.indirect.section)) != NULL
2955 && (sinfo = sec_data->u.i.stack_info) != NULL
2956 && sinfo->num_fun != 0)
2957 fun_start = &sinfo->fun[sinfo->num_fun - 1];
2958 }
2959
2960 /* Don't return an error if we did not find a function preceding this
2961 section. The section may have incorrect flags. */
2962 return true;
2963 }
2964
2965 /* Map address ranges in code sections to functions. */
2966
2967 static bool
2968 discover_functions (struct bfd_link_info *info)
2969 {
2970 bfd *ibfd;
2971 int bfd_idx;
2972 Elf_Internal_Sym ***psym_arr;
2973 asection ***sec_arr;
2974 bool gaps = false;
2975
2976 bfd_idx = 0;
2977 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
2978 bfd_idx++;
2979
2980 psym_arr = bfd_zmalloc (bfd_idx * sizeof (*psym_arr));
2981 if (psym_arr == NULL)
2982 return false;
2983 sec_arr = bfd_zmalloc (bfd_idx * sizeof (*sec_arr));
2984 if (sec_arr == NULL)
2985 return false;
2986
2987 for (ibfd = info->input_bfds, bfd_idx = 0;
2988 ibfd != NULL;
2989 ibfd = ibfd->link.next, bfd_idx++)
2990 {
2991 extern const bfd_target spu_elf32_vec;
2992 Elf_Internal_Shdr *symtab_hdr;
2993 asection *sec;
2994 size_t symcount;
2995 Elf_Internal_Sym *syms, *sy, **psyms, **psy;
2996 asection **psecs, **p;
2997
2998 if (ibfd->xvec != &spu_elf32_vec)
2999 continue;
3000
3001 /* Read all the symbols. */
3002 symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
3003 symcount = symtab_hdr->sh_size / symtab_hdr->sh_entsize;
3004 if (symcount == 0)
3005 {
3006 if (!gaps)
3007 for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
3008 if (interesting_section (sec))
3009 {
3010 gaps = true;
3011 break;
3012 }
3013 continue;
3014 }
3015
3016 /* Don't use cached symbols since the generic ELF linker
3017 code only reads local symbols, and we need globals too. */
3018 free (symtab_hdr->contents);
3019 symtab_hdr->contents = NULL;
3020 syms = bfd_elf_get_elf_syms (ibfd, symtab_hdr, symcount, 0,
3021 NULL, NULL, NULL);
3022 symtab_hdr->contents = (void *) syms;
3023 if (syms == NULL)
3024 return false;
3025
3026 /* Select defined function symbols that are going to be output. */
3027 psyms = bfd_malloc ((symcount + 1) * sizeof (*psyms));
3028 if (psyms == NULL)
3029 return false;
3030 psym_arr[bfd_idx] = psyms;
3031 psecs = bfd_malloc (symcount * sizeof (*psecs));
3032 if (psecs == NULL)
3033 return false;
3034 sec_arr[bfd_idx] = psecs;
3035 for (psy = psyms, p = psecs, sy = syms; sy < syms + symcount; ++p, ++sy)
3036 if (ELF_ST_TYPE (sy->st_info) == STT_NOTYPE
3037 || ELF_ST_TYPE (sy->st_info) == STT_FUNC)
3038 {
3039 asection *s;
3040
3041 *p = s = bfd_section_from_elf_index (ibfd, sy->st_shndx);
3042 if (s != NULL && interesting_section (s))
3043 *psy++ = sy;
3044 }
3045 symcount = psy - psyms;
3046 *psy = NULL;
3047
3048 /* Sort them by section and offset within section. */
3049 sort_syms_syms = syms;
3050 sort_syms_psecs = psecs;
3051 qsort (psyms, symcount, sizeof (*psyms), sort_syms);
3052
3053 /* Now inspect the function symbols. */
3054 for (psy = psyms; psy < psyms + symcount; )
3055 {
3056 asection *s = psecs[*psy - syms];
3057 Elf_Internal_Sym **psy2;
3058
3059 for (psy2 = psy; ++psy2 < psyms + symcount; )
3060 if (psecs[*psy2 - syms] != s)
3061 break;
3062
3063 if (!alloc_stack_info (s, psy2 - psy))
3064 return false;
3065 psy = psy2;
3066 }
3067
3068 /* First install info about properly typed and sized functions.
3069 In an ideal world this will cover all code sections, except
3070 when partitioning functions into hot and cold sections,
3071 and the horrible pasted together .init and .fini functions. */
3072 for (psy = psyms; psy < psyms + symcount; ++psy)
3073 {
3074 sy = *psy;
3075 if (ELF_ST_TYPE (sy->st_info) == STT_FUNC)
3076 {
3077 asection *s = psecs[sy - syms];
3078 if (!maybe_insert_function (s, sy, false, true))
3079 return false;
3080 }
3081 }
3082
3083 for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
3084 if (interesting_section (sec))
3085 gaps |= check_function_ranges (sec, info);
3086 }
3087
3088 if (gaps)
3089 {
3090 /* See if we can discover more function symbols by looking at
3091 relocations. */
3092 for (ibfd = info->input_bfds, bfd_idx = 0;
3093 ibfd != NULL;
3094 ibfd = ibfd->link.next, bfd_idx++)
3095 {
3096 asection *sec;
3097
3098 if (psym_arr[bfd_idx] == NULL)
3099 continue;
3100
3101 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
3102 if (!mark_functions_via_relocs (sec, info, false))
3103 return false;
3104 }
3105
3106 for (ibfd = info->input_bfds, bfd_idx = 0;
3107 ibfd != NULL;
3108 ibfd = ibfd->link.next, bfd_idx++)
3109 {
3110 Elf_Internal_Shdr *symtab_hdr;
3111 asection *sec;
3112 Elf_Internal_Sym *syms, *sy, **psyms, **psy;
3113 asection **psecs;
3114
3115 if ((psyms = psym_arr[bfd_idx]) == NULL)
3116 continue;
3117
3118 psecs = sec_arr[bfd_idx];
3119
3120 symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
3121 syms = (Elf_Internal_Sym *) symtab_hdr->contents;
3122
3123 gaps = false;
3124 for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
3125 if (interesting_section (sec))
3126 gaps |= check_function_ranges (sec, info);
3127 if (!gaps)
3128 continue;
3129
3130 /* Finally, install all globals. */
3131 for (psy = psyms; (sy = *psy) != NULL; ++psy)
3132 {
3133 asection *s;
3134
3135 s = psecs[sy - syms];
3136
3137 /* Global syms might be improperly typed functions. */
3138 if (ELF_ST_TYPE (sy->st_info) != STT_FUNC
3139 && ELF_ST_BIND (sy->st_info) == STB_GLOBAL)
3140 {
3141 if (!maybe_insert_function (s, sy, false, false))
3142 return false;
3143 }
3144 }
3145 }
3146
3147 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
3148 {
3149 extern const bfd_target spu_elf32_vec;
3150 asection *sec;
3151
3152 if (ibfd->xvec != &spu_elf32_vec)
3153 continue;
3154
3155 /* Some of the symbols we've installed as marking the
3156 beginning of functions may have a size of zero. Extend
3157 the range of such functions to the beginning of the
3158 next symbol of interest. */
3159 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
3160 if (interesting_section (sec))
3161 {
3162 struct _spu_elf_section_data *sec_data;
3163 struct spu_elf_stack_info *sinfo;
3164
3165 sec_data = spu_elf_section_data (sec);
3166 sinfo = sec_data->u.i.stack_info;
3167 if (sinfo != NULL && sinfo->num_fun != 0)
3168 {
3169 int fun_idx;
3170 bfd_vma hi = sec->size;
3171
3172 for (fun_idx = sinfo->num_fun; --fun_idx >= 0; )
3173 {
3174 sinfo->fun[fun_idx].hi = hi;
3175 hi = sinfo->fun[fun_idx].lo;
3176 }
3177
3178 sinfo->fun[0].lo = 0;
3179 }
3180 /* No symbols in this section. Must be .init or .fini
3181 or something similar. */
3182 else if (!pasted_function (sec))
3183 return false;
3184 }
3185 }
3186 }
3187
3188 for (ibfd = info->input_bfds, bfd_idx = 0;
3189 ibfd != NULL;
3190 ibfd = ibfd->link.next, bfd_idx++)
3191 {
3192 if (psym_arr[bfd_idx] == NULL)
3193 continue;
3194
3195 free (psym_arr[bfd_idx]);
3196 free (sec_arr[bfd_idx]);
3197 }
3198
3199 free (psym_arr);
3200 free (sec_arr);
3201
3202 return true;
3203 }
3204
3205 /* Iterate over all function_info we have collected, calling DOIT on
3206 each node if ROOT_ONLY is false. Only call DOIT on root nodes
3207 if ROOT_ONLY. */
3208
3209 static bool
3210 for_each_node (bool (*doit) (struct function_info *,
3211 struct bfd_link_info *,
3212 void *),
3213 struct bfd_link_info *info,
3214 void *param,
3215 int root_only)
3216 {
3217 bfd *ibfd;
3218
3219 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
3220 {
3221 extern const bfd_target spu_elf32_vec;
3222 asection *sec;
3223
3224 if (ibfd->xvec != &spu_elf32_vec)
3225 continue;
3226
3227 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
3228 {
3229 struct _spu_elf_section_data *sec_data;
3230 struct spu_elf_stack_info *sinfo;
3231
3232 if ((sec_data = spu_elf_section_data (sec)) != NULL
3233 && (sinfo = sec_data->u.i.stack_info) != NULL)
3234 {
3235 int i;
3236 for (i = 0; i < sinfo->num_fun; ++i)
3237 if (!root_only || !sinfo->fun[i].non_root)
3238 if (!doit (&sinfo->fun[i], info, param))
3239 return false;
3240 }
3241 }
3242 }
3243 return true;
3244 }
3245
3246 /* Transfer call info attached to struct function_info entries for
3247 all of a given function's sections to the first entry. */
3248
3249 static bool
3250 transfer_calls (struct function_info *fun,
3251 struct bfd_link_info *info ATTRIBUTE_UNUSED,
3252 void *param ATTRIBUTE_UNUSED)
3253 {
3254 struct function_info *start = fun->start;
3255
3256 if (start != NULL)
3257 {
3258 struct call_info *call, *call_next;
3259
3260 while (start->start != NULL)
3261 start = start->start;
3262 for (call = fun->call_list; call != NULL; call = call_next)
3263 {
3264 call_next = call->next;
3265 if (!insert_callee (start, call))
3266 free (call);
3267 }
3268 fun->call_list = NULL;
3269 }
3270 return true;
3271 }
3272
3273 /* Mark nodes in the call graph that are called by some other node. */
3274
3275 static bool
3276 mark_non_root (struct function_info *fun,
3277 struct bfd_link_info *info ATTRIBUTE_UNUSED,
3278 void *param ATTRIBUTE_UNUSED)
3279 {
3280 struct call_info *call;
3281
3282 if (fun->visit1)
3283 return true;
3284 fun->visit1 = true;
3285 for (call = fun->call_list; call; call = call->next)
3286 {
3287 call->fun->non_root = true;
3288 mark_non_root (call->fun, 0, 0);
3289 }
3290 return true;
3291 }
3292
3293 /* Remove cycles from the call graph. Set depth of nodes. */
3294
3295 static bool
3296 remove_cycles (struct function_info *fun,
3297 struct bfd_link_info *info,
3298 void *param)
3299 {
3300 struct call_info **callp, *call;
3301 unsigned int depth = *(unsigned int *) param;
3302 unsigned int max_depth = depth;
3303
3304 fun->depth = depth;
3305 fun->visit2 = true;
3306 fun->marking = true;
3307
3308 callp = &fun->call_list;
3309 while ((call = *callp) != NULL)
3310 {
3311 call->max_depth = depth + !call->is_pasted;
3312 if (!call->fun->visit2)
3313 {
3314 if (!remove_cycles (call->fun, info, &call->max_depth))
3315 return false;
3316 if (max_depth < call->max_depth)
3317 max_depth = call->max_depth;
3318 }
3319 else if (call->fun->marking)
3320 {
3321 struct spu_link_hash_table *htab = spu_hash_table (info);
3322
3323 if (!htab->params->auto_overlay
3324 && htab->params->stack_analysis)
3325 {
3326 const char *f1 = func_name (fun);
3327 const char *f2 = func_name (call->fun);
3328
3329 /* xgettext:c-format */
3330 info->callbacks->info (_("stack analysis will ignore the call "
3331 "from %s to %s\n"),
3332 f1, f2);
3333 }
3334
3335 call->broken_cycle = true;
3336 }
3337 callp = &call->next;
3338 }
3339 fun->marking = false;
3340 *(unsigned int *) param = max_depth;
3341 return true;
3342 }
3343
3344 /* Check that we actually visited all nodes in remove_cycles. If we
3345 didn't, then there is some cycle in the call graph not attached to
3346 any root node. Arbitrarily choose a node in the cycle as a new
3347 root and break the cycle. */
3348
3349 static bool
3350 mark_detached_root (struct function_info *fun,
3351 struct bfd_link_info *info,
3352 void *param)
3353 {
3354 if (fun->visit2)
3355 return true;
3356 fun->non_root = false;
3357 *(unsigned int *) param = 0;
3358 return remove_cycles (fun, info, param);
3359 }
3360
3361 /* Populate call_list for each function. */
3362
3363 static bool
3364 build_call_tree (struct bfd_link_info *info)
3365 {
3366 bfd *ibfd;
3367 unsigned int depth;
3368
3369 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
3370 {
3371 extern const bfd_target spu_elf32_vec;
3372 asection *sec;
3373
3374 if (ibfd->xvec != &spu_elf32_vec)
3375 continue;
3376
3377 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
3378 if (!mark_functions_via_relocs (sec, info, true))
3379 return false;
3380 }
3381
3382 /* Transfer call info from hot/cold section part of function
3383 to main entry. */
3384 if (!spu_hash_table (info)->params->auto_overlay
3385 && !for_each_node (transfer_calls, info, 0, false))
3386 return false;
3387
3388 /* Find the call graph root(s). */
3389 if (!for_each_node (mark_non_root, info, 0, false))
3390 return false;
3391
3392 /* Remove cycles from the call graph. We start from the root node(s)
3393 so that we break cycles in a reasonable place. */
3394 depth = 0;
3395 if (!for_each_node (remove_cycles, info, &depth, true))
3396 return false;
3397
3398 return for_each_node (mark_detached_root, info, &depth, false);
3399 }
3400
3401 /* qsort predicate to sort calls by priority, then max_depth, then count. */
3402
3403 static int
3404 sort_calls (const void *a, const void *b)
3405 {
3406 struct call_info *const *c1 = a;
3407 struct call_info *const *c2 = b;
3408 int delta;
3409
3410 delta = (*c2)->priority - (*c1)->priority;
3411 if (delta != 0)
3412 return delta;
3413
3414 delta = (*c2)->max_depth - (*c1)->max_depth;
3415 if (delta != 0)
3416 return delta;
3417
3418 delta = (*c2)->count - (*c1)->count;
3419 if (delta != 0)
3420 return delta;
3421
3422 return (char *) c1 - (char *) c2;
3423 }
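
/* The final tiebreak above compares the addresses of the array
   elements themselves, so the comparator never reports equality;
   that keeps the result stable for a given qsort implementation.  */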
3424
3425 struct _mos_param {
3426 unsigned int max_overlay_size;
3427 };
3428
3429 /* Set linker_mark and gc_mark on any sections that we will put in
3430 overlays. These flags are used by the generic ELF linker, but we
3431 won't be continuing on to bfd_elf_final_link so it is OK to use
3432 them. linker_mark is clear before we get here. Set segment_mark
3433 on sections that are part of a pasted function (excluding the last
3434 section).
3435
3436 Set up function rodata section if --overlay-rodata. We don't
3437 currently include merged string constant rodata sections.
3438
3439 Sort the call graph so that the deepest nodes will be visited
3440 first. */
3441
3442 static bool
3443 mark_overlay_section (struct function_info *fun,
3444 struct bfd_link_info *info,
3445 void *param)
3446 {
3447 struct call_info *call;
3448 unsigned int count;
3449 struct _mos_param *mos_param = param;
3450 struct spu_link_hash_table *htab = spu_hash_table (info);
3451
3452 if (fun->visit4)
3453 return true;
3454
3455 fun->visit4 = true;
3456 if (!fun->sec->linker_mark
3457 && (htab->params->ovly_flavour != ovly_soft_icache
3458 || htab->params->non_ia_text
3459 || startswith (fun->sec->name, ".text.ia.")
3460 || strcmp (fun->sec->name, ".init") == 0
3461 || strcmp (fun->sec->name, ".fini") == 0))
3462 {
3463 unsigned int size;
3464
3465 fun->sec->linker_mark = 1;
3466 fun->sec->gc_mark = 1;
3467 fun->sec->segment_mark = 0;
3468 /* Ensure SEC_CODE is set on this text section (it ought to
3469 be!), and SEC_CODE is clear on rodata sections. We use
3470 this flag to differentiate the two overlay section types. */
3471 fun->sec->flags |= SEC_CODE;
3472
3473 size = fun->sec->size;
3474 if (htab->params->auto_overlay & OVERLAY_RODATA)
3475 {
3476 char *name = NULL;
3477
3478 /* Find the rodata section corresponding to this function's
3479 text section. */
3480 if (strcmp (fun->sec->name, ".text") == 0)
3481 {
3482 name = bfd_malloc (sizeof (".rodata"));
3483 if (name == NULL)
3484 return false;
3485 memcpy (name, ".rodata", sizeof (".rodata"));
3486 }
3487 else if (startswith (fun->sec->name, ".text."))
3488 {
3489 size_t len = strlen (fun->sec->name);
3490 name = bfd_malloc (len + 3);
3491 if (name == NULL)
3492 return false;
3493 memcpy (name, ".rodata", sizeof (".rodata"));
3494 memcpy (name + 7, fun->sec->name + 5, len - 4);
3495 }
3496 else if (startswith (fun->sec->name, ".gnu.linkonce.t."))
3497 {
3498 size_t len = strlen (fun->sec->name) + 1;
3499 name = bfd_malloc (len);
3500 if (name == NULL)
3501 return false;
3502 memcpy (name, fun->sec->name, len);
3503 name[14] = 'r';
3504 }
3505
3506 if (name != NULL)
3507 {
3508 asection *rodata = NULL;
3509 asection *group_sec = elf_section_data (fun->sec)->next_in_group;
3510 if (group_sec == NULL)
3511 rodata = bfd_get_section_by_name (fun->sec->owner, name);
3512 else
3513 while (group_sec != NULL && group_sec != fun->sec)
3514 {
3515 if (strcmp (group_sec->name, name) == 0)
3516 {
3517 rodata = group_sec;
3518 break;
3519 }
3520 group_sec = elf_section_data (group_sec)->next_in_group;
3521 }
3522 fun->rodata = rodata;
3523 if (fun->rodata)
3524 {
3525 size += fun->rodata->size;
3526 if (htab->params->line_size != 0
3527 && size > htab->params->line_size)
3528 {
3529 size -= fun->rodata->size;
3530 fun->rodata = NULL;
3531 }
3532 else
3533 {
3534 fun->rodata->linker_mark = 1;
3535 fun->rodata->gc_mark = 1;
3536 fun->rodata->flags &= ~SEC_CODE;
3537 }
3538 }
3539 free (name);
3540 }
3541 }
3542 if (mos_param->max_overlay_size < size)
3543 mos_param->max_overlay_size = size;
3544 }
3545
3546 for (count = 0, call = fun->call_list; call != NULL; call = call->next)
3547 count += 1;
3548
3549 if (count > 1)
3550 {
3551 struct call_info **calls = bfd_malloc (count * sizeof (*calls));
3552 if (calls == NULL)
3553 return false;
3554
3555 for (count = 0, call = fun->call_list; call != NULL; call = call->next)
3556 calls[count++] = call;
3557
3558 qsort (calls, count, sizeof (*calls), sort_calls);
3559
3560 fun->call_list = NULL;
3561 while (count != 0)
3562 {
3563 --count;
3564 calls[count]->next = fun->call_list;
3565 fun->call_list = calls[count];
3566 }
3567 free (calls);
3568 }
3569
3570 for (call = fun->call_list; call != NULL; call = call->next)
3571 {
3572 if (call->is_pasted)
3573 {
3574 /* There can only be one is_pasted call per function_info. */
3575 BFD_ASSERT (!fun->sec->segment_mark);
3576 fun->sec->segment_mark = 1;
3577 }
3578 if (!call->broken_cycle
3579 && !mark_overlay_section (call->fun, info, param))
3580 return false;
3581 }
3582
3583 /* Don't put entry code into an overlay. The overlay manager needs
3584 a stack! Also, don't mark .ovl.init as an overlay. */
3585 if (fun->lo + fun->sec->output_offset + fun->sec->output_section->vma
3586 == info->output_bfd->start_address
3587 || startswith (fun->sec->output_section->name, ".ovl.init"))
3588 {
3589 fun->sec->linker_mark = 0;
3590 if (fun->rodata != NULL)
3591 fun->rodata->linker_mark = 0;
3592 }
3593 return true;
3594 }
3595
3596 /* If non-zero, also unmark functions called from functions in sections
3597 that we need to unmark. Unfortunately this isn't reliable since the
3598 call graph cannot know the destination of function pointer calls. */
3599 #define RECURSE_UNMARK 0
3600
3601 struct _uos_param {
3602 asection *exclude_input_section;
3603 asection *exclude_output_section;
3604 unsigned long clearing;
3605 };
3606
3607 /* Undo some of mark_overlay_section's work. */
3608
3609 static bool
3610 unmark_overlay_section (struct function_info *fun,
3611 struct bfd_link_info *info,
3612 void *param)
3613 {
3614 struct call_info *call;
3615 struct _uos_param *uos_param = param;
3616 unsigned int excluded = 0;
3617
3618 if (fun->visit5)
3619 return true;
3620
3621 fun->visit5 = true;
3622
3623 excluded = 0;
3624 if (fun->sec == uos_param->exclude_input_section
3625 || fun->sec->output_section == uos_param->exclude_output_section)
3626 excluded = 1;
3627
3628 if (RECURSE_UNMARK)
3629 uos_param->clearing += excluded;
3630
3631 if (RECURSE_UNMARK ? uos_param->clearing : excluded)
3632 {
3633 fun->sec->linker_mark = 0;
3634 if (fun->rodata)
3635 fun->rodata->linker_mark = 0;
3636 }
3637
3638 for (call = fun->call_list; call != NULL; call = call->next)
3639 if (!call->broken_cycle
3640 && !unmark_overlay_section (call->fun, info, param))
3641 return false;
3642
3643 if (RECURSE_UNMARK)
3644 uos_param->clearing -= excluded;
3645 return true;
3646 }
3647
3648 struct _cl_param {
3649 unsigned int lib_size;
3650 asection **lib_sections;
3651 };
3652
3653 /* Add sections we have marked as belonging to overlays to an array
3654 for consideration as non-overlay sections. The array consists of
3655 (text, rodata) pairs of sections for functions in the call graph. */
3656
3657 static bool
3658 collect_lib_sections (struct function_info *fun,
3659 struct bfd_link_info *info,
3660 void *param)
3661 {
3662 struct _cl_param *lib_param = param;
3663 struct call_info *call;
3664 unsigned int size;
3665
3666 if (fun->visit6)
3667 return true;
3668
3669 fun->visit6 = true;
3670 if (!fun->sec->linker_mark || !fun->sec->gc_mark || fun->sec->segment_mark)
3671 return true;
3672
3673 size = fun->sec->size;
3674 if (fun->rodata)
3675 size += fun->rodata->size;
3676
3677 if (size <= lib_param->lib_size)
3678 {
3679 *lib_param->lib_sections++ = fun->sec;
3680 fun->sec->gc_mark = 0;
3681 if (fun->rodata && fun->rodata->linker_mark && fun->rodata->gc_mark)
3682 {
3683 *lib_param->lib_sections++ = fun->rodata;
3684 fun->rodata->gc_mark = 0;
3685 }
3686 else
3687 *lib_param->lib_sections++ = NULL;
3688 }
3689
3690 for (call = fun->call_list; call != NULL; call = call->next)
3691 if (!call->broken_cycle)
3692 collect_lib_sections (call->fun, info, param);
3693
3694 return true;
3695 }
3696
3697 /* qsort predicate to sort sections by call count. */
3698
3699 static int
3700 sort_lib (const void *a, const void *b)
3701 {
3702 asection *const *s1 = a;
3703 asection *const *s2 = b;
3704 struct _spu_elf_section_data *sec_data;
3705 struct spu_elf_stack_info *sinfo;
3706 int delta;
3707
3708 delta = 0;
3709 if ((sec_data = spu_elf_section_data (*s1)) != NULL
3710 && (sinfo = sec_data->u.i.stack_info) != NULL)
3711 {
3712 int i;
3713 for (i = 0; i < sinfo->num_fun; ++i)
3714 delta -= sinfo->fun[i].call_count;
3715 }
3716
3717 if ((sec_data = spu_elf_section_data (*s2)) != NULL
3718 && (sinfo = sec_data->u.i.stack_info) != NULL)
3719 {
3720 int i;
3721 for (i = 0; i < sinfo->num_fun; ++i)
3722 delta += sinfo->fun[i].call_count;
3723 }
3724
3725 if (delta != 0)
3726 return delta;
3727
3728 return s1 - s2;
3729 }
3730
3731 /* Remove some sections from those marked to be in overlays. Choose
3732 those that are called from many places, likely library functions. */
3733
3734 static unsigned int
3735 auto_ovl_lib_functions (struct bfd_link_info *info, unsigned int lib_size)
3736 {
3737 bfd *ibfd;
3738 asection **lib_sections;
3739 unsigned int i, lib_count;
3740 struct _cl_param collect_lib_param;
3741 struct function_info dummy_caller;
3742 struct spu_link_hash_table *htab;
3743
3744 memset (&dummy_caller, 0, sizeof (dummy_caller));
3745 lib_count = 0;
3746 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
3747 {
3748 extern const bfd_target spu_elf32_vec;
3749 asection *sec;
3750
3751 if (ibfd->xvec != &spu_elf32_vec)
3752 continue;
3753
3754 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
3755 if (sec->linker_mark
3756 && sec->size < lib_size
3757 && (sec->flags & SEC_CODE) != 0)
3758 lib_count += 1;
3759 }
3760 lib_sections = bfd_malloc (lib_count * 2 * sizeof (*lib_sections));
3761 if (lib_sections == NULL)
3762 return (unsigned int) -1;
3763 collect_lib_param.lib_size = lib_size;
3764 collect_lib_param.lib_sections = lib_sections;
3765 if (!for_each_node (collect_lib_sections, info, &collect_lib_param,
3766 true))
3767 return (unsigned int) -1;
3768 lib_count = (collect_lib_param.lib_sections - lib_sections) / 2;
3769
3770 /* Sort sections so that those with the most calls are first. */
3771 if (lib_count > 1)
3772 qsort (lib_sections, lib_count, 2 * sizeof (*lib_sections), sort_lib);
3773
3774 htab = spu_hash_table (info);
3775 for (i = 0; i < lib_count; i++)
3776 {
3777 unsigned int tmp, stub_size;
3778 asection *sec;
3779 struct _spu_elf_section_data *sec_data;
3780 struct spu_elf_stack_info *sinfo;
3781
3782 sec = lib_sections[2 * i];
3783 /* If this section is OK, its size must be less than lib_size. */
3784 tmp = sec->size;
3785 /* If it has a rodata section, then add that too. */
3786 if (lib_sections[2 * i + 1])
3787 tmp += lib_sections[2 * i + 1]->size;
3788 /* Add any new overlay call stubs needed by the section. */
3789 stub_size = 0;
3790 if (tmp < lib_size
3791 && (sec_data = spu_elf_section_data (sec)) != NULL
3792 && (sinfo = sec_data->u.i.stack_info) != NULL)
3793 {
3794 int k;
3795 struct call_info *call;
3796
3797 for (k = 0; k < sinfo->num_fun; ++k)
3798 for (call = sinfo->fun[k].call_list; call; call = call->next)
3799 if (call->fun->sec->linker_mark)
3800 {
3801 struct call_info *p;
3802 for (p = dummy_caller.call_list; p; p = p->next)
3803 if (p->fun == call->fun)
3804 break;
3805 if (!p)
3806 stub_size += ovl_stub_size (htab->params);
3807 }
3808 }
3809 if (tmp + stub_size < lib_size)
3810 {
3811 struct call_info **pp, *p;
3812
3813 /* This section fits. Mark it as non-overlay. */
3814 lib_sections[2 * i]->linker_mark = 0;
3815 if (lib_sections[2 * i + 1])
3816 lib_sections[2 * i + 1]->linker_mark = 0;
3817 lib_size -= tmp + stub_size;
3818 /* Call stubs to the section we just added are no longer
3819 needed. */
3820 pp = &dummy_caller.call_list;
3821 while ((p = *pp) != NULL)
3822 if (!p->fun->sec->linker_mark)
3823 {
3824 lib_size += ovl_stub_size (htab->params);
3825 *pp = p->next;
3826 free (p);
3827 }
3828 else
3829 pp = &p->next;
3830 /* Add new call stubs to dummy_caller. */
3831 if ((sec_data = spu_elf_section_data (sec)) != NULL
3832 && (sinfo = sec_data->u.i.stack_info) != NULL)
3833 {
3834 int k;
3835 struct call_info *call;
3836
3837 for (k = 0; k < sinfo->num_fun; ++k)
3838 for (call = sinfo->fun[k].call_list;
3839 call;
3840 call = call->next)
3841 if (call->fun->sec->linker_mark)
3842 {
3843 struct call_info *callee;
3844 callee = bfd_malloc (sizeof (*callee));
3845 if (callee == NULL)
3846 return (unsigned int) -1;
3847 *callee = *call;
3848 if (!insert_callee (&dummy_caller, callee))
3849 free (callee);
3850 }
3851 }
3852 }
3853 }
3854 while (dummy_caller.call_list != NULL)
3855 {
3856 struct call_info *call = dummy_caller.call_list;
3857 dummy_caller.call_list = call->next;
3858 free (call);
3859 }
3860 for (i = 0; i < 2 * lib_count; i++)
3861 if (lib_sections[i])
3862 lib_sections[i]->gc_mark = 1;
3863 free (lib_sections);
3864 return lib_size;
3865 }
3866
3867 /* Build an array of overlay sections. The deepest node's section is
3868 added first, then its parent node's section, then everything called
3869 from the parent section. The idea is to group sections so as to
3870 minimise calls between different overlays. */
3871
3872 static bool
3873 collect_overlays (struct function_info *fun,
3874 struct bfd_link_info *info,
3875 void *param)
3876 {
3877 struct call_info *call;
3878 bool added_fun;
3879 asection ***ovly_sections = param;
3880
3881 if (fun->visit7)
3882 return true;
3883
3884 fun->visit7 = true;
3885 for (call = fun->call_list; call != NULL; call = call->next)
3886 if (!call->is_pasted && !call->broken_cycle)
3887 {
3888 if (!collect_overlays (call->fun, info, ovly_sections))
3889 return false;
3890 break;
3891 }
3892
3893 added_fun = false;
3894 if (fun->sec->linker_mark && fun->sec->gc_mark)
3895 {
3896 fun->sec->gc_mark = 0;
3897 *(*ovly_sections)++ = fun->sec;
3898 if (fun->rodata && fun->rodata->linker_mark && fun->rodata->gc_mark)
3899 {
3900 fun->rodata->gc_mark = 0;
3901 *(*ovly_sections)++ = fun->rodata;
3902 }
3903 else
3904 *(*ovly_sections)++ = NULL;
3905 added_fun = true;
3906
3907 /* Pasted sections must stay with the first section. We don't
3908 put pasted sections in the array, just the first section.
3909 Mark subsequent sections as already considered. */
3910 if (fun->sec->segment_mark)
3911 {
3912 struct function_info *call_fun = fun;
3913 do
3914 {
3915 for (call = call_fun->call_list; call != NULL; call = call->next)
3916 if (call->is_pasted)
3917 {
3918 call_fun = call->fun;
3919 call_fun->sec->gc_mark = 0;
3920 if (call_fun->rodata)
3921 call_fun->rodata->gc_mark = 0;
3922 break;
3923 }
3924 if (call == NULL)
3925 abort ();
3926 }
3927 while (call_fun->sec->segment_mark);
3928 }
3929 }
3930
3931 for (call = fun->call_list; call != NULL; call = call->next)
3932 if (!call->broken_cycle
3933 && !collect_overlays (call->fun, info, ovly_sections))
3934 return false;
3935
3936 if (added_fun)
3937 {
3938 struct _spu_elf_section_data *sec_data;
3939 struct spu_elf_stack_info *sinfo;
3940
3941 if ((sec_data = spu_elf_section_data (fun->sec)) != NULL
3942 && (sinfo = sec_data->u.i.stack_info) != NULL)
3943 {
3944 int i;
3945 for (i = 0; i < sinfo->num_fun; ++i)
3946 if (!collect_overlays (&sinfo->fun[i], info, ovly_sections))
3947 return false;
3948 }
3949 }
3950
3951 return true;
3952 }
3953
3954 struct _sum_stack_param {
3955 size_t cum_stack;
3956 size_t overall_stack;
3957 bool emit_stack_syms;
3958 };
3959
3960 /* Descend the call graph for FUN, accumulating total stack required. */
3961
3962 static bool
3963 sum_stack (struct function_info *fun,
3964 struct bfd_link_info *info,
3965 void *param)
3966 {
3967 struct call_info *call;
3968 struct function_info *max;
3969 size_t stack, cum_stack;
3970 const char *f1;
3971 bool has_call;
3972 struct _sum_stack_param *sum_stack_param = param;
3973 struct spu_link_hash_table *htab;
3974
3975 cum_stack = fun->stack;
3976 sum_stack_param->cum_stack = cum_stack;
3977 if (fun->visit3)
3978 return true;
3979
3980 has_call = false;
3981 max = NULL;
3982 for (call = fun->call_list; call; call = call->next)
3983 {
3984 if (call->broken_cycle)
3985 continue;
3986 if (!call->is_pasted)
3987 has_call = true;
3988 if (!sum_stack (call->fun, info, sum_stack_param))
3989 return false;
3990 stack = sum_stack_param->cum_stack;
3991 /* Include caller stack for normal calls, don't do so for
3992 tail calls. fun->stack here is local stack usage for
3993 this function. */
3994 if (!call->is_tail || call->is_pasted || call->fun->start != NULL)
3995 stack += fun->stack;
3996 if (cum_stack < stack)
3997 {
3998 cum_stack = stack;
3999 max = call->fun;
4000 }
4001 }
4002
4003 sum_stack_param->cum_stack = cum_stack;
4004 stack = fun->stack;
4005 /* Now fun->stack holds cumulative stack. */
4006 fun->stack = cum_stack;
4007 fun->visit3 = true;
4008
4009 if (!fun->non_root
4010 && sum_stack_param->overall_stack < cum_stack)
4011 sum_stack_param->overall_stack = cum_stack;
4012
4013 htab = spu_hash_table (info);
4014 if (htab->params->auto_overlay)
4015 return true;
4016
4017 f1 = func_name (fun);
4018 if (htab->params->stack_analysis)
4019 {
4020 if (!fun->non_root)
4021 info->callbacks->info (" %s: 0x%v\n", f1, (bfd_vma) cum_stack);
4022 info->callbacks->minfo ("%s: 0x%v 0x%v\n",
4023 f1, (bfd_vma) stack, (bfd_vma) cum_stack);
4024
4025 if (has_call)
4026 {
4027 info->callbacks->minfo (_(" calls:\n"));
4028 for (call = fun->call_list; call; call = call->next)
4029 if (!call->is_pasted && !call->broken_cycle)
4030 {
4031 const char *f2 = func_name (call->fun);
4032 const char *ann1 = call->fun == max ? "*" : " ";
4033 const char *ann2 = call->is_tail ? "t" : " ";
4034
4035 info->callbacks->minfo (" %s%s %s\n", ann1, ann2, f2);
4036 }
4037 }
4038 }
4039
4040 if (sum_stack_param->emit_stack_syms)
4041 {
4042 char *name = bfd_malloc (18 + strlen (f1));
4043 struct elf_link_hash_entry *h;
4044
4045 if (name == NULL)
4046 return false;
4047
4048 if (fun->global || ELF_ST_BIND (fun->u.sym->st_info) == STB_GLOBAL)
4049 sprintf (name, "__stack_%s", f1);
4050 else
4051 sprintf (name, "__stack_%x_%s", fun->sec->id & 0xffffffff, f1);
4052
4053 h = elf_link_hash_lookup (&htab->elf, name, true, true, false);
4054 free (name);
4055 if (h != NULL
4056 && (h->root.type == bfd_link_hash_new
4057 || h->root.type == bfd_link_hash_undefined
4058 || h->root.type == bfd_link_hash_undefweak))
4059 {
4060 h->root.type = bfd_link_hash_defined;
4061 h->root.u.def.section = bfd_abs_section_ptr;
4062 h->root.u.def.value = cum_stack;
4063 h->size = 0;
4064 h->type = 0;
4065 h->ref_regular = 1;
4066 h->def_regular = 1;
4067 h->ref_regular_nonweak = 1;
4068 h->forced_local = 1;
4069 h->non_elf = 0;
4070 }
4071 }
4072
4073 return true;
4074 }
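
/* For illustration of the accumulation above: if main has 48 bytes
   of local stack and makes a normal call to f (cumulative 32) plus
   a tail call to g (cumulative 100), the loop computes 48 + 32 = 80
   for f but just 100 for g, since a tail call reuses the caller's
   frame.  So cum_stack for main is max (48, 80, 100) = 100; had g
   been a normal call it would have been 148.  */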
4075
4076 /* SEC is part of a pasted function. Return the call_info for the
4077 next section of this function. */
4078
4079 static struct call_info *
4080 find_pasted_call (asection *sec)
4081 {
4082 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
4083 struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
4084 struct call_info *call;
4085 int k;
4086
4087 for (k = 0; k < sinfo->num_fun; ++k)
4088 for (call = sinfo->fun[k].call_list; call != NULL; call = call->next)
4089 if (call->is_pasted)
4090 return call;
4091 abort ();
4092 return NULL;
4093 }
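
/* A minimal sketch, not part of the linker's own flow, of how the
   is_pasted links located by find_pasted_call can be followed to
   visit every piece of a pasted function.  walk_pasted_chain is a
   hypothetical name; the same loop shape appears in
   print_one_overlay_section below.  */

static ATTRIBUTE_UNUSED void
walk_pasted_chain (asection *sec)
{
  struct call_info *call = find_pasted_call (sec);

  while (call != NULL)
    {
      struct function_info *call_fun = call->fun;

      /* call_fun->sec is the next piece of the pasted function.  */
      for (call = call_fun->call_list; call != NULL; call = call->next)
	if (call->is_pasted)
	  break;
    }
}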
4094
4095 /* qsort predicate to sort bfds by file name. */
4096
4097 static int
4098 sort_bfds (const void *a, const void *b)
4099 {
4100 bfd *const *abfd1 = a;
4101 bfd *const *abfd2 = b;
4102
4103 return filename_cmp (bfd_get_filename (*abfd1), bfd_get_filename (*abfd2));
4104 }
4105
4106 static unsigned int
4107 print_one_overlay_section (FILE *script,
4108 unsigned int base,
4109 unsigned int count,
4110 unsigned int ovlynum,
4111 unsigned int *ovly_map,
4112 asection **ovly_sections,
4113 struct bfd_link_info *info)
4114 {
4115 unsigned int j;
4116
4117 for (j = base; j < count && ovly_map[j] == ovlynum; j++)
4118 {
4119 asection *sec = ovly_sections[2 * j];
4120
4121 if (fprintf (script, " %s%c%s (%s)\n",
4122 (sec->owner->my_archive != NULL
4123 ? bfd_get_filename (sec->owner->my_archive) : ""),
4124 info->path_separator,
4125 bfd_get_filename (sec->owner),
4126 sec->name) <= 0)
4127 return -1;
4128 if (sec->segment_mark)
4129 {
4130 struct call_info *call = find_pasted_call (sec);
4131 while (call != NULL)
4132 {
4133 struct function_info *call_fun = call->fun;
4134 sec = call_fun->sec;
4135 if (fprintf (script, " %s%c%s (%s)\n",
4136 (sec->owner->my_archive != NULL
4137 ? bfd_get_filename (sec->owner->my_archive) : ""),
4138 info->path_separator,
4139 bfd_get_filename (sec->owner),
4140 sec->name) <= 0)
4141 return -1;
4142 for (call = call_fun->call_list; call; call = call->next)
4143 if (call->is_pasted)
4144 break;
4145 }
4146 }
4147 }
4148
4149 for (j = base; j < count && ovly_map[j] == ovlynum; j++)
4150 {
4151 asection *sec = ovly_sections[2 * j + 1];
4152 if (sec != NULL
4153 && fprintf (script, " %s%c%s (%s)\n",
4154 (sec->owner->my_archive != NULL
4155 ? bfd_get_filename (sec->owner->my_archive) : ""),
4156 info->path_separator,
4157 bfd_get_filename (sec->owner),
4158 sec->name) <= 0)
4159 return -1;
4160
4161 sec = ovly_sections[2 * j];
4162 if (sec->segment_mark)
4163 {
4164 struct call_info *call = find_pasted_call (sec);
4165 while (call != NULL)
4166 {
4167 struct function_info *call_fun = call->fun;
4168 sec = call_fun->rodata;
4169 if (sec != NULL
4170 && fprintf (script, " %s%c%s (%s)\n",
4171 (sec->owner->my_archive != NULL
4172 ? bfd_get_filename (sec->owner->my_archive) : ""),
4173 info->path_separator,
4174 bfd_get_filename (sec->owner),
4175 sec->name) <= 0)
4176 return -1;
4177 for (call = call_fun->call_list; call; call = call->next)
4178 if (call->is_pasted)
4179 break;
4180 }
4181 }
4182 }
4183
4184 return j;
4185 }
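
/* For example, with ':' as info->path_separator, a function section
   .text.fn1 in foo.o from lib.a comes out in the generated script as
   lib.a:foo.o (.text.fn1)
   followed, via the segment_mark loops above, by the sections of any
   pieces pasted onto that function.  */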
4186
4187 /* Handle --auto-overlay. */
4188
4189 static void
4190 spu_elf_auto_overlay (struct bfd_link_info *info)
4191 {
4192 bfd *ibfd;
4193 bfd **bfd_arr;
4194 struct elf_segment_map *m;
4195 unsigned int fixed_size, lo, hi;
4196 unsigned int reserved;
4197 struct spu_link_hash_table *htab;
4198 unsigned int base, i, count, bfd_count;
4199 unsigned int region, ovlynum;
4200 asection **ovly_sections, **ovly_p;
4201 unsigned int *ovly_map;
4202 FILE *script;
4203 unsigned int total_overlay_size, overlay_size;
4204 const char *ovly_mgr_entry;
4205 struct elf_link_hash_entry *h;
4206 struct _mos_param mos_param;
4207 struct _uos_param uos_param;
4208 struct function_info dummy_caller;
4209
4210 /* Find the extents of our loadable image. */
4211 lo = (unsigned int) -1;
4212 hi = 0;
4213 for (m = elf_seg_map (info->output_bfd); m != NULL; m = m->next)
4214 if (m->p_type == PT_LOAD)
4215 for (i = 0; i < m->count; i++)
4216 if (m->sections[i]->size != 0)
4217 {
4218 if (m->sections[i]->vma < lo)
4219 lo = m->sections[i]->vma;
4220 if (m->sections[i]->vma + m->sections[i]->size - 1 > hi)
4221 hi = m->sections[i]->vma + m->sections[i]->size - 1;
4222 }
4223 fixed_size = hi + 1 - lo;
4224
4225 if (!discover_functions (info))
4226 goto err_exit;
4227
4228 if (!build_call_tree (info))
4229 goto err_exit;
4230
4231 htab = spu_hash_table (info);
4232 reserved = htab->params->auto_overlay_reserved;
4233 if (reserved == 0)
4234 {
4235 struct _sum_stack_param sum_stack_param;
4236
4237 sum_stack_param.emit_stack_syms = 0;
4238 sum_stack_param.overall_stack = 0;
4239 if (!for_each_node (sum_stack, info, &sum_stack_param, true))
4240 goto err_exit;
4241 reserved = (sum_stack_param.overall_stack
4242 + htab->params->extra_stack_space);
4243 }
4244
4245 /* No need for overlays if everything already fits. */
4246 if (fixed_size + reserved <= htab->local_store
4247 && htab->params->ovly_flavour != ovly_soft_icache)
4248 {
4249 htab->params->auto_overlay = 0;
4250 return;
4251 }
4252
4253 uos_param.exclude_input_section = 0;
4254 uos_param.exclude_output_section
4255 = bfd_get_section_by_name (info->output_bfd, ".interrupt");
4256
4257 ovly_mgr_entry = "__ovly_load";
4258 if (htab->params->ovly_flavour == ovly_soft_icache)
4259 ovly_mgr_entry = "__icache_br_handler";
4260 h = elf_link_hash_lookup (&htab->elf, ovly_mgr_entry,
4261 false, false, false);
4262 if (h != NULL
4263 && (h->root.type == bfd_link_hash_defined
4264 || h->root.type == bfd_link_hash_defweak)
4265 && h->def_regular)
4266 {
4267 /* We have a user supplied overlay manager. */
4268 uos_param.exclude_input_section = h->root.u.def.section;
4269 }
4270 else
4271 {
4272 /* If no user overlay manager, spu_elf_load_ovl_mgr will add our
4273 builtin version to .text, and will adjust .text size. */
4274 fixed_size += (*htab->params->spu_elf_load_ovl_mgr) ();
4275 }
4276
4277 /* Mark overlay sections, and find max overlay section size. */
4278 mos_param.max_overlay_size = 0;
4279 if (!for_each_node (mark_overlay_section, info, &mos_param, true))
4280 goto err_exit;
4281
4282 /* We can't put the overlay manager or interrupt routines in
4283 overlays. */
4284 uos_param.clearing = 0;
4285 if ((uos_param.exclude_input_section
4286 || uos_param.exclude_output_section)
4287 && !for_each_node (unmark_overlay_section, info, &uos_param, true))
4288 goto err_exit;
4289
4290 bfd_count = 0;
4291 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
4292 ++bfd_count;
4293 bfd_arr = bfd_malloc (bfd_count * sizeof (*bfd_arr));
4294 if (bfd_arr == NULL)
4295 goto err_exit;
4296
4297 /* Count overlay sections, and subtract their sizes from "fixed_size". */
4298 count = 0;
4299 bfd_count = 0;
4300 total_overlay_size = 0;
4301 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
4302 {
4303 extern const bfd_target spu_elf32_vec;
4304 asection *sec;
4305 unsigned int old_count;
4306
4307 if (ibfd->xvec != &spu_elf32_vec)
4308 continue;
4309
4310 old_count = count;
4311 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
4312 if (sec->linker_mark)
4313 {
4314 if ((sec->flags & SEC_CODE) != 0)
4315 count += 1;
4316 fixed_size -= sec->size;
4317 total_overlay_size += sec->size;
4318 }
4319 else if ((sec->flags & (SEC_ALLOC | SEC_LOAD)) == (SEC_ALLOC | SEC_LOAD)
4320 && sec->output_section->owner == info->output_bfd
4321 && startswith (sec->output_section->name, ".ovl.init"))
4322 fixed_size -= sec->size;
4323 if (count != old_count)
4324 bfd_arr[bfd_count++] = ibfd;
4325 }
4326
4327 /* Since the overlay link script selects sections by file name and
4328 section name, ensure that file names are unique. */
4329 if (bfd_count > 1)
4330 {
4331 bool ok = true;
4332
4333 qsort (bfd_arr, bfd_count, sizeof (*bfd_arr), sort_bfds);
4334 for (i = 1; i < bfd_count; ++i)
4335 if (filename_cmp (bfd_get_filename (bfd_arr[i - 1]),
4336 bfd_get_filename (bfd_arr[i])) == 0)
4337 {
4338 if (bfd_arr[i - 1]->my_archive == bfd_arr[i]->my_archive)
4339 {
4340 if (bfd_arr[i - 1]->my_archive && bfd_arr[i]->my_archive)
4341 /* xgettext:c-format */
4342 info->callbacks->einfo (_("%s duplicated in %s\n"),
4343 bfd_get_filename (bfd_arr[i]),
4344 bfd_get_filename (bfd_arr[i]->my_archive));
4345 else
4346 info->callbacks->einfo (_("%s duplicated\n"),
4347 bfd_get_filename (bfd_arr[i]));
4348 ok = false;
4349 }
4350 }
4351 if (!ok)
4352 {
4353 info->callbacks->einfo (_("sorry, no support for duplicate "
4354 "object files in auto-overlay script\n"));
4355 bfd_set_error (bfd_error_bad_value);
4356 goto err_exit;
4357 }
4358 }
4359 free (bfd_arr);
4360
4361 fixed_size += reserved;
4362 fixed_size += htab->non_ovly_stub * ovl_stub_size (htab->params);
4363 if (fixed_size + mos_param.max_overlay_size <= htab->local_store)
4364 {
4365 if (htab->params->ovly_flavour == ovly_soft_icache)
4366 {
4367 /* Stubs in the non-icache area are bigger. */
4368 fixed_size += htab->non_ovly_stub * 16;
4369 /* Space for icache manager tables.
4370 a) Tag array, one quadword per cache line.
4371 - word 0: ia address of present line, init to zero. */
4372 fixed_size += 16 << htab->num_lines_log2;
4373 /* b) Rewrite "to" list, one quadword per cache line. */
4374 fixed_size += 16 << htab->num_lines_log2;
4375 /* c) Rewrite "from" list, one byte per outgoing branch (rounded up
4376 to a power-of-two number of full quadwords) per cache line. */
4377 fixed_size += 16 << (htab->fromelem_size_log2
4378 + htab->num_lines_log2);
4379 /* d) Pointer to __ea backing store (toe), 1 quadword. */
4380 fixed_size += 16;
4381 }
4382 else
4383 {
4384 /* Guess the number of overlays. Assuming the overlay buffer is
4385 on average only half full should be conservative. */
4386 ovlynum = (total_overlay_size * 2 * htab->params->num_lines
4387 / (htab->local_store - fixed_size));
4388 /* Space for _ovly_table[], _ovly_buf_table[] and toe. */
4389 fixed_size += ovlynum * 16 + 16 + 4 + 16;
4390 }
4391 }
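
/* For illustration, with 32 cache lines (num_lines_log2 == 5) and
   fromelem_size_log2 == 1, the icache tables above cost
   16 << 5 = 512 bytes for the tag array, another 512 for the
   rewrite "to" list, 16 << (1 + 5) = 1024 for the rewrite "from"
   list, and 16 for the toe pointer: 2064 bytes of fixed overhead.  */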
4392
4393 if (fixed_size + mos_param.max_overlay_size > htab->local_store)
4394 /* xgettext:c-format */
4395 info->callbacks->einfo (_("non-overlay size of 0x%v plus maximum overlay "
4396 "size of 0x%v exceeds local store\n"),
4397 (bfd_vma) fixed_size,
4398 (bfd_vma) mos_param.max_overlay_size);
4399
4400 /* Now see if we should put some functions in the non-overlay area. */
4401 else if (fixed_size < htab->params->auto_overlay_fixed)
4402 {
4403 unsigned int max_fixed, lib_size;
4404
4405 max_fixed = htab->local_store - mos_param.max_overlay_size;
4406 if (max_fixed > htab->params->auto_overlay_fixed)
4407 max_fixed = htab->params->auto_overlay_fixed;
4408 lib_size = max_fixed - fixed_size;
4409 lib_size = auto_ovl_lib_functions (info, lib_size);
4410 if (lib_size == (unsigned int) -1)
4411 goto err_exit;
4412 fixed_size = max_fixed - lib_size;
4413 }
4414
4415 /* Build an array of sections, suitably sorted to place into
4416 overlays. */
4417 ovly_sections = bfd_malloc (2 * count * sizeof (*ovly_sections));
4418 if (ovly_sections == NULL)
4419 goto err_exit;
4420 ovly_p = ovly_sections;
4421 if (!for_each_node (collect_overlays, info, &ovly_p, true))
4422 goto err_exit;
4423 count = (size_t) (ovly_p - ovly_sections) / 2;
4424 ovly_map = bfd_malloc (count * sizeof (*ovly_map));
4425 if (ovly_map == NULL)
4426 goto err_exit;
4427
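/* ovly_sections holds (code, rodata-or-NULL) pairs as laid down by
   collect_overlays; ovly_map[i] will record the overlay number the
   packing loop below assigns to the i'th pair.  */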
4428 memset (&dummy_caller, 0, sizeof (dummy_caller));
4429 overlay_size = (htab->local_store - fixed_size) / htab->params->num_lines;
4430 if (htab->params->line_size != 0)
4431 overlay_size = htab->params->line_size;
4432 base = 0;
4433 ovlynum = 0;
4434 while (base < count)
4435 {
4436 unsigned int size = 0, rosize = 0, roalign = 0;
4437
4438 for (i = base; i < count; i++)
4439 {
4440 asection *sec, *rosec;
4441 unsigned int tmp, rotmp;
4442 unsigned int num_stubs;
4443 struct call_info *call, *pasty;
4444 struct _spu_elf_section_data *sec_data;
4445 struct spu_elf_stack_info *sinfo;
4446 unsigned int k;
4447
4448 /* See whether we can add this section to the current
4449 overlay without overflowing our overlay buffer. */
4450 sec = ovly_sections[2 * i];
4451 tmp = align_power (size, sec->alignment_power) + sec->size;
4452 rotmp = rosize;
4453 rosec = ovly_sections[2 * i + 1];
4454 if (rosec != NULL)
4455 {
4456 rotmp = align_power (rotmp, rosec->alignment_power) + rosec->size;
4457 if (roalign < rosec->alignment_power)
4458 roalign = rosec->alignment_power;
4459 }
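	  /* align_power rounds up to the given alignment, e.g.
	     align_power (0x1234, 4) == 0x1240.  */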
4460 if (align_power (tmp, roalign) + rotmp > overlay_size)
4461 break;
4462 if (sec->segment_mark)
4463 {
4464 /* Pasted sections must stay together, so add their
4465 sizes too. */
4466 pasty = find_pasted_call (sec);
4467 while (pasty != NULL)
4468 {
4469 struct function_info *call_fun = pasty->fun;
4470 tmp = (align_power (tmp, call_fun->sec->alignment_power)
4471 + call_fun->sec->size);
4472 if (call_fun->rodata)
4473 {
4474 rotmp = (align_power (rotmp,
4475 call_fun->rodata->alignment_power)
4476 + call_fun->rodata->size);
4477 if (roalign < call_fun->rodata->alignment_power)
4478 roalign = call_fun->rodata->alignment_power;
4479 }
4480 for (pasty = call_fun->call_list; pasty; pasty = pasty->next)
4481 if (pasty->is_pasted)
4482 break;
4483 }
4484 }
4485 if (align_power (tmp, roalign) + rotmp > overlay_size)
4486 break;
4487
4488 /* If we add this section, we might need new overlay call
4489 stubs. Add any overlay section calls to dummy_caller. */
4490 pasty = NULL;
4491 sec_data = spu_elf_section_data (sec);
4492 sinfo = sec_data->u.i.stack_info;
4493 for (k = 0; k < (unsigned) sinfo->num_fun; ++k)
4494 for (call = sinfo->fun[k].call_list; call; call = call->next)
4495 if (call->is_pasted)
4496 {
4497 BFD_ASSERT (pasty == NULL);
4498 pasty = call;
4499 }
4500 else if (call->fun->sec->linker_mark)
4501 {
4502 if (!copy_callee (&dummy_caller, call))
4503 goto err_exit;
4504 }
4505 while (pasty != NULL)
4506 {
4507 struct function_info *call_fun = pasty->fun;
4508 pasty = NULL;
4509 for (call = call_fun->call_list; call; call = call->next)
4510 if (call->is_pasted)
4511 {
4512 BFD_ASSERT (pasty == NULL);
4513 pasty = call;
4514 }
4515 else if (!copy_callee (&dummy_caller, call))
4516 goto err_exit;
4517 }
4518
4519 /* Calculate call stub size. */
4520 num_stubs = 0;
4521 for (call = dummy_caller.call_list; call; call = call->next)
4522 {
4523 unsigned int stub_delta = 1;
4524
4525 if (htab->params->ovly_flavour == ovly_soft_icache)
4526 stub_delta = call->count;
4527 num_stubs += stub_delta;
4528
4529 /* If the call is within this overlay, we won't need a
4530 stub. */
4531 for (k = base; k < i + 1; k++)
4532 if (call->fun->sec == ovly_sections[2 * k])
4533 {
4534 num_stubs -= stub_delta;
4535 break;
4536 }
4537 }
4538 if (htab->params->ovly_flavour == ovly_soft_icache
4539 && num_stubs > htab->params->max_branch)
4540 break;
4541 if (align_power (tmp, roalign) + rotmp
4542 + num_stubs * ovl_stub_size (htab->params) > overlay_size)
4543 break;
4544 size = tmp;
4545 rosize = rotmp;
4546 }
4547
4548 if (i == base)
4549 {
4550 /* xgettext:c-format */
4551 info->callbacks->einfo (_("%pB:%pA%s exceeds overlay size\n"),
4552 ovly_sections[2 * i]->owner,
4553 ovly_sections[2 * i],
4554 ovly_sections[2 * i + 1] ? " + rodata" : "");
4555 bfd_set_error (bfd_error_bad_value);
4556 goto err_exit;
4557 }
4558
4559 while (dummy_caller.call_list != NULL)
4560 {
4561 struct call_info *call = dummy_caller.call_list;
4562 dummy_caller.call_list = call->next;
4563 free (call);
4564 }
4565
4566 ++ovlynum;
4567 while (base < i)
4568 ovly_map[base++] = ovlynum;
4569 }
4570
4571 script = htab->params->spu_elf_open_overlay_script ();
4572
4573 if (htab->params->ovly_flavour == ovly_soft_icache)
4574 {
4575 if (fprintf (script, "SECTIONS\n{\n") <= 0)
4576 goto file_err;
4577
4578 if (fprintf (script,
4579 " . = ALIGN (%u);\n"
4580 " .ovl.init : { *(.ovl.init) }\n"
4581 " . = ABSOLUTE (ADDR (.ovl.init));\n",
4582 htab->params->line_size) <= 0)
4583 goto file_err;
4584
4585 base = 0;
4586 ovlynum = 1;
4587 while (base < count)
4588 {
4589 unsigned int indx = ovlynum - 1;
4590 unsigned int vma, lma;
4591
4592 vma = (indx & (htab->params->num_lines - 1)) << htab->line_size_log2;
4593 lma = vma + (((indx >> htab->num_lines_log2) + 1) << 18);
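	  /* For example, with 32 lines (num_lines_log2 == 5) of 1024
	     bytes (line_size_log2 == 10), overlay 35 has indx 34, so
	     vma = (34 & 31) << 10 = 0x800 and
	     lma = 0x800 + (((34 >> 5) + 1) << 18) = 0x80800, placing
	     each group of 32 overlays in its own 256k block of load
	     addresses.  */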
4594
4595 if (fprintf (script, " .ovly%u ABSOLUTE (ADDR (.ovl.init)) + %u "
4596 ": AT (LOADADDR (.ovl.init) + %u) {\n",
4597 ovlynum, vma, lma) <= 0)
4598 goto file_err;
4599
4600 base = print_one_overlay_section (script, base, count, ovlynum,
4601 ovly_map, ovly_sections, info);
4602 if (base == (unsigned) -1)
4603 goto file_err;
4604
4605 if (fprintf (script, " }\n") <= 0)
4606 goto file_err;
4607
4608 ovlynum++;
4609 }
4610
4611 if (fprintf (script, " . = ABSOLUTE (ADDR (.ovl.init)) + %u;\n",
4612 1 << (htab->num_lines_log2 + htab->line_size_log2)) <= 0)
4613 goto file_err;
4614
4615 if (fprintf (script, "}\nINSERT AFTER .toe;\n") <= 0)
4616 goto file_err;
4617 }
4618 else
4619 {
4620 if (fprintf (script, "SECTIONS\n{\n") <= 0)
4621 goto file_err;
4622
4623 if (fprintf (script,
4624 " . = ALIGN (16);\n"
4625 " .ovl.init : { *(.ovl.init) }\n"
4626 " . = ABSOLUTE (ADDR (.ovl.init));\n") <= 0)
4627 goto file_err;
4628
4629 for (region = 1; region <= htab->params->num_lines; region++)
4630 {
4631 ovlynum = region;
4632 base = 0;
4633 while (base < count && ovly_map[base] < ovlynum)
4634 base++;
4635
4636 if (base == count)
4637 break;
4638
4639 if (region == 1)
4640 {
4641 /* We need to set lma since we are overlaying .ovl.init. */
4642 if (fprintf (script,
4643 " OVERLAY : AT (ALIGN (LOADADDR (.ovl.init) + SIZEOF (.ovl.init), 16))\n {\n") <= 0)
4644 goto file_err;
4645 }
4646 else
4647 {
4648 if (fprintf (script, " OVERLAY :\n {\n") <= 0)
4649 goto file_err;
4650 }
4651
4652 while (base < count)
4653 {
4654 if (fprintf (script, " .ovly%u {\n", ovlynum) <= 0)
4655 goto file_err;
4656
4657 base = print_one_overlay_section (script, base, count, ovlynum,
4658 ovly_map, ovly_sections, info);
4659 if (base == (unsigned) -1)
4660 goto file_err;
4661
4662 if (fprintf (script, " }\n") <= 0)
4663 goto file_err;
4664
4665 ovlynum += htab->params->num_lines;
4666 while (base < count && ovly_map[base] < ovlynum)
4667 base++;
4668 }
4669
4670 if (fprintf (script, " }\n") <= 0)
4671 goto file_err;
4672 }
4673
4674 if (fprintf (script, "}\nINSERT BEFORE .text;\n") <= 0)
4675 goto file_err;
4676 }
4677
4678 free (ovly_map);
4679 free (ovly_sections);
4680
4681 if (fclose (script) != 0)
4682 goto file_err;
4683
4684 if (htab->params->auto_overlay & AUTO_RELINK)
4685 (*htab->params->spu_elf_relink) ();
4686
4687 xexit (0);
4688
4689 file_err:
4690 bfd_set_error (bfd_error_system_call);
4691 err_exit:
4692 info->callbacks->einfo (_("%F%P: auto overlay error: %E\n"));
4693 xexit (1);
4694 }
4695
4696 /* Provide an estimate of total stack required. */
4697
4698 static bool
4699 spu_elf_stack_analysis (struct bfd_link_info *info)
4700 {
4701 struct spu_link_hash_table *htab;
4702 struct _sum_stack_param sum_stack_param;
4703
4704 if (!discover_functions (info))
4705 return false;
4706
4707 if (!build_call_tree (info))
4708 return false;
4709
4710 htab = spu_hash_table (info);
4711 if (htab->params->stack_analysis)
4712 {
4713 info->callbacks->info (_("Stack size for call graph root nodes.\n"));
4714 info->callbacks->minfo (_("\nStack size for functions. "
4715 "Annotations: '*' max stack, 't' tail call\n"));
4716 }
4717
4718 sum_stack_param.emit_stack_syms = htab->params->emit_stack_syms;
4719 sum_stack_param.overall_stack = 0;
4720 if (!for_each_node (sum_stack, info, &sum_stack_param, true))
4721 return false;
4722
4723 if (htab->params->stack_analysis)
4724 info->callbacks->info (_("Maximum stack required is 0x%v\n"),
4725 (bfd_vma) sum_stack_param.overall_stack);
4726 return true;
4727 }
4728
4729 /* Perform a final link. */
4730
4731 static bool
4732 spu_elf_final_link (bfd *output_bfd, struct bfd_link_info *info)
4733 {
4734 struct spu_link_hash_table *htab = spu_hash_table (info);
4735
4736 if (htab->params->auto_overlay)
4737 spu_elf_auto_overlay (info);
4738
4739 if ((htab->params->stack_analysis
4740 || (htab->params->ovly_flavour == ovly_soft_icache
4741 && htab->params->lrlive_analysis))
4742 && !spu_elf_stack_analysis (info))
4743 info->callbacks->einfo (_("%X%P: stack/lrlive analysis error: %E\n"));
4744
4745 if (!spu_elf_build_stubs (info))
4746 info->callbacks->einfo (_("%F%P: can not build overlay stubs: %E\n"));
4747
4748 return bfd_elf_final_link (output_bfd, info);
4749 }
4750
4751 /* Called when not normally emitting relocs, i.e. !bfd_link_relocatable (info)
4752 and !info->emitrelocations. Returns a count of special relocs
4753 that need to be emitted. */
4754
4755 static unsigned int
4756 spu_elf_count_relocs (struct bfd_link_info *info, asection *sec)
4757 {
4758 Elf_Internal_Rela *relocs;
4759 unsigned int count = 0;
4760
4761 relocs = _bfd_elf_link_read_relocs (sec->owner, sec, NULL, NULL,
4762 info->keep_memory);
4763 if (relocs != NULL)
4764 {
4765 Elf_Internal_Rela *rel;
4766 Elf_Internal_Rela *relend = relocs + sec->reloc_count;
4767
4768 for (rel = relocs; rel < relend; rel++)
4769 {
4770 int r_type = ELF32_R_TYPE (rel->r_info);
4771 if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
4772 ++count;
4773 }
4774
4775 if (elf_section_data (sec)->relocs != relocs)
4776 free (relocs);
4777 }
4778
4779 return count;
4780 }
4781
4782 /* Functions for adding fixup records to .fixup. */
4783
4784 #define FIXUP_RECORD_SIZE 4
4785
4786 #define FIXUP_PUT(output_bfd,htab,index,addr) \
4787 bfd_put_32 (output_bfd, addr, \
4788 htab->sfixup->contents + FIXUP_RECORD_SIZE * (index))
4789 #define FIXUP_GET(output_bfd,htab,index) \
4790 bfd_get_32 (output_bfd, \
4791 htab->sfixup->contents + FIXUP_RECORD_SIZE * (index))
4792
4793 /* Store OFFSET in .fixup. This assumes it will be called with an
4794 increasing OFFSET. When this OFFSET fits with the last base offset,
4795 it just sets a bit, otherwise it adds a new fixup record. */
4796 static void
4797 spu_elf_emit_fixup (bfd * output_bfd, struct bfd_link_info *info,
4798 bfd_vma offset)
4799 {
4800 struct spu_link_hash_table *htab = spu_hash_table (info);
4801 asection *sfixup = htab->sfixup;
4802 bfd_vma qaddr = offset & ~(bfd_vma) 15;
4803 bfd_vma bit = ((bfd_vma) 8) >> ((offset & 15) >> 2);
4804 if (sfixup->reloc_count == 0)
4805 {
4806 FIXUP_PUT (output_bfd, htab, 0, qaddr | bit);
4807 sfixup->reloc_count++;
4808 }
4809 else
4810 {
4811 bfd_vma base = FIXUP_GET (output_bfd, htab, sfixup->reloc_count - 1);
4812 if (qaddr != (base & ~(bfd_vma) 15))
4813 {
4814 if ((sfixup->reloc_count + 1) * FIXUP_RECORD_SIZE > sfixup->size)
4815 _bfd_error_handler (_("fatal error while creating .fixup"));
4816 FIXUP_PUT (output_bfd, htab, sfixup->reloc_count, qaddr | bit);
4817 sfixup->reloc_count++;
4818 }
4819 else
4820 FIXUP_PUT (output_bfd, htab, sfixup->reloc_count - 1, base | bit);
4821 }
4822 }
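
/* A minimal sketch, unused by the linker itself, of how one such
   record can be decoded back into the word addresses it covers.
   spu_decode_fixup_record is a hypothetical name.  */

static ATTRIBUTE_UNUSED int
spu_decode_fixup_record (bfd_vma record, bfd_vma *addrs)
{
  bfd_vma qaddr = record & ~(bfd_vma) 15;
  int word, n = 0;

  for (word = 0; word < 4; word++)
    if ((record & (8u >> word)) != 0)
      /* Bit 8 >> WORD marks the 32-bit slot at QADDR + 4 * WORD.  */
      addrs[n++] = qaddr + 4 * word;
  return n;
}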
4823
4824 /* Apply RELOCS to CONTENTS of INPUT_SECTION from INPUT_BFD. */
4825
4826 static int
4827 spu_elf_relocate_section (bfd *output_bfd,
4828 struct bfd_link_info *info,
4829 bfd *input_bfd,
4830 asection *input_section,
4831 bfd_byte *contents,
4832 Elf_Internal_Rela *relocs,
4833 Elf_Internal_Sym *local_syms,
4834 asection **local_sections)
4835 {
4836 Elf_Internal_Shdr *symtab_hdr;
4837 struct elf_link_hash_entry **sym_hashes;
4838 Elf_Internal_Rela *rel, *relend;
4839 struct spu_link_hash_table *htab;
4840 asection *ea;
4841 int ret = true;
4842 bool emit_these_relocs = false;
4843 bool is_ea_sym;
4844 bool stubs;
4845 unsigned int iovl = 0;
4846
4847 htab = spu_hash_table (info);
4848 stubs = (htab->stub_sec != NULL
4849 && maybe_needs_stubs (input_section));
4850 iovl = overlay_index (input_section);
4851 ea = bfd_get_section_by_name (output_bfd, "._ea");
4852 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
4853 sym_hashes = (struct elf_link_hash_entry **) (elf_sym_hashes (input_bfd));
4854
4855 rel = relocs;
4856 relend = relocs + input_section->reloc_count;
4857 for (; rel < relend; rel++)
4858 {
4859 int r_type;
4860 reloc_howto_type *howto;
4861 unsigned int r_symndx;
4862 Elf_Internal_Sym *sym;
4863 asection *sec;
4864 struct elf_link_hash_entry *h;
4865 const char *sym_name;
4866 bfd_vma relocation;
4867 bfd_vma addend;
4868 bfd_reloc_status_type r;
4869 bool unresolved_reloc;
4870 enum _stub_type stub_type;
4871
4872 r_symndx = ELF32_R_SYM (rel->r_info);
4873 r_type = ELF32_R_TYPE (rel->r_info);
4874 howto = elf_howto_table + r_type;
4875 unresolved_reloc = false;
4876 h = NULL;
4877 sym = NULL;
4878 sec = NULL;
4879 if (r_symndx < symtab_hdr->sh_info)
4880 {
4881 sym = local_syms + r_symndx;
4882 sec = local_sections[r_symndx];
4883 sym_name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym, sec);
4884 relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
4885 }
4886 else
4887 {
4888 if (sym_hashes == NULL)
4889 return false;
4890
4891 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
4892
4893 if (info->wrap_hash != NULL
4894 && (input_section->flags & SEC_DEBUGGING) != 0)
4895 h = ((struct elf_link_hash_entry *)
4896 unwrap_hash_lookup (info, input_bfd, &h->root));
4897
4898 while (h->root.type == bfd_link_hash_indirect
4899 || h->root.type == bfd_link_hash_warning)
4900 h = (struct elf_link_hash_entry *) h->root.u.i.link;
4901
4902 relocation = 0;
4903 if (h->root.type == bfd_link_hash_defined
4904 || h->root.type == bfd_link_hash_defweak)
4905 {
4906 sec = h->root.u.def.section;
4907 if (sec == NULL
4908 || sec->output_section == NULL)
4909 /* Set a flag that will be cleared later if we find a
4910 relocation value for this symbol. output_section
4911 is typically NULL for symbols satisfied by a shared
4912 library. */
4913 unresolved_reloc = true;
4914 else
4915 relocation = (h->root.u.def.value
4916 + sec->output_section->vma
4917 + sec->output_offset);
4918 }
4919 else if (h->root.type == bfd_link_hash_undefweak)
4920 ;
4921 else if (info->unresolved_syms_in_objects == RM_IGNORE
4922 && ELF_ST_VISIBILITY (h->other) == STV_DEFAULT)
4923 ;
4924 else if (!bfd_link_relocatable (info)
4925 && !(r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64))
4926 {
4927 bool err;
4928
4929 err = (info->unresolved_syms_in_objects == RM_DIAGNOSE
4930 && !info->warn_unresolved_syms)
4931 || ELF_ST_VISIBILITY (h->other) != STV_DEFAULT;
4932
4933 info->callbacks->undefined_symbol
4934 (info, h->root.root.string, input_bfd,
4935 input_section, rel->r_offset, err);
4936 }
4937 sym_name = h->root.root.string;
4938 }
4939
4940 if (sec != NULL && discarded_section (sec))
4941 RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
4942 rel, 1, relend, howto, 0, contents);
4943
4944 if (bfd_link_relocatable (info))
4945 continue;
4946
4947 /* Change "a rt,ra,rb" to "ai rt,ra,0". */
4948 if (r_type == R_SPU_ADD_PIC
4949 && h != NULL
4950 && !(h->def_regular || ELF_COMMON_DEF_P (h)))
4951 {
4952 bfd_byte *loc = contents + rel->r_offset;
4953 loc[0] = 0x1c;
4954 loc[1] = 0x00;
4955 loc[2] &= 0x3f;
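	  /* Assuming the standard SPU formats ("a" is RR: 11-bit
	     opcode, RB, RA, RT; "ai" is RI10: 8-bit opcode 0x1c,
	     10-bit immediate, RA, RT), this stores the "ai" opcode
	     and zeroes the immediate; RA and RT occupy the same bit
	     positions in both forms and are left untouched.  */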
4956 }
4957
4958 is_ea_sym = (ea != NULL
4959 && sec != NULL
4960 && sec->output_section == ea);
4961
4962 /* If this symbol is in an overlay area, we may need to relocate
4963 to the overlay stub. */
4964 addend = rel->r_addend;
4965 if (stubs
4966 && !is_ea_sym
4967 && (stub_type = needs_ovl_stub (h, sym, sec, input_section, rel,
4968 contents, info)) != no_stub)
4969 {
4970 unsigned int ovl = 0;
4971 struct got_entry *g, **head;
4972
4973 if (stub_type != nonovl_stub)
4974 ovl = iovl;
4975
4976 if (h != NULL)
4977 head = &h->got.glist;
4978 else
4979 head = elf_local_got_ents (input_bfd) + r_symndx;
4980
4981 for (g = *head; g != NULL; g = g->next)
4982 if (htab->params->ovly_flavour == ovly_soft_icache
4983 ? (g->ovl == ovl
4984 && g->br_addr == (rel->r_offset
4985 + input_section->output_offset
4986 + input_section->output_section->vma))
4987 : g->addend == addend && (g->ovl == ovl || g->ovl == 0))
4988 break;
4989 if (g == NULL)
4990 abort ();
4991
4992 relocation = g->stub_addr;
4993 addend = 0;
4994 }
4995 else
4996 {
4997 /* For soft icache, encode the overlay index into addresses. */
4998 if (htab->params->ovly_flavour == ovly_soft_icache
4999 && (r_type == R_SPU_ADDR16_HI
5000 || r_type == R_SPU_ADDR32 || r_type == R_SPU_REL32)
5001 && !is_ea_sym)
5002 {
5003 unsigned int ovl = overlay_index (sec);
5004 if (ovl != 0)
5005 {
5006 unsigned int set_id = ((ovl - 1) >> htab->num_lines_log2) + 1;
5007 relocation += set_id << 18;
5008 }
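	      /* E.g. with 32 cache lines (num_lines_log2 == 5), a
		 symbol in overlay 33 gets set_id 2, matching the 256k
		 block used for that overlay's load address in the
		 auto-overlay script.  */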
5009 }
5010 }
5011
5012 if (htab->params->emit_fixups && !bfd_link_relocatable (info)
5013 && (input_section->flags & SEC_ALLOC) != 0
5014 && r_type == R_SPU_ADDR32)
5015 {
5016 bfd_vma offset;
5017 offset = rel->r_offset + input_section->output_section->vma
5018 + input_section->output_offset;
5019 spu_elf_emit_fixup (output_bfd, info, offset);
5020 }
5021
5022 if (unresolved_reloc)
5023 ;
5024 else if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
5025 {
5026 if (is_ea_sym)
5027 {
5028 /* ._ea is a special section that isn't allocated in SPU
5029 memory, but rather occupies space in PPU memory as
5030 part of an embedded ELF image. If this reloc is
5031 against a symbol defined in ._ea, then transform the
5032 reloc into an equivalent symbol-less one, relative
5033 to the start of the ELF image. */
5034 rel->r_addend += (relocation
5035 - ea->vma
5036 + elf_section_data (ea)->this_hdr.sh_offset);
5037 rel->r_info = ELF32_R_INFO (0, r_type);
5038 }
5039 emit_these_relocs = true;
5040 continue;
5041 }
5042 else if (is_ea_sym)
5043 unresolved_reloc = true;
5044
5045 if (unresolved_reloc
5046 && _bfd_elf_section_offset (output_bfd, info, input_section,
5047 rel->r_offset) != (bfd_vma) -1)
5048 {
5049 _bfd_error_handler
5050 /* xgettext:c-format */
5051 (_("%pB(%s+%#" PRIx64 "): "
5052 "unresolvable %s relocation against symbol `%s'"),
5053 input_bfd,
5054 bfd_section_name (input_section),
5055 (uint64_t) rel->r_offset,
5056 howto->name,
5057 sym_name);
5058 ret = false;
5059 }
5060
5061 r = _bfd_final_link_relocate (howto,
5062 input_bfd,
5063 input_section,
5064 contents,
5065 rel->r_offset, relocation, addend);
5066
5067 if (r != bfd_reloc_ok)
5068 {
5069 const char *msg = (const char *) 0;
5070
5071 switch (r)
5072 {
5073 case bfd_reloc_overflow:
5074 (*info->callbacks->reloc_overflow)
5075 (info, (h ? &h->root : NULL), sym_name, howto->name,
5076 (bfd_vma) 0, input_bfd, input_section, rel->r_offset);
5077 break;
5078
5079 case bfd_reloc_undefined:
5080 (*info->callbacks->undefined_symbol)
5081 (info, sym_name, input_bfd, input_section, rel->r_offset, true);
5082 break;
5083
5084 case bfd_reloc_outofrange:
5085 msg = _("internal error: out of range error");
5086 goto common_error;
5087
5088 case bfd_reloc_notsupported:
5089 msg = _("internal error: unsupported relocation error");
5090 goto common_error;
5091
5092 case bfd_reloc_dangerous:
5093 msg = _("internal error: dangerous error");
5094 goto common_error;
5095
5096 default:
5097 msg = _("internal error: unknown error");
5098 /* fall through */
5099
5100 common_error:
5101 ret = false;
5102 (*info->callbacks->warning) (info, msg, sym_name, input_bfd,
5103 input_section, rel->r_offset);
5104 break;
5105 }
5106 }
5107 }
5108
5109 if (ret
5110 && emit_these_relocs
5111 && !info->emitrelocations)
5112 {
5113 Elf_Internal_Rela *wrel;
5114 Elf_Internal_Shdr *rel_hdr;
5115
5116 wrel = rel = relocs;
5117 relend = relocs + input_section->reloc_count;
5118 for (; rel < relend; rel++)
5119 {
5120 int r_type;
5121
5122 r_type = ELF32_R_TYPE (rel->r_info);
5123 if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
5124 *wrel++ = *rel;
5125 }
5126 input_section->reloc_count = wrel - relocs;
5127 /* Adjust the reloc header so _bfd_elf_link_output_relocs emits the reduced set. */
5128 rel_hdr = _bfd_elf_single_rel_hdr (input_section);
5129 rel_hdr->sh_size = input_section->reloc_count * rel_hdr->sh_entsize;
5130 ret = 2;
5131 }
5132
5133 return ret;
5134 }
5135
5136 static bool
5137 spu_elf_finish_dynamic_sections (bfd *output_bfd ATTRIBUTE_UNUSED,
5138 struct bfd_link_info *info ATTRIBUTE_UNUSED)
5139 {
5140 return true;
5141 }
5142
5143 /* Adjust _SPUEAR_ syms to point at their overlay stubs. */
5144
5145 static int
5146 spu_elf_output_symbol_hook (struct bfd_link_info *info,
5147 const char *sym_name ATTRIBUTE_UNUSED,
5148 Elf_Internal_Sym *sym,
5149 asection *sym_sec ATTRIBUTE_UNUSED,
5150 struct elf_link_hash_entry *h)
5151 {
5152 struct spu_link_hash_table *htab = spu_hash_table (info);
5153
5154 if (!bfd_link_relocatable (info)
5155 && htab->stub_sec != NULL
5156 && h != NULL
5157 && (h->root.type == bfd_link_hash_defined
5158 || h->root.type == bfd_link_hash_defweak)
5159 && h->def_regular
5160 && startswith (h->root.root.string, "_SPUEAR_"))
5161 {
5162 struct got_entry *g;
5163
5164 for (g = h->got.glist; g != NULL; g = g->next)
5165 if (htab->params->ovly_flavour == ovly_soft_icache
5166 ? g->br_addr == g->stub_addr
5167 : g->addend == 0 && g->ovl == 0)
5168 {
5169 sym->st_shndx = (_bfd_elf_section_from_bfd_section
5170 (htab->stub_sec[0]->output_section->owner,
5171 htab->stub_sec[0]->output_section));
5172 sym->st_value = g->stub_addr;
5173 break;
5174 }
5175 }
5176
5177 return 1;
5178 }
5179
5180 static int spu_plugin = 0;
5181
5182 void
5183 spu_elf_plugin (int val)
5184 {
5185 spu_plugin = val;
5186 }
5187
5188 /* Set ELF header e_type for plugins. */
5189
5190 static bool
5191 spu_elf_init_file_header (bfd *abfd, struct bfd_link_info *info)
5192 {
5193 if (!_bfd_elf_init_file_header (abfd, info))
5194 return false;
5195
5196 if (spu_plugin)
5197 {
5198 Elf_Internal_Ehdr *i_ehdrp = elf_elfheader (abfd);
5199
5200 i_ehdrp->e_type = ET_DYN;
5201 }
5202 return true;
5203 }
5204
5205 /* We may add an extra PT_LOAD segment for .toe. We also need extra
5206 segments for overlays. */
5207
5208 static int
5209 spu_elf_additional_program_headers (bfd *abfd, struct bfd_link_info *info)
5210 {
5211 int extra = 0;
5212 asection *sec;
5213
5214 if (info != NULL)
5215 {
5216 struct spu_link_hash_table *htab = spu_hash_table (info);
5217 extra = htab->num_overlays;
5218 }
5219
5220 if (extra)
5221 ++extra;
5222
5223 sec = bfd_get_section_by_name (abfd, ".toe");
5224 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
5225 ++extra;
5226
5227 return extra;
5228 }
5229
5230 /* Remove .toe section from other PT_LOAD segments and put it in
5231 a segment of its own. Put overlays in separate segments too. */
5232
5233 static bool
5234 spu_elf_modify_segment_map (bfd *abfd, struct bfd_link_info *info)
5235 {
5236 asection *toe, *s;
5237 struct elf_segment_map *m, *m_overlay;
5238 struct elf_segment_map **p, **p_overlay, **first_load;
5239 unsigned int i;
5240
5241 if (info == NULL)
5242 return true;
5243
5244 toe = bfd_get_section_by_name (abfd, ".toe");
5245 for (m = elf_seg_map (abfd); m != NULL; m = m->next)
5246 if (m->p_type == PT_LOAD && m->count > 1)
5247 for (i = 0; i < m->count; i++)
5248 if ((s = m->sections[i]) == toe
5249 || spu_elf_section_data (s)->u.o.ovl_index != 0)
5250 {
5251 struct elf_segment_map *m2;
5252 bfd_vma amt;
5253
5254 if (i + 1 < m->count)
5255 {
5256 amt = sizeof (struct elf_segment_map);
5257 amt += (m->count - (i + 2)) * sizeof (m->sections[0]);
5258 m2 = bfd_zalloc (abfd, amt);
5259 if (m2 == NULL)
5260 return false;
5261 m2->count = m->count - (i + 1);
5262 memcpy (m2->sections, m->sections + i + 1,
5263 m2->count * sizeof (m->sections[0]));
5264 m2->p_type = PT_LOAD;
5265 m2->next = m->next;
5266 m->next = m2;
5267 }
5268 m->count = 1;
5269 if (i != 0)
5270 {
5271 m->count = i;
5272 amt = sizeof (struct elf_segment_map);
5273 m2 = bfd_zalloc (abfd, amt);
5274 if (m2 == NULL)
5275 return false;
5276 m2->p_type = PT_LOAD;
5277 m2->count = 1;
5278 m2->sections[0] = s;
5279 m2->next = m->next;
5280 m->next = m2;
5281 }
5282 break;
5283 }
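
/* For example, a single PT_LOAD mapping {.text, .toe, .data} is
   rewritten by the loop above into three segments {.text}, {.toe}
   and {.data}, so .toe and each overlay section get program headers
   of their own.  */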
5284
5285
5286 /* Some SPU ELF loaders ignore the PF_OVERLAY flag and just load all
5287 PT_LOAD segments. This can cause the .ovl.init section to be
5288 overwritten with the contents of some overlay segment. To work
5289 around this issue, we ensure that all PF_OVERLAY segments are
5290 sorted first amongst the program headers; this ensures that even
5291 with a broken loader, the .ovl.init section (which is not marked
5292 as PF_OVERLAY) will be placed into SPU local store on startup. */
5293
5294 /* Move all overlay segments onto a separate list. */
5295 p = &elf_seg_map (abfd);
5296 p_overlay = &m_overlay;
5297 m_overlay = NULL;
5298 first_load = NULL;
5299 while (*p != NULL)
5300 {
5301 if ((*p)->p_type == PT_LOAD)
5302 {
5303 if (!first_load)
5304 first_load = p;
5305 if ((*p)->count == 1
5306 && spu_elf_section_data ((*p)->sections[0])->u.o.ovl_index != 0)
5307 {
5308 m = *p;
5309 m->no_sort_lma = 1;
5310 *p = m->next;
5311 *p_overlay = m;
5312 p_overlay = &m->next;
5313 continue;
5314 }
5315 }
5316 p = &((*p)->next);
5317 }
5318
5319 /* Re-insert overlay segments at the head of the segment map. */
5320 if (m_overlay != NULL)
5321 {
5322 p = first_load;
5323 if (*p != NULL && (*p)->p_type == PT_LOAD && (*p)->includes_filehdr)
5324 /* It doesn't really make sense for someone to include the ELF
5325 file header into an spu image, but if they do the code that
5326 assigns p_offset needs to see the segment containing the
5327 header first. */
5328 p = &(*p)->next;
5329 *p_overlay = *p;
5330 *p = m_overlay;
5331 }
5332
5333 return true;
5334 }
5335
5336 /* Tweak the section type of .note.spu_name. */
5337
5338 static bool
5339 spu_elf_fake_sections (bfd *obfd ATTRIBUTE_UNUSED,
5340 Elf_Internal_Shdr *hdr,
5341 asection *sec)
5342 {
5343 if (strcmp (sec->name, SPU_PTNOTE_SPUNAME) == 0)
5344 hdr->sh_type = SHT_NOTE;
5345 return true;
5346 }
5347
5348 /* Tweak phdrs before writing them out. */
5349
5350 static bool
5351 spu_elf_modify_headers (bfd *abfd, struct bfd_link_info *info)
5352 {
5353 if (info != NULL)
5354 {
5355 const struct elf_backend_data *bed;
5356 struct elf_obj_tdata *tdata;
5357 Elf_Internal_Phdr *phdr, *last;
5358 struct spu_link_hash_table *htab;
5359 unsigned int count;
5360 unsigned int i;
5361
5362 bed = get_elf_backend_data (abfd);
5363 tdata = elf_tdata (abfd);
5364 phdr = tdata->phdr;
5365 count = elf_program_header_size (abfd) / bed->s->sizeof_phdr;
5366 htab = spu_hash_table (info);
5367 if (htab->num_overlays != 0)
5368 {
5369 struct elf_segment_map *m;
5370 unsigned int o;
5371
5372 for (i = 0, m = elf_seg_map (abfd); m; ++i, m = m->next)
5373 if (m->count != 0
5374 && ((o = spu_elf_section_data (m->sections[0])->u.o.ovl_index)
5375 != 0))
5376 {
5377 /* Mark this as an overlay header. */
5378 phdr[i].p_flags |= PF_OVERLAY;
5379
5380 if (htab->ovtab != NULL && htab->ovtab->size != 0
5381 && htab->params->ovly_flavour != ovly_soft_icache)
5382 {
5383 bfd_byte *p = htab->ovtab->contents;
5384 unsigned int off = o * 16 + 8;
5385
5386 /* Write file_off into _ovly_table. */
5387 bfd_put_32 (htab->ovtab->owner, phdr[i].p_offset, p + off);
5388 }
5389 }
5390 /* Soft-icache has its file offset put in .ovl.init. */
5391 if (htab->init != NULL && htab->init->size != 0)
5392 {
5393 bfd_vma val
5394 = elf_section_data (htab->ovl_sec[0])->this_hdr.sh_offset;
5395
5396 bfd_put_32 (htab->init->owner, val, htab->init->contents + 4);
5397 }
5398 }
5399
5400 /* Round up p_filesz and p_memsz of PT_LOAD segments to multiples
5401 of 16. This should always be possible when using the standard
5402 linker scripts, but don't create overlapping segments if
5403 someone is playing games with linker scripts. */
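      /* E.g. -(bfd_vma) 0x1234 & 15 == 12, which would round p_filesz
	 from 0x1234 up to 0x1240.  The first pass below only checks
	 that such rounding cannot run into the following segment; the
	 second pass applies the adjustment if every segment passed.  */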
5404 last = NULL;
5405 for (i = count; i-- != 0; )
5406 if (phdr[i].p_type == PT_LOAD)
5407 {
5408 unsigned adjust;
5409
5410 adjust = -phdr[i].p_filesz & 15;
5411 if (adjust != 0
5412 && last != NULL
5413 && (phdr[i].p_offset + phdr[i].p_filesz
5414 > last->p_offset - adjust))
5415 break;
5416
5417 adjust = -phdr[i].p_memsz & 15;
5418 if (adjust != 0
5419 && last != NULL
5420 && phdr[i].p_filesz != 0
5421 && phdr[i].p_vaddr + phdr[i].p_memsz > last->p_vaddr - adjust
5422 && phdr[i].p_vaddr + phdr[i].p_memsz <= last->p_vaddr)
5423 break;
5424
5425 if (phdr[i].p_filesz != 0)
5426 last = &phdr[i];
5427 }
5428
5429 if (i == (unsigned int) -1)
5430 for (i = count; i-- != 0; )
5431 if (phdr[i].p_type == PT_LOAD)
5432 {
5433 unsigned adjust;
5434
5435 adjust = -phdr[i].p_filesz & 15;
5436 phdr[i].p_filesz += adjust;
5437
5438 adjust = -phdr[i].p_memsz & 15;
5439 phdr[i].p_memsz += adjust;
5440 }
5441 }
5442
5443 return _bfd_elf_modify_headers (abfd, info);
5444 }
5445
5446 bool
5447 spu_elf_size_sections (bfd *obfd ATTRIBUTE_UNUSED, struct bfd_link_info *info)
5448 {
5449 struct spu_link_hash_table *htab = spu_hash_table (info);
5450 if (htab->params->emit_fixups)
5451 {
5452 asection *sfixup = htab->sfixup;
5453 int fixup_count = 0;
5454 bfd *ibfd;
5455 size_t size;
5456
5457 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
5458 {
5459 asection *isec;
5460
5461 if (bfd_get_flavour (ibfd) != bfd_target_elf_flavour)
5462 continue;
5463
5464 /* Walk over each section attached to the input bfd. */
5465 for (isec = ibfd->sections; isec != NULL; isec = isec->next)
5466 {
5467 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
5468 bfd_vma base_end;
5469
5470 /* If there aren't any relocs, then there's nothing more
5471 to do. */
5472 if ((isec->flags & SEC_ALLOC) == 0
5473 || (isec->flags & SEC_RELOC) == 0
5474 || isec->reloc_count == 0)
5475 continue;
5476
5477 /* Get the relocs. */
5478 internal_relocs =
5479 _bfd_elf_link_read_relocs (ibfd, isec, NULL, NULL,
5480 info->keep_memory);
5481 if (internal_relocs == NULL)
5482 return false;
5483
5484 /* 1 quadword can contain up to 4 R_SPU_ADDR32
5485 relocations. They are stored in a single word by
5486 saving the upper 28 bits of the address and setting the
5487 lower 4 bits to a bit mask of the words that have the
5488 relocation. BASE_END keeps track of the next quadword. */
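	  /* E.g. R_SPU_ADDR32 relocs at offsets 0x100, 0x104 and 0x10c
	     all fall in the quadword at 0x100 and count as one fixup
	     record; the next such reloc at or beyond 0x110 starts a
	     new record.  */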
5489 irela = internal_relocs;
5490 irelaend = irela + isec->reloc_count;
5491 base_end = 0;
5492 for (; irela < irelaend; irela++)
5493 if (ELF32_R_TYPE (irela->r_info) == R_SPU_ADDR32
5494 && irela->r_offset >= base_end)
5495 {
5496 base_end = (irela->r_offset & ~(bfd_vma) 15) + 16;
5497 fixup_count++;
5498 }
5499 }
5500 }
5501
5502 /* We always have a NULL fixup as a sentinel. */
5503 size = (fixup_count + 1) * FIXUP_RECORD_SIZE;
5504 if (!bfd_set_section_size (sfixup, size))
5505 return false;
5506 sfixup->contents = (bfd_byte *) bfd_zalloc (info->input_bfds, size);
5507 if (sfixup->contents == NULL)
5508 return false;
5509 }
5510 return true;
5511 }
5512
5513 #define TARGET_BIG_SYM spu_elf32_vec
5514 #define TARGET_BIG_NAME "elf32-spu"
5515 #define ELF_ARCH bfd_arch_spu
5516 #define ELF_TARGET_ID SPU_ELF_DATA
5517 #define ELF_MACHINE_CODE EM_SPU
5518 /* This matches the alignment required for DMA. */
5519 #define ELF_MAXPAGESIZE 0x80
5520 #define elf_backend_rela_normal 1
5521 #define elf_backend_can_gc_sections 1
5522
5523 #define bfd_elf32_bfd_reloc_type_lookup spu_elf_reloc_type_lookup
5524 #define bfd_elf32_bfd_reloc_name_lookup spu_elf_reloc_name_lookup
5525 #define elf_info_to_howto spu_elf_info_to_howto
5526 #define elf_backend_count_relocs spu_elf_count_relocs
5527 #define elf_backend_relocate_section spu_elf_relocate_section
5528 #define elf_backend_finish_dynamic_sections spu_elf_finish_dynamic_sections
5529 #define elf_backend_symbol_processing spu_elf_backend_symbol_processing
5530 #define elf_backend_link_output_symbol_hook spu_elf_output_symbol_hook
5531 #define elf_backend_object_p spu_elf_object_p
5532 #define bfd_elf32_new_section_hook spu_elf_new_section_hook
5533 #define bfd_elf32_bfd_link_hash_table_create spu_elf_link_hash_table_create
5534
5535 #define elf_backend_additional_program_headers spu_elf_additional_program_headers
5536 #define elf_backend_modify_segment_map spu_elf_modify_segment_map
5537 #define elf_backend_modify_headers spu_elf_modify_headers
5538 #define elf_backend_init_file_header spu_elf_init_file_header
5539 #define elf_backend_fake_sections spu_elf_fake_sections
5540 #define elf_backend_special_sections spu_elf_special_sections
5541 #define bfd_elf32_bfd_final_link spu_elf_final_link
5542
5543 #include "elf32-target.h"