1 /* BFD back-end for Renesas Super-H COFF binaries.
2 Copyright 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
3 2003, 2004 Free Software Foundation, Inc.
4 Contributed by Cygnus Support.
5 Written by Steve Chamberlain, <sac@cygnus.com>.
6 Relaxing code written by Ian Lance Taylor, <ian@cygnus.com>.
7
8 This file is part of BFD, the Binary File Descriptor library.
9
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; either version 2 of the License, or
13 (at your option) any later version.
14
15 This program is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with this program; if not, write to the Free Software
22 Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
23
24 #include "bfd.h"
25 #include "sysdep.h"
26 #include "libiberty.h"
27 #include "libbfd.h"
28 #include "bfdlink.h"
29 #include "coff/sh.h"
30 #include "coff/internal.h"
31
32 #ifdef COFF_WITH_PE
33 #include "coff/pe.h"
34
35 #ifndef COFF_IMAGE_WITH_PE
36 static bfd_boolean sh_align_load_span
37 PARAMS ((bfd *, asection *, bfd_byte *,
38 bfd_boolean (*) (bfd *, asection *, PTR, bfd_byte *, bfd_vma),
39 PTR, bfd_vma **, bfd_vma *, bfd_vma, bfd_vma, bfd_boolean *));
40
41 #define _bfd_sh_align_load_span sh_align_load_span
42 #endif
43 #endif
44
45 #include "libcoff.h"
46
47 /* Internal functions. */
48 static bfd_reloc_status_type sh_reloc
49 PARAMS ((bfd *, arelent *, asymbol *, PTR, asection *, bfd *, char **));
50 static long get_symbol_value PARAMS ((asymbol *));
51 static bfd_boolean sh_relax_section
52 PARAMS ((bfd *, asection *, struct bfd_link_info *, bfd_boolean *));
53 static bfd_boolean sh_relax_delete_bytes
54 PARAMS ((bfd *, asection *, bfd_vma, int));
55 #ifndef COFF_IMAGE_WITH_PE
56 static const struct sh_opcode *sh_insn_info PARAMS ((unsigned int));
57 #endif
58 static bfd_boolean sh_align_loads
59 PARAMS ((bfd *, asection *, struct internal_reloc *, bfd_byte *,
60 bfd_boolean *));
61 static bfd_boolean sh_swap_insns
62 PARAMS ((bfd *, asection *, PTR, bfd_byte *, bfd_vma));
63 static bfd_boolean sh_relocate_section
64 PARAMS ((bfd *, struct bfd_link_info *, bfd *, asection *, bfd_byte *,
65 struct internal_reloc *, struct internal_syment *, asection **));
66 static bfd_byte *sh_coff_get_relocated_section_contents
67 PARAMS ((bfd *, struct bfd_link_info *, struct bfd_link_order *,
68 bfd_byte *, bfd_boolean, asymbol **));
69 static reloc_howto_type * sh_coff_reloc_type_lookup PARAMS ((bfd *, bfd_reloc_code_real_type));
70
71 #ifdef COFF_WITH_PE
72 /* Can't build import tables with 2**4 alignment. */
73 #define COFF_DEFAULT_SECTION_ALIGNMENT_POWER 2
74 #else
75 /* Default section alignment to 2**4. */
76 #define COFF_DEFAULT_SECTION_ALIGNMENT_POWER 4
77 #endif
78
79 #ifdef COFF_IMAGE_WITH_PE
80 /* Align PE executables. */
81 #define COFF_PAGE_SIZE 0x1000
82 #endif
83
84 /* Generate long file names. */
85 #define COFF_LONG_FILENAMES
86
87 #ifdef COFF_WITH_PE
88 static bfd_boolean in_reloc_p PARAMS ((bfd *, reloc_howto_type *));
89 /* Return TRUE if this relocation should
90 appear in the output .reloc section. */
91 static bfd_boolean in_reloc_p (abfd, howto)
92 bfd * abfd ATTRIBUTE_UNUSED;
93 reloc_howto_type * howto;
94 {
95 return ! howto->pc_relative && howto->type != R_SH_IMAGEBASE;
96 }
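/* For example (derived from the test above, not additional PE logic):
   an absolute R_SH_IMM32 or R_SH_IMM32CE reloc is reported as needing
   a .reloc entry, while rva32 (R_SH_IMAGEBASE) and the PC-relative
   relocs are not.  */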
97 #endif
98
99 /* The supported relocations. There are a lot of relocations defined
100 in coff/internal.h which we do not expect to ever see. */
101 static reloc_howto_type sh_coff_howtos[] =
102 {
103 EMPTY_HOWTO (0),
104 EMPTY_HOWTO (1),
105 #ifdef COFF_WITH_PE
106 /* Windows CE */
107 HOWTO (R_SH_IMM32CE, /* type */
108 0, /* rightshift */
109 2, /* size (0 = byte, 1 = short, 2 = long) */
110 32, /* bitsize */
111 FALSE, /* pc_relative */
112 0, /* bitpos */
113 complain_overflow_bitfield, /* complain_on_overflow */
114 sh_reloc, /* special_function */
115 "r_imm32ce", /* name */
116 TRUE, /* partial_inplace */
117 0xffffffff, /* src_mask */
118 0xffffffff, /* dst_mask */
119 FALSE), /* pcrel_offset */
120 #else
121 EMPTY_HOWTO (2),
122 #endif
123 EMPTY_HOWTO (3), /* R_SH_PCREL8 */
124 EMPTY_HOWTO (4), /* R_SH_PCREL16 */
125 EMPTY_HOWTO (5), /* R_SH_HIGH8 */
126 EMPTY_HOWTO (6), /* R_SH_IMM24 */
127 EMPTY_HOWTO (7), /* R_SH_LOW16 */
128 EMPTY_HOWTO (8),
129 EMPTY_HOWTO (9), /* R_SH_PCDISP8BY4 */
130
131 HOWTO (R_SH_PCDISP8BY2, /* type */
132 1, /* rightshift */
133 1, /* size (0 = byte, 1 = short, 2 = long) */
134 8, /* bitsize */
135 TRUE, /* pc_relative */
136 0, /* bitpos */
137 complain_overflow_signed, /* complain_on_overflow */
138 sh_reloc, /* special_function */
139 "r_pcdisp8by2", /* name */
140 TRUE, /* partial_inplace */
141 0xff, /* src_mask */
142 0xff, /* dst_mask */
143 TRUE), /* pcrel_offset */
144
145 EMPTY_HOWTO (11), /* R_SH_PCDISP8 */
146
147 HOWTO (R_SH_PCDISP, /* type */
148 1, /* rightshift */
149 1, /* size (0 = byte, 1 = short, 2 = long) */
150 12, /* bitsize */
151 TRUE, /* pc_relative */
152 0, /* bitpos */
153 complain_overflow_signed, /* complain_on_overflow */
154 sh_reloc, /* special_function */
155 "r_pcdisp12by2", /* name */
156 TRUE, /* partial_inplace */
157 0xfff, /* src_mask */
158 0xfff, /* dst_mask */
159 TRUE), /* pcrel_offset */
160
161 EMPTY_HOWTO (13),
162
163 HOWTO (R_SH_IMM32, /* type */
164 0, /* rightshift */
165 2, /* size (0 = byte, 1 = short, 2 = long) */
166 32, /* bitsize */
167 FALSE, /* pc_relative */
168 0, /* bitpos */
169 complain_overflow_bitfield, /* complain_on_overflow */
170 sh_reloc, /* special_function */
171 "r_imm32", /* name */
172 TRUE, /* partial_inplace */
173 0xffffffff, /* src_mask */
174 0xffffffff, /* dst_mask */
175 FALSE), /* pcrel_offset */
176
177 EMPTY_HOWTO (15),
178 #ifdef COFF_WITH_PE
179 HOWTO (R_SH_IMAGEBASE, /* type */
180 0, /* rightshift */
181 2, /* size (0 = byte, 1 = short, 2 = long) */
182 32, /* bitsize */
183 FALSE, /* pc_relative */
184 0, /* bitpos */
185 complain_overflow_bitfield, /* complain_on_overflow */
186 sh_reloc, /* special_function */
187 "rva32", /* name */
188 TRUE, /* partial_inplace */
189 0xffffffff, /* src_mask */
190 0xffffffff, /* dst_mask */
191 FALSE), /* pcrel_offset */
192 #else
193 EMPTY_HOWTO (16), /* R_SH_IMM8 */
194 #endif
195 EMPTY_HOWTO (17), /* R_SH_IMM8BY2 */
196 EMPTY_HOWTO (18), /* R_SH_IMM8BY4 */
197 EMPTY_HOWTO (19), /* R_SH_IMM4 */
198 EMPTY_HOWTO (20), /* R_SH_IMM4BY2 */
199 EMPTY_HOWTO (21), /* R_SH_IMM4BY4 */
200
201 HOWTO (R_SH_PCRELIMM8BY2, /* type */
202 1, /* rightshift */
203 1, /* size (0 = byte, 1 = short, 2 = long) */
204 8, /* bitsize */
205 TRUE, /* pc_relative */
206 0, /* bitpos */
207 complain_overflow_unsigned, /* complain_on_overflow */
208 sh_reloc, /* special_function */
209 "r_pcrelimm8by2", /* name */
210 TRUE, /* partial_inplace */
211 0xff, /* src_mask */
212 0xff, /* dst_mask */
213 TRUE), /* pcrel_offset */
214
215 HOWTO (R_SH_PCRELIMM8BY4, /* type */
216 2, /* rightshift */
217 1, /* size (0 = byte, 1 = short, 2 = long) */
218 8, /* bitsize */
219 TRUE, /* pc_relative */
220 0, /* bitpos */
221 complain_overflow_unsigned, /* complain_on_overflow */
222 sh_reloc, /* special_function */
223 "r_pcrelimm8by4", /* name */
224 TRUE, /* partial_inplace */
225 0xff, /* src_mask */
226 0xff, /* dst_mask */
227 TRUE), /* pcrel_offset */
228
229 HOWTO (R_SH_IMM16, /* type */
230 0, /* rightshift */
231 1, /* size (0 = byte, 1 = short, 2 = long) */
232 16, /* bitsize */
233 FALSE, /* pc_relative */
234 0, /* bitpos */
235 complain_overflow_bitfield, /* complain_on_overflow */
236 sh_reloc, /* special_function */
237 "r_imm16", /* name */
238 TRUE, /* partial_inplace */
239 0xffff, /* src_mask */
240 0xffff, /* dst_mask */
241 FALSE), /* pcrel_offset */
242
243 HOWTO (R_SH_SWITCH16, /* type */
244 0, /* rightshift */
245 1, /* size (0 = byte, 1 = short, 2 = long) */
246 16, /* bitsize */
247 FALSE, /* pc_relative */
248 0, /* bitpos */
249 complain_overflow_bitfield, /* complain_on_overflow */
250 sh_reloc, /* special_function */
251 "r_switch16", /* name */
252 TRUE, /* partial_inplace */
253 0xffff, /* src_mask */
254 0xffff, /* dst_mask */
255 FALSE), /* pcrel_offset */
256
257 HOWTO (R_SH_SWITCH32, /* type */
258 0, /* rightshift */
259 2, /* size (0 = byte, 1 = short, 2 = long) */
260 32, /* bitsize */
261 FALSE, /* pc_relative */
262 0, /* bitpos */
263 complain_overflow_bitfield, /* complain_on_overflow */
264 sh_reloc, /* special_function */
265 "r_switch32", /* name */
266 TRUE, /* partial_inplace */
267 0xffffffff, /* src_mask */
268 0xffffffff, /* dst_mask */
269 FALSE), /* pcrel_offset */
270
271 HOWTO (R_SH_USES, /* type */
272 0, /* rightshift */
273 1, /* size (0 = byte, 1 = short, 2 = long) */
274 16, /* bitsize */
275 FALSE, /* pc_relative */
276 0, /* bitpos */
277 complain_overflow_bitfield, /* complain_on_overflow */
278 sh_reloc, /* special_function */
279 "r_uses", /* name */
280 TRUE, /* partial_inplace */
281 0xffff, /* src_mask */
282 0xffff, /* dst_mask */
283 FALSE), /* pcrel_offset */
284
285 HOWTO (R_SH_COUNT, /* type */
286 0, /* rightshift */
287 2, /* size (0 = byte, 1 = short, 2 = long) */
288 32, /* bitsize */
289 FALSE, /* pc_relative */
290 0, /* bitpos */
291 complain_overflow_bitfield, /* complain_on_overflow */
292 sh_reloc, /* special_function */
293 "r_count", /* name */
294 TRUE, /* partial_inplace */
295 0xffffffff, /* src_mask */
296 0xffffffff, /* dst_mask */
297 FALSE), /* pcrel_offset */
298
299 HOWTO (R_SH_ALIGN, /* type */
300 0, /* rightshift */
301 2, /* size (0 = byte, 1 = short, 2 = long) */
302 32, /* bitsize */
303 FALSE, /* pc_relative */
304 0, /* bitpos */
305 complain_overflow_bitfield, /* complain_on_overflow */
306 sh_reloc, /* special_function */
307 "r_align", /* name */
308 TRUE, /* partial_inplace */
309 0xffffffff, /* src_mask */
310 0xffffffff, /* dst_mask */
311 FALSE), /* pcrel_offset */
312
313 HOWTO (R_SH_CODE, /* type */
314 0, /* rightshift */
315 2, /* size (0 = byte, 1 = short, 2 = long) */
316 32, /* bitsize */
317 FALSE, /* pc_relative */
318 0, /* bitpos */
319 complain_overflow_bitfield, /* complain_on_overflow */
320 sh_reloc, /* special_function */
321 "r_code", /* name */
322 TRUE, /* partial_inplace */
323 0xffffffff, /* src_mask */
324 0xffffffff, /* dst_mask */
325 FALSE), /* pcrel_offset */
326
327 HOWTO (R_SH_DATA, /* type */
328 0, /* rightshift */
329 2, /* size (0 = byte, 1 = short, 2 = long) */
330 32, /* bitsize */
331 FALSE, /* pc_relative */
332 0, /* bitpos */
333 complain_overflow_bitfield, /* complain_on_overflow */
334 sh_reloc, /* special_function */
335 "r_data", /* name */
336 TRUE, /* partial_inplace */
337 0xffffffff, /* src_mask */
338 0xffffffff, /* dst_mask */
339 FALSE), /* pcrel_offset */
340
341 HOWTO (R_SH_LABEL, /* type */
342 0, /* rightshift */
343 2, /* size (0 = byte, 1 = short, 2 = long) */
344 32, /* bitsize */
345 FALSE, /* pc_relative */
346 0, /* bitpos */
347 complain_overflow_bitfield, /* complain_on_overflow */
348 sh_reloc, /* special_function */
349 "r_label", /* name */
350 TRUE, /* partial_inplace */
351 0xffffffff, /* src_mask */
352 0xffffffff, /* dst_mask */
353 FALSE), /* pcrel_offset */
354
355 HOWTO (R_SH_SWITCH8, /* type */
356 0, /* rightshift */
357 0, /* size (0 = byte, 1 = short, 2 = long) */
358 8, /* bitsize */
359 FALSE, /* pc_relative */
360 0, /* bitpos */
361 complain_overflow_bitfield, /* complain_on_overflow */
362 sh_reloc, /* special_function */
363 "r_switch8", /* name */
364 TRUE, /* partial_inplace */
365 0xff, /* src_mask */
366 0xff, /* dst_mask */
367 FALSE) /* pcrel_offset */
368 };
369
370 #define SH_COFF_HOWTO_COUNT (sizeof sh_coff_howtos / sizeof sh_coff_howtos[0])
371
372 /* Check for a bad magic number. */
373 #define BADMAG(x) SHBADMAG(x)
374
375 /* Customize coffcode.h (this is not currently used). */
376 #define SH 1
377
378 /* FIXME: This should not be set here. */
379 #define __A_MAGIC_SET__
380
381 #ifndef COFF_WITH_PE
382 /* Swap the r_offset field in and out. */
383 #define SWAP_IN_RELOC_OFFSET H_GET_32
384 #define SWAP_OUT_RELOC_OFFSET H_PUT_32
385
386 /* Swap out extra information in the reloc structure. */
387 #define SWAP_OUT_RELOC_EXTRA(abfd, src, dst) \
388 do \
389 { \
390 dst->r_stuff[0] = 'S'; \
391 dst->r_stuff[1] = 'C'; \
392 } \
393 while (0)
394 #endif
395
396 /* Get the value of a symbol, when performing a relocation. */
397
398 static long
399 get_symbol_value (symbol)
400 asymbol *symbol;
401 {
402 bfd_vma relocation;
403
404 if (bfd_is_com_section (symbol->section))
405 relocation = 0;
406 else
407 relocation = (symbol->value +
408 symbol->section->output_section->vma +
409 symbol->section->output_offset);
410
411 return relocation;
412 }
413
414 #ifdef COFF_WITH_PE
415 /* Convert an rtype to howto for the COFF backend linker.
416 Copied from coff-i386. */
417 #define coff_rtype_to_howto coff_sh_rtype_to_howto
418 static reloc_howto_type * coff_sh_rtype_to_howto PARAMS ((bfd *, asection *, struct internal_reloc *, struct coff_link_hash_entry *, struct internal_syment *, bfd_vma *));
419
420 static reloc_howto_type *
421 coff_sh_rtype_to_howto (abfd, sec, rel, h, sym, addendp)
422 bfd * abfd ATTRIBUTE_UNUSED;
423 asection * sec;
424 struct internal_reloc * rel;
425 struct coff_link_hash_entry * h;
426 struct internal_syment * sym;
427 bfd_vma * addendp;
428 {
429 reloc_howto_type * howto;
430
431 howto = sh_coff_howtos + rel->r_type;
432
433 *addendp = 0;
434
435 if (howto->pc_relative)
436 *addendp += sec->vma;
437
438 if (sym != NULL && sym->n_scnum == 0 && sym->n_value != 0)
439 {
440 /* This is a common symbol. The section contents include the
441 size (sym->n_value) as an addend. The relocate_section
442 function will be adding in the final value of the symbol. We
443 need to subtract out the current size in order to get the
444 correct result. */
445 BFD_ASSERT (h != NULL);
446 }
447
448 if (howto->pc_relative)
449 {
450 *addendp -= 4;
451
452 /* If the symbol is defined, then the generic code is going to
453 add back the symbol value in order to cancel out an
454 adjustment it made to the addend. However, we set the addend
455 to 0 at the start of this function. We need to adjust here,
456 to avoid the adjustment the generic code will make. FIXME:
457 This is getting a bit hackish. */
458 if (sym != NULL && sym->n_scnum != 0)
459 *addendp -= sym->n_value;
460 }
461
462 if (rel->r_type == R_SH_IMAGEBASE)
463 *addendp -= pe_data (sec->output_section->owner)->pe_opthdr.ImageBase;
464
465 return howto;
466 }
467
468 #endif /* COFF_WITH_PE */
469
470 /* This structure is used to map BFD reloc codes to SH PE relocs. */
471 struct shcoff_reloc_map
472 {
473 bfd_reloc_code_real_type bfd_reloc_val;
474 unsigned char shcoff_reloc_val;
475 };
476
477 #ifdef COFF_WITH_PE
478 /* An array mapping BFD reloc codes to SH PE relocs. */
479 static const struct shcoff_reloc_map sh_reloc_map[] =
480 {
481 { BFD_RELOC_32, R_SH_IMM32CE },
482 { BFD_RELOC_RVA, R_SH_IMAGEBASE },
483 { BFD_RELOC_CTOR, R_SH_IMM32CE },
484 };
485 #else
486 /* An array mapping BFD reloc codes to SH COFF relocs. */
487 static const struct shcoff_reloc_map sh_reloc_map[] =
488 {
489 { BFD_RELOC_32, R_SH_IMM32 },
490 { BFD_RELOC_CTOR, R_SH_IMM32 },
491 };
492 #endif
493
494 /* Given a BFD reloc code, return the howto structure for the
495 corresponding SH PE reloc. */
496 #define coff_bfd_reloc_type_lookup sh_coff_reloc_type_lookup
497
498 static reloc_howto_type *
499 sh_coff_reloc_type_lookup (abfd, code)
500 bfd * abfd ATTRIBUTE_UNUSED;
501 bfd_reloc_code_real_type code;
502 {
503 unsigned int i;
504
505 for (i = ARRAY_SIZE (sh_reloc_map); i--;)
506 if (sh_reloc_map[i].bfd_reloc_val == code)
507 return &sh_coff_howtos[(int) sh_reloc_map[i].shcoff_reloc_val];
508
509 fprintf (stderr, "SH Error: unknown reloc type %d\n", code);
510 return NULL;
511 }
512
513 /* This macro is used in coffcode.h to get the howto corresponding to
514 an internal reloc. */
515
516 #define RTYPE2HOWTO(relent, internal) \
517 ((relent)->howto = \
518 ((internal)->r_type < SH_COFF_HOWTO_COUNT \
519 ? &sh_coff_howtos[(internal)->r_type] \
520 : (reloc_howto_type *) NULL))
521
522 /* This is the same as the macro in coffcode.h, except that it copies
523 r_offset into reloc_entry->addend for some relocs. */
524 #define CALC_ADDEND(abfd, ptr, reloc, cache_ptr) \
525 { \
526 coff_symbol_type *coffsym = (coff_symbol_type *) NULL; \
527 if (ptr && bfd_asymbol_bfd (ptr) != abfd) \
528 coffsym = (obj_symbols (abfd) \
529 + (cache_ptr->sym_ptr_ptr - symbols)); \
530 else if (ptr) \
531 coffsym = coff_symbol_from (abfd, ptr); \
532 if (coffsym != (coff_symbol_type *) NULL \
533 && coffsym->native->u.syment.n_scnum == 0) \
534 cache_ptr->addend = 0; \
535 else if (ptr && bfd_asymbol_bfd (ptr) == abfd \
536 && ptr->section != (asection *) NULL) \
537 cache_ptr->addend = - (ptr->section->vma + ptr->value); \
538 else \
539 cache_ptr->addend = 0; \
540 if ((reloc).r_type == R_SH_SWITCH8 \
541 || (reloc).r_type == R_SH_SWITCH16 \
542 || (reloc).r_type == R_SH_SWITCH32 \
543 || (reloc).r_type == R_SH_USES \
544 || (reloc).r_type == R_SH_COUNT \
545 || (reloc).r_type == R_SH_ALIGN) \
546 cache_ptr->addend = (reloc).r_offset; \
547 }
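/* Note added for clarity (not part of the original macro comment): for
   the reloc types listed above -- R_SH_SWITCH8/16/32, R_SH_USES,
   R_SH_COUNT and R_SH_ALIGN -- the addend seen by BFD users is thus the
   raw r_offset field, e.g. the number of uses for R_SH_COUNT or the
   alignment power for R_SH_ALIGN, rather than a symbol-relative value.  */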
548
549 /* This is the howto function for the SH relocations. */
550
551 static bfd_reloc_status_type
552 sh_reloc (abfd, reloc_entry, symbol_in, data, input_section, output_bfd,
553 error_message)
554 bfd *abfd;
555 arelent *reloc_entry;
556 asymbol *symbol_in;
557 PTR data;
558 asection *input_section;
559 bfd *output_bfd;
560 char **error_message ATTRIBUTE_UNUSED;
561 {
562 unsigned long insn;
563 bfd_vma sym_value;
564 unsigned short r_type;
565 bfd_vma addr = reloc_entry->address;
566 bfd_byte *hit_data = addr + (bfd_byte *) data;
567
568 r_type = reloc_entry->howto->type;
569
570 if (output_bfd != NULL)
571 {
572 /* Partial linking--do nothing. */
573 reloc_entry->address += input_section->output_offset;
574 return bfd_reloc_ok;
575 }
576
577 /* Almost all relocs have to do with relaxing. If any work must be
578 done for them, it has been done in sh_relax_section. */
579 if (r_type != R_SH_IMM32
580 #ifdef COFF_WITH_PE
581 && r_type != R_SH_IMM32CE
582 && r_type != R_SH_IMAGEBASE
583 #endif
584 && (r_type != R_SH_PCDISP
585 || (symbol_in->flags & BSF_LOCAL) != 0))
586 return bfd_reloc_ok;
587
588 if (symbol_in != NULL
589 && bfd_is_und_section (symbol_in->section))
590 return bfd_reloc_undefined;
591
592 sym_value = get_symbol_value (symbol_in);
593
594 switch (r_type)
595 {
596 case R_SH_IMM32:
597 #ifdef COFF_WITH_PE
598 case R_SH_IMM32CE:
599 #endif
600 insn = bfd_get_32 (abfd, hit_data);
601 insn += sym_value + reloc_entry->addend;
602 bfd_put_32 (abfd, (bfd_vma) insn, hit_data);
603 break;
604 #ifdef COFF_WITH_PE
605 case R_SH_IMAGEBASE:
606 insn = bfd_get_32 (abfd, hit_data);
607 insn += sym_value + reloc_entry->addend;
608 insn -= pe_data (input_section->output_section->owner)->pe_opthdr.ImageBase;
609 bfd_put_32 (abfd, (bfd_vma) insn, hit_data);
610 break;
611 #endif
612 case R_SH_PCDISP:
613 insn = bfd_get_16 (abfd, hit_data);
614 sym_value += reloc_entry->addend;
615 sym_value -= (input_section->output_section->vma
616 + input_section->output_offset
617 + addr
618 + 4);
619 sym_value += (insn & 0xfff) << 1;
620 if (insn & 0x800)
621 sym_value -= 0x1000;
622 insn = (insn & 0xf000) | (sym_value & 0xfff);
623 bfd_put_16 (abfd, (bfd_vma) insn, hit_data);
624 if (sym_value < (bfd_vma) -0x1000 || sym_value >= 0x1000)
625 return bfd_reloc_overflow;
626 break;
627 default:
628 abort ();
629 break;
630 }
631
632 return bfd_reloc_ok;
633 }
634
635 #define coff_bfd_merge_private_bfd_data _bfd_generic_verify_endian_match
636
637 /* We can do relaxing. */
638 #define coff_bfd_relax_section sh_relax_section
639
640 /* We use the special COFF backend linker. */
641 #define coff_relocate_section sh_relocate_section
642
643 /* When relaxing, we need to use special code to get the relocated
644 section contents. */
645 #define coff_bfd_get_relocated_section_contents \
646 sh_coff_get_relocated_section_contents
647
648 #include "coffcode.h"
649 \f
650 /* This function handles relaxing on the SH.
651
652 Function calls on the SH look like this:
653
654 movl L1,r0
655 ...
656 jsr @r0
657 ...
658 L1:
659 .long function
660
661 The compiler and assembler will cooperate to create R_SH_USES
662 relocs on the jsr instructions. The r_offset field of the
663 R_SH_USES reloc is the PC relative offset to the instruction which
664 loads the register (the r_offset field is computed as though it
665 were a jump instruction, so the offset value is actually from four
666 bytes past the instruction). The linker can use this reloc to
667 determine just which function is being called, and thus decide
668 whether it is possible to replace the jsr with a bsr.
669
670 If multiple function calls are all based on a single register load
671 (i.e., the same function is called multiple times), the compiler
672 guarantees that each function call will have an R_SH_USES reloc.
673 Therefore, if the linker is able to convert each R_SH_USES reloc
674 which refers to that address, it can safely eliminate the register
675 load.
676
677 When the assembler creates an R_SH_USES reloc, it examines it to
678 determine which address is being loaded (L1 in the above example).
679 It then counts the number of references to that address, and
680 creates an R_SH_COUNT reloc at that address. The r_offset field of
681 the R_SH_COUNT reloc will be the number of references. If the
682 linker is able to eliminate a register load, it can use the
683 R_SH_COUNT reloc to see whether it can also eliminate the function
684 address.
685
686 SH relaxing also handles another, unrelated, matter. On the SH, if
687 a load or store instruction is not aligned on a four byte boundary,
688 the memory cycle interferes with the 32 bit instruction fetch,
689 causing a one cycle bubble in the pipeline. Therefore, we try to
690 align load and store instructions on four byte boundaries if we
691 can, by swapping them with one of the adjacent instructions. */
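/* A rough before/after sketch of the call shortening described above
   (the label and function name are illustrative only):

       before:                          after:
               mov.l   L1,r0                    (load deleted)
               ...                              ...
               jsr     @r0                      bsr     function
               ...                              ...
       L1:     .long   function                 (address word deleted once
                                                  its use count reaches 0)

   The jsr site is rewritten with the bsr opcode (0xb000 plus a 12-bit
   PC-relative displacement), the R_SH_USES reloc is turned into an
   R_SH_PCDISP reloc, and the R_SH_COUNT reloc's r_offset is decremented
   for each converted use.  */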
692
693 static bfd_boolean
694 sh_relax_section (abfd, sec, link_info, again)
695 bfd *abfd;
696 asection *sec;
697 struct bfd_link_info *link_info;
698 bfd_boolean *again;
699 {
700 struct internal_reloc *internal_relocs;
701 bfd_boolean have_code;
702 struct internal_reloc *irel, *irelend;
703 bfd_byte *contents = NULL;
704
705 *again = FALSE;
706
707 if (link_info->relocatable
708 || (sec->flags & SEC_RELOC) == 0
709 || sec->reloc_count == 0)
710 return TRUE;
711
712 if (coff_section_data (abfd, sec) == NULL)
713 {
714 bfd_size_type amt = sizeof (struct coff_section_tdata);
715 sec->used_by_bfd = (PTR) bfd_zalloc (abfd, amt);
716 if (sec->used_by_bfd == NULL)
717 return FALSE;
718 }
719
720 internal_relocs = (_bfd_coff_read_internal_relocs
721 (abfd, sec, link_info->keep_memory,
722 (bfd_byte *) NULL, FALSE,
723 (struct internal_reloc *) NULL));
724 if (internal_relocs == NULL)
725 goto error_return;
726
727 have_code = FALSE;
728
729 irelend = internal_relocs + sec->reloc_count;
730 for (irel = internal_relocs; irel < irelend; irel++)
731 {
732 bfd_vma laddr, paddr, symval;
733 unsigned short insn;
734 struct internal_reloc *irelfn, *irelscan, *irelcount;
735 struct internal_syment sym;
736 bfd_signed_vma foff;
737
738 if (irel->r_type == R_SH_CODE)
739 have_code = TRUE;
740
741 if (irel->r_type != R_SH_USES)
742 continue;
743
744 /* Get the section contents. */
745 if (contents == NULL)
746 {
747 if (coff_section_data (abfd, sec)->contents != NULL)
748 contents = coff_section_data (abfd, sec)->contents;
749 else
750 {
751 if (!bfd_malloc_and_get_section (abfd, sec, &contents))
752 goto error_return;
753 }
754 }
755
756 /* The r_offset field of the R_SH_USES reloc will point us to
757 the register load. The 4 is because the r_offset field is
758 computed as though it were a jump offset, which is measured
759 from 4 bytes after the jump instruction. */
760 laddr = irel->r_vaddr - sec->vma + 4;
761 /* Careful to sign extend the 32-bit offset. */
762 laddr += ((irel->r_offset & 0xffffffff) ^ 0x80000000) - 0x80000000;
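/* For instance (illustrative value only), an r_offset of 0xfffffffc
   gives (0xfffffffc ^ 0x80000000) - 0x80000000 = 0x7ffffffc - 0x80000000
   = -4, so the offset is applied as a signed quantity even when
   bfd_vma is wider than 32 bits.  */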
763 if (laddr >= sec->size)
764 {
765 (*_bfd_error_handler) ("%B: 0x%lx: warning: bad R_SH_USES offset",
766 abfd, (unsigned long) irel->r_vaddr);
767 continue;
768 }
769 insn = bfd_get_16 (abfd, contents + laddr);
770
771 /* If the instruction is not mov.l NN,rN, we don't know what to do. */
772 if ((insn & 0xf000) != 0xd000)
773 {
774 ((*_bfd_error_handler)
775 ("%B: 0x%lx: warning: R_SH_USES points to unrecognized insn 0x%x",
776 abfd, (unsigned long) irel->r_vaddr, insn));
777 continue;
778 }
779
780 /* Get the address from which the register is being loaded. The
781 displacement in the mov.l instruction is quadrupled. It is a
782 displacement from four bytes after the mov.l instruction, but,
783 before adding in the PC address, the two least significant bits
784 of the PC are cleared. We assume that the section is aligned
785 on a four byte boundary. */
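/* Worked example (values invented for this comment): for the insn
   0xd312 (mov.l @(disp,pc),r3) at laddr 0x100, the displacement byte
   is 0x12, so paddr = 0x12 * 4 + ((0x100 + 4) & ~3)
   = 0x48 + 0x104 = 0x14c.  */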
786 paddr = insn & 0xff;
787 paddr *= 4;
788 paddr += (laddr + 4) &~ (bfd_vma) 3;
789 if (paddr >= sec->size)
790 {
791 ((*_bfd_error_handler)
792 ("%B: 0x%lx: warning: bad R_SH_USES load offset",
793 abfd, (unsigned long) irel->r_vaddr));
794 continue;
795 }
796
797 /* Get the reloc for the address from which the register is
798 being loaded. This reloc will tell us which function is
799 actually being called. */
800 paddr += sec->vma;
801 for (irelfn = internal_relocs; irelfn < irelend; irelfn++)
802 if (irelfn->r_vaddr == paddr
803 #ifdef COFF_WITH_PE
804 && (irelfn->r_type == R_SH_IMM32
805 || irelfn->r_type == R_SH_IMM32CE
806 || irelfn->r_type == R_SH_IMAGEBASE)
807
808 #else
809 && irelfn->r_type == R_SH_IMM32
810 #endif
811 )
812 break;
813 if (irelfn >= irelend)
814 {
815 ((*_bfd_error_handler)
816 ("%B: 0x%lx: warning: could not find expected reloc",
817 abfd, (unsigned long) paddr));
818 continue;
819 }
820
821 /* Get the value of the symbol referred to by the reloc. */
822 if (! _bfd_coff_get_external_symbols (abfd))
823 goto error_return;
824 bfd_coff_swap_sym_in (abfd,
825 ((bfd_byte *) obj_coff_external_syms (abfd)
826 + (irelfn->r_symndx
827 * bfd_coff_symesz (abfd))),
828 &sym);
829 if (sym.n_scnum != 0 && sym.n_scnum != sec->target_index)
830 {
831 ((*_bfd_error_handler)
832 ("%B: 0x%lx: warning: symbol in unexpected section",
833 abfd, (unsigned long) paddr));
834 continue;
835 }
836
837 if (sym.n_sclass != C_EXT)
838 {
839 symval = (sym.n_value
840 - sec->vma
841 + sec->output_section->vma
842 + sec->output_offset);
843 }
844 else
845 {
846 struct coff_link_hash_entry *h;
847
848 h = obj_coff_sym_hashes (abfd)[irelfn->r_symndx];
849 BFD_ASSERT (h != NULL);
850 if (h->root.type != bfd_link_hash_defined
851 && h->root.type != bfd_link_hash_defweak)
852 {
853 /* This appears to be a reference to an undefined
854 symbol. Just ignore it--it will be caught by the
855 regular reloc processing. */
856 continue;
857 }
858
859 symval = (h->root.u.def.value
860 + h->root.u.def.section->output_section->vma
861 + h->root.u.def.section->output_offset);
862 }
863
864 symval += bfd_get_32 (abfd, contents + paddr - sec->vma);
865
866 /* See if this function call can be shortened. */
867 foff = (symval
868 - (irel->r_vaddr
869 - sec->vma
870 + sec->output_section->vma
871 + sec->output_offset
872 + 4));
873 if (foff < -0x1000 || foff >= 0x1000)
874 {
875 /* After all that work, we can't shorten this function call. */
876 continue;
877 }
878
879 /* Shorten the function call. */
880
881 /* For simplicity of coding, we are going to modify the section
882 contents, the section relocs, and the BFD symbol table. We
883 must tell the rest of the code not to free up this
884 information. It would be possible to instead create a table
885 of changes which have to be made, as is done in coff-mips.c;
886 that would be more work, but would require less memory when
887 the linker is run. */
888
889 coff_section_data (abfd, sec)->relocs = internal_relocs;
890 coff_section_data (abfd, sec)->keep_relocs = TRUE;
891
892 coff_section_data (abfd, sec)->contents = contents;
893 coff_section_data (abfd, sec)->keep_contents = TRUE;
894
895 obj_coff_keep_syms (abfd) = TRUE;
896
897 /* Replace the jsr with a bsr. */
898
899 /* Change the R_SH_USES reloc into an R_SH_PCDISP reloc, and
900 replace the jsr with a bsr. */
901 irel->r_type = R_SH_PCDISP;
902 irel->r_symndx = irelfn->r_symndx;
903 if (sym.n_sclass != C_EXT)
904 {
905 /* If this needs to be changed because of future relaxing,
906 it will be handled here like other internal PCDISP
907 relocs. */
908 bfd_put_16 (abfd,
909 (bfd_vma) 0xb000 | ((foff >> 1) & 0xfff),
910 contents + irel->r_vaddr - sec->vma);
911 }
912 else
913 {
914 /* We can't fully resolve this yet, because the external
915 symbol value may be changed by future relaxing. We let
916 the final link phase handle it. */
917 bfd_put_16 (abfd, (bfd_vma) 0xb000,
918 contents + irel->r_vaddr - sec->vma);
919 }
920
921 /* See if there is another R_SH_USES reloc referring to the same
922 register load. */
923 for (irelscan = internal_relocs; irelscan < irelend; irelscan++)
924 if (irelscan->r_type == R_SH_USES
925 && laddr == irelscan->r_vaddr - sec->vma + 4 + irelscan->r_offset)
926 break;
927 if (irelscan < irelend)
928 {
929 /* Some other function call depends upon this register load,
930 and we have not yet converted that function call.
931 Indeed, we may never be able to convert it. There is
932 nothing else we can do at this point. */
933 continue;
934 }
935
936 /* Look for a R_SH_COUNT reloc on the location where the
937 function address is stored. Do this before deleting any
938 bytes, to avoid confusion about the address. */
939 for (irelcount = internal_relocs; irelcount < irelend; irelcount++)
940 if (irelcount->r_vaddr == paddr
941 && irelcount->r_type == R_SH_COUNT)
942 break;
943
944 /* Delete the register load. */
945 if (! sh_relax_delete_bytes (abfd, sec, laddr, 2))
946 goto error_return;
947
948 /* That will change things, so, just in case it permits some
949 other function call to come within range, we should relax
950 again. Note that this is not required, and it may be slow. */
951 *again = TRUE;
952
953 /* Now check whether we got a COUNT reloc. */
954 if (irelcount >= irelend)
955 {
956 ((*_bfd_error_handler)
957 ("%B: 0x%lx: warning: could not find expected COUNT reloc",
958 abfd, (unsigned long) paddr));
959 continue;
960 }
961
962 /* The number of uses is stored in the r_offset field. We've
963 just deleted one. */
964 if (irelcount->r_offset == 0)
965 {
966 ((*_bfd_error_handler) ("%B: 0x%lx: warning: bad count",
967 abfd, (unsigned long) paddr));
968 continue;
969 }
970
971 --irelcount->r_offset;
972
973 /* If there are no more uses, we can delete the address. Reload
974 the address from irelfn, in case it was changed by the
975 previous call to sh_relax_delete_bytes. */
976 if (irelcount->r_offset == 0)
977 {
978 if (! sh_relax_delete_bytes (abfd, sec,
979 irelfn->r_vaddr - sec->vma, 4))
980 goto error_return;
981 }
982
983 /* We've done all we can with that function call. */
984 }
985
986 /* Look for load and store instructions that we can align on four
987 byte boundaries. */
988 if (have_code)
989 {
990 bfd_boolean swapped;
991
992 /* Get the section contents. */
993 if (contents == NULL)
994 {
995 if (coff_section_data (abfd, sec)->contents != NULL)
996 contents = coff_section_data (abfd, sec)->contents;
997 else
998 {
999 if (!bfd_malloc_and_get_section (abfd, sec, &contents))
1000 goto error_return;
1001 }
1002 }
1003
1004 if (! sh_align_loads (abfd, sec, internal_relocs, contents, &swapped))
1005 goto error_return;
1006
1007 if (swapped)
1008 {
1009 coff_section_data (abfd, sec)->relocs = internal_relocs;
1010 coff_section_data (abfd, sec)->keep_relocs = TRUE;
1011
1012 coff_section_data (abfd, sec)->contents = contents;
1013 coff_section_data (abfd, sec)->keep_contents = TRUE;
1014
1015 obj_coff_keep_syms (abfd) = TRUE;
1016 }
1017 }
1018
1019 if (internal_relocs != NULL
1020 && internal_relocs != coff_section_data (abfd, sec)->relocs)
1021 {
1022 if (! link_info->keep_memory)
1023 free (internal_relocs);
1024 else
1025 coff_section_data (abfd, sec)->relocs = internal_relocs;
1026 }
1027
1028 if (contents != NULL && contents != coff_section_data (abfd, sec)->contents)
1029 {
1030 if (! link_info->keep_memory)
1031 free (contents);
1032 else
1033 /* Cache the section contents for coff_link_input_bfd. */
1034 coff_section_data (abfd, sec)->contents = contents;
1035 }
1036
1037 return TRUE;
1038
1039 error_return:
1040 if (internal_relocs != NULL
1041 && internal_relocs != coff_section_data (abfd, sec)->relocs)
1042 free (internal_relocs);
1043 if (contents != NULL && contents != coff_section_data (abfd, sec)->contents)
1044 free (contents);
1045 return FALSE;
1046 }
1047
1048 /* Delete some bytes from a section while relaxing. */
1049
1050 static bfd_boolean
1051 sh_relax_delete_bytes (abfd, sec, addr, count)
1052 bfd *abfd;
1053 asection *sec;
1054 bfd_vma addr;
1055 int count;
1056 {
1057 bfd_byte *contents;
1058 struct internal_reloc *irel, *irelend;
1059 struct internal_reloc *irelalign;
1060 bfd_vma toaddr;
1061 bfd_byte *esym, *esymend;
1062 bfd_size_type symesz;
1063 struct coff_link_hash_entry **sym_hash;
1064 asection *o;
1065
1066 contents = coff_section_data (abfd, sec)->contents;
1067
1068 /* The deletion must stop at the next ALIGN reloc for an alignment
1069 power larger than the number of bytes we are deleting. */
1070
1071 irelalign = NULL;
1072 toaddr = sec->size;
1073
1074 irel = coff_section_data (abfd, sec)->relocs;
1075 irelend = irel + sec->reloc_count;
1076 for (; irel < irelend; irel++)
1077 {
1078 if (irel->r_type == R_SH_ALIGN
1079 && irel->r_vaddr - sec->vma > addr
1080 && count < (1 << irel->r_offset))
1081 {
1082 irelalign = irel;
1083 toaddr = irel->r_vaddr - sec->vma;
1084 break;
1085 }
1086 }
1087
1088 /* Actually delete the bytes. */
1089 memmove (contents + addr, contents + addr + count,
1090 (size_t) (toaddr - addr - count));
1091 if (irelalign == NULL)
1092 sec->size -= count;
1093 else
1094 {
1095 int i;
1096
1097 #define NOP_OPCODE (0x0009)
1098
1099 BFD_ASSERT ((count & 1) == 0);
1100 for (i = 0; i < count; i += 2)
1101 bfd_put_16 (abfd, (bfd_vma) NOP_OPCODE, contents + toaddr - count + i);
1102 }
1103
1104 /* Adjust all the relocs. */
1105 for (irel = coff_section_data (abfd, sec)->relocs; irel < irelend; irel++)
1106 {
1107 bfd_vma nraddr, stop;
1108 bfd_vma start = 0;
1109 int insn = 0;
1110 struct internal_syment sym;
1111 int off, adjust, oinsn;
1112 bfd_signed_vma voff = 0;
1113 bfd_boolean overflow;
1114
1115 /* Get the new reloc address. */
1116 nraddr = irel->r_vaddr - sec->vma;
1117 if ((irel->r_vaddr - sec->vma > addr
1118 && irel->r_vaddr - sec->vma < toaddr)
1119 || (irel->r_type == R_SH_ALIGN
1120 && irel->r_vaddr - sec->vma == toaddr))
1121 nraddr -= count;
1122
1123 /* See if this reloc was for the bytes we have deleted, in which
1124 case we no longer care about it. Don't delete relocs which
1125 represent addresses, though. */
1126 if (irel->r_vaddr - sec->vma >= addr
1127 && irel->r_vaddr - sec->vma < addr + count
1128 && irel->r_type != R_SH_ALIGN
1129 && irel->r_type != R_SH_CODE
1130 && irel->r_type != R_SH_DATA
1131 && irel->r_type != R_SH_LABEL)
1132 irel->r_type = R_SH_UNUSED;
1133
1134 /* If this is a PC relative reloc, see if the range it covers
1135 includes the bytes we have deleted. */
1136 switch (irel->r_type)
1137 {
1138 default:
1139 break;
1140
1141 case R_SH_PCDISP8BY2:
1142 case R_SH_PCDISP:
1143 case R_SH_PCRELIMM8BY2:
1144 case R_SH_PCRELIMM8BY4:
1145 start = irel->r_vaddr - sec->vma;
1146 insn = bfd_get_16 (abfd, contents + nraddr);
1147 break;
1148 }
1149
1150 switch (irel->r_type)
1151 {
1152 default:
1153 start = stop = addr;
1154 break;
1155
1156 case R_SH_IMM32:
1157 #ifdef COFF_WITH_PE
1158 case R_SH_IMM32CE:
1159 case R_SH_IMAGEBASE:
1160 #endif
1161 /* If this reloc is against a symbol defined in this
1162 section, and the symbol will not be adjusted below, we
1163 must check the addend to see whether it will put the value
1164 in the range being adjusted, and hence must be changed. */
1165 bfd_coff_swap_sym_in (abfd,
1166 ((bfd_byte *) obj_coff_external_syms (abfd)
1167 + (irel->r_symndx
1168 * bfd_coff_symesz (abfd))),
1169 &sym);
1170 if (sym.n_sclass != C_EXT
1171 && sym.n_scnum == sec->target_index
1172 && ((bfd_vma) sym.n_value <= addr
1173 || (bfd_vma) sym.n_value >= toaddr))
1174 {
1175 bfd_vma val;
1176
1177 val = bfd_get_32 (abfd, contents + nraddr);
1178 val += sym.n_value;
1179 if (val > addr && val < toaddr)
1180 bfd_put_32 (abfd, val - count, contents + nraddr);
1181 }
1182 start = stop = addr;
1183 break;
1184
1185 case R_SH_PCDISP8BY2:
1186 off = insn & 0xff;
1187 if (off & 0x80)
1188 off -= 0x100;
1189 stop = (bfd_vma) ((bfd_signed_vma) start + 4 + off * 2);
1190 break;
1191
1192 case R_SH_PCDISP:
1193 bfd_coff_swap_sym_in (abfd,
1194 ((bfd_byte *) obj_coff_external_syms (abfd)
1195 + (irel->r_symndx
1196 * bfd_coff_symesz (abfd))),
1197 &sym);
1198 if (sym.n_sclass == C_EXT)
1199 start = stop = addr;
1200 else
1201 {
1202 off = insn & 0xfff;
1203 if (off & 0x800)
1204 off -= 0x1000;
1205 stop = (bfd_vma) ((bfd_signed_vma) start + 4 + off * 2);
1206 }
1207 break;
1208
1209 case R_SH_PCRELIMM8BY2:
1210 off = insn & 0xff;
1211 stop = start + 4 + off * 2;
1212 break;
1213
1214 case R_SH_PCRELIMM8BY4:
1215 off = insn & 0xff;
1216 stop = (start &~ (bfd_vma) 3) + 4 + off * 4;
1217 break;
1218
1219 case R_SH_SWITCH8:
1220 case R_SH_SWITCH16:
1221 case R_SH_SWITCH32:
1222 /* These reloc types represent
1223 .word L2-L1
1224 The r_offset field holds the difference between the reloc
1225 address and L1. That is the start of the reloc, and
1226 adding in the contents gives us the top. We must adjust
1227 both the r_offset field and the section contents. */
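/* A made-up example: with L1 at section offset 0x100, the .word at
   offset 0x120 and L2 at offset 0x140, r_offset is 0x20 (the reloc
   address minus L1) and the contents hold 0x40 (L2 - L1); the code
   below ends up with start = 0x100 (L1) and stop = 0x140 (L2).  */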
1228
1229 start = irel->r_vaddr - sec->vma;
1230 stop = (bfd_vma) ((bfd_signed_vma) start - (long) irel->r_offset);
1231
1232 if (start > addr
1233 && start < toaddr
1234 && (stop <= addr || stop >= toaddr))
1235 irel->r_offset += count;
1236 else if (stop > addr
1237 && stop < toaddr
1238 && (start <= addr || start >= toaddr))
1239 irel->r_offset -= count;
1240
1241 start = stop;
1242
1243 if (irel->r_type == R_SH_SWITCH16)
1244 voff = bfd_get_signed_16 (abfd, contents + nraddr);
1245 else if (irel->r_type == R_SH_SWITCH8)
1246 voff = bfd_get_8 (abfd, contents + nraddr);
1247 else
1248 voff = bfd_get_signed_32 (abfd, contents + nraddr);
1249 stop = (bfd_vma) ((bfd_signed_vma) start + voff);
1250
1251 break;
1252
1253 case R_SH_USES:
1254 start = irel->r_vaddr - sec->vma;
1255 stop = (bfd_vma) ((bfd_signed_vma) start
1256 + (long) irel->r_offset
1257 + 4);
1258 break;
1259 }
1260
1261 if (start > addr
1262 && start < toaddr
1263 && (stop <= addr || stop >= toaddr))
1264 adjust = count;
1265 else if (stop > addr
1266 && stop < toaddr
1267 && (start <= addr || start >= toaddr))
1268 adjust = - count;
1269 else
1270 adjust = 0;
1271
1272 if (adjust != 0)
1273 {
1274 oinsn = insn;
1275 overflow = FALSE;
1276 switch (irel->r_type)
1277 {
1278 default:
1279 abort ();
1280 break;
1281
1282 case R_SH_PCDISP8BY2:
1283 case R_SH_PCRELIMM8BY2:
1284 insn += adjust / 2;
1285 if ((oinsn & 0xff00) != (insn & 0xff00))
1286 overflow = TRUE;
1287 bfd_put_16 (abfd, (bfd_vma) insn, contents + nraddr);
1288 break;
1289
1290 case R_SH_PCDISP:
1291 insn += adjust / 2;
1292 if ((oinsn & 0xf000) != (insn & 0xf000))
1293 overflow = TRUE;
1294 bfd_put_16 (abfd, (bfd_vma) insn, contents + nraddr);
1295 break;
1296
1297 case R_SH_PCRELIMM8BY4:
1298 BFD_ASSERT (adjust == count || count >= 4);
1299 if (count >= 4)
1300 insn += adjust / 4;
1301 else
1302 {
1303 if ((irel->r_vaddr & 3) == 0)
1304 ++insn;
1305 }
1306 if ((oinsn & 0xff00) != (insn & 0xff00))
1307 overflow = TRUE;
1308 bfd_put_16 (abfd, (bfd_vma) insn, contents + nraddr);
1309 break;
1310
1311 case R_SH_SWITCH8:
1312 voff += adjust;
1313 if (voff < 0 || voff >= 0xff)
1314 overflow = TRUE;
1315 bfd_put_8 (abfd, (bfd_vma) voff, contents + nraddr);
1316 break;
1317
1318 case R_SH_SWITCH16:
1319 voff += adjust;
1320 if (voff < - 0x8000 || voff >= 0x8000)
1321 overflow = TRUE;
1322 bfd_put_signed_16 (abfd, (bfd_vma) voff, contents + nraddr);
1323 break;
1324
1325 case R_SH_SWITCH32:
1326 voff += adjust;
1327 bfd_put_signed_32 (abfd, (bfd_vma) voff, contents + nraddr);
1328 break;
1329
1330 case R_SH_USES:
1331 irel->r_offset += adjust;
1332 break;
1333 }
1334
1335 if (overflow)
1336 {
1337 ((*_bfd_error_handler)
1338 ("%B: 0x%lx: fatal: reloc overflow while relaxing",
1339 abfd, (unsigned long) irel->r_vaddr));
1340 bfd_set_error (bfd_error_bad_value);
1341 return FALSE;
1342 }
1343 }
1344
1345 irel->r_vaddr = nraddr + sec->vma;
1346 }
1347
1348 /* Look through all the other sections. If they contain any IMM32
1349 relocs against internal symbols which we are not going to adjust
1350 below, we may need to adjust the addends. */
1351 for (o = abfd->sections; o != NULL; o = o->next)
1352 {
1353 struct internal_reloc *internal_relocs;
1354 struct internal_reloc *irelscan, *irelscanend;
1355 bfd_byte *ocontents;
1356
1357 if (o == sec
1358 || (o->flags & SEC_RELOC) == 0
1359 || o->reloc_count == 0)
1360 continue;
1361
1362 /* We always cache the relocs. Perhaps, if info->keep_memory is
1363 FALSE, we should free them, if we are permitted to, when we
1364 leave sh_coff_relax_section. */
1365 internal_relocs = (_bfd_coff_read_internal_relocs
1366 (abfd, o, TRUE, (bfd_byte *) NULL, FALSE,
1367 (struct internal_reloc *) NULL));
1368 if (internal_relocs == NULL)
1369 return FALSE;
1370
1371 ocontents = NULL;
1372 irelscanend = internal_relocs + o->reloc_count;
1373 for (irelscan = internal_relocs; irelscan < irelscanend; irelscan++)
1374 {
1375 struct internal_syment sym;
1376
1377 #ifdef COFF_WITH_PE
1378 if (irelscan->r_type != R_SH_IMM32
1379 && irelscan->r_type != R_SH_IMAGEBASE
1380 && irelscan->r_type != R_SH_IMM32CE)
1381 #else
1382 if (irelscan->r_type != R_SH_IMM32)
1383 #endif
1384 continue;
1385
1386 bfd_coff_swap_sym_in (abfd,
1387 ((bfd_byte *) obj_coff_external_syms (abfd)
1388 + (irelscan->r_symndx
1389 * bfd_coff_symesz (abfd))),
1390 &sym);
1391 if (sym.n_sclass != C_EXT
1392 && sym.n_scnum == sec->target_index
1393 && ((bfd_vma) sym.n_value <= addr
1394 || (bfd_vma) sym.n_value >= toaddr))
1395 {
1396 bfd_vma val;
1397
1398 if (ocontents == NULL)
1399 {
1400 if (coff_section_data (abfd, o)->contents != NULL)
1401 ocontents = coff_section_data (abfd, o)->contents;
1402 else
1403 {
1404 if (!bfd_malloc_and_get_section (abfd, o, &ocontents))
1405 return FALSE;
1406 /* We always cache the section contents.
1407 Perhaps, if info->keep_memory is FALSE, we
1408 should free them, if we are permitted to,
1409 when we leave sh_coff_relax_section. */
1410 coff_section_data (abfd, o)->contents = ocontents;
1411 }
1412 }
1413
1414 val = bfd_get_32 (abfd, ocontents + irelscan->r_vaddr - o->vma);
1415 val += sym.n_value;
1416 if (val > addr && val < toaddr)
1417 bfd_put_32 (abfd, val - count,
1418 ocontents + irelscan->r_vaddr - o->vma);
1419
1420 coff_section_data (abfd, o)->keep_contents = TRUE;
1421 }
1422 }
1423 }
1424
1425 /* Adjusting the internal symbols will not work if something has
1426 already retrieved the generic symbols. It would be possible to
1427 make this work by adjusting the generic symbols at the same time.
1428 However, this case should not arise in normal usage. */
1429 if (obj_symbols (abfd) != NULL
1430 || obj_raw_syments (abfd) != NULL)
1431 {
1432 ((*_bfd_error_handler)
1433 ("%B: fatal: generic symbols retrieved before relaxing", abfd));
1434 bfd_set_error (bfd_error_invalid_operation);
1435 return FALSE;
1436 }
1437
1438 /* Adjust all the symbols. */
1439 sym_hash = obj_coff_sym_hashes (abfd);
1440 symesz = bfd_coff_symesz (abfd);
1441 esym = (bfd_byte *) obj_coff_external_syms (abfd);
1442 esymend = esym + obj_raw_syment_count (abfd) * symesz;
1443 while (esym < esymend)
1444 {
1445 struct internal_syment isym;
1446
1447 bfd_coff_swap_sym_in (abfd, (PTR) esym, (PTR) &isym);
1448
1449 if (isym.n_scnum == sec->target_index
1450 && (bfd_vma) isym.n_value > addr
1451 && (bfd_vma) isym.n_value < toaddr)
1452 {
1453 isym.n_value -= count;
1454
1455 bfd_coff_swap_sym_out (abfd, (PTR) &isym, (PTR) esym);
1456
1457 if (*sym_hash != NULL)
1458 {
1459 BFD_ASSERT ((*sym_hash)->root.type == bfd_link_hash_defined
1460 || (*sym_hash)->root.type == bfd_link_hash_defweak);
1461 BFD_ASSERT ((*sym_hash)->root.u.def.value >= addr
1462 && (*sym_hash)->root.u.def.value < toaddr);
1463 (*sym_hash)->root.u.def.value -= count;
1464 }
1465 }
1466
1467 esym += (isym.n_numaux + 1) * symesz;
1468 sym_hash += isym.n_numaux + 1;
1469 }
1470
1471 /* See if we can move the ALIGN reloc forward. We have adjusted
1472 r_vaddr for it already. */
1473 if (irelalign != NULL)
1474 {
1475 bfd_vma alignto, alignaddr;
1476
1477 alignto = BFD_ALIGN (toaddr, 1 << irelalign->r_offset);
1478 alignaddr = BFD_ALIGN (irelalign->r_vaddr - sec->vma,
1479 1 << irelalign->r_offset);
1480 if (alignto != alignaddr)
1481 {
1482 /* Tail recursion. */
1483 return sh_relax_delete_bytes (abfd, sec, alignaddr,
1484 (int) (alignto - alignaddr));
1485 }
1486 }
1487
1488 return TRUE;
1489 }
1490 \f
1491 /* This is yet another version of the SH opcode table, used to rapidly
1492 get information about a particular instruction. */
1493
1494 /* The opcode map is represented by an array of these structures. The
1495 array is indexed by the high order four bits in the instruction. */
1496
1497 struct sh_major_opcode
1498 {
1499 /* A pointer to the instruction list. This is an array which
1500 contains all the instructions with this major opcode. */
1501 const struct sh_minor_opcode *minor_opcodes;
1502 /* The number of elements in minor_opcodes. */
1503 unsigned short count;
1504 };
1505
1506 /* This structure holds information for a set of SH opcodes. The
1507 instruction code is anded with the mask value, and the resulting
1508 value is used to search the sorted opcode list. */
1509
1510 struct sh_minor_opcode
1511 {
1512 /* The sorted opcode list. */
1513 const struct sh_opcode *opcodes;
1514 /* The number of elements in opcodes. */
1515 unsigned short count;
1516 /* The mask value to use when searching the opcode list. */
1517 unsigned short mask;
1518 };
1519
1520 /* This structure holds information for an SH instruction. An array
1521 of these structures is sorted in order by opcode. */
1522
1523 struct sh_opcode
1524 {
1525 /* The code for this instruction, after it has been anded with the
1526 mask value in the sh_minor_opcode structure. */
1527 unsigned short opcode;
1528 /* Flags for this instruction. */
1529 unsigned long flags;
1530 };
1531
1532 /* Flags which appear in the sh_opcode structure. */
1533
1534 /* This instruction loads a value from memory. */
1535 #define LOAD (0x1)
1536
1537 /* This instruction stores a value to memory. */
1538 #define STORE (0x2)
1539
1540 /* This instruction is a branch. */
1541 #define BRANCH (0x4)
1542
1543 /* This instruction has a delay slot. */
1544 #define DELAY (0x8)
1545
1546 /* This instruction uses the value in the register in the field at
1547 mask 0x0f00 of the instruction. */
1548 #define USES1 (0x10)
1549 #define USES1_REG(x) ((x & 0x0f00) >> 8)
1550
1551 /* This instruction uses the value in the register in the field at
1552 mask 0x00f0 of the instruction. */
1553 #define USES2 (0x20)
1554 #define USES2_REG(x) ((x & 0x00f0) >> 4)
1555
1556 /* This instruction uses the value in register 0. */
1557 #define USESR0 (0x40)
1558
1559 /* This instruction sets the value in the register in the field at
1560 mask 0x0f00 of the instruction. */
1561 #define SETS1 (0x80)
1562 #define SETS1_REG(x) ((x & 0x0f00) >> 8)
1563
1564 /* This instruction sets the value in the register in the field at
1565 mask 0x00f0 of the instruction. */
1566 #define SETS2 (0x100)
1567 #define SETS2_REG(x) ((x & 0x00f0) >> 4)
1568
1569 /* This instruction sets register 0. */
1570 #define SETSR0 (0x200)
1571
1572 /* This instruction sets a special register. */
1573 #define SETSSP (0x400)
1574
1575 /* This instruction uses a special register. */
1576 #define USESSP (0x800)
1577
1578 /* This instruction uses the floating point register in the field at
1579 mask 0x0f00 of the instruction. */
1580 #define USESF1 (0x1000)
1581 #define USESF1_REG(x) ((x & 0x0f00) >> 8)
1582
1583 /* This instruction uses the floating point register in the field at
1584 mask 0x00f0 of the instruction. */
1585 #define USESF2 (0x2000)
1586 #define USESF2_REG(x) ((x & 0x00f0) >> 4)
1587
1588 /* This instruction uses floating point register 0. */
1589 #define USESF0 (0x4000)
1590
1591 /* This instruction sets the floating point register in the field at
1592 mask 0x0f00 of the instruction. */
1593 #define SETSF1 (0x8000)
1594 #define SETSF1_REG(x) ((x & 0x0f00) >> 8)
1595
1596 #define USESAS (0x10000)
1597 #define USESAS_REG(x) (((((x) >> 8) - 2) & 3) + 2)
1598 #define USESR8 (0x20000)
1599 #define SETSAS (0x40000)
1600 #define SETSAS_REG(x) USESAS_REG (x)
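/* A decoding example (the specific insn is illustrative, not part of
   the tables below): "add r2,r5" assembles to 0x352c, which matches
   the sh_opcode30 entry 0x300c (SETS1 | USES1 | USES2); USES1_REG
   (0x352c) and SETS1_REG (0x352c) both give 5 (rn), while USES2_REG
   (0x352c) gives 2 (rm).  */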
1601
1602 #define MAP(a) a, sizeof a / sizeof a[0]
1603
1604 #ifndef COFF_IMAGE_WITH_PE
1605 static bfd_boolean sh_insn_uses_reg
1606 PARAMS ((unsigned int, const struct sh_opcode *, unsigned int));
1607 static bfd_boolean sh_insn_sets_reg
1608 PARAMS ((unsigned int, const struct sh_opcode *, unsigned int));
1609 static bfd_boolean sh_insn_uses_or_sets_reg
1610 PARAMS ((unsigned int, const struct sh_opcode *, unsigned int));
1611 static bfd_boolean sh_insn_uses_freg
1612 PARAMS ((unsigned int, const struct sh_opcode *, unsigned int));
1613 static bfd_boolean sh_insn_sets_freg
1614 PARAMS ((unsigned int, const struct sh_opcode *, unsigned int));
1615 static bfd_boolean sh_insn_uses_or_sets_freg
1616 PARAMS ((unsigned int, const struct sh_opcode *, unsigned int));
1617 static bfd_boolean sh_insns_conflict
1618 PARAMS ((unsigned int, const struct sh_opcode *, unsigned int,
1619 const struct sh_opcode *));
1620 static bfd_boolean sh_load_use
1621 PARAMS ((unsigned int, const struct sh_opcode *, unsigned int,
1622 const struct sh_opcode *));
1623
1624 /* The opcode maps. */
1625
1626 static const struct sh_opcode sh_opcode00[] =
1627 {
1628 { 0x0008, SETSSP }, /* clrt */
1629 { 0x0009, 0 }, /* nop */
1630 { 0x000b, BRANCH | DELAY | USESSP }, /* rts */
1631 { 0x0018, SETSSP }, /* sett */
1632 { 0x0019, SETSSP }, /* div0u */
1633 { 0x001b, 0 }, /* sleep */
1634 { 0x0028, SETSSP }, /* clrmac */
1635 { 0x002b, BRANCH | DELAY | SETSSP }, /* rte */
1636 { 0x0038, USESSP | SETSSP }, /* ldtlb */
1637 { 0x0048, SETSSP }, /* clrs */
1638 { 0x0058, SETSSP } /* sets */
1639 };
1640
1641 static const struct sh_opcode sh_opcode01[] =
1642 {
1643 { 0x0003, BRANCH | DELAY | USES1 | SETSSP }, /* bsrf rn */
1644 { 0x000a, SETS1 | USESSP }, /* sts mach,rn */
1645 { 0x001a, SETS1 | USESSP }, /* sts macl,rn */
1646 { 0x0023, BRANCH | DELAY | USES1 }, /* braf rn */
1647 { 0x0029, SETS1 | USESSP }, /* movt rn */
1648 { 0x002a, SETS1 | USESSP }, /* sts pr,rn */
1649 { 0x005a, SETS1 | USESSP }, /* sts fpul,rn */
1650 { 0x006a, SETS1 | USESSP }, /* sts fpscr,rn / sts dsr,rn */
1651 { 0x0083, LOAD | USES1 }, /* pref @rn */
1652 { 0x007a, SETS1 | USESSP }, /* sts a0,rn */
1653 { 0x008a, SETS1 | USESSP }, /* sts x0,rn */
1654 { 0x009a, SETS1 | USESSP }, /* sts x1,rn */
1655 { 0x00aa, SETS1 | USESSP }, /* sts y0,rn */
1656 { 0x00ba, SETS1 | USESSP } /* sts y1,rn */
1657 };
1658
1659 /* These sixteen instructions can be handled with one table entry below. */
1660 #if 0
1661 { 0x0002, SETS1 | USESSP }, /* stc sr,rn */
1662 { 0x0012, SETS1 | USESSP }, /* stc gbr,rn */
1663 { 0x0022, SETS1 | USESSP }, /* stc vbr,rn */
1664 { 0x0032, SETS1 | USESSP }, /* stc ssr,rn */
1665 { 0x0042, SETS1 | USESSP }, /* stc spc,rn */
1666 { 0x0052, SETS1 | USESSP }, /* stc mod,rn */
1667 { 0x0062, SETS1 | USESSP }, /* stc rs,rn */
1668 { 0x0072, SETS1 | USESSP }, /* stc re,rn */
1669 { 0x0082, SETS1 | USESSP }, /* stc r0_bank,rn */
1670 { 0x0092, SETS1 | USESSP }, /* stc r1_bank,rn */
1671 { 0x00a2, SETS1 | USESSP }, /* stc r2_bank,rn */
1672 { 0x00b2, SETS1 | USESSP }, /* stc r3_bank,rn */
1673 { 0x00c2, SETS1 | USESSP }, /* stc r4_bank,rn */
1674 { 0x00d2, SETS1 | USESSP }, /* stc r5_bank,rn */
1675 { 0x00e2, SETS1 | USESSP }, /* stc r6_bank,rn */
1676 { 0x00f2, SETS1 | USESSP } /* stc r7_bank,rn */
1677 #endif
1678
1679 static const struct sh_opcode sh_opcode02[] =
1680 {
1681 { 0x0002, SETS1 | USESSP }, /* stc <special_reg>,rn */
1682 { 0x0004, STORE | USES1 | USES2 | USESR0 }, /* mov.b rm,@(r0,rn) */
1683 { 0x0005, STORE | USES1 | USES2 | USESR0 }, /* mov.w rm,@(r0,rn) */
1684 { 0x0006, STORE | USES1 | USES2 | USESR0 }, /* mov.l rm,@(r0,rn) */
1685 { 0x0007, SETSSP | USES1 | USES2 }, /* mul.l rm,rn */
1686 { 0x000c, LOAD | SETS1 | USES2 | USESR0 }, /* mov.b @(r0,rm),rn */
1687 { 0x000d, LOAD | SETS1 | USES2 | USESR0 }, /* mov.w @(r0,rm),rn */
1688 { 0x000e, LOAD | SETS1 | USES2 | USESR0 }, /* mov.l @(r0,rm),rn */
1689 { 0x000f, LOAD|SETS1|SETS2|SETSSP|USES1|USES2|USESSP }, /* mac.l @rm+,@rn+ */
1690 };
1691
1692 static const struct sh_minor_opcode sh_opcode0[] =
1693 {
1694 { MAP (sh_opcode00), 0xffff },
1695 { MAP (sh_opcode01), 0xf0ff },
1696 { MAP (sh_opcode02), 0xf00f }
1697 };
1698
1699 static const struct sh_opcode sh_opcode10[] =
1700 {
1701 { 0x1000, STORE | USES1 | USES2 } /* mov.l rm,@(disp,rn) */
1702 };
1703
1704 static const struct sh_minor_opcode sh_opcode1[] =
1705 {
1706 { MAP (sh_opcode10), 0xf000 }
1707 };
1708
1709 static const struct sh_opcode sh_opcode20[] =
1710 {
1711 { 0x2000, STORE | USES1 | USES2 }, /* mov.b rm,@rn */
1712 { 0x2001, STORE | USES1 | USES2 }, /* mov.w rm,@rn */
1713 { 0x2002, STORE | USES1 | USES2 }, /* mov.l rm,@rn */
1714 { 0x2004, STORE | SETS1 | USES1 | USES2 }, /* mov.b rm,@-rn */
1715 { 0x2005, STORE | SETS1 | USES1 | USES2 }, /* mov.w rm,@-rn */
1716 { 0x2006, STORE | SETS1 | USES1 | USES2 }, /* mov.l rm,@-rn */
1717 { 0x2007, SETSSP | USES1 | USES2 | USESSP }, /* div0s */
1718 { 0x2008, SETSSP | USES1 | USES2 }, /* tst rm,rn */
1719 { 0x2009, SETS1 | USES1 | USES2 }, /* and rm,rn */
1720 { 0x200a, SETS1 | USES1 | USES2 }, /* xor rm,rn */
1721 { 0x200b, SETS1 | USES1 | USES2 }, /* or rm,rn */
1722 { 0x200c, SETSSP | USES1 | USES2 }, /* cmp/str rm,rn */
1723 { 0x200d, SETS1 | USES1 | USES2 }, /* xtrct rm,rn */
1724 { 0x200e, SETSSP | USES1 | USES2 }, /* mulu.w rm,rn */
1725 { 0x200f, SETSSP | USES1 | USES2 } /* muls.w rm,rn */
1726 };
1727
1728 static const struct sh_minor_opcode sh_opcode2[] =
1729 {
1730 { MAP (sh_opcode20), 0xf00f }
1731 };
1732
1733 static const struct sh_opcode sh_opcode30[] =
1734 {
1735 { 0x3000, SETSSP | USES1 | USES2 }, /* cmp/eq rm,rn */
1736 { 0x3002, SETSSP | USES1 | USES2 }, /* cmp/hs rm,rn */
1737 { 0x3003, SETSSP | USES1 | USES2 }, /* cmp/ge rm,rn */
1738 { 0x3004, SETSSP | USESSP | USES1 | USES2 }, /* div1 rm,rn */
1739 { 0x3005, SETSSP | USES1 | USES2 }, /* dmulu.l rm,rn */
1740 { 0x3006, SETSSP | USES1 | USES2 }, /* cmp/hi rm,rn */
1741 { 0x3007, SETSSP | USES1 | USES2 }, /* cmp/gt rm,rn */
1742 { 0x3008, SETS1 | USES1 | USES2 }, /* sub rm,rn */
1743 { 0x300a, SETS1 | SETSSP | USES1 | USES2 | USESSP }, /* subc rm,rn */
1744 { 0x300b, SETS1 | SETSSP | USES1 | USES2 }, /* subv rm,rn */
1745 { 0x300c, SETS1 | USES1 | USES2 }, /* add rm,rn */
1746 { 0x300d, SETSSP | USES1 | USES2 }, /* dmuls.l rm,rn */
1747 { 0x300e, SETS1 | SETSSP | USES1 | USES2 | USESSP }, /* addc rm,rn */
1748 { 0x300f, SETS1 | SETSSP | USES1 | USES2 } /* addv rm,rn */
1749 };
1750
1751 static const struct sh_minor_opcode sh_opcode3[] =
1752 {
1753 { MAP (sh_opcode30), 0xf00f }
1754 };
1755
1756 static const struct sh_opcode sh_opcode40[] =
1757 {
1758 { 0x4000, SETS1 | SETSSP | USES1 }, /* shll rn */
1759 { 0x4001, SETS1 | SETSSP | USES1 }, /* shlr rn */
1760 { 0x4002, STORE | SETS1 | USES1 | USESSP }, /* sts.l mach,@-rn */
1761 { 0x4004, SETS1 | SETSSP | USES1 }, /* rotl rn */
1762 { 0x4005, SETS1 | SETSSP | USES1 }, /* rotr rn */
1763 { 0x4006, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,mach */
1764 { 0x4008, SETS1 | USES1 }, /* shll2 rn */
1765 { 0x4009, SETS1 | USES1 }, /* shlr2 rn */
1766 { 0x400a, SETSSP | USES1 }, /* lds rm,mach */
1767 { 0x400b, BRANCH | DELAY | USES1 }, /* jsr @rn */
1768 { 0x4010, SETS1 | SETSSP | USES1 }, /* dt rn */
1769 { 0x4011, SETSSP | USES1 }, /* cmp/pz rn */
1770 { 0x4012, STORE | SETS1 | USES1 | USESSP }, /* sts.l macl,@-rn */
1771 { 0x4014, SETSSP | USES1 }, /* setrc rm */
1772 { 0x4015, SETSSP | USES1 }, /* cmp/pl rn */
1773 { 0x4016, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,macl */
1774 { 0x4018, SETS1 | USES1 }, /* shll8 rn */
1775 { 0x4019, SETS1 | USES1 }, /* shlr8 rn */
1776 { 0x401a, SETSSP | USES1 }, /* lds rm,macl */
1777 { 0x401b, LOAD | SETSSP | USES1 }, /* tas.b @rn */
1778 { 0x4020, SETS1 | SETSSP | USES1 }, /* shal rn */
1779 { 0x4021, SETS1 | SETSSP | USES1 }, /* shar rn */
1780 { 0x4022, STORE | SETS1 | USES1 | USESSP }, /* sts.l pr,@-rn */
1781 { 0x4024, SETS1 | SETSSP | USES1 | USESSP }, /* rotcl rn */
1782 { 0x4025, SETS1 | SETSSP | USES1 | USESSP }, /* rotcr rn */
1783 { 0x4026, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,pr */
1784 { 0x4028, SETS1 | USES1 }, /* shll16 rn */
1785 { 0x4029, SETS1 | USES1 }, /* shlr16 rn */
1786 { 0x402a, SETSSP | USES1 }, /* lds rm,pr */
1787 { 0x402b, BRANCH | DELAY | USES1 }, /* jmp @rn */
1788 { 0x4052, STORE | SETS1 | USES1 | USESSP }, /* sts.l fpul,@-rn */
1789 { 0x4056, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,fpul */
1790 { 0x405a, SETSSP | USES1 }, /* lds rm,fpul */
1791 { 0x4062, STORE | SETS1 | USES1 | USESSP }, /* sts.l fpscr / dsr,@-rn */
1792 { 0x4066, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,fpscr / dsr */
1793 { 0x406a, SETSSP | USES1 }, /* lds rm,fpscr / lds rm,dsr */
1794 { 0x4072, STORE | SETS1 | USES1 | USESSP }, /* sts.l a0,@-rn */
1795 { 0x4076, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,a0 */
1796 { 0x407a, SETSSP | USES1 }, /* lds rm,a0 */
1797 { 0x4082, STORE | SETS1 | USES1 | USESSP }, /* sts.l x0,@-rn */
1798 { 0x4086, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,x0 */
1799 { 0x408a, SETSSP | USES1 }, /* lds rm,x0 */
1800 { 0x4092, STORE | SETS1 | USES1 | USESSP }, /* sts.l x1,@-rn */
1801 { 0x4096, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,x1 */
1802 { 0x409a, SETSSP | USES1 }, /* lds rm,x1 */
1803 { 0x40a2, STORE | SETS1 | USES1 | USESSP }, /* sts.l y0,@-rn */
1804 { 0x40a6, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,y0 */
1805 { 0x40aa, SETSSP | USES1 }, /* lds rm,y0 */
1806 { 0x40b2, STORE | SETS1 | USES1 | USESSP }, /* sts.l y1,@-rn */
1807 { 0x40b6, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,y1 */
1808 { 0x40ba, SETSSP | USES1 } /* lds rm,y1 */
1809 #if 0 /* These groups of sixteen insns can each be
1810 handled with one table entry below. */
1811 { 0x4003, STORE | SETS1 | USES1 | USESSP }, /* stc.l sr,@-rn */
1812 { 0x4013, STORE | SETS1 | USES1 | USESSP }, /* stc.l gbr,@-rn */
1813 { 0x4023, STORE | SETS1 | USES1 | USESSP }, /* stc.l vbr,@-rn */
1814 { 0x4033, STORE | SETS1 | USES1 | USESSP }, /* stc.l ssr,@-rn */
1815 { 0x4043, STORE | SETS1 | USES1 | USESSP }, /* stc.l spc,@-rn */
1816 { 0x4053, STORE | SETS1 | USES1 | USESSP }, /* stc.l mod,@-rn */
1817 { 0x4063, STORE | SETS1 | USES1 | USESSP }, /* stc.l rs,@-rn */
1818 { 0x4073, STORE | SETS1 | USES1 | USESSP }, /* stc.l re,@-rn */
1819 { 0x4083, STORE | SETS1 | USES1 | USESSP }, /* stc.l r0_bank,@-rn */
1820 ..
1821 { 0x40f3, STORE | SETS1 | USES1 | USESSP }, /* stc.l r7_bank,@-rn */
1822
1823 { 0x4007, LOAD | SETS1 | SETSSP | USES1 }, /* ldc.l @rm+,sr */
1824 { 0x4017, LOAD | SETS1 | SETSSP | USES1 }, /* ldc.l @rm+,gbr */
1825 { 0x4027, LOAD | SETS1 | SETSSP | USES1 }, /* ldc.l @rm+,vbr */
1826 { 0x4037, LOAD | SETS1 | SETSSP | USES1 }, /* ldc.l @rm+,ssr */
1827 { 0x4047, LOAD | SETS1 | SETSSP | USES1 }, /* ldc.l @rm+,spc */
1828 { 0x4057, LOAD | SETS1 | SETSSP | USES1 }, /* ldc.l @rm+,mod */
1829 { 0x4067, LOAD | SETS1 | SETSSP | USES1 }, /* ldc.l @rm+,rs */
1830 { 0x4077, LOAD | SETS1 | SETSSP | USES1 }, /* ldc.l @rm+,re */
1831 { 0x4087, LOAD | SETS1 | SETSSP | USES1 }, /* ldc.l @rm+,r0_bank */
1832 ..
1833 { 0x40f7, LOAD | SETS1 | SETSSP | USES1 }, /* ldc.l @rm+,r7_bank */
1834
1835 { 0x400e, SETSSP | USES1 }, /* ldc rm,sr */
1836 { 0x401e, SETSSP | USES1 }, /* ldc rm,gbr */
1837 { 0x402e, SETSSP | USES1 }, /* ldc rm,vbr */
1838 { 0x403e, SETSSP | USES1 }, /* ldc rm,ssr */
1839 { 0x404e, SETSSP | USES1 }, /* ldc rm,spc */
1840 { 0x405e, SETSSP | USES1 }, /* ldc rm,mod */
1841 { 0x406e, SETSSP | USES1 }, /* ldc rm,rs */
1842 { 0x407e, SETSSP | USES1 }, /* ldc rm,re */
1843 { 0x408e, SETSSP | USES1 }, /* ldc rm,r0_bank */
1844 ..
1845 { 0x40fe, SETSSP | USES1 } /* ldc rm,r7_bank */
1846 #endif
1847 };
1848
1849 static const struct sh_opcode sh_opcode41[] =
1850 {
1851 { 0x4003, STORE | SETS1 | USES1 | USESSP }, /* stc.l <special_reg>,@-rn */
1852 { 0x4007, LOAD | SETS1 | SETSSP | USES1 }, /* ldc.l @rm+,<special_reg> */
1853 { 0x400c, SETS1 | USES1 | USES2 }, /* shad rm,rn */
1854 { 0x400d, SETS1 | USES1 | USES2 }, /* shld rm,rn */
1855 { 0x400e, SETSSP | USES1 }, /* ldc rm,<special_reg> */
1856 { 0x400f, LOAD|SETS1|SETS2|SETSSP|USES1|USES2|USESSP }, /* mac.w @rm+,@rn+ */
1857 };
1858
1859 static const struct sh_minor_opcode sh_opcode4[] =
1860 {
1861 { MAP (sh_opcode40), 0xf0ff },
1862 { MAP (sh_opcode41), 0xf00f }
1863 };
1864
1865 static const struct sh_opcode sh_opcode50[] =
1866 {
1867 { 0x5000, LOAD | SETS1 | USES2 } /* mov.l @(disp,rm),rn */
1868 };
1869
1870 static const struct sh_minor_opcode sh_opcode5[] =
1871 {
1872 { MAP (sh_opcode50), 0xf000 }
1873 };
1874
1875 static const struct sh_opcode sh_opcode60[] =
1876 {
1877 { 0x6000, LOAD | SETS1 | USES2 }, /* mov.b @rm,rn */
1878 { 0x6001, LOAD | SETS1 | USES2 }, /* mov.w @rm,rn */
1879 { 0x6002, LOAD | SETS1 | USES2 }, /* mov.l @rm,rn */
1880 { 0x6003, SETS1 | USES2 }, /* mov rm,rn */
1881 { 0x6004, LOAD | SETS1 | SETS2 | USES2 }, /* mov.b @rm+,rn */
1882 { 0x6005, LOAD | SETS1 | SETS2 | USES2 }, /* mov.w @rm+,rn */
1883 { 0x6006, LOAD | SETS1 | SETS2 | USES2 }, /* mov.l @rm+,rn */
1884 { 0x6007, SETS1 | USES2 }, /* not rm,rn */
1885 { 0x6008, SETS1 | USES2 }, /* swap.b rm,rn */
1886 { 0x6009, SETS1 | USES2 }, /* swap.w rm,rn */
1887 { 0x600a, SETS1 | SETSSP | USES2 | USESSP }, /* negc rm,rn */
1888 { 0x600b, SETS1 | USES2 }, /* neg rm,rn */
1889 { 0x600c, SETS1 | USES2 }, /* extu.b rm,rn */
1890 { 0x600d, SETS1 | USES2 }, /* extu.w rm,rn */
1891 { 0x600e, SETS1 | USES2 }, /* exts.b rm,rn */
1892 { 0x600f, SETS1 | USES2 } /* exts.w rm,rn */
1893 };
1894
1895 static const struct sh_minor_opcode sh_opcode6[] =
1896 {
1897 { MAP (sh_opcode60), 0xf00f }
1898 };
1899
1900 static const struct sh_opcode sh_opcode70[] =
1901 {
1902 { 0x7000, SETS1 | USES1 } /* add #imm,rn */
1903 };
1904
1905 static const struct sh_minor_opcode sh_opcode7[] =
1906 {
1907 { MAP (sh_opcode70), 0xf000 }
1908 };
1909
1910 static const struct sh_opcode sh_opcode80[] =
1911 {
1912 { 0x8000, STORE | USES2 | USESR0 }, /* mov.b r0,@(disp,rn) */
1913 { 0x8100, STORE | USES2 | USESR0 }, /* mov.w r0,@(disp,rn) */
1914 { 0x8200, SETSSP }, /* setrc #imm */
1915 { 0x8400, LOAD | SETSR0 | USES2 }, /* mov.b @(disp,rm),r0 */
1916 { 0x8500, LOAD | SETSR0 | USES2 }, /* mov.w @(disp,rm),r0 */
1917 { 0x8800, SETSSP | USESR0 }, /* cmp/eq #imm,r0 */
1918 { 0x8900, BRANCH | USESSP }, /* bt label */
1919 { 0x8b00, BRANCH | USESSP }, /* bf label */
1920 { 0x8c00, SETSSP }, /* ldrs @(disp,pc) */
1921 { 0x8d00, BRANCH | DELAY | USESSP }, /* bt/s label */
1922 { 0x8e00, SETSSP }, /* ldre @(disp,pc) */
1923 { 0x8f00, BRANCH | DELAY | USESSP } /* bf/s label */
1924 };
1925
1926 static const struct sh_minor_opcode sh_opcode8[] =
1927 {
1928 { MAP (sh_opcode80), 0xff00 }
1929 };
1930
1931 static const struct sh_opcode sh_opcode90[] =
1932 {
1933 { 0x9000, LOAD | SETS1 } /* mov.w @(disp,pc),rn */
1934 };
1935
1936 static const struct sh_minor_opcode sh_opcode9[] =
1937 {
1938 { MAP (sh_opcode90), 0xf000 }
1939 };
1940
1941 static const struct sh_opcode sh_opcodea0[] =
1942 {
1943 { 0xa000, BRANCH | DELAY } /* bra label */
1944 };
1945
1946 static const struct sh_minor_opcode sh_opcodea[] =
1947 {
1948 { MAP (sh_opcodea0), 0xf000 }
1949 };
1950
1951 static const struct sh_opcode sh_opcodeb0[] =
1952 {
1953 { 0xb000, BRANCH | DELAY } /* bsr label */
1954 };
1955
1956 static const struct sh_minor_opcode sh_opcodeb[] =
1957 {
1958 { MAP (sh_opcodeb0), 0xf000 }
1959 };
1960
1961 static const struct sh_opcode sh_opcodec0[] =
1962 {
1963 { 0xc000, STORE | USESR0 | USESSP }, /* mov.b r0,@(disp,gbr) */
1964 { 0xc100, STORE | USESR0 | USESSP }, /* mov.w r0,@(disp,gbr) */
1965 { 0xc200, STORE | USESR0 | USESSP }, /* mov.l r0,@(disp,gbr) */
1966 { 0xc300, BRANCH | USESSP }, /* trapa #imm */
1967 { 0xc400, LOAD | SETSR0 | USESSP }, /* mov.b @(disp,gbr),r0 */
1968 { 0xc500, LOAD | SETSR0 | USESSP }, /* mov.w @(disp,gbr),r0 */
1969 { 0xc600, LOAD | SETSR0 | USESSP }, /* mov.l @(disp,gbr),r0 */
1970 { 0xc700, SETSR0 }, /* mova @(disp,pc),r0 */
1971 { 0xc800, SETSSP | USESR0 }, /* tst #imm,r0 */
1972 { 0xc900, SETSR0 | USESR0 }, /* and #imm,r0 */
1973 { 0xca00, SETSR0 | USESR0 }, /* xor #imm,r0 */
1974 { 0xcb00, SETSR0 | USESR0 }, /* or #imm,r0 */
1975 { 0xcc00, LOAD | SETSSP | USESR0 | USESSP }, /* tst.b #imm,@(r0,gbr) */
1976 { 0xcd00, LOAD | STORE | USESR0 | USESSP }, /* and.b #imm,@(r0,gbr) */
1977 { 0xce00, LOAD | STORE | USESR0 | USESSP }, /* xor.b #imm,@(r0,gbr) */
1978 { 0xcf00, LOAD | STORE | USESR0 | USESSP } /* or.b #imm,@(r0,gbr) */
1979 };
1980
1981 static const struct sh_minor_opcode sh_opcodec[] =
1982 {
1983 { MAP (sh_opcodec0), 0xff00 }
1984 };
1985
1986 static const struct sh_opcode sh_opcoded0[] =
1987 {
1988 { 0xd000, LOAD | SETS1 } /* mov.l @(disp,pc),rn */
1989 };
1990
1991 static const struct sh_minor_opcode sh_opcoded[] =
1992 {
1993 { MAP (sh_opcoded0), 0xf000 }
1994 };
1995
1996 static const struct sh_opcode sh_opcodee0[] =
1997 {
1998 { 0xe000, SETS1 } /* mov #imm,rn */
1999 };
2000
2001 static const struct sh_minor_opcode sh_opcodee[] =
2002 {
2003 { MAP (sh_opcodee0), 0xf000 }
2004 };
2005
2006 static const struct sh_opcode sh_opcodef0[] =
2007 {
2008 { 0xf000, SETSF1 | USESF1 | USESF2 }, /* fadd fm,fn */
2009 { 0xf001, SETSF1 | USESF1 | USESF2 }, /* fsub fm,fn */
2010 { 0xf002, SETSF1 | USESF1 | USESF2 }, /* fmul fm,fn */
2011 { 0xf003, SETSF1 | USESF1 | USESF2 }, /* fdiv fm,fn */
2012 { 0xf004, SETSSP | USESF1 | USESF2 }, /* fcmp/eq fm,fn */
2013 { 0xf005, SETSSP | USESF1 | USESF2 }, /* fcmp/gt fm,fn */
2014 { 0xf006, LOAD | SETSF1 | USES2 | USESR0 }, /* fmov.s @(r0,rm),fn */
2015 { 0xf007, STORE | USES1 | USESF2 | USESR0 }, /* fmov.s fm,@(r0,rn) */
2016 { 0xf008, LOAD | SETSF1 | USES2 }, /* fmov.s @rm,fn */
2017 { 0xf009, LOAD | SETS2 | SETSF1 | USES2 }, /* fmov.s @rm+,fn */
2018 { 0xf00a, STORE | USES1 | USESF2 }, /* fmov.s fm,@rn */
2019 { 0xf00b, STORE | SETS1 | USES1 | USESF2 }, /* fmov.s fm,@-rn */
2020 { 0xf00c, SETSF1 | USESF2 }, /* fmov fm,fn */
2021 { 0xf00e, SETSF1 | USESF1 | USESF2 | USESF0 } /* fmac f0,fm,fn */
2022 };
2023
2024 static const struct sh_opcode sh_opcodef1[] =
2025 {
2026 { 0xf00d, SETSF1 | USESSP }, /* fsts fpul,fn */
2027 { 0xf01d, SETSSP | USESF1 }, /* flds fn,fpul */
2028 { 0xf02d, SETSF1 | USESSP }, /* float fpul,fn */
2029 { 0xf03d, SETSSP | USESF1 }, /* ftrc fn,fpul */
2030 { 0xf04d, SETSF1 | USESF1 }, /* fneg fn */
2031 { 0xf05d, SETSF1 | USESF1 }, /* fabs fn */
2032 { 0xf06d, SETSF1 | USESF1 }, /* fsqrt fn */
2033 { 0xf07d, SETSSP | USESF1 }, /* ftst/nan fn */
2034 { 0xf08d, SETSF1 }, /* fldi0 fn */
2035 { 0xf09d, SETSF1 } /* fldi1 fn */
2036 };
2037
2038 static const struct sh_minor_opcode sh_opcodef[] =
2039 {
2040 { MAP (sh_opcodef0), 0xf00f },
2041 { MAP (sh_opcodef1), 0xf0ff }
2042 };
2043
2044 static struct sh_major_opcode sh_opcodes[] =
2045 {
2046 { MAP (sh_opcode0) },
2047 { MAP (sh_opcode1) },
2048 { MAP (sh_opcode2) },
2049 { MAP (sh_opcode3) },
2050 { MAP (sh_opcode4) },
2051 { MAP (sh_opcode5) },
2052 { MAP (sh_opcode6) },
2053 { MAP (sh_opcode7) },
2054 { MAP (sh_opcode8) },
2055 { MAP (sh_opcode9) },
2056 { MAP (sh_opcodea) },
2057 { MAP (sh_opcodeb) },
2058 { MAP (sh_opcodec) },
2059 { MAP (sh_opcoded) },
2060 { MAP (sh_opcodee) },
2061 { MAP (sh_opcodef) }
2062 };
2063
2064 /* The double data transfer / parallel processing insns are not
2065 described here. This will cause sh_align_load_span to leave them alone. */
2066
2067 static const struct sh_opcode sh_dsp_opcodef0[] =
2068 {
2069 { 0xf400, USESAS | SETSAS | LOAD | SETSSP }, /* movs.x @-as,ds */
2070 { 0xf401, USESAS | SETSAS | STORE | USESSP }, /* movs.x ds,@-as */
2071 { 0xf404, USESAS | LOAD | SETSSP }, /* movs.x @as,ds */
2072 { 0xf405, USESAS | STORE | USESSP }, /* movs.x ds,@as */
2073 { 0xf408, USESAS | SETSAS | LOAD | SETSSP }, /* movs.x @as+,ds */
2074 { 0xf409, USESAS | SETSAS | STORE | USESSP }, /* movs.x ds,@as+ */
2075 { 0xf40c, USESAS | SETSAS | LOAD | SETSSP | USESR8 }, /* movs.x @as+r8,ds */
2076 { 0xf40d, USESAS | SETSAS | STORE | USESSP | USESR8 } /* movs.x ds,@as+r8 */
2077 };
2078
2079 static const struct sh_minor_opcode sh_dsp_opcodef[] =
2080 {
2081 { MAP (sh_dsp_opcodef0), 0xfc0d }
2082 };
2083
2084 /* Given an instruction, return a pointer to the corresponding
2085 sh_opcode structure. Return NULL if the instruction is not
2086 recognized. */
2087
2088 static const struct sh_opcode *
2089 sh_insn_info (insn)
2090 unsigned int insn;
2091 {
2092 const struct sh_major_opcode *maj;
2093 const struct sh_minor_opcode *min, *minend;
2094
2095 maj = &sh_opcodes[(insn & 0xf000) >> 12];
2096 min = maj->minor_opcodes;
2097 minend = min + maj->count;
2098 for (; min < minend; min++)
2099 {
2100 unsigned int l;
2101 const struct sh_opcode *op, *opend;
2102
2103 l = insn & min->mask;
2104 op = min->opcodes;
2105 opend = op + min->count;
2106
2107 /* Since the opcode tables are sorted, we could use a binary
2108 search here if the count were above some cutoff value. */
2109 for (; op < opend; op++)
2110 if (op->opcode == l)
2111 return op;
2112 }
2113
2114 return NULL;
2115 }
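/* As a concrete example of the lookup above: for the insn 0x6503
   (mov r0,r5), the major index is (0x6503 & 0xf000) >> 12 == 6, the
   sh_opcode6 table has a single minor entry with mask 0xf00f, and
   0x6503 & 0xf00f == 0x6003, which matches the { 0x6003, SETS1 | USES2 }
   "mov rm,rn" entry, so that sh_opcode structure is returned.  An insn
   whose masked value matches no entry yields NULL, which the callers
   below treat as "leave this insn alone".  */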
2116
2117 /* See whether an instruction uses or sets a general purpose register. */
2118
2119 static bfd_boolean
2120 sh_insn_uses_or_sets_reg (insn, op, reg)
2121 unsigned int insn;
2122 const struct sh_opcode *op;
2123 unsigned int reg;
2124 {
2125 if (sh_insn_uses_reg (insn, op, reg))
2126 return TRUE;
2127
2128 return sh_insn_sets_reg (insn, op, reg);
2129 }
2130
2131 /* See whether an instruction uses a general purpose register. */
2132
2133 static bfd_boolean
2134 sh_insn_uses_reg (insn, op, reg)
2135 unsigned int insn;
2136 const struct sh_opcode *op;
2137 unsigned int reg;
2138 {
2139 unsigned int f;
2140
2141 f = op->flags;
2142
2143 if ((f & USES1) != 0
2144 && USES1_REG (insn) == reg)
2145 return TRUE;
2146 if ((f & USES2) != 0
2147 && USES2_REG (insn) == reg)
2148 return TRUE;
2149 if ((f & USESR0) != 0
2150 && reg == 0)
2151 return TRUE;
2152 if ((f & USESAS) && reg == USESAS_REG (insn))
2153 return TRUE;
2154 if ((f & USESR8) && reg == 8)
2155 return TRUE;
2156
2157 return FALSE;
2158 }
2159
2160 /* See whether an instruction sets a general purpose register. */
2161
2162 static bfd_boolean
2163 sh_insn_sets_reg (insn, op, reg)
2164 unsigned int insn;
2165 const struct sh_opcode *op;
2166 unsigned int reg;
2167 {
2168 unsigned int f;
2169
2170 f = op->flags;
2171
2172 if ((f & SETS1) != 0
2173 && SETS1_REG (insn) == reg)
2174 return TRUE;
2175 if ((f & SETS2) != 0
2176 && SETS2_REG (insn) == reg)
2177 return TRUE;
2178 if ((f & SETSR0) != 0
2179 && reg == 0)
2180 return TRUE;
2181 if ((f & SETSAS) && reg == SETSAS_REG (insn))
2182 return TRUE;
2183
2184 return FALSE;
2185 }
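/* Worked example for the two routines above, assuming the *_REG macros
   (defined earlier in this file) extract the usual four-bit register
   fields - bits 8-11 for the "1" operand and bits 4-7 for the "2"
   operand, as the (i1 & 0x0f00) >> 8 extraction in sh_load_use below
   suggests.  For the insn 0x341c (add r1,r4), the matching entry is
   { 0x300c, SETS1 | USES1 | USES2 }, so sh_insn_uses_reg returns TRUE
   for regs 4 and 1 and for no others, while sh_insn_sets_reg returns
   TRUE only for reg 4.  */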
2186
2187 /* See whether an instruction uses or sets a floating point register. */
2188
2189 static bfd_boolean
2190 sh_insn_uses_or_sets_freg (insn, op, reg)
2191 unsigned int insn;
2192 const struct sh_opcode *op;
2193 unsigned int reg;
2194 {
2195 if (sh_insn_uses_freg (insn, op, reg))
2196 return TRUE;
2197
2198 return sh_insn_sets_freg (insn, op, reg);
2199 }
2200
2201 /* See whether an instruction uses a floating point register. */
2202
2203 static bfd_boolean
2204 sh_insn_uses_freg (insn, op, freg)
2205 unsigned int insn;
2206 const struct sh_opcode *op;
2207 unsigned int freg;
2208 {
2209 unsigned int f;
2210
2211 f = op->flags;
2212
2213 /* We can't tell if this is a double-precision insn, so just play safe
2214 and assume that it might be. So we not only test FREG against
2215 itself, but also test an even FREG against FREG+1 - in case the
2216 using insn uses just the low part of a double-precision value -
2217 and an odd FREG against FREG-1 - in case the setting insn sets
2218 just the low part of a double-precision value.
2219 What this all boils down to is that we have to ignore the lowest
2220 bit of the register number. */
2221
2222 if ((f & USESF1) != 0
2223 && (USESF1_REG (insn) & 0xe) == (freg & 0xe))
2224 return TRUE;
2225 if ((f & USESF2) != 0
2226 && (USESF2_REG (insn) & 0xe) == (freg & 0xe))
2227 return TRUE;
2228 if ((f & USESF0) != 0
2229 && freg == 0)
2230 return TRUE;
2231
2232 return FALSE;
2233 }
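/* For example, fadd f4,f7 is encoded as 0xf740 and maps to the entry
   { 0xf000, SETSF1 | USESF1 | USESF2 } above.  Assuming USESF1_REG and
   USESF2_REG extract the fn (bits 8-11) and fm (bits 4-7) fields, the
   test here reports not only f4 and f7 as used but also their pair
   partners f5 and f6, because (5 & 0xe) == (4 & 0xe) and
   (6 & 0xe) == (7 & 0xe); a double-precision variant would touch the
   other half of each even/odd pair as well.  */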
2234
2235 /* See whether an instruction sets a floating point register. */
2236
2237 static bfd_boolean
2238 sh_insn_sets_freg (insn, op, freg)
2239 unsigned int insn;
2240 const struct sh_opcode *op;
2241 unsigned int freg;
2242 {
2243 unsigned int f;
2244
2245 f = op->flags;
2246
2247 /* We can't tell if this is a double-precision insn, so just play safe
2248 and assume that it might be. So we not only test FREG against
2249 itself, but also test an even FREG against FREG+1 - in case the
2250 using insn uses just the low part of a double-precision value -
2251 and an odd FREG against FREG-1 - in case the setting insn sets
2252 just the low part of a double-precision value.
2253 What this all boils down to is that we have to ignore the lowest
2254 bit of the register number. */
2255
2256 if ((f & SETSF1) != 0
2257 && (SETSF1_REG (insn) & 0xe) == (freg & 0xe))
2258 return TRUE;
2259
2260 return FALSE;
2261 }
2262
2263 /* See whether instructions I1 and I2 conflict, assuming I1 comes
2264 before I2. OP1 and OP2 are the corresponding sh_opcode structures.
2265 This should return TRUE if there is a conflict, or FALSE if the
2266 instructions can be swapped safely. */
2267
2268 static bfd_boolean
2269 sh_insns_conflict (i1, op1, i2, op2)
2270 unsigned int i1;
2271 const struct sh_opcode *op1;
2272 unsigned int i2;
2273 const struct sh_opcode *op2;
2274 {
2275 unsigned int f1, f2;
2276
2277 f1 = op1->flags;
2278 f2 = op2->flags;
2279
2280 /* Load of fpscr conflicts with floating point operations.
2281 FIXME: shouldn't test raw opcodes here. */
2282 if (((i1 & 0xf0ff) == 0x4066 && (i2 & 0xf000) == 0xf000)
2283 || ((i2 & 0xf0ff) == 0x4066 && (i1 & 0xf000) == 0xf000))
2284 return TRUE;
2285
2286 if ((f1 & (BRANCH | DELAY)) != 0
2287 || (f2 & (BRANCH | DELAY)) != 0)
2288 return TRUE;
2289
2290 if (((f1 | f2) & SETSSP)
2291 && (f1 & (SETSSP | USESSP))
2292 && (f2 & (SETSSP | USESSP)))
2293 return TRUE;
2294
2295 if ((f1 & SETS1) != 0
2296 && sh_insn_uses_or_sets_reg (i2, op2, SETS1_REG (i1)))
2297 return TRUE;
2298 if ((f1 & SETS2) != 0
2299 && sh_insn_uses_or_sets_reg (i2, op2, SETS2_REG (i1)))
2300 return TRUE;
2301 if ((f1 & SETSR0) != 0
2302 && sh_insn_uses_or_sets_reg (i2, op2, 0))
2303 return TRUE;
2304 if ((f1 & SETSAS)
2305 && sh_insn_uses_or_sets_reg (i2, op2, SETSAS_REG (i1)))
2306 return TRUE;
2307 if ((f1 & SETSF1) != 0
2308 && sh_insn_uses_or_sets_freg (i2, op2, SETSF1_REG (i1)))
2309 return TRUE;
2310
2311 if ((f2 & SETS1) != 0
2312 && sh_insn_uses_or_sets_reg (i1, op1, SETS1_REG (i2)))
2313 return TRUE;
2314 if ((f2 & SETS2) != 0
2315 && sh_insn_uses_or_sets_reg (i1, op1, SETS2_REG (i2)))
2316 return TRUE;
2317 if ((f2 & SETSR0) != 0
2318 && sh_insn_uses_or_sets_reg (i1, op1, 0))
2319 return TRUE;
2320 if ((f2 & SETSAS)
2321 && sh_insn_uses_or_sets_reg (i1, op1, SETSAS_REG (i2)))
2322 return TRUE;
2323 if ((f2 & SETSF1) != 0
2324 && sh_insn_uses_or_sets_freg (i1, op1, SETSF1_REG (i2)))
2325 return TRUE;
2326
2327 /* The instructions do not conflict. */
2328 return FALSE;
2329 }
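/* A small example of the conflict test: mov #1,r1 (0xe101, SETS1) and
   add r1,r2 (0x321c, SETS1 | USES1 | USES2) conflict, because the first
   insn sets r1 and sh_insn_uses_or_sets_reg reports that the second
   uses it, so swapping them would change which value of r1 the add
   sees.  By contrast, mov #1,r1 and add r3,r4 (0x343c) touch disjoint
   registers and neither sets a special register, so they do not
   conflict and are candidates for swapping.  */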
2330
2331 /* I1 is a load instruction, and I2 is some other instruction. Return
2332 TRUE if I1 loads a register which I2 uses. */
2333
2334 static bfd_boolean
2335 sh_load_use (i1, op1, i2, op2)
2336 unsigned int i1;
2337 const struct sh_opcode *op1;
2338 unsigned int i2;
2339 const struct sh_opcode *op2;
2340 {
2341 unsigned int f1;
2342
2343 f1 = op1->flags;
2344
2345 if ((f1 & LOAD) == 0)
2346 return FALSE;
2347
2348 /* If both SETS1 and SETSSP are set, that means a load to a special
2349 register using postincrement addressing mode, which we don't care
2350 about here. */
2351 if ((f1 & SETS1) != 0
2352 && (f1 & SETSSP) == 0
2353 && sh_insn_uses_reg (i2, op2, (i1 & 0x0f00) >> 8))
2354 return TRUE;
2355
2356 if ((f1 & SETSR0) != 0
2357 && sh_insn_uses_reg (i2, op2, 0))
2358 return TRUE;
2359
2360 if ((f1 & SETSF1) != 0
2361 && sh_insn_uses_freg (i2, op2, (i1 & 0x0f00) >> 8))
2362 return TRUE;
2363
2364 return FALSE;
2365 }
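/* For instance, mov.l @r1,r5 (0x6512, LOAD | SETS1 | USES2) followed by
   add r5,r0 (0x305c) makes sh_load_use return TRUE: the load sets r5
   ((0x6512 & 0x0f00) >> 8 == 5) and the add uses it, so placing the two
   insns back to back would create exactly the pipeline bubble that the
   swapping code below is trying to avoid.  */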
2366
2367 /* Try to align loads and stores within a span of memory. This is
2368 called by both the ELF and the COFF sh targets. ABFD and SEC are
2369 the BFD and section we are examining. CONTENTS is the contents of
2370 the section. SWAP is the routine to call to swap two instructions.
2371 RELOCS is a pointer to the internal relocation information, to be
2372 passed to SWAP. PLABEL is a pointer to the current label in a
2373 sorted list of labels; LABEL_END is the end of the list. START and
2374 STOP are the range of memory to examine. If a swap is made,
2375 *PSWAPPED is set to TRUE. */
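/* In practice this means: if a load or store sits at an offset with
   (offset & 3) == 2, the code below first tries to swap it with the
   preceding insn (provided the load carries no label, and the previous
   insn is not itself a load/store, is not in a delay slot and does not
   conflict with it), and failing that tries to swap it with the
   following insn, so that the load ends up on a four byte boundary.  */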
2376
2377 #ifdef COFF_WITH_PE
2378 static
2379 #endif
2380 bfd_boolean
2381 _bfd_sh_align_load_span (abfd, sec, contents, swap, relocs,
2382 plabel, label_end, start, stop, pswapped)
2383 bfd *abfd;
2384 asection *sec;
2385 bfd_byte *contents;
2386 bfd_boolean (*swap) PARAMS ((bfd *, asection *, PTR, bfd_byte *, bfd_vma));
2387 PTR relocs;
2388 bfd_vma **plabel;
2389 bfd_vma *label_end;
2390 bfd_vma start;
2391 bfd_vma stop;
2392 bfd_boolean *pswapped;
2393 {
2394 int dsp = (abfd->arch_info->mach == bfd_mach_sh_dsp
2395 || abfd->arch_info->mach == bfd_mach_sh3_dsp);
2396 bfd_vma i;
2397
2398 /* The SH4 has a Harvard architecture, hence aligning loads is not
2399 desirable. In fact, it is counter-productive, since it interferes
2400 with the schedules generated by the compiler. */
2401 if (abfd->arch_info->mach == bfd_mach_sh4)
2402 return TRUE;
2403
2404 /* If we are linking sh[3]-dsp code, swap the FPU instructions for DSP
2405 instructions. */
2406 if (dsp)
2407 {
2408 sh_opcodes[0xf].minor_opcodes = sh_dsp_opcodef;
2409 sh_opcodes[0xf].count = sizeof sh_dsp_opcodef / sizeof sh_dsp_opcodef[0];
2410 }
2411
2412 /* Instructions should be aligned on 2 byte boundaries. */
2413 if ((start & 1) == 1)
2414 ++start;
2415
2416 /* Now look through the unaligned addresses. */
2417 i = start;
2418 if ((i & 2) == 0)
2419 i += 2;
2420 for (; i < stop; i += 4)
2421 {
2422 unsigned int insn;
2423 const struct sh_opcode *op;
2424 unsigned int prev_insn = 0;
2425 const struct sh_opcode *prev_op = NULL;
2426
2427 insn = bfd_get_16 (abfd, contents + i);
2428 op = sh_insn_info (insn);
2429 if (op == NULL
2430 || (op->flags & (LOAD | STORE)) == 0)
2431 continue;
2432
2433 /* This is a load or store which is not on a four byte boundary. */
2434
2435 while (*plabel < label_end && **plabel < i)
2436 ++*plabel;
2437
2438 if (i > start)
2439 {
2440 prev_insn = bfd_get_16 (abfd, contents + i - 2);
2441 /* If INSN is the field b of a parallel processing insn, it is not
2442 a load / store after all. Note that the test here might mistake
2443 the field_b of a pcopy insn for the starting code of a parallel
2444 processing insn; this might miss a swapping opportunity, but at
2445 least we're on the safe side. */
2446 if (dsp && (prev_insn & 0xfc00) == 0xf800)
2447 continue;
2448
2449 /* Check if prev_insn is actually the field b of a parallel
2450 processing insn. Again, this can give a spurious match
2451 after a pcopy. */
2452 if (dsp && i - 2 > start)
2453 {
2454 unsigned pprev_insn = bfd_get_16 (abfd, contents + i - 4);
2455
2456 if ((pprev_insn & 0xfc00) == 0xf800)
2457 prev_op = NULL;
2458 else
2459 prev_op = sh_insn_info (prev_insn);
2460 }
2461 else
2462 prev_op = sh_insn_info (prev_insn);
2463
2464 /* If the load/store instruction is in a delay slot, we
2465 can't swap. */
2466 if (prev_op == NULL
2467 || (prev_op->flags & DELAY) != 0)
2468 continue;
2469 }
2470 if (i > start
2471 && (*plabel >= label_end || **plabel != i)
2472 && prev_op != NULL
2473 && (prev_op->flags & (LOAD | STORE)) == 0
2474 && ! sh_insns_conflict (prev_insn, prev_op, insn, op))
2475 {
2476 bfd_boolean ok;
2477
2478 /* The load/store instruction does not have a label, and
2479 there is a previous instruction; PREV_INSN is not
2480 itself a load/store instruction, and PREV_INSN and
2481 INSN do not conflict. */
2482
2483 ok = TRUE;
2484
2485 if (i >= start + 4)
2486 {
2487 unsigned int prev2_insn;
2488 const struct sh_opcode *prev2_op;
2489
2490 prev2_insn = bfd_get_16 (abfd, contents + i - 4);
2491 prev2_op = sh_insn_info (prev2_insn);
2492
2493 /* If the instruction before PREV_INSN has a delay
2494 slot--that is, PREV_INSN is in a delay slot--we
2495 can not swap. */
2496 if (prev2_op == NULL
2497 || (prev2_op->flags & DELAY) != 0)
2498 ok = FALSE;
2499
2500 /* If the instruction before PREV_INSN is a load,
2501 and it sets a register which INSN uses, then
2502 putting INSN immediately after PREV_INSN will
2503 cause a pipeline bubble, so there is no point to
2504 making the swap. */
2505 if (ok
2506 && (prev2_op->flags & LOAD) != 0
2507 && sh_load_use (prev2_insn, prev2_op, insn, op))
2508 ok = FALSE;
2509 }
2510
2511 if (ok)
2512 {
2513 if (! (*swap) (abfd, sec, relocs, contents, i - 2))
2514 return FALSE;
2515 *pswapped = TRUE;
2516 continue;
2517 }
2518 }
2519
2520 while (*plabel < label_end && **plabel < i + 2)
2521 ++*plabel;
2522
2523 if (i + 2 < stop
2524 && (*plabel >= label_end || **plabel != i + 2))
2525 {
2526 unsigned int next_insn;
2527 const struct sh_opcode *next_op;
2528
2529 /* There is an instruction after the load/store
2530 instruction, and it does not have a label. */
2531 next_insn = bfd_get_16 (abfd, contents + i + 2);
2532 next_op = sh_insn_info (next_insn);
2533 if (next_op != NULL
2534 && (next_op->flags & (LOAD | STORE)) == 0
2535 && ! sh_insns_conflict (insn, op, next_insn, next_op))
2536 {
2537 bfd_boolean ok;
2538
2539 /* NEXT_INSN is not itself a load/store instruction,
2540 and it does not conflict with INSN. */
2541
2542 ok = TRUE;
2543
2544 /* If PREV_INSN is a load, and it sets a register
2545 which NEXT_INSN uses, then putting NEXT_INSN
2546 immediately after PREV_INSN will cause a pipeline
2547 bubble, so there is no reason to make this swap. */
2548 if (prev_op != NULL
2549 && (prev_op->flags & LOAD) != 0
2550 && sh_load_use (prev_insn, prev_op, next_insn, next_op))
2551 ok = FALSE;
2552
2553 /* If INSN is a load, and it sets a register which
2554 the insn after NEXT_INSN uses, then doing the
2555 swap will cause a pipeline bubble, so there is no
2556 reason to make the swap. However, if the insn
2557 after NEXT_INSN is itself a load or store
2558 instruction, then it is misaligned, so
2559 optimistically hope that it will be swapped
2560 itself, and just live with the pipeline bubble if
2561 it isn't. */
2562 if (ok
2563 && i + 4 < stop
2564 && (op->flags & LOAD) != 0)
2565 {
2566 unsigned int next2_insn;
2567 const struct sh_opcode *next2_op;
2568
2569 next2_insn = bfd_get_16 (abfd, contents + i + 4);
2570 next2_op = sh_insn_info (next2_insn);
2571 if (next2_op == NULL
2572 || ((next2_op->flags & (LOAD | STORE)) == 0 && sh_load_use (insn, op, next2_insn, next2_op)))
2573 ok = FALSE;
2574 }
2575
2576 if (ok)
2577 {
2578 if (! (*swap) (abfd, sec, relocs, contents, i))
2579 return FALSE;
2580 *pswapped = TRUE;
2581 continue;
2582 }
2583 }
2584 }
2585 }
2586
2587 return TRUE;
2588 }
2589 #endif /* not COFF_IMAGE_WITH_PE */
2590
2591 /* Look for loads and stores which we can align to four byte
2592 boundaries. See the longer comment above sh_relax_section for why
2593 this is desirable. This sets *PSWAPPED if some instruction was
2594 swapped. */
2595
2596 static bfd_boolean
2597 sh_align_loads (abfd, sec, internal_relocs, contents, pswapped)
2598 bfd *abfd;
2599 asection *sec;
2600 struct internal_reloc *internal_relocs;
2601 bfd_byte *contents;
2602 bfd_boolean *pswapped;
2603 {
2604 struct internal_reloc *irel, *irelend;
2605 bfd_vma *labels = NULL;
2606 bfd_vma *label, *label_end;
2607 bfd_size_type amt;
2608
2609 *pswapped = FALSE;
2610
2611 irelend = internal_relocs + sec->reloc_count;
2612
2613 /* Get all the addresses with labels on them. */
2614 amt = (bfd_size_type) sec->reloc_count * sizeof (bfd_vma);
2615 labels = (bfd_vma *) bfd_malloc (amt);
2616 if (labels == NULL)
2617 goto error_return;
2618 label_end = labels;
2619 for (irel = internal_relocs; irel < irelend; irel++)
2620 {
2621 if (irel->r_type == R_SH_LABEL)
2622 {
2623 *label_end = irel->r_vaddr - sec->vma;
2624 ++label_end;
2625 }
2626 }
2627
2628 /* Note that the assembler currently always outputs relocs in
2629 address order. If that ever changes, this code will need to sort
2630 the label values and the relocs. */
2631
2632 label = labels;
2633
2634 for (irel = internal_relocs; irel < irelend; irel++)
2635 {
2636 bfd_vma start, stop;
2637
2638 if (irel->r_type != R_SH_CODE)
2639 continue;
2640
2641 start = irel->r_vaddr - sec->vma;
2642
2643 for (irel++; irel < irelend; irel++)
2644 if (irel->r_type == R_SH_DATA)
2645 break;
2646 if (irel < irelend)
2647 stop = irel->r_vaddr - sec->vma;
2648 else
2649 stop = sec->size;
2650
2651 if (! _bfd_sh_align_load_span (abfd, sec, contents, sh_swap_insns,
2652 (PTR) internal_relocs, &label,
2653 label_end, start, stop, pswapped))
2654 goto error_return;
2655 }
2656
2657 free (labels);
2658
2659 return TRUE;
2660
2661 error_return:
2662 if (labels != NULL)
2663 free (labels);
2664 return FALSE;
2665 }
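/* The effect of the loop above, with illustrative offsets: given relocs
   R_SH_CODE at offset 0x0, R_SH_DATA at 0x40 and R_SH_CODE at 0x48,
   the instruction span [0x0,0x40) and the span from 0x48 to the end of
   the section are each handed to _bfd_sh_align_load_span, while the
   bytes in between (typically a constant pool) are skipped; the
   R_SH_LABEL addresses collected into LABELS keep insns that carry a
   label from being swapped.  */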
2666
2667 /* Swap two SH instructions. */
2668
2669 static bfd_boolean
2670 sh_swap_insns (abfd, sec, relocs, contents, addr)
2671 bfd *abfd;
2672 asection *sec;
2673 PTR relocs;
2674 bfd_byte *contents;
2675 bfd_vma addr;
2676 {
2677 struct internal_reloc *internal_relocs = (struct internal_reloc *) relocs;
2678 unsigned short i1, i2;
2679 struct internal_reloc *irel, *irelend;
2680
2681 /* Swap the instructions themselves. */
2682 i1 = bfd_get_16 (abfd, contents + addr);
2683 i2 = bfd_get_16 (abfd, contents + addr + 2);
2684 bfd_put_16 (abfd, (bfd_vma) i2, contents + addr);
2685 bfd_put_16 (abfd, (bfd_vma) i1, contents + addr + 2);
2686
2687 /* Adjust all reloc addresses. */
2688 irelend = internal_relocs + sec->reloc_count;
2689 for (irel = internal_relocs; irel < irelend; irel++)
2690 {
2691 int type, add;
2692
2693 /* There are a few special types of relocs that we don't want to
2694 adjust. These relocs do not apply to the instruction itself,
2695 but are only associated with the address. */
2696 type = irel->r_type;
2697 if (type == R_SH_ALIGN
2698 || type == R_SH_CODE
2699 || type == R_SH_DATA
2700 || type == R_SH_LABEL)
2701 continue;
2702
2703 /* If an R_SH_USES reloc points to one of the addresses being
2704 swapped, we must adjust it. It would be incorrect to do this
2705 for a jump, though, since we want to execute both
2706 instructions after the jump. (We have avoided swapping
2707 around a label, so the jump will not wind up executing an
2708 instruction it shouldn't). */
2709 if (type == R_SH_USES)
2710 {
2711 bfd_vma off;
2712
2713 off = irel->r_vaddr - sec->vma + 4 + irel->r_offset;
2714 if (off == addr)
2715 irel->r_offset += 2;
2716 else if (off == addr + 2)
2717 irel->r_offset -= 2;
2718 }
2719
2720 if (irel->r_vaddr - sec->vma == addr)
2721 {
2722 irel->r_vaddr += 2;
2723 add = -2;
2724 }
2725 else if (irel->r_vaddr - sec->vma == addr + 2)
2726 {
2727 irel->r_vaddr -= 2;
2728 add = 2;
2729 }
2730 else
2731 add = 0;
2732
2733 if (add != 0)
2734 {
2735 bfd_byte *loc;
2736 unsigned short insn, oinsn;
2737 bfd_boolean overflow;
2738
2739 loc = contents + irel->r_vaddr - sec->vma;
2740 overflow = FALSE;
2741 switch (type)
2742 {
2743 default:
2744 break;
2745
2746 case R_SH_PCDISP8BY2:
2747 case R_SH_PCRELIMM8BY2:
2748 insn = bfd_get_16 (abfd, loc);
2749 oinsn = insn;
2750 insn += add / 2;
2751 if ((oinsn & 0xff00) != (insn & 0xff00))
2752 overflow = TRUE;
2753 bfd_put_16 (abfd, (bfd_vma) insn, loc);
2754 break;
2755
2756 case R_SH_PCDISP:
2757 insn = bfd_get_16 (abfd, loc);
2758 oinsn = insn;
2759 insn += add / 2;
2760 if ((oinsn & 0xf000) != (insn & 0xf000))
2761 overflow = TRUE;
2762 bfd_put_16 (abfd, (bfd_vma) insn, loc);
2763 break;
2764
2765 case R_SH_PCRELIMM8BY4:
2766 /* This reloc ignores the least significant 2 bits of
2767 the program counter before adding in the offset.
2768 This means that if ADDR is on a four byte boundary, the
2769 swap will not affect the offset. If ADDR is not on a
2770 four byte boundary, then the instruction will be crossing
2771 a four byte boundary, and must be adjusted. */
2772 if ((addr & 3) != 0)
2773 {
2774 insn = bfd_get_16 (abfd, loc);
2775 oinsn = insn;
2776 insn += add / 2;
2777 if ((oinsn & 0xff00) != (insn & 0xff00))
2778 overflow = TRUE;
2779 bfd_put_16 (abfd, (bfd_vma) insn, loc);
2780 }
2781
2782 break;
2783 }
2784
2785 if (overflow)
2786 {
2787 ((*_bfd_error_handler)
2788 ("%B: 0x%lx: fatal: reloc overflow while relaxing",
2789 abfd, (unsigned long) irel->r_vaddr));
2790 bfd_set_error (bfd_error_bad_value);
2791 return FALSE;
2792 }
2793 }
2794 }
2795
2796 return TRUE;
2797 }
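/* To illustrate the adjustments above: suppose the misaligned load is a
   mov.w @(disp,pc),rn (reloc type R_SH_PCRELIMM8BY2) at ADDR+2 and it
   is exchanged with the insn at ADDR.  Its reloc moves with it
   (r_vaddr -= 2, add == 2), and since the insn is now two bytes earlier
   and hence two bytes further from its target, the 8-bit displacement
   (counted in 2-byte units) is incremented by add / 2 == 1, with the
   high byte checked for overflow.  Similarly, an R_SH_USES reloc whose
   computed target (r_vaddr - vma + 4 + r_offset) was ADDR+2 gets
   r_offset -= 2 so that it keeps pointing at the moved insn.  */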
2798 \f
2799 /* This is a modification of _bfd_coff_generic_relocate_section, which
2800 will handle SH relaxing. */
2801
2802 static bfd_boolean
2803 sh_relocate_section (output_bfd, info, input_bfd, input_section, contents,
2804 relocs, syms, sections)
2805 bfd *output_bfd ATTRIBUTE_UNUSED;
2806 struct bfd_link_info *info;
2807 bfd *input_bfd;
2808 asection *input_section;
2809 bfd_byte *contents;
2810 struct internal_reloc *relocs;
2811 struct internal_syment *syms;
2812 asection **sections;
2813 {
2814 struct internal_reloc *rel;
2815 struct internal_reloc *relend;
2816
2817 rel = relocs;
2818 relend = rel + input_section->reloc_count;
2819 for (; rel < relend; rel++)
2820 {
2821 long symndx;
2822 struct coff_link_hash_entry *h;
2823 struct internal_syment *sym;
2824 bfd_vma addend;
2825 bfd_vma val;
2826 reloc_howto_type *howto;
2827 bfd_reloc_status_type rstat;
2828
2829 /* Almost all relocs have to do with relaxing. If any work must
2830 be done for them, it has been done in sh_relax_section. */
2831 if (rel->r_type != R_SH_IMM32
2832 #ifdef COFF_WITH_PE
2833 && rel->r_type != R_SH_IMM32CE
2834 && rel->r_type != R_SH_IMAGEBASE
2835 #endif
2836 && rel->r_type != R_SH_PCDISP)
2837 continue;
2838
2839 symndx = rel->r_symndx;
2840
2841 if (symndx == -1)
2842 {
2843 h = NULL;
2844 sym = NULL;
2845 }
2846 else
2847 {
2848 if (symndx < 0
2849 || (unsigned long) symndx >= obj_raw_syment_count (input_bfd))
2850 {
2851 (*_bfd_error_handler)
2852 ("%B: illegal symbol index %ld in relocs",
2853 input_bfd, symndx);
2854 bfd_set_error (bfd_error_bad_value);
2855 return FALSE;
2856 }
2857 h = obj_coff_sym_hashes (input_bfd)[symndx];
2858 sym = syms + symndx;
2859 }
2860
2861 if (sym != NULL && sym->n_scnum != 0)
2862 addend = - sym->n_value;
2863 else
2864 addend = 0;
2865
2866 if (rel->r_type == R_SH_PCDISP)
2867 addend -= 4;
2868
2869 if (rel->r_type >= SH_COFF_HOWTO_COUNT)
2870 howto = NULL;
2871 else
2872 howto = &sh_coff_howtos[rel->r_type];
2873
2874 if (howto == NULL)
2875 {
2876 bfd_set_error (bfd_error_bad_value);
2877 return FALSE;
2878 }
2879
2880 #ifdef COFF_WITH_PE
2881 if (rel->r_type == R_SH_IMAGEBASE)
2882 addend -= pe_data (input_section->output_section->owner)->pe_opthdr.ImageBase;
2883 #endif
2884
2885 val = 0;
2886
2887 if (h == NULL)
2888 {
2889 asection *sec;
2890
2891 /* There is nothing to do for an internal PCDISP reloc. */
2892 if (rel->r_type == R_SH_PCDISP)
2893 continue;
2894
2895 if (symndx == -1)
2896 {
2897 sec = bfd_abs_section_ptr;
2898 val = 0;
2899 }
2900 else
2901 {
2902 sec = sections[symndx];
2903 val = (sec->output_section->vma
2904 + sec->output_offset
2905 + sym->n_value
2906 - sec->vma);
2907 }
2908 }
2909 else
2910 {
2911 if (h->root.type == bfd_link_hash_defined
2912 || h->root.type == bfd_link_hash_defweak)
2913 {
2914 asection *sec;
2915
2916 sec = h->root.u.def.section;
2917 val = (h->root.u.def.value
2918 + sec->output_section->vma
2919 + sec->output_offset);
2920 }
2921 else if (! info->relocatable)
2922 {
2923 if (! ((*info->callbacks->undefined_symbol)
2924 (info, h->root.root.string, input_bfd, input_section,
2925 rel->r_vaddr - input_section->vma, TRUE)))
2926 return FALSE;
2927 }
2928 }
2929
2930 rstat = _bfd_final_link_relocate (howto, input_bfd, input_section,
2931 contents,
2932 rel->r_vaddr - input_section->vma,
2933 val, addend);
2934
2935 switch (rstat)
2936 {
2937 default:
2938 abort ();
2939 case bfd_reloc_ok:
2940 break;
2941 case bfd_reloc_overflow:
2942 {
2943 const char *name;
2944 char buf[SYMNMLEN + 1];
2945
2946 if (symndx == -1)
2947 name = "*ABS*";
2948 else if (h != NULL)
2949 name = h->root.root.string;
2950 else if (sym->_n._n_n._n_zeroes == 0
2951 && sym->_n._n_n._n_offset != 0)
2952 name = obj_coff_strings (input_bfd) + sym->_n._n_n._n_offset;
2953 else
2954 {
2955 strncpy (buf, sym->_n._n_name, SYMNMLEN);
2956 buf[SYMNMLEN] = '\0';
2957 name = buf;
2958 }
2959
2960 if (! ((*info->callbacks->reloc_overflow)
2961 (info, name, howto->name, (bfd_vma) 0, input_bfd,
2962 input_section, rel->r_vaddr - input_section->vma)))
2963 return FALSE;
2964 }
2965 }
2966 }
2967
2968 return TRUE;
2969 }
2970
2971 /* This is a version of bfd_generic_get_relocated_section_contents
2972 which uses sh_relocate_section. */
2973
2974 static bfd_byte *
2975 sh_coff_get_relocated_section_contents (output_bfd, link_info, link_order,
2976 data, relocatable, symbols)
2977 bfd *output_bfd;
2978 struct bfd_link_info *link_info;
2979 struct bfd_link_order *link_order;
2980 bfd_byte *data;
2981 bfd_boolean relocatable;
2982 asymbol **symbols;
2983 {
2984 asection *input_section = link_order->u.indirect.section;
2985 bfd *input_bfd = input_section->owner;
2986 asection **sections = NULL;
2987 struct internal_reloc *internal_relocs = NULL;
2988 struct internal_syment *internal_syms = NULL;
2989
2990 /* We only need to handle the case of relaxing, or of having a
2991 particular set of section contents, specially. */
2992 if (relocatable
2993 || coff_section_data (input_bfd, input_section) == NULL
2994 || coff_section_data (input_bfd, input_section)->contents == NULL)
2995 return bfd_generic_get_relocated_section_contents (output_bfd, link_info,
2996 link_order, data,
2997 relocatable,
2998 symbols);
2999
3000 memcpy (data, coff_section_data (input_bfd, input_section)->contents,
3001 (size_t) input_section->size);
3002
3003 if ((input_section->flags & SEC_RELOC) != 0
3004 && input_section->reloc_count > 0)
3005 {
3006 bfd_size_type symesz = bfd_coff_symesz (input_bfd);
3007 bfd_byte *esym, *esymend;
3008 struct internal_syment *isymp;
3009 asection **secpp;
3010 bfd_size_type amt;
3011
3012 if (! _bfd_coff_get_external_symbols (input_bfd))
3013 goto error_return;
3014
3015 internal_relocs = (_bfd_coff_read_internal_relocs
3016 (input_bfd, input_section, FALSE, (bfd_byte *) NULL,
3017 FALSE, (struct internal_reloc *) NULL));
3018 if (internal_relocs == NULL)
3019 goto error_return;
3020
3021 amt = obj_raw_syment_count (input_bfd);
3022 amt *= sizeof (struct internal_syment);
3023 internal_syms = (struct internal_syment *) bfd_malloc (amt);
3024 if (internal_syms == NULL)
3025 goto error_return;
3026
3027 amt = obj_raw_syment_count (input_bfd);
3028 amt *= sizeof (asection *);
3029 sections = (asection **) bfd_malloc (amt);
3030 if (sections == NULL)
3031 goto error_return;
3032
3033 isymp = internal_syms;
3034 secpp = sections;
3035 esym = (bfd_byte *) obj_coff_external_syms (input_bfd);
3036 esymend = esym + obj_raw_syment_count (input_bfd) * symesz;
3037 while (esym < esymend)
3038 {
3039 bfd_coff_swap_sym_in (input_bfd, (PTR) esym, (PTR) isymp);
3040
3041 if (isymp->n_scnum != 0)
3042 *secpp = coff_section_from_bfd_index (input_bfd, isymp->n_scnum);
3043 else
3044 {
3045 if (isymp->n_value == 0)
3046 *secpp = bfd_und_section_ptr;
3047 else
3048 *secpp = bfd_com_section_ptr;
3049 }
3050
3051 esym += (isymp->n_numaux + 1) * symesz;
3052 secpp += isymp->n_numaux + 1;
3053 isymp += isymp->n_numaux + 1;
3054 }
3055
3056 if (! sh_relocate_section (output_bfd, link_info, input_bfd,
3057 input_section, data, internal_relocs,
3058 internal_syms, sections))
3059 goto error_return;
3060
3061 free (sections);
3062 sections = NULL;
3063 free (internal_syms);
3064 internal_syms = NULL;
3065 free (internal_relocs);
3066 internal_relocs = NULL;
3067 }
3068
3069 return data;
3070
3071 error_return:
3072 if (internal_relocs != NULL)
3073 free (internal_relocs);
3074 if (internal_syms != NULL)
3075 free (internal_syms);
3076 if (sections != NULL)
3077 free (sections);
3078 return NULL;
3079 }
3080
3081 /* The target vectors. */
3082
3083 #ifndef TARGET_SHL_SYM
3084 CREATE_BIG_COFF_TARGET_VEC (shcoff_vec, "coff-sh", BFD_IS_RELAXABLE, 0, '_', NULL, COFF_SWAP_TABLE)
3085 #endif
3086
3087 #ifdef TARGET_SHL_SYM
3088 #define TARGET_SYM TARGET_SHL_SYM
3089 #else
3090 #define TARGET_SYM shlcoff_vec
3091 #endif
3092
3093 #ifndef TARGET_SHL_NAME
3094 #define TARGET_SHL_NAME "coff-shl"
3095 #endif
3096
3097 #ifdef COFF_WITH_PE
3098 CREATE_LITTLE_COFF_TARGET_VEC (TARGET_SYM, TARGET_SHL_NAME, BFD_IS_RELAXABLE,
3099 SEC_CODE | SEC_DATA, '_', NULL, COFF_SWAP_TABLE);
3100 #else
3101 CREATE_LITTLE_COFF_TARGET_VEC (TARGET_SYM, TARGET_SHL_NAME, BFD_IS_RELAXABLE,
3102 0, '_', NULL, COFF_SWAP_TABLE)
3103 #endif
3104
3105 #ifndef TARGET_SHL_SYM
3106 static const bfd_target * coff_small_object_p PARAMS ((bfd *));
3107 static bfd_boolean coff_small_new_section_hook PARAMS ((bfd *, asection *));
3108 /* Some people want versions of the SH COFF target which do not align
3109 to 16 byte boundaries. We implement that by adding a couple of new
3110 target vectors. These are just like the ones above, but they
3111 change the default section alignment. To generate them in the
3112 assembler, use -small. To use them in the linker, use -b
3113 coff-sh{l}-small and -oformat coff-sh{l}-small.
3114
3115 Yes, this is a horrible hack. A general solution for setting
3116 section alignment in COFF is rather complex. ELF handles this
3117 correctly. */
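/* As a concrete, purely illustrative invocation - the tool names below
   are placeholders for whatever your build calls the SH COFF assembler
   and linker:

       sh-as -small foo.s -o foo.o
       sh-ld -b coff-sh-small -oformat coff-sh-small foo.o -o foo

   This selects the coff-sh-small vectors defined below instead of the
   default 16 byte alignment targets above.  */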
3118
3119 /* Only recognize the small versions if the target was not defaulted.
3120 Otherwise we won't recognize the non-default endianness. */
3121
3122 static const bfd_target *
3123 coff_small_object_p (abfd)
3124 bfd *abfd;
3125 {
3126 if (abfd->target_defaulted)
3127 {
3128 bfd_set_error (bfd_error_wrong_format);
3129 return NULL;
3130 }
3131 return coff_object_p (abfd);
3132 }
3133
3134 /* Set the section alignment for the small versions. */
3135
3136 static bfd_boolean
3137 coff_small_new_section_hook (abfd, section)
3138 bfd *abfd;
3139 asection *section;
3140 {
3141 if (! coff_new_section_hook (abfd, section))
3142 return FALSE;
3143
3144 /* We must align to at least a four byte boundary, because longword
3145 accesses must be on a four byte boundary. */
3146 if (section->alignment_power == COFF_DEFAULT_SECTION_ALIGNMENT_POWER)
3147 section->alignment_power = 2;
3148
3149 return TRUE;
3150 }
3151
3152 /* This is copied from bfd_coff_std_swap_table so that we can change
3153 the default section alignment power. */
3154
3155 static const bfd_coff_backend_data bfd_coff_small_swap_table =
3156 {
3157 coff_swap_aux_in, coff_swap_sym_in, coff_swap_lineno_in,
3158 coff_swap_aux_out, coff_swap_sym_out,
3159 coff_swap_lineno_out, coff_swap_reloc_out,
3160 coff_swap_filehdr_out, coff_swap_aouthdr_out,
3161 coff_swap_scnhdr_out,
3162 FILHSZ, AOUTSZ, SCNHSZ, SYMESZ, AUXESZ, RELSZ, LINESZ, FILNMLEN,
3163 #ifdef COFF_LONG_FILENAMES
3164 TRUE,
3165 #else
3166 FALSE,
3167 #endif
3168 #ifdef COFF_LONG_SECTION_NAMES
3169 TRUE,
3170 #else
3171 FALSE,
3172 #endif
3173 2,
3174 #ifdef COFF_FORCE_SYMBOLS_IN_STRINGS
3175 TRUE,
3176 #else
3177 FALSE,
3178 #endif
3179 #ifdef COFF_DEBUG_STRING_WIDE_PREFIX
3180 4,
3181 #else
3182 2,
3183 #endif
3184 coff_swap_filehdr_in, coff_swap_aouthdr_in, coff_swap_scnhdr_in,
3185 coff_swap_reloc_in, coff_bad_format_hook, coff_set_arch_mach_hook,
3186 coff_mkobject_hook, styp_to_sec_flags, coff_set_alignment_hook,
3187 coff_slurp_symbol_table, symname_in_debug_hook, coff_pointerize_aux_hook,
3188 coff_print_aux, coff_reloc16_extra_cases, coff_reloc16_estimate,
3189 coff_classify_symbol, coff_compute_section_file_positions,
3190 coff_start_final_link, coff_relocate_section, coff_rtype_to_howto,
3191 coff_adjust_symndx, coff_link_add_one_symbol,
3192 coff_link_output_has_begun, coff_final_link_postscript
3193 };
3194
3195 #define coff_small_close_and_cleanup \
3196 coff_close_and_cleanup
3197 #define coff_small_bfd_free_cached_info \
3198 coff_bfd_free_cached_info
3199 #define coff_small_get_section_contents \
3200 coff_get_section_contents
3201 #define coff_small_get_section_contents_in_window \
3202 coff_get_section_contents_in_window
3203
3204 extern const bfd_target shlcoff_small_vec;
3205
3206 const bfd_target shcoff_small_vec =
3207 {
3208 "coff-sh-small", /* name */
3209 bfd_target_coff_flavour,
3210 BFD_ENDIAN_BIG, /* data byte order is big */
3211 BFD_ENDIAN_BIG, /* header byte order is big */
3212
3213 (HAS_RELOC | EXEC_P | /* object flags */
3214 HAS_LINENO | HAS_DEBUG |
3215 HAS_SYMS | HAS_LOCALS | WP_TEXT | BFD_IS_RELAXABLE),
3216
3217 (SEC_HAS_CONTENTS | SEC_ALLOC | SEC_LOAD | SEC_RELOC),
3218 '_', /* leading symbol underscore */
3219 '/', /* ar_pad_char */
3220 15, /* ar_max_namelen */
3221 bfd_getb64, bfd_getb_signed_64, bfd_putb64,
3222 bfd_getb32, bfd_getb_signed_32, bfd_putb32,
3223 bfd_getb16, bfd_getb_signed_16, bfd_putb16, /* data */
3224 bfd_getb64, bfd_getb_signed_64, bfd_putb64,
3225 bfd_getb32, bfd_getb_signed_32, bfd_putb32,
3226 bfd_getb16, bfd_getb_signed_16, bfd_putb16, /* hdrs */
3227
3228 {_bfd_dummy_target, coff_small_object_p, /* bfd_check_format */
3229 bfd_generic_archive_p, _bfd_dummy_target},
3230 {bfd_false, coff_mkobject, _bfd_generic_mkarchive, /* bfd_set_format */
3231 bfd_false},
3232 {bfd_false, coff_write_object_contents, /* bfd_write_contents */
3233 _bfd_write_archive_contents, bfd_false},
3234
3235 BFD_JUMP_TABLE_GENERIC (coff_small),
3236 BFD_JUMP_TABLE_COPY (coff),
3237 BFD_JUMP_TABLE_CORE (_bfd_nocore),
3238 BFD_JUMP_TABLE_ARCHIVE (_bfd_archive_coff),
3239 BFD_JUMP_TABLE_SYMBOLS (coff),
3240 BFD_JUMP_TABLE_RELOCS (coff),
3241 BFD_JUMP_TABLE_WRITE (coff),
3242 BFD_JUMP_TABLE_LINK (coff),
3243 BFD_JUMP_TABLE_DYNAMIC (_bfd_nodynamic),
3244
3245 & shlcoff_small_vec,
3246
3247 (PTR) &bfd_coff_small_swap_table
3248 };
3249
3250 const bfd_target shlcoff_small_vec =
3251 {
3252 "coff-shl-small", /* name */
3253 bfd_target_coff_flavour,
3254 BFD_ENDIAN_LITTLE, /* data byte order is little */
3255 BFD_ENDIAN_LITTLE, /* header byte order is little endian too */
3256
3257 (HAS_RELOC | EXEC_P | /* object flags */
3258 HAS_LINENO | HAS_DEBUG |
3259 HAS_SYMS | HAS_LOCALS | WP_TEXT | BFD_IS_RELAXABLE),
3260
3261 (SEC_HAS_CONTENTS | SEC_ALLOC | SEC_LOAD | SEC_RELOC),
3262 '_', /* leading symbol underscore */
3263 '/', /* ar_pad_char */
3264 15, /* ar_max_namelen */
3265 bfd_getl64, bfd_getl_signed_64, bfd_putl64,
3266 bfd_getl32, bfd_getl_signed_32, bfd_putl32,
3267 bfd_getl16, bfd_getl_signed_16, bfd_putl16, /* data */
3268 bfd_getl64, bfd_getl_signed_64, bfd_putl64,
3269 bfd_getl32, bfd_getl_signed_32, bfd_putl32,
3270 bfd_getl16, bfd_getl_signed_16, bfd_putl16, /* hdrs */
3271
3272 {_bfd_dummy_target, coff_small_object_p, /* bfd_check_format */
3273 bfd_generic_archive_p, _bfd_dummy_target},
3274 {bfd_false, coff_mkobject, _bfd_generic_mkarchive, /* bfd_set_format */
3275 bfd_false},
3276 {bfd_false, coff_write_object_contents, /* bfd_write_contents */
3277 _bfd_write_archive_contents, bfd_false},
3278
3279 BFD_JUMP_TABLE_GENERIC (coff_small),
3280 BFD_JUMP_TABLE_COPY (coff),
3281 BFD_JUMP_TABLE_CORE (_bfd_nocore),
3282 BFD_JUMP_TABLE_ARCHIVE (_bfd_archive_coff),
3283 BFD_JUMP_TABLE_SYMBOLS (coff),
3284 BFD_JUMP_TABLE_RELOCS (coff),
3285 BFD_JUMP_TABLE_WRITE (coff),
3286 BFD_JUMP_TABLE_LINK (coff),
3287 BFD_JUMP_TABLE_DYNAMIC (_bfd_nodynamic),
3288
3289 & shcoff_small_vec,
3290
3291 (PTR) &bfd_coff_small_swap_table
3292 };
3293 #endif