1 /* BFD back-end for Hitachi Super-H COFF binaries.
2 Copyright 1993, 94, 95, 96, 97, 98, 1999, 2000 Free Software Foundation, Inc.
3 Contributed by Cygnus Support.
4 Written by Steve Chamberlain, <sac@cygnus.com>.
5 Relaxing code written by Ian Lance Taylor, <ian@cygnus.com>.
6
7 This file is part of BFD, the Binary File Descriptor library.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 2 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program; if not, write to the Free Software
21 Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
22
23 #include "bfd.h"
24 #include "sysdep.h"
25 #include "libbfd.h"
26 #include "bfdlink.h"
27 #include "coff/sh.h"
28 #include "coff/internal.h"
29 #include "libcoff.h"
30
31 /* Internal functions. */
32 static bfd_reloc_status_type sh_reloc
33 PARAMS ((bfd *, arelent *, asymbol *, PTR, asection *, bfd *, char **));
34 static long get_symbol_value PARAMS ((asymbol *));
35 static boolean sh_relax_section
36 PARAMS ((bfd *, asection *, struct bfd_link_info *, boolean *));
37 static boolean sh_relax_delete_bytes
38 PARAMS ((bfd *, asection *, bfd_vma, int));
39 static const struct sh_opcode *sh_insn_info PARAMS ((unsigned int));
40 static boolean sh_align_loads
41 PARAMS ((bfd *, asection *, struct internal_reloc *, bfd_byte *, boolean *));
42 static boolean sh_swap_insns
43 PARAMS ((bfd *, asection *, PTR, bfd_byte *, bfd_vma));
44 static boolean sh_relocate_section
45 PARAMS ((bfd *, struct bfd_link_info *, bfd *, asection *, bfd_byte *,
46 struct internal_reloc *, struct internal_syment *, asection **));
47 static bfd_byte *sh_coff_get_relocated_section_contents
48 PARAMS ((bfd *, struct bfd_link_info *, struct bfd_link_order *,
49 bfd_byte *, boolean, asymbol **));
50
51 /* Default section alignment to 2**4. */
52 #define COFF_DEFAULT_SECTION_ALIGNMENT_POWER (4)
53
54 /* Generate long file names. */
55 #define COFF_LONG_FILENAMES
56
57 /* The supported relocations. There are a lot of relocations defined
58 in coff/internal.h which we do not expect to ever see. */
59 static reloc_howto_type sh_coff_howtos[] =
60 {
61 EMPTY_HOWTO (0),
62 EMPTY_HOWTO (1),
63 EMPTY_HOWTO (2),
64 EMPTY_HOWTO (3), /* R_SH_PCREL8 */
65 EMPTY_HOWTO (4), /* R_SH_PCREL16 */
66 EMPTY_HOWTO (5), /* R_SH_HIGH8 */
67 EMPTY_HOWTO (6), /* R_SH_IMM24 */
68 EMPTY_HOWTO (7), /* R_SH_LOW16 */
69 EMPTY_HOWTO (8),
70 EMPTY_HOWTO (9), /* R_SH_PCDISP8BY4 */
71
72 HOWTO (R_SH_PCDISP8BY2, /* type */
73 1, /* rightshift */
74 1, /* size (0 = byte, 1 = short, 2 = long) */
75 8, /* bitsize */
76 true, /* pc_relative */
77 0, /* bitpos */
78 complain_overflow_signed, /* complain_on_overflow */
79 sh_reloc, /* special_function */
80 "r_pcdisp8by2", /* name */
81 true, /* partial_inplace */
82 0xff, /* src_mask */
83 0xff, /* dst_mask */
84 true), /* pcrel_offset */
85
86 EMPTY_HOWTO (11), /* R_SH_PCDISP8 */
87
88 HOWTO (R_SH_PCDISP, /* type */
89 1, /* rightshift */
90 1, /* size (0 = byte, 1 = short, 2 = long) */
91 12, /* bitsize */
92 true, /* pc_relative */
93 0, /* bitpos */
94 complain_overflow_signed, /* complain_on_overflow */
95 sh_reloc, /* special_function */
96 "r_pcdisp12by2", /* name */
97 true, /* partial_inplace */
98 0xfff, /* src_mask */
99 0xfff, /* dst_mask */
100 true), /* pcrel_offset */
101
102 EMPTY_HOWTO (13),
103
104 HOWTO (R_SH_IMM32, /* type */
105 0, /* rightshift */
106 2, /* size (0 = byte, 1 = short, 2 = long) */
107 32, /* bitsize */
108 false, /* pc_relative */
109 0, /* bitpos */
110 complain_overflow_bitfield, /* complain_on_overflow */
111 sh_reloc, /* special_function */
112 "r_imm32", /* name */
113 true, /* partial_inplace */
114 0xffffffff, /* src_mask */
115 0xffffffff, /* dst_mask */
116 false), /* pcrel_offset */
117
118 EMPTY_HOWTO (15),
119 EMPTY_HOWTO (16), /* R_SH_IMM8 */
120 EMPTY_HOWTO (17), /* R_SH_IMM8BY2 */
121 EMPTY_HOWTO (18), /* R_SH_IMM8BY4 */
122 EMPTY_HOWTO (19), /* R_SH_IMM4 */
123 EMPTY_HOWTO (20), /* R_SH_IMM4BY2 */
124 EMPTY_HOWTO (21), /* R_SH_IMM4BY4 */
125
126 HOWTO (R_SH_PCRELIMM8BY2, /* type */
127 1, /* rightshift */
128 1, /* size (0 = byte, 1 = short, 2 = long) */
129 8, /* bitsize */
130 true, /* pc_relative */
131 0, /* bitpos */
132 complain_overflow_unsigned, /* complain_on_overflow */
133 sh_reloc, /* special_function */
134 "r_pcrelimm8by2", /* name */
135 true, /* partial_inplace */
136 0xff, /* src_mask */
137 0xff, /* dst_mask */
138 true), /* pcrel_offset */
139
140 HOWTO (R_SH_PCRELIMM8BY4, /* type */
141 2, /* rightshift */
142 1, /* size (0 = byte, 1 = short, 2 = long) */
143 8, /* bitsize */
144 true, /* pc_relative */
145 0, /* bitpos */
146 complain_overflow_unsigned, /* complain_on_overflow */
147 sh_reloc, /* special_function */
148 "r_pcrelimm8by4", /* name */
149 true, /* partial_inplace */
150 0xff, /* src_mask */
151 0xff, /* dst_mask */
152 true), /* pcrel_offset */
153
154 HOWTO (R_SH_IMM16, /* type */
155 0, /* rightshift */
156 1, /* size (0 = byte, 1 = short, 2 = long) */
157 16, /* bitsize */
158 false, /* pc_relative */
159 0, /* bitpos */
160 complain_overflow_bitfield, /* complain_on_overflow */
161 sh_reloc, /* special_function */
162 "r_imm16", /* name */
163 true, /* partial_inplace */
164 0xffff, /* src_mask */
165 0xffff, /* dst_mask */
166 false), /* pcrel_offset */
167
168 HOWTO (R_SH_SWITCH16, /* type */
169 0, /* rightshift */
170 1, /* size (0 = byte, 1 = short, 2 = long) */
171 16, /* bitsize */
172 false, /* pc_relative */
173 0, /* bitpos */
174 complain_overflow_bitfield, /* complain_on_overflow */
175 sh_reloc, /* special_function */
176 "r_switch16", /* name */
177 true, /* partial_inplace */
178 0xffff, /* src_mask */
179 0xffff, /* dst_mask */
180 false), /* pcrel_offset */
181
182 HOWTO (R_SH_SWITCH32, /* type */
183 0, /* rightshift */
184 2, /* size (0 = byte, 1 = short, 2 = long) */
185 32, /* bitsize */
186 false, /* pc_relative */
187 0, /* bitpos */
188 complain_overflow_bitfield, /* complain_on_overflow */
189 sh_reloc, /* special_function */
190 "r_switch32", /* name */
191 true, /* partial_inplace */
192 0xffffffff, /* src_mask */
193 0xffffffff, /* dst_mask */
194 false), /* pcrel_offset */
195
196 HOWTO (R_SH_USES, /* type */
197 0, /* rightshift */
198 1, /* size (0 = byte, 1 = short, 2 = long) */
199 16, /* bitsize */
200 false, /* pc_relative */
201 0, /* bitpos */
202 complain_overflow_bitfield, /* complain_on_overflow */
203 sh_reloc, /* special_function */
204 "r_uses", /* name */
205 true, /* partial_inplace */
206 0xffff, /* src_mask */
207 0xffff, /* dst_mask */
208 false), /* pcrel_offset */
209
210 HOWTO (R_SH_COUNT, /* type */
211 0, /* rightshift */
212 2, /* size (0 = byte, 1 = short, 2 = long) */
213 32, /* bitsize */
214 false, /* pc_relative */
215 0, /* bitpos */
216 complain_overflow_bitfield, /* complain_on_overflow */
217 sh_reloc, /* special_function */
218 "r_count", /* name */
219 true, /* partial_inplace */
220 0xffffffff, /* src_mask */
221 0xffffffff, /* dst_mask */
222 false), /* pcrel_offset */
223
224 HOWTO (R_SH_ALIGN, /* type */
225 0, /* rightshift */
226 2, /* size (0 = byte, 1 = short, 2 = long) */
227 32, /* bitsize */
228 false, /* pc_relative */
229 0, /* bitpos */
230 complain_overflow_bitfield, /* complain_on_overflow */
231 sh_reloc, /* special_function */
232 "r_align", /* name */
233 true, /* partial_inplace */
234 0xffffffff, /* src_mask */
235 0xffffffff, /* dst_mask */
236 false), /* pcrel_offset */
237
238 HOWTO (R_SH_CODE, /* type */
239 0, /* rightshift */
240 2, /* size (0 = byte, 1 = short, 2 = long) */
241 32, /* bitsize */
242 false, /* pc_relative */
243 0, /* bitpos */
244 complain_overflow_bitfield, /* complain_on_overflow */
245 sh_reloc, /* special_function */
246 "r_code", /* name */
247 true, /* partial_inplace */
248 0xffffffff, /* src_mask */
249 0xffffffff, /* dst_mask */
250 false), /* pcrel_offset */
251
252 HOWTO (R_SH_DATA, /* type */
253 0, /* rightshift */
254 2, /* size (0 = byte, 1 = short, 2 = long) */
255 32, /* bitsize */
256 false, /* pc_relative */
257 0, /* bitpos */
258 complain_overflow_bitfield, /* complain_on_overflow */
259 sh_reloc, /* special_function */
260 "r_data", /* name */
261 true, /* partial_inplace */
262 0xffffffff, /* src_mask */
263 0xffffffff, /* dst_mask */
264 false), /* pcrel_offset */
265
266 HOWTO (R_SH_LABEL, /* type */
267 0, /* rightshift */
268 2, /* size (0 = byte, 1 = short, 2 = long) */
269 32, /* bitsize */
270 false, /* pc_relative */
271 0, /* bitpos */
272 complain_overflow_bitfield, /* complain_on_overflow */
273 sh_reloc, /* special_function */
274 "r_label", /* name */
275 true, /* partial_inplace */
276 0xffffffff, /* src_mask */
277 0xffffffff, /* dst_mask */
278 false), /* pcrel_offset */
279
280 HOWTO (R_SH_SWITCH8, /* type */
281 0, /* rightshift */
282 0, /* size (0 = byte, 1 = short, 2 = long) */
283 8, /* bitsize */
284 false, /* pc_relative */
285 0, /* bitpos */
286 complain_overflow_bitfield, /* complain_on_overflow */
287 sh_reloc, /* special_function */
288 "r_switch8", /* name */
289 true, /* partial_inplace */
290 0xff, /* src_mask */
291 0xff, /* dst_mask */
292 false) /* pcrel_offset */
293 };
294
295 #define SH_COFF_HOWTO_COUNT (sizeof sh_coff_howtos / sizeof sh_coff_howtos[0])
296
297 /* Check for a bad magic number. */
298 #define BADMAG(x) SHBADMAG(x)
299
300 /* Customize coffcode.h (this is not currently used). */
301 #define SH 1
302
303 /* FIXME: This should not be set here. */
304 #define __A_MAGIC_SET__
305
306 /* Swap the r_offset field in and out. */
307 #define SWAP_IN_RELOC_OFFSET bfd_h_get_32
308 #define SWAP_OUT_RELOC_OFFSET bfd_h_put_32
309
310 /* Swap out extra information in the reloc structure. */
311 #define SWAP_OUT_RELOC_EXTRA(abfd, src, dst) \
312 do \
313 { \
314 dst->r_stuff[0] = 'S'; \
315 dst->r_stuff[1] = 'C'; \
316 } \
317 while (0)
318
319 /* Get the value of a symbol, when performing a relocation. */
320
321 static long
322 get_symbol_value (symbol)
323 asymbol *symbol;
324 {
325 bfd_vma relocation;
326
327 if (bfd_is_com_section (symbol->section))
328 relocation = 0;
329 else
330 relocation = (symbol->value +
331 symbol->section->output_section->vma +
332 symbol->section->output_offset);
333
334 return relocation;
335 }
336
337 /* This macro is used in coffcode.h to get the howto corresponding to
338 an internal reloc. */
339
340 #define RTYPE2HOWTO(relent, internal) \
341 ((relent)->howto = \
342 ((internal)->r_type < SH_COFF_HOWTO_COUNT \
343 ? &sh_coff_howtos[(internal)->r_type] \
344 : (reloc_howto_type *) NULL))
345
346 /* This is the same as the macro in coffcode.h, except that it copies
347 r_offset into reloc_entry->addend for some relocs. */
348 #define CALC_ADDEND(abfd, ptr, reloc, cache_ptr) \
349 { \
350 coff_symbol_type *coffsym = (coff_symbol_type *) NULL; \
351 if (ptr && bfd_asymbol_bfd (ptr) != abfd) \
352 coffsym = (obj_symbols (abfd) \
353 + (cache_ptr->sym_ptr_ptr - symbols)); \
354 else if (ptr) \
355 coffsym = coff_symbol_from (abfd, ptr); \
356 if (coffsym != (coff_symbol_type *) NULL \
357 && coffsym->native->u.syment.n_scnum == 0) \
358 cache_ptr->addend = 0; \
359 else if (ptr && bfd_asymbol_bfd (ptr) == abfd \
360 && ptr->section != (asection *) NULL) \
361 cache_ptr->addend = - (ptr->section->vma + ptr->value); \
362 else \
363 cache_ptr->addend = 0; \
364 if ((reloc).r_type == R_SH_SWITCH8 \
365 || (reloc).r_type == R_SH_SWITCH16 \
366 || (reloc).r_type == R_SH_SWITCH32 \
367 || (reloc).r_type == R_SH_USES \
368 || (reloc).r_type == R_SH_COUNT \
369 || (reloc).r_type == R_SH_ALIGN) \
370 cache_ptr->addend = (reloc).r_offset; \
371 }
372
373 /* This is the howto function for the SH relocations. */
374
375 static bfd_reloc_status_type
376 sh_reloc (abfd, reloc_entry, symbol_in, data, input_section, output_bfd,
377 error_message)
378 bfd *abfd;
379 arelent *reloc_entry;
380 asymbol *symbol_in;
381 PTR data;
382 asection *input_section;
383 bfd *output_bfd;
384 char **error_message ATTRIBUTE_UNUSED;
385 {
386 unsigned long insn;
387 bfd_vma sym_value;
388 unsigned short r_type;
389 bfd_vma addr = reloc_entry->address;
390 bfd_byte *hit_data = addr + (bfd_byte *) data;
391
392 r_type = reloc_entry->howto->type;
393
394 if (output_bfd != NULL)
395 {
396 /* Partial linking--do nothing. */
397 reloc_entry->address += input_section->output_offset;
398 return bfd_reloc_ok;
399 }
400
401 /* Almost all relocs have to do with relaxing. If any work must be
402 done for them, it has been done in sh_relax_section. */
403 if (r_type != R_SH_IMM32
404 && (r_type != R_SH_PCDISP
405 || (symbol_in->flags & BSF_LOCAL) != 0))
406 return bfd_reloc_ok;
407
408 if (symbol_in != NULL
409 && bfd_is_und_section (symbol_in->section))
410 return bfd_reloc_undefined;
411
412 sym_value = get_symbol_value (symbol_in);
413
414 switch (r_type)
415 {
416 case R_SH_IMM32:
417 insn = bfd_get_32 (abfd, hit_data);
418 insn += sym_value + reloc_entry->addend;
419 bfd_put_32 (abfd, insn, hit_data);
420 break;
421 case R_SH_PCDISP:
422 insn = bfd_get_16 (abfd, hit_data);
423 sym_value += reloc_entry->addend;
424 sym_value -= (input_section->output_section->vma
425 + input_section->output_offset
426 + addr
427 + 4);
428 sym_value += (insn & 0xfff) << 1;
429 if (insn & 0x800)
430 sym_value -= 0x1000;
431 insn = (insn & 0xf000) | (sym_value & 0xfff);
432 bfd_put_16 (abfd, insn, hit_data);
433 if (sym_value < (bfd_vma) -0x1000 || sym_value >= 0x1000)
434 return bfd_reloc_overflow;
435 break;
436 default:
437 abort ();
438 break;
439 }
440
441 return bfd_reloc_ok;
442 }
443
444 #define coff_bfd_merge_private_bfd_data _bfd_generic_verify_endian_match
445
446 /* We can do relaxing. */
447 #define coff_bfd_relax_section sh_relax_section
448
449 /* We use the special COFF backend linker. */
450 #define coff_relocate_section sh_relocate_section
451
452 /* When relaxing, we need to use special code to get the relocated
453 section contents. */
454 #define coff_bfd_get_relocated_section_contents \
455 sh_coff_get_relocated_section_contents
456
457 #include "coffcode.h"
458 \f
459 /* This function handles relaxing on the SH.
460
461 Function calls on the SH look like this:
462
463 movl L1,r0
464 ...
465 jsr @r0
466 ...
467 L1:
468 .long function
469
470 The compiler and assembler will cooperate to create R_SH_USES
471 relocs on the jsr instructions. The r_offset field of the
472 R_SH_USES reloc is the PC relative offset to the instruction which
473 loads the register (the r_offset field is computed as though it
474 were a jump instruction, so the offset value is actually from four
475 bytes past the instruction). The linker can use this reloc to
476 determine just which function is being called, and thus decide
477 whether it is possible to replace the jsr with a bsr.
478
479 If multiple function calls are all based on a single register load
480 (i.e., the same function is called multiple times), the compiler
481 guarantees that each function call will have an R_SH_USES reloc.
482 Therefore, if the linker is able to convert each R_SH_USES reloc
483 which refers to that address, it can safely eliminate the register
484 load.
485
486 When the assembler creates an R_SH_USES reloc, it examines it to
487 determine which address is being loaded (L1 in the above example).
488 It then counts the number of references to that address, and
489 creates an R_SH_COUNT reloc at that address. The r_offset field of
490 the R_SH_COUNT reloc will be the number of references. If the
491 linker is able to eliminate a register load, it can use the
492 R_SH_COUNT reloc to see whether it can also eliminate the function
493 address.
494
495 SH relaxing also handles another, unrelated, matter. On the SH, if
496 a load or store instruction is not aligned on a four byte boundary,
497 the memory cycle interferes with the 32 bit instruction fetch,
498 causing a one cycle bubble in the pipeline. Therefore, we try to
499 align load and store instructions on four byte boundaries if we
500 can, by swapping them with one of the adjacent instructions. */
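/* A purely illustrative sketch (not taken from any particular object
   file) of the transformation described above, for the case in which
   every R_SH_USES reference to L1 is converted and the R_SH_COUNT on
   L1 drops to zero:

       before relaxing:                 after relaxing:

           mov.l   L1,r0                    bsr     function
           ...                              ...
           jsr     @r0
           ...
       L1:
           .long   function

   The jsr (tagged with an R_SH_USES reloc) becomes a bsr carrying an
   R_SH_PCDISP reloc, and both the register load and the stored
   function address are deleted by sh_relax_delete_bytes.  */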
501
502 static boolean
503 sh_relax_section (abfd, sec, link_info, again)
504 bfd *abfd;
505 asection *sec;
506 struct bfd_link_info *link_info;
507 boolean *again;
508 {
509 struct internal_reloc *internal_relocs;
510 struct internal_reloc *free_relocs = NULL;
511 boolean have_code;
512 struct internal_reloc *irel, *irelend;
513 bfd_byte *contents = NULL;
514 bfd_byte *free_contents = NULL;
515
516 *again = false;
517
518 if (link_info->relocateable
519 || (sec->flags & SEC_RELOC) == 0
520 || sec->reloc_count == 0)
521 return true;
522
523 /* If this is the first time we have been called for this section,
524 initialize the cooked size. */
525 if (sec->_cooked_size == 0)
526 sec->_cooked_size = sec->_raw_size;
527
528 internal_relocs = (_bfd_coff_read_internal_relocs
529 (abfd, sec, link_info->keep_memory,
530 (bfd_byte *) NULL, false,
531 (struct internal_reloc *) NULL));
532 if (internal_relocs == NULL)
533 goto error_return;
534 if (! link_info->keep_memory)
535 free_relocs = internal_relocs;
536
537 have_code = false;
538
539 irelend = internal_relocs + sec->reloc_count;
540 for (irel = internal_relocs; irel < irelend; irel++)
541 {
542 bfd_vma laddr, paddr, symval;
543 unsigned short insn;
544 struct internal_reloc *irelfn, *irelscan, *irelcount;
545 struct internal_syment sym;
546 bfd_signed_vma foff;
547
548 if (irel->r_type == R_SH_CODE)
549 have_code = true;
550
551 if (irel->r_type != R_SH_USES)
552 continue;
553
554 /* Get the section contents. */
555 if (contents == NULL)
556 {
557 if (coff_section_data (abfd, sec) != NULL
558 && coff_section_data (abfd, sec)->contents != NULL)
559 contents = coff_section_data (abfd, sec)->contents;
560 else
561 {
562 contents = (bfd_byte *) bfd_malloc (sec->_raw_size);
563 if (contents == NULL)
564 goto error_return;
565 free_contents = contents;
566
567 if (! bfd_get_section_contents (abfd, sec, contents,
568 (file_ptr) 0, sec->_raw_size))
569 goto error_return;
570 }
571 }
572
573 /* The r_offset field of the R_SH_USES reloc will point us to
574 the register load. The 4 is because the r_offset field is
575 computed as though it were a jump offset, and jump offsets are
576 measured from 4 bytes after the jump instruction. */
577 laddr = irel->r_vaddr - sec->vma + 4;
578 /* Careful to sign extend the 32-bit offset. */
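/* The expression below is a portable sign extension: XORing the low
   32 bits with 0x80000000 and then subtracting 0x80000000 propagates
   bit 31 into the upper bits of a bfd_vma, even when bfd_vma is wider
   than 32 bits.  For example, an r_offset of 0xfffffffc contributes
   (bfd_vma) -4 to laddr.  */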
579 laddr += ((irel->r_offset & 0xffffffff) ^ 0x80000000) - 0x80000000;
580 if (laddr >= sec->_raw_size)
581 {
582 (*_bfd_error_handler) ("%s: 0x%lx: warning: bad R_SH_USES offset",
583 bfd_get_filename (abfd),
584 (unsigned long) irel->r_vaddr);
585 continue;
586 }
587 insn = bfd_get_16 (abfd, contents + laddr);
588
589 /* If the instruction is not mov.l NN,rN, we don't know what to do. */
590 if ((insn & 0xf000) != 0xd000)
591 {
592 ((*_bfd_error_handler)
593 ("%s: 0x%lx: warning: R_SH_USES points to unrecognized insn 0x%x",
594 bfd_get_filename (abfd), (unsigned long) irel->r_vaddr, insn));
595 continue;
596 }
597
598 /* Get the address from which the register is being loaded. The
599 displacement in the mov.l instruction is quadrupled. It is a
600 displacement from four bytes after the movl instruction, but,
601 before adding in the PC address, two least significant bits
602 of the PC are cleared. We assume that the section is aligned
603 on a four byte boundary. */
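/* Worked example with hypothetical values: for insn 0xd102
   (mov.l @(8,pc),r1) at section offset laddr == 0x106, the
   displacement field is 2, so paddr becomes 2 * 4
   + ((0x106 + 4) & ~3) == 8 + 0x108 == 0x110.  */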
604 paddr = insn & 0xff;
605 paddr *= 4;
606 paddr += (laddr + 4) &~ 3;
607 if (paddr >= sec->_raw_size)
608 {
609 ((*_bfd_error_handler)
610 ("%s: 0x%lx: warning: bad R_SH_USES load offset",
611 bfd_get_filename (abfd), (unsigned long) irel->r_vaddr));
612 continue;
613 }
614
615 /* Get the reloc for the address from which the register is
616 being loaded. This reloc will tell us which function is
617 actually being called. */
618 paddr += sec->vma;
619 for (irelfn = internal_relocs; irelfn < irelend; irelfn++)
620 if (irelfn->r_vaddr == paddr
621 && irelfn->r_type == R_SH_IMM32)
622 break;
623 if (irelfn >= irelend)
624 {
625 ((*_bfd_error_handler)
626 ("%s: 0x%lx: warning: could not find expected reloc",
627 bfd_get_filename (abfd), (unsigned long) paddr));
628 continue;
629 }
630
631 /* Get the value of the symbol referred to by the reloc. */
632 if (! _bfd_coff_get_external_symbols (abfd))
633 goto error_return;
634 bfd_coff_swap_sym_in (abfd,
635 ((bfd_byte *) obj_coff_external_syms (abfd)
636 + (irelfn->r_symndx
637 * bfd_coff_symesz (abfd))),
638 &sym);
639 if (sym.n_scnum != 0 && sym.n_scnum != sec->target_index)
640 {
641 ((*_bfd_error_handler)
642 ("%s: 0x%lx: warning: symbol in unexpected section",
643 bfd_get_filename (abfd), (unsigned long) paddr));
644 continue;
645 }
646
647 if (sym.n_sclass != C_EXT)
648 {
649 symval = (sym.n_value
650 - sec->vma
651 + sec->output_section->vma
652 + sec->output_offset);
653 }
654 else
655 {
656 struct coff_link_hash_entry *h;
657
658 h = obj_coff_sym_hashes (abfd)[irelfn->r_symndx];
659 BFD_ASSERT (h != NULL);
660 if (h->root.type != bfd_link_hash_defined
661 && h->root.type != bfd_link_hash_defweak)
662 {
663 /* This appears to be a reference to an undefined
664 symbol. Just ignore it--it will be caught by the
665 regular reloc processing. */
666 continue;
667 }
668
669 symval = (h->root.u.def.value
670 + h->root.u.def.section->output_section->vma
671 + h->root.u.def.section->output_offset);
672 }
673
674 symval += bfd_get_32 (abfd, contents + paddr - sec->vma);
675
676 /* See if this function call can be shortened. */
677 foff = (symval
678 - (irel->r_vaddr
679 - sec->vma
680 + sec->output_section->vma
681 + sec->output_offset
682 + 4));
683 if (foff < -0x1000 || foff >= 0x1000)
684 {
685 /* After all that work, we can't shorten this function call. */
686 continue;
687 }
688
689 /* Shorten the function call. */
690
691 /* For simplicity of coding, we are going to modify the section
692 contents, the section relocs, and the BFD symbol table. We
693 must tell the rest of the code not to free up this
694 information. It would be possible to instead create a table
695 of changes which have to be made, as is done in coff-mips.c;
696 that would be more work, but would require less memory when
697 the linker is run. */
698
699 if (coff_section_data (abfd, sec) == NULL)
700 {
701 sec->used_by_bfd =
702 ((PTR) bfd_zalloc (abfd, sizeof (struct coff_section_tdata)));
703 if (sec->used_by_bfd == NULL)
704 goto error_return;
705 }
706
707 coff_section_data (abfd, sec)->relocs = internal_relocs;
708 coff_section_data (abfd, sec)->keep_relocs = true;
709 free_relocs = NULL;
710
711 coff_section_data (abfd, sec)->contents = contents;
712 coff_section_data (abfd, sec)->keep_contents = true;
713 free_contents = NULL;
714
715 obj_coff_keep_syms (abfd) = true;
716
717 /* Replace the jsr with a bsr. */
718
719 /* Change the R_SH_USES reloc into an R_SH_PCDISP reloc, and
720 replace the jsr with a bsr. */
721 irel->r_type = R_SH_PCDISP;
722 irel->r_symndx = irelfn->r_symndx;
723 if (sym.n_sclass != C_EXT)
724 {
725 /* If this needs to be changed because of future relaxing,
726 it will be handled here like other internal PCDISP
727 relocs. */
728 bfd_put_16 (abfd,
729 0xb000 | ((foff >> 1) & 0xfff),
730 contents + irel->r_vaddr - sec->vma);
731 }
732 else
733 {
734 /* We can't fully resolve this yet, because the external
735 symbol value may be changed by future relaxing. We let
736 the final link phase handle it. */
737 bfd_put_16 (abfd, 0xb000, contents + irel->r_vaddr - sec->vma);
738 }
739
740 /* See if there is another R_SH_USES reloc referring to the same
741 register load. */
742 for (irelscan = internal_relocs; irelscan < irelend; irelscan++)
743 if (irelscan->r_type == R_SH_USES
744 && laddr == irelscan->r_vaddr - sec->vma + 4 + irelscan->r_offset)
745 break;
746 if (irelscan < irelend)
747 {
748 /* Some other function call depends upon this register load,
749 and we have not yet converted that function call.
750 Indeed, we may never be able to convert it. There is
751 nothing else we can do at this point. */
752 continue;
753 }
754
755 /* Look for a R_SH_COUNT reloc on the location where the
756 function address is stored. Do this before deleting any
757 bytes, to avoid confusion about the address. */
758 for (irelcount = internal_relocs; irelcount < irelend; irelcount++)
759 if (irelcount->r_vaddr == paddr
760 && irelcount->r_type == R_SH_COUNT)
761 break;
762
763 /* Delete the register load. */
764 if (! sh_relax_delete_bytes (abfd, sec, laddr, 2))
765 goto error_return;
766
767 /* That will change things, so, just in case it permits some
768 other function call to come within range, we should relax
769 again. Note that this is not required, and it may be slow. */
770 *again = true;
771
772 /* Now check whether we got a COUNT reloc. */
773 if (irelcount >= irelend)
774 {
775 ((*_bfd_error_handler)
776 ("%s: 0x%lx: warning: could not find expected COUNT reloc",
777 bfd_get_filename (abfd), (unsigned long) paddr));
778 continue;
779 }
780
781 /* The number of uses is stored in the r_offset field. We've
782 just deleted one. */
783 if (irelcount->r_offset == 0)
784 {
785 ((*_bfd_error_handler) ("%s: 0x%lx: warning: bad count",
786 bfd_get_filename (abfd),
787 (unsigned long) paddr));
788 continue;
789 }
790
791 --irelcount->r_offset;
792
793 /* If there are no more uses, we can delete the address. Reload
794 the address from irelfn, in case it was changed by the
795 previous call to sh_relax_delete_bytes. */
796 if (irelcount->r_offset == 0)
797 {
798 if (! sh_relax_delete_bytes (abfd, sec,
799 irelfn->r_vaddr - sec->vma, 4))
800 goto error_return;
801 }
802
803 /* We've done all we can with that function call. */
804 }
805
806 /* Look for load and store instructions that we can align on four
807 byte boundaries. */
808 if (have_code)
809 {
810 boolean swapped;
811
812 /* Get the section contents. */
813 if (contents == NULL)
814 {
815 if (coff_section_data (abfd, sec) != NULL
816 && coff_section_data (abfd, sec)->contents != NULL)
817 contents = coff_section_data (abfd, sec)->contents;
818 else
819 {
820 contents = (bfd_byte *) bfd_malloc (sec->_raw_size);
821 if (contents == NULL)
822 goto error_return;
823 free_contents = contents;
824
825 if (! bfd_get_section_contents (abfd, sec, contents,
826 (file_ptr) 0, sec->_raw_size))
827 goto error_return;
828 }
829 }
830
831 if (! sh_align_loads (abfd, sec, internal_relocs, contents, &swapped))
832 goto error_return;
833
834 if (swapped)
835 {
836 if (coff_section_data (abfd, sec) == NULL)
837 {
838 sec->used_by_bfd =
839 ((PTR) bfd_zalloc (abfd, sizeof (struct coff_section_tdata)));
840 if (sec->used_by_bfd == NULL)
841 goto error_return;
842 }
843
844 coff_section_data (abfd, sec)->relocs = internal_relocs;
845 coff_section_data (abfd, sec)->keep_relocs = true;
846 free_relocs = NULL;
847
848 coff_section_data (abfd, sec)->contents = contents;
849 coff_section_data (abfd, sec)->keep_contents = true;
850 free_contents = NULL;
851
852 obj_coff_keep_syms (abfd) = true;
853 }
854 }
855
856 if (free_relocs != NULL)
857 {
858 free (free_relocs);
859 free_relocs = NULL;
860 }
861
862 if (free_contents != NULL)
863 {
864 if (! link_info->keep_memory)
865 free (free_contents);
866 else
867 {
868 /* Cache the section contents for coff_link_input_bfd. */
869 if (coff_section_data (abfd, sec) == NULL)
870 {
871 sec->used_by_bfd =
872 ((PTR) bfd_zalloc (abfd, sizeof (struct coff_section_tdata)));
873 if (sec->used_by_bfd == NULL)
874 goto error_return;
875 coff_section_data (abfd, sec)->relocs = NULL;
876 }
877 coff_section_data (abfd, sec)->contents = contents;
878 }
879 }
880
881 return true;
882
883 error_return:
884 if (free_relocs != NULL)
885 free (free_relocs);
886 if (free_contents != NULL)
887 free (free_contents);
888 return false;
889 }
890
891 /* Delete some bytes from a section while relaxing. */
892
893 static boolean
894 sh_relax_delete_bytes (abfd, sec, addr, count)
895 bfd *abfd;
896 asection *sec;
897 bfd_vma addr;
898 int count;
899 {
900 bfd_byte *contents;
901 struct internal_reloc *irel, *irelend;
902 struct internal_reloc *irelalign;
903 bfd_vma toaddr;
904 bfd_byte *esym, *esymend;
905 bfd_size_type symesz;
906 struct coff_link_hash_entry **sym_hash;
907 asection *o;
908
909 contents = coff_section_data (abfd, sec)->contents;
910
911 /* The deletion must stop at the next ALIGN reloc for an alignment
912 power larger than the number of bytes we are deleting. */
913
914 irelalign = NULL;
915 toaddr = sec->_cooked_size;
916
917 irel = coff_section_data (abfd, sec)->relocs;
918 irelend = irel + sec->reloc_count;
919 for (; irel < irelend; irel++)
920 {
921 if (irel->r_type == R_SH_ALIGN
922 && irel->r_vaddr - sec->vma > addr
923 && count < (1 << irel->r_offset))
924 {
925 irelalign = irel;
926 toaddr = irel->r_vaddr - sec->vma;
927 break;
928 }
929 }
930
931 /* Actually delete the bytes. */
932 memmove (contents + addr, contents + addr + count, toaddr - addr - count);
933 if (irelalign == NULL)
934 sec->_cooked_size -= count;
935 else
936 {
937 int i;
938
939 #define NOP_OPCODE (0x0009)
940
941 BFD_ASSERT ((count & 1) == 0);
942 for (i = 0; i < count; i += 2)
943 bfd_put_16 (abfd, NOP_OPCODE, contents + toaddr - count + i);
944 }
945
946 /* Adjust all the relocs. */
947 for (irel = coff_section_data (abfd, sec)->relocs; irel < irelend; irel++)
948 {
949 bfd_vma nraddr, stop;
950 bfd_vma start = 0;
951 int insn = 0;
952 struct internal_syment sym;
953 int off, adjust, oinsn;
954 bfd_signed_vma voff = 0;
955 boolean overflow;
956
957 /* Get the new reloc address. */
958 nraddr = irel->r_vaddr - sec->vma;
959 if ((irel->r_vaddr - sec->vma > addr
960 && irel->r_vaddr - sec->vma < toaddr)
961 || (irel->r_type == R_SH_ALIGN
962 && irel->r_vaddr - sec->vma == toaddr))
963 nraddr -= count;
964
965 /* See if this reloc was for the bytes we have deleted, in which
966 case we no longer care about it. Don't delete relocs which
967 represent addresses, though. */
968 if (irel->r_vaddr - sec->vma >= addr
969 && irel->r_vaddr - sec->vma < addr + count
970 && irel->r_type != R_SH_ALIGN
971 && irel->r_type != R_SH_CODE
972 && irel->r_type != R_SH_DATA
973 && irel->r_type != R_SH_LABEL)
974 irel->r_type = R_SH_UNUSED;
975
976 /* If this is a PC relative reloc, see if the range it covers
977 includes the bytes we have deleted. */
978 switch (irel->r_type)
979 {
980 default:
981 break;
982
983 case R_SH_PCDISP8BY2:
984 case R_SH_PCDISP:
985 case R_SH_PCRELIMM8BY2:
986 case R_SH_PCRELIMM8BY4:
987 start = irel->r_vaddr - sec->vma;
988 insn = bfd_get_16 (abfd, contents + nraddr);
989 break;
990 }
991
992 switch (irel->r_type)
993 {
994 default:
995 start = stop = addr;
996 break;
997
998 case R_SH_IMM32:
999 /* If this reloc is against a symbol defined in this
1000 section, and the symbol will not be adjusted below, we
1001 must check the addend to see if it will put the value in
1002 range to be adjusted, and hence must be changed. */
1003 bfd_coff_swap_sym_in (abfd,
1004 ((bfd_byte *) obj_coff_external_syms (abfd)
1005 + (irel->r_symndx
1006 * bfd_coff_symesz (abfd))),
1007 &sym);
1008 if (sym.n_sclass != C_EXT
1009 && sym.n_scnum == sec->target_index
1010 && ((bfd_vma) sym.n_value <= addr
1011 || (bfd_vma) sym.n_value >= toaddr))
1012 {
1013 bfd_vma val;
1014
1015 val = bfd_get_32 (abfd, contents + nraddr);
1016 val += sym.n_value;
1017 if (val > addr && val < toaddr)
1018 bfd_put_32 (abfd, val - count, contents + nraddr);
1019 }
1020 start = stop = addr;
1021 break;
1022
1023 case R_SH_PCDISP8BY2:
1024 off = insn & 0xff;
1025 if (off & 0x80)
1026 off -= 0x100;
1027 stop = (bfd_vma) ((bfd_signed_vma) start + 4 + off * 2);
1028 break;
1029
1030 case R_SH_PCDISP:
1031 bfd_coff_swap_sym_in (abfd,
1032 ((bfd_byte *) obj_coff_external_syms (abfd)
1033 + (irel->r_symndx
1034 * bfd_coff_symesz (abfd))),
1035 &sym);
1036 if (sym.n_sclass == C_EXT)
1037 start = stop = addr;
1038 else
1039 {
1040 off = insn & 0xfff;
1041 if (off & 0x800)
1042 off -= 0x1000;
1043 stop = (bfd_vma) ((bfd_signed_vma) start + 4 + off * 2);
1044 }
1045 break;
1046
1047 case R_SH_PCRELIMM8BY2:
1048 off = insn & 0xff;
1049 stop = start + 4 + off * 2;
1050 break;
1051
1052 case R_SH_PCRELIMM8BY4:
1053 off = insn & 0xff;
1054 stop = (start &~ (bfd_vma) 3) + 4 + off * 4;
1055 break;
1056
1057 case R_SH_SWITCH8:
1058 case R_SH_SWITCH16:
1059 case R_SH_SWITCH32:
1060 /* These reloc types represent
1061 .word L2-L1
1062 The r_offset field holds the difference between the reloc
1063 address and L1. That is the start of the reloc, and
1064 adding in the contents gives us the top. We must adjust
1065 both the r_offset field and the section contents. */
1066
1067 start = irel->r_vaddr - sec->vma;
1068 stop = (bfd_vma) ((bfd_signed_vma) start - (long) irel->r_offset);
1069
1070 if (start > addr
1071 && start < toaddr
1072 && (stop <= addr || stop >= toaddr))
1073 irel->r_offset += count;
1074 else if (stop > addr
1075 && stop < toaddr
1076 && (start <= addr || start >= toaddr))
1077 irel->r_offset -= count;
1078
1079 start = stop;
1080
1081 if (irel->r_type == R_SH_SWITCH16)
1082 voff = bfd_get_signed_16 (abfd, contents + nraddr);
1083 else if (irel->r_type == R_SH_SWITCH8)
1084 voff = bfd_get_8 (abfd, contents + nraddr);
1085 else
1086 voff = bfd_get_signed_32 (abfd, contents + nraddr);
1087 stop = (bfd_vma) ((bfd_signed_vma) start + voff);
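/* At this point, for the .word L2-L1 case described above, start
   holds the address of L1 and stop holds the address of L2; the
   generic code below then adjusts the stored difference (voff) when
   the deleted bytes move one label relative to the other.  */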
1088
1089 break;
1090
1091 case R_SH_USES:
1092 start = irel->r_vaddr - sec->vma;
1093 stop = (bfd_vma) ((bfd_signed_vma) start
1094 + (long) irel->r_offset
1095 + 4);
1096 break;
1097 }
1098
1099 if (start > addr
1100 && start < toaddr
1101 && (stop <= addr || stop >= toaddr))
1102 adjust = count;
1103 else if (stop > addr
1104 && stop < toaddr
1105 && (start <= addr || start >= toaddr))
1106 adjust = - count;
1107 else
1108 adjust = 0;
1109
1110 if (adjust != 0)
1111 {
1112 oinsn = insn;
1113 overflow = false;
1114 switch (irel->r_type)
1115 {
1116 default:
1117 abort ();
1118 break;
1119
1120 case R_SH_PCDISP8BY2:
1121 case R_SH_PCRELIMM8BY2:
1122 insn += adjust / 2;
1123 if ((oinsn & 0xff00) != (insn & 0xff00))
1124 overflow = true;
1125 bfd_put_16 (abfd, insn, contents + nraddr);
1126 break;
1127
1128 case R_SH_PCDISP:
1129 insn += adjust / 2;
1130 if ((oinsn & 0xf000) != (insn & 0xf000))
1131 overflow = true;
1132 bfd_put_16 (abfd, insn, contents + nraddr);
1133 break;
1134
1135 case R_SH_PCRELIMM8BY4:
1136 BFD_ASSERT (adjust == count || count >= 4);
1137 if (count >= 4)
1138 insn += adjust / 4;
1139 else
1140 {
1141 if ((irel->r_vaddr & 3) == 0)
1142 ++insn;
1143 }
1144 if ((oinsn & 0xff00) != (insn & 0xff00))
1145 overflow = true;
1146 bfd_put_16 (abfd, insn, contents + nraddr);
1147 break;
1148
1149 case R_SH_SWITCH8:
1150 voff += adjust;
1151 if (voff < 0 || voff >= 0xff)
1152 overflow = true;
1153 bfd_put_8 (abfd, voff, contents + nraddr);
1154 break;
1155
1156 case R_SH_SWITCH16:
1157 voff += adjust;
1158 if (voff < - 0x8000 || voff >= 0x8000)
1159 overflow = true;
1160 bfd_put_signed_16 (abfd, voff, contents + nraddr);
1161 break;
1162
1163 case R_SH_SWITCH32:
1164 voff += adjust;
1165 bfd_put_signed_32 (abfd, voff, contents + nraddr);
1166 break;
1167
1168 case R_SH_USES:
1169 irel->r_offset += adjust;
1170 break;
1171 }
1172
1173 if (overflow)
1174 {
1175 ((*_bfd_error_handler)
1176 ("%s: 0x%lx: fatal: reloc overflow while relaxing",
1177 bfd_get_filename (abfd), (unsigned long) irel->r_vaddr));
1178 bfd_set_error (bfd_error_bad_value);
1179 return false;
1180 }
1181 }
1182
1183 irel->r_vaddr = nraddr + sec->vma;
1184 }
1185
1186 /* Look through all the other sections. If they contain any IMM32
1187 relocs against internal symbols which we are not going to adjust
1188 below, we may need to adjust the addends. */
1189 for (o = abfd->sections; o != NULL; o = o->next)
1190 {
1191 struct internal_reloc *internal_relocs;
1192 struct internal_reloc *irelscan, *irelscanend;
1193 bfd_byte *ocontents;
1194
1195 if (o == sec
1196 || (o->flags & SEC_RELOC) == 0
1197 || o->reloc_count == 0)
1198 continue;
1199
1200 /* We always cache the relocs. Perhaps, if info->keep_memory is
1201 false, we should free them, if we are permitted to, when we
1202 leave sh_coff_relax_section. */
1203 internal_relocs = (_bfd_coff_read_internal_relocs
1204 (abfd, o, true, (bfd_byte *) NULL, false,
1205 (struct internal_reloc *) NULL));
1206 if (internal_relocs == NULL)
1207 return false;
1208
1209 ocontents = NULL;
1210 irelscanend = internal_relocs + o->reloc_count;
1211 for (irelscan = internal_relocs; irelscan < irelscanend; irelscan++)
1212 {
1213 struct internal_syment sym;
1214
1215 if (irelscan->r_type != R_SH_IMM32)
1216 continue;
1217
1218 bfd_coff_swap_sym_in (abfd,
1219 ((bfd_byte *) obj_coff_external_syms (abfd)
1220 + (irelscan->r_symndx
1221 * bfd_coff_symesz (abfd))),
1222 &sym);
1223 if (sym.n_sclass != C_EXT
1224 && sym.n_scnum == sec->target_index
1225 && ((bfd_vma) sym.n_value <= addr
1226 || (bfd_vma) sym.n_value >= toaddr))
1227 {
1228 bfd_vma val;
1229
1230 if (ocontents == NULL)
1231 {
1232 if (coff_section_data (abfd, o)->contents != NULL)
1233 ocontents = coff_section_data (abfd, o)->contents;
1234 else
1235 {
1236 /* We always cache the section contents.
1237 Perhaps, if info->keep_memory is false, we
1238 should free them, if we are permitted to,
1239 when we leave sh_coff_relax_section. */
1240 ocontents = (bfd_byte *) bfd_malloc (o->_raw_size);
1241 if (ocontents == NULL)
1242 return false;
1243 if (! bfd_get_section_contents (abfd, o, ocontents,
1244 (file_ptr) 0,
1245 o->_raw_size))
1246 return false;
1247 coff_section_data (abfd, o)->contents = ocontents;
1248 }
1249 }
1250
1251 val = bfd_get_32 (abfd, ocontents + irelscan->r_vaddr - o->vma);
1252 val += sym.n_value;
1253 if (val > addr && val < toaddr)
1254 bfd_put_32 (abfd, val - count,
1255 ocontents + irelscan->r_vaddr - o->vma);
1256
1257 coff_section_data (abfd, o)->keep_contents = true;
1258 }
1259 }
1260 }
1261
1262 /* Adjusting the internal symbols will not work if something has
1263 already retrieved the generic symbols. It would be possible to
1264 make this work by adjusting the generic symbols at the same time.
1265 However, this case should not arise in normal usage. */
1266 if (obj_symbols (abfd) != NULL
1267 || obj_raw_syments (abfd) != NULL)
1268 {
1269 ((*_bfd_error_handler)
1270 ("%s: fatal: generic symbols retrieved before relaxing",
1271 bfd_get_filename (abfd)));
1272 bfd_set_error (bfd_error_invalid_operation);
1273 return false;
1274 }
1275
1276 /* Adjust all the symbols. */
1277 sym_hash = obj_coff_sym_hashes (abfd);
1278 symesz = bfd_coff_symesz (abfd);
1279 esym = (bfd_byte *) obj_coff_external_syms (abfd);
1280 esymend = esym + obj_raw_syment_count (abfd) * symesz;
1281 while (esym < esymend)
1282 {
1283 struct internal_syment isym;
1284
1285 bfd_coff_swap_sym_in (abfd, (PTR) esym, (PTR) &isym);
1286
1287 if (isym.n_scnum == sec->target_index
1288 && (bfd_vma) isym.n_value > addr
1289 && (bfd_vma) isym.n_value < toaddr)
1290 {
1291 isym.n_value -= count;
1292
1293 bfd_coff_swap_sym_out (abfd, (PTR) &isym, (PTR) esym);
1294
1295 if (*sym_hash != NULL)
1296 {
1297 BFD_ASSERT ((*sym_hash)->root.type == bfd_link_hash_defined
1298 || (*sym_hash)->root.type == bfd_link_hash_defweak);
1299 BFD_ASSERT ((*sym_hash)->root.u.def.value >= addr
1300 && (*sym_hash)->root.u.def.value < toaddr);
1301 (*sym_hash)->root.u.def.value -= count;
1302 }
1303 }
1304
1305 esym += (isym.n_numaux + 1) * symesz;
1306 sym_hash += isym.n_numaux + 1;
1307 }
1308
1309 /* See if we can move the ALIGN reloc forward. We have adjusted
1310 r_vaddr for it already. */
1311 if (irelalign != NULL)
1312 {
1313 bfd_vma alignto, alignaddr;
1314
1315 alignto = BFD_ALIGN (toaddr, 1 << irelalign->r_offset);
1316 alignaddr = BFD_ALIGN (irelalign->r_vaddr - sec->vma,
1317 1 << irelalign->r_offset);
1318 if (alignto != alignaddr)
1319 {
1320 /* Tail recursion. */
1321 return sh_relax_delete_bytes (abfd, sec, alignaddr,
1322 alignto - alignaddr);
1323 }
1324 }
1325
1326 return true;
1327 }
1328 \f
1329 /* This is yet another version of the SH opcode table, used to rapidly
1330 get information about a particular instruction. */
1331
1332 /* The opcode map is represented by an array of these structures. The
1333 array is indexed by the high order four bits in the instruction. */
1334
1335 struct sh_major_opcode
1336 {
1337 /* A pointer to the instruction list. This is an array which
1338 contains all the instructions with this major opcode. */
1339 const struct sh_minor_opcode *minor_opcodes;
1340 /* The number of elements in minor_opcodes. */
1341 unsigned short count;
1342 };
1343
1344 /* This structure holds information for a set of SH opcodes. The
1345 instruction code is anded with the mask value, and the resulting
1346 value is used to search the sorted opcode list. */
1347
1348 struct sh_minor_opcode
1349 {
1350 /* The sorted opcode list. */
1351 const struct sh_opcode *opcodes;
1352 /* The number of elements in opcodes. */
1353 unsigned short count;
1354 /* The mask value to use when searching the opcode list. */
1355 unsigned short mask;
1356 };
1357
1358 /* This structure holds information for an SH instruction. An array
1359 of these structures is sorted in order by opcode. */
1360
1361 struct sh_opcode
1362 {
1363 /* The code for this instruction, after it has been anded with the
1364 mask value in the sh_major_opcode structure. */
1365 unsigned short opcode;
1366 /* Flags for this instruction. */
1367 unsigned short flags;
1368 };
1369
1370 /* Flags which appear in the sh_opcode structure. */
1371
1372 /* This instruction loads a value from memory. */
1373 #define LOAD (0x1)
1374
1375 /* This instruction stores a value to memory. */
1376 #define STORE (0x2)
1377
1378 /* This instruction is a branch. */
1379 #define BRANCH (0x4)
1380
1381 /* This instruction has a delay slot. */
1382 #define DELAY (0x8)
1383
1384 /* This instruction uses the value in the register in the field at
1385 mask 0x0f00 of the instruction. */
1386 #define USES1 (0x10)
1387 #define USES1_REG(x) ((x & 0x0f00) >> 8)
1388
1389 /* This instruction uses the value in the register in the field at
1390 mask 0x00f0 of the instruction. */
1391 #define USES2 (0x20)
1392 #define USES2_REG(x) ((x & 0x00f0) >> 4)
1393
1394 /* This instruction uses the value in register 0. */
1395 #define USESR0 (0x40)
1396
1397 /* This instruction sets the value in the register in the field at
1398 mask 0x0f00 of the instruction. */
1399 #define SETS1 (0x80)
1400 #define SETS1_REG(x) ((x & 0x0f00) >> 8)
1401
1402 /* This instruction sets the value in the register in the field at
1403 mask 0x00f0 of the instruction. */
1404 #define SETS2 (0x100)
1405 #define SETS2_REG(x) ((x & 0x00f0) >> 4)
1406
1407 /* This instruction sets register 0. */
1408 #define SETSR0 (0x200)
1409
1410 /* This instruction sets a special register. */
1411 #define SETSSP (0x400)
1412
1413 /* This instruction uses a special register. */
1414 #define USESSP (0x800)
1415
1416 /* This instruction uses the floating point register in the field at
1417 mask 0x0f00 of the instruction. */
1418 #define USESF1 (0x1000)
1419 #define USESF1_REG(x) ((x & 0x0f00) >> 8)
1420
1421 /* This instruction uses the floating point register in the field at
1422 mask 0x00f0 of the instruction. */
1423 #define USESF2 (0x2000)
1424 #define USESF2_REG(x) ((x & 0x00f0) >> 4)
1425
1426 /* This instruction uses floating point register 0. */
1427 #define USESF0 (0x4000)
1428
1429 /* This instruction sets the floating point register in the field at
1430 mask 0x0f00 of the instruction. */
1431 #define SETSF1 (0x8000)
1432 #define SETSF1_REG(x) ((x & 0x0f00) >> 8)
1433
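/* As an illustration of how these flags are used (values taken from
   the opcode tables below): jsr @rn (0x400b) appears in sh_opcode40
   under mask 0xf0ff with flags BRANCH | DELAY | USES1, and USES1_REG
   extracts rn from bits 8-11; mov.l rm,@rn (0x2002) appears in
   sh_opcode20 under mask 0xf00f with flags STORE | USES1 | USES2.  */
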
1434 static boolean sh_insn_uses_reg
1435 PARAMS ((unsigned int, const struct sh_opcode *, unsigned int));
1436 static boolean sh_insn_sets_reg
1437 PARAMS ((unsigned int, const struct sh_opcode *, unsigned int));
1438 static boolean sh_insn_uses_or_sets_reg
1439 PARAMS ((unsigned int, const struct sh_opcode *, unsigned int));
1440 static boolean sh_insn_uses_freg
1441 PARAMS ((unsigned int, const struct sh_opcode *, unsigned int));
1442 static boolean sh_insn_sets_freg
1443 PARAMS ((unsigned int, const struct sh_opcode *, unsigned int));
1444 static boolean sh_insn_uses_or_sets_freg
1445 PARAMS ((unsigned int, const struct sh_opcode *, unsigned int));
1446 static boolean sh_insns_conflict
1447 PARAMS ((unsigned int, const struct sh_opcode *, unsigned int,
1448 const struct sh_opcode *));
1449 static boolean sh_load_use
1450 PARAMS ((unsigned int, const struct sh_opcode *, unsigned int,
1451 const struct sh_opcode *));
1452
1453 /* The opcode maps. */
1454
1455 #define MAP(a) a, sizeof a / sizeof a[0]
1456
1457 static const struct sh_opcode sh_opcode00[] =
1458 {
1459 { 0x0008, SETSSP }, /* clrt */
1460 { 0x0009, 0 }, /* nop */
1461 { 0x000b, BRANCH | DELAY | USESSP }, /* rts */
1462 { 0x0018, SETSSP }, /* sett */
1463 { 0x0019, SETSSP }, /* div0u */
1464 { 0x001b, 0 }, /* sleep */
1465 { 0x0028, SETSSP }, /* clrmac */
1466 { 0x002b, BRANCH | DELAY | SETSSP }, /* rte */
1467 { 0x0038, USESSP | SETSSP }, /* ldtlb */
1468 { 0x0048, SETSSP }, /* clrs */
1469 { 0x0058, SETSSP } /* sets */
1470 };
1471
1472 static const struct sh_opcode sh_opcode01[] =
1473 {
1474 { 0x0002, SETS1 | USESSP }, /* stc sr,rn */
1475 { 0x0003, BRANCH | DELAY | USES1 | SETSSP }, /* bsrf rn */
1476 { 0x000a, SETS1 | USESSP }, /* sts mach,rn */
1477 { 0x0012, SETS1 | USESSP }, /* stc gbr,rn */
1478 { 0x001a, SETS1 | USESSP }, /* sts macl,rn */
1479 { 0x0022, SETS1 | USESSP }, /* stc vbr,rn */
1480 { 0x0023, BRANCH | DELAY | USES1 }, /* braf rn */
1481 { 0x0029, SETS1 | USESSP }, /* movt rn */
1482 { 0x002a, SETS1 | USESSP }, /* sts pr,rn */
1483 { 0x0032, SETS1 | USESSP }, /* stc ssr,rn */
1484 { 0x0042, SETS1 | USESSP }, /* stc spc,rn */
1485 { 0x005a, SETS1 | USESSP }, /* sts fpul,rn */
1486 { 0x006a, SETS1 | USESSP }, /* sts fpscr,rn */
1487 { 0x0082, SETS1 | USESSP }, /* stc r0_bank,rn */
1488 { 0x0083, LOAD | USES1 }, /* pref @rn */
1489 { 0x0092, SETS1 | USESSP }, /* stc r1_bank,rn */
1490 { 0x00a2, SETS1 | USESSP }, /* stc r2_bank,rn */
1491 { 0x00b2, SETS1 | USESSP }, /* stc r3_bank,rn */
1492 { 0x00c2, SETS1 | USESSP }, /* stc r4_bank,rn */
1493 { 0x00d2, SETS1 | USESSP }, /* stc r5_bank,rn */
1494 { 0x00e2, SETS1 | USESSP }, /* stc r6_bank,rn */
1495 { 0x00f2, SETS1 | USESSP } /* stc r7_bank,rn */
1496 };
1497
1498 static const struct sh_opcode sh_opcode02[] =
1499 {
1500 { 0x0004, STORE | USES1 | USES2 | USESR0 }, /* mov.b rm,@(r0,rn) */
1501 { 0x0005, STORE | USES1 | USES2 | USESR0 }, /* mov.w rm,@(r0,rn) */
1502 { 0x0006, STORE | USES1 | USES2 | USESR0 }, /* mov.l rm,@(r0,rn) */
1503 { 0x0007, SETSSP | USES1 | USES2 }, /* mul.l rm,rn */
1504 { 0x000c, LOAD | SETS1 | USES2 | USESR0 }, /* mov.b @(r0,rm),rn */
1505 { 0x000d, LOAD | SETS1 | USES2 | USESR0 }, /* mov.w @(r0,rm),rn */
1506 { 0x000e, LOAD | SETS1 | USES2 | USESR0 }, /* mov.l @(r0,rm),rn */
1507 { 0x000f, LOAD|SETS1|SETS2|SETSSP|USES1|USES2|USESSP }, /* mac.l @rm+,@rn+ */
1508 };
1509
1510 static const struct sh_minor_opcode sh_opcode0[] =
1511 {
1512 { MAP (sh_opcode00), 0xffff },
1513 { MAP (sh_opcode01), 0xf0ff },
1514 { MAP (sh_opcode02), 0xf00f }
1515 };
1516
1517 static const struct sh_opcode sh_opcode10[] =
1518 {
1519 { 0x1000, STORE | USES1 | USES2 } /* mov.l rm,@(disp,rn) */
1520 };
1521
1522 static const struct sh_minor_opcode sh_opcode1[] =
1523 {
1524 { MAP (sh_opcode10), 0xf000 }
1525 };
1526
1527 static const struct sh_opcode sh_opcode20[] =
1528 {
1529 { 0x2000, STORE | USES1 | USES2 }, /* mov.b rm,@rn */
1530 { 0x2001, STORE | USES1 | USES2 }, /* mov.w rm,@rn */
1531 { 0x2002, STORE | USES1 | USES2 }, /* mov.l rm,@rn */
1532 { 0x2004, STORE | SETS1 | USES1 | USES2 }, /* mov.b rm,@-rn */
1533 { 0x2005, STORE | SETS1 | USES1 | USES2 }, /* mov.w rm,@-rn */
1534 { 0x2006, STORE | SETS1 | USES1 | USES2 }, /* mov.l rm,@-rn */
1535 { 0x2007, SETSSP | USES1 | USES2 | USESSP }, /* div0s */
1536 { 0x2008, SETSSP | USES1 | USES2 }, /* tst rm,rn */
1537 { 0x2009, SETS1 | USES1 | USES2 }, /* and rm,rn */
1538 { 0x200a, SETS1 | USES1 | USES2 }, /* xor rm,rn */
1539 { 0x200b, SETS1 | USES1 | USES2 }, /* or rm,rn */
1540 { 0x200c, SETSSP | USES1 | USES2 }, /* cmp/str rm,rn */
1541 { 0x200d, SETS1 | USES1 | USES2 }, /* xtrct rm,rn */
1542 { 0x200e, SETSSP | USES1 | USES2 }, /* mulu.w rm,rn */
1543 { 0x200f, SETSSP | USES1 | USES2 } /* muls.w rm,rn */
1544 };
1545
1546 static const struct sh_minor_opcode sh_opcode2[] =
1547 {
1548 { MAP (sh_opcode20), 0xf00f }
1549 };
1550
1551 static const struct sh_opcode sh_opcode30[] =
1552 {
1553 { 0x3000, SETSSP | USES1 | USES2 }, /* cmp/eq rm,rn */
1554 { 0x3002, SETSSP | USES1 | USES2 }, /* cmp/hs rm,rn */
1555 { 0x3003, SETSSP | USES1 | USES2 }, /* cmp/ge rm,rn */
1556 { 0x3004, SETSSP | USESSP | USES1 | USES2 }, /* div1 rm,rn */
1557 { 0x3005, SETSSP | USES1 | USES2 }, /* dmulu.l rm,rn */
1558 { 0x3006, SETSSP | USES1 | USES2 }, /* cmp/hi rm,rn */
1559 { 0x3007, SETSSP | USES1 | USES2 }, /* cmp/gt rm,rn */
1560 { 0x3008, SETS1 | USES1 | USES2 }, /* sub rm,rn */
1561 { 0x300a, SETS1 | SETSSP | USES1 | USES2 | USESSP }, /* subc rm,rn */
1562 { 0x300b, SETS1 | SETSSP | USES1 | USES2 }, /* subv rm,rn */
1563 { 0x300c, SETS1 | USES1 | USES2 }, /* add rm,rn */
1564 { 0x300d, SETSSP | USES1 | USES2 }, /* dmuls.l rm,rn */
1565 { 0x300e, SETS1 | SETSSP | USES1 | USES2 | USESSP }, /* addc rm,rn */
1566 { 0x300f, SETS1 | SETSSP | USES1 | USES2 } /* addv rm,rn */
1567 };
1568
1569 static const struct sh_minor_opcode sh_opcode3[] =
1570 {
1571 { MAP (sh_opcode30), 0xf00f }
1572 };
1573
1574 static const struct sh_opcode sh_opcode40[] =
1575 {
1576 { 0x4000, SETS1 | SETSSP | USES1 }, /* shll rn */
1577 { 0x4001, SETS1 | SETSSP | USES1 }, /* shlr rn */
1578 { 0x4002, STORE | SETS1 | USES1 | USESSP }, /* sts.l mach,@-rn */
1579 { 0x4003, STORE | SETS1 | USES1 | USESSP }, /* stc.l sr,@-rn */
1580 { 0x4004, SETS1 | SETSSP | USES1 }, /* rotl rn */
1581 { 0x4005, SETS1 | SETSSP | USES1 }, /* rotr rn */
1582 { 0x4006, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,mach */
1583 { 0x4007, LOAD | SETS1 | SETSSP | USES1 }, /* ldc.l @rm+,sr */
1584 { 0x4008, SETS1 | USES1 }, /* shll2 rn */
1585 { 0x4009, SETS1 | USES1 }, /* shlr2 rn */
1586 { 0x400a, SETSSP | USES1 }, /* lds rm,mach */
1587 { 0x400b, BRANCH | DELAY | USES1 }, /* jsr @rn */
1588 { 0x400e, SETSSP | USES1 }, /* ldc rm,sr */
1589 { 0x4010, SETS1 | SETSSP | USES1 }, /* dt rn */
1590 { 0x4011, SETSSP | USES1 }, /* cmp/pz rn */
1591 { 0x4012, STORE | SETS1 | USES1 | USESSP }, /* sts.l macl,@-rn */
1592 { 0x4013, STORE | SETS1 | USES1 | USESSP }, /* stc.l gbr,@-rn */
1593 { 0x4015, SETSSP | USES1 }, /* cmp/pl rn */
1594 { 0x4016, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,macl */
1595 { 0x4017, LOAD | SETS1 | SETSSP | USES1 }, /* ldc.l @rm+,gbr */
1596 { 0x4018, SETS1 | USES1 }, /* shll8 rn */
1597 { 0x4019, SETS1 | USES1 }, /* shlr8 rn */
1598 { 0x401a, SETSSP | USES1 }, /* lds rm,macl */
1599 { 0x401b, LOAD | SETSSP | USES1 }, /* tas.b @rn */
1600 { 0x401e, SETSSP | USES1 }, /* ldc rm,gbr */
1601 { 0x4020, SETS1 | SETSSP | USES1 }, /* shal rn */
1602 { 0x4021, SETS1 | SETSSP | USES1 }, /* shar rn */
1603 { 0x4022, STORE | SETS1 | USES1 | USESSP }, /* sts.l pr,@-rn */
1604 { 0x4023, STORE | SETS1 | USES1 | USESSP }, /* stc.l vbr,@-rn */
1605 { 0x4024, SETS1 | SETSSP | USES1 | USESSP }, /* rotcl rn */
1606 { 0x4025, SETS1 | SETSSP | USES1 | USESSP }, /* rotcr rn */
1607 { 0x4026, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,pr */
1608 { 0x4027, LOAD | SETS1 | SETSSP | USES1 }, /* ldc.l @rm+,vbr */
1609 { 0x4028, SETS1 | USES1 }, /* shll16 rn */
1610 { 0x4029, SETS1 | USES1 }, /* shlr16 rn */
1611 { 0x402a, SETSSP | USES1 }, /* lds rm,pr */
1612 { 0x402b, BRANCH | DELAY | USES1 }, /* jmp @rn */
1613 { 0x402e, SETSSP | USES1 }, /* ldc rm,vbr */
1614 { 0x4033, STORE | SETS1 | USES1 | USESSP }, /* stc.l ssr,@-rn */
1615 { 0x4037, LOAD | SETS1 | SETSSP | USES1 }, /* ldc.l @rm+,ssr */
1616 { 0x403e, SETSSP | USES1 }, /* ldc rm,ssr */
1617 { 0x4043, STORE | SETS1 | USES1 | USESSP }, /* stc.l spc,@-rn */
1618 { 0x4047, LOAD | SETS1 | SETSSP | USES1 }, /* ldc.l @rm+,spc */
1619 { 0x404e, SETSSP | USES1 }, /* ldc rm,spc */
1620 { 0x4052, STORE | SETS1 | USES1 | USESSP }, /* sts.l fpul,@-rn */
1621 { 0x4056, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,fpul */
1622 { 0x405a, SETSSP | USES1 }, /* lds rm,fpul */
1623 { 0x4062, STORE | SETS1 | USES1 | USESSP }, /* sts.l fpscr,@-rn */
1624 { 0x4066, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,fpscr */
1625 { 0x406a, SETSSP | USES1 } /* lds rm,fpscr */
1626 };
1627
1628 static const struct sh_opcode sh_opcode41[] =
1629 {
1630 { 0x4083, STORE | SETS1 | USES1 | USESSP }, /* stc.l rx_bank,@-rn */
1631 { 0x4087, LOAD | SETS1 | SETSSP | USES1 }, /* ldc.l @rm+,rx_bank */
1632 { 0x408e, SETSSP | USES1 } /* ldc rm,rx_bank */
1633 };
1634
1635 static const struct sh_opcode sh_opcode42[] =
1636 {
1637 { 0x400c, SETS1 | USES1 | USES2 }, /* shad rm,rn */
1638 { 0x400d, SETS1 | USES1 | USES2 }, /* shld rm,rn */
1639 { 0x400f, LOAD|SETS1|SETS2|SETSSP|USES1|USES2|USESSP }, /* mac.w @rm+,@rn+ */
1640 };
1641
1642 static const struct sh_minor_opcode sh_opcode4[] =
1643 {
1644 { MAP (sh_opcode40), 0xf0ff },
1645 { MAP (sh_opcode41), 0xf08f },
1646 { MAP (sh_opcode42), 0xf00f }
1647 };
1648
1649 static const struct sh_opcode sh_opcode50[] =
1650 {
1651 { 0x5000, LOAD | SETS1 | USES2 } /* mov.l @(disp,rm),rn */
1652 };
1653
1654 static const struct sh_minor_opcode sh_opcode5[] =
1655 {
1656 { MAP (sh_opcode50), 0xf000 }
1657 };
1658
1659 static const struct sh_opcode sh_opcode60[] =
1660 {
1661 { 0x6000, LOAD | SETS1 | USES2 }, /* mov.b @rm,rn */
1662 { 0x6001, LOAD | SETS1 | USES2 }, /* mov.w @rm,rn */
1663 { 0x6002, LOAD | SETS1 | USES2 }, /* mov.l @rm,rn */
1664 { 0x6003, SETS1 | USES2 }, /* mov rm,rn */
1665 { 0x6004, LOAD | SETS1 | SETS2 | USES2 }, /* mov.b @rm+,rn */
1666 { 0x6005, LOAD | SETS1 | SETS2 | USES2 }, /* mov.w @rm+,rn */
1667 { 0x6006, LOAD | SETS1 | SETS2 | USES2 }, /* mov.l @rm+,rn */
1668 { 0x6007, SETS1 | USES2 }, /* not rm,rn */
1669 { 0x6008, SETS1 | USES2 }, /* swap.b rm,rn */
1670 { 0x6009, SETS1 | USES2 }, /* swap.w rm,rn */
1671 { 0x600a, SETS1 | SETSSP | USES2 | USESSP }, /* negc rm,rn */
1672 { 0x600b, SETS1 | USES2 }, /* neg rm,rn */
1673 { 0x600c, SETS1 | USES2 }, /* extu.b rm,rn */
1674 { 0x600d, SETS1 | USES2 }, /* extu.w rm,rn */
1675 { 0x600e, SETS1 | USES2 }, /* exts.b rm,rn */
1676 { 0x600f, SETS1 | USES2 } /* exts.w rm,rn */
1677 };
1678
1679 static const struct sh_minor_opcode sh_opcode6[] =
1680 {
1681 { MAP (sh_opcode60), 0xf00f }
1682 };
1683
1684 static const struct sh_opcode sh_opcode70[] =
1685 {
1686 { 0x7000, SETS1 | USES1 } /* add #imm,rn */
1687 };
1688
1689 static const struct sh_minor_opcode sh_opcode7[] =
1690 {
1691 { MAP (sh_opcode70), 0xf000 }
1692 };
1693
1694 static const struct sh_opcode sh_opcode80[] =
1695 {
1696 { 0x8000, STORE | USES2 | USESR0 }, /* mov.b r0,@(disp,rn) */
1697 { 0x8100, STORE | USES2 | USESR0 }, /* mov.w r0,@(disp,rn) */
1698 { 0x8400, LOAD | SETSR0 | USES2 }, /* mov.b @(disp,rm),r0 */
1699 { 0x8500, LOAD | SETSR0 | USES2 }, /* mov.w @(disp,rm),r0 */
1700 { 0x8800, SETSSP | USESR0 }, /* cmp/eq #imm,r0 */
1701 { 0x8900, BRANCH | USESSP }, /* bt label */
1702 { 0x8b00, BRANCH | USESSP }, /* bf label */
1703 { 0x8d00, BRANCH | DELAY | USESSP }, /* bt/s label */
1704 { 0x8f00, BRANCH | DELAY | USESSP } /* bf/s label */
1705 };
1706
1707 static const struct sh_minor_opcode sh_opcode8[] =
1708 {
1709 { MAP (sh_opcode80), 0xff00 }
1710 };
1711
1712 static const struct sh_opcode sh_opcode90[] =
1713 {
1714 { 0x9000, LOAD | SETS1 } /* mov.w @(disp,pc),rn */
1715 };
1716
1717 static const struct sh_minor_opcode sh_opcode9[] =
1718 {
1719 { MAP (sh_opcode90), 0xf000 }
1720 };
1721
1722 static const struct sh_opcode sh_opcodea0[] =
1723 {
1724 { 0xa000, BRANCH | DELAY } /* bra label */
1725 };
1726
1727 static const struct sh_minor_opcode sh_opcodea[] =
1728 {
1729 { MAP (sh_opcodea0), 0xf000 }
1730 };
1731
1732 static const struct sh_opcode sh_opcodeb0[] =
1733 {
1734 { 0xb000, BRANCH | DELAY } /* bsr label */
1735 };
1736
1737 static const struct sh_minor_opcode sh_opcodeb[] =
1738 {
1739 { MAP (sh_opcodeb0), 0xf000 }
1740 };
1741
1742 static const struct sh_opcode sh_opcodec0[] =
1743 {
1744 { 0xc000, STORE | USESR0 | USESSP }, /* mov.b r0,@(disp,gbr) */
1745 { 0xc100, STORE | USESR0 | USESSP }, /* mov.w r0,@(disp,gbr) */
1746 { 0xc200, STORE | USESR0 | USESSP }, /* mov.l r0,@(disp,gbr) */
1747 { 0xc300, BRANCH | USESSP }, /* trapa #imm */
1748 { 0xc400, LOAD | SETSR0 | USESSP }, /* mov.b @(disp,gbr),r0 */
1749 { 0xc500, LOAD | SETSR0 | USESSP }, /* mov.w @(disp,gbr),r0 */
1750 { 0xc600, LOAD | SETSR0 | USESSP }, /* mov.l @(disp,gbr),r0 */
1751 { 0xc700, SETSR0 }, /* mova @(disp,pc),r0 */
1752 { 0xc800, SETSSP | USESR0 }, /* tst #imm,r0 */
1753 { 0xc900, SETSR0 | USESR0 }, /* and #imm,r0 */
1754 { 0xca00, SETSR0 | USESR0 }, /* xor #imm,r0 */
1755 { 0xcb00, SETSR0 | USESR0 }, /* or #imm,r0 */
1756 { 0xcc00, LOAD | SETSSP | USESR0 | USESSP }, /* tst.b #imm,@(r0,gbr) */
1757 { 0xcd00, LOAD | STORE | USESR0 | USESSP }, /* and.b #imm,@(r0,gbr) */
1758 { 0xce00, LOAD | STORE | USESR0 | USESSP }, /* xor.b #imm,@(r0,gbr) */
1759 { 0xcf00, LOAD | STORE | USESR0 | USESSP } /* or.b #imm,@(r0,gbr) */
1760 };
1761
1762 static const struct sh_minor_opcode sh_opcodec[] =
1763 {
1764 { MAP (sh_opcodec0), 0xff00 }
1765 };
1766
1767 static const struct sh_opcode sh_opcoded0[] =
1768 {
1769 { 0xd000, LOAD | SETS1 } /* mov.l @(disp,pc),rn */
1770 };
1771
1772 static const struct sh_minor_opcode sh_opcoded[] =
1773 {
1774 { MAP (sh_opcoded0), 0xf000 }
1775 };
1776
1777 static const struct sh_opcode sh_opcodee0[] =
1778 {
1779 { 0xe000, SETS1 } /* mov #imm,rn */
1780 };
1781
1782 static const struct sh_minor_opcode sh_opcodee[] =
1783 {
1784 { MAP (sh_opcodee0), 0xf000 }
1785 };
1786
1787 static const struct sh_opcode sh_opcodef0[] =
1788 {
1789 { 0xf000, SETSF1 | USESF1 | USESF2 }, /* fadd fm,fn */
1790 { 0xf001, SETSF1 | USESF1 | USESF2 }, /* fsub fm,fn */
1791 { 0xf002, SETSF1 | USESF1 | USESF2 }, /* fmul fm,fn */
1792 { 0xf003, SETSF1 | USESF1 | USESF2 }, /* fdiv fm,fn */
1793 { 0xf004, SETSSP | USESF1 | USESF2 }, /* fcmp/eq fm,fn */
1794 { 0xf005, SETSSP | USESF1 | USESF2 }, /* fcmp/gt fm,fn */
1795 { 0xf006, LOAD | SETSF1 | USES2 | USESR0 }, /* fmov.s @(r0,rm),fn */
1796 { 0xf007, STORE | USES1 | USESF2 | USESR0 }, /* fmov.s fm,@(r0,rn) */
1797 { 0xf008, LOAD | SETSF1 | USES2 }, /* fmov.s @rm,fn */
1798 { 0xf009, LOAD | SETS2 | SETSF1 | USES2 }, /* fmov.s @rm+,fn */
1799 { 0xf00a, STORE | USES1 | USESF2 }, /* fmov.s fm,@rn */
1800 { 0xf00b, STORE | SETS1 | USES1 | USESF2 }, /* fmov.s fm,@-rn */
1801 { 0xf00c, SETSF1 | USESF2 }, /* fmov fm,fn */
1802 { 0xf00e, SETSF1 | USESF1 | USESF2 | USESF0 } /* fmac f0,fm,fn */
1803 };
1804
1805 static const struct sh_opcode sh_opcodef1[] =
1806 {
1807 { 0xf00d, SETSF1 | USESSP }, /* fsts fpul,fn */
1808 { 0xf01d, SETSSP | USESF1 }, /* flds fn,fpul */
1809 { 0xf02d, SETSF1 | USESSP }, /* float fpul,fn */
1810 { 0xf03d, SETSSP | USESF1 }, /* ftrc fn,fpul */
1811 { 0xf04d, SETSF1 | USESF1 }, /* fneg fn */
1812 { 0xf05d, SETSF1 | USESF1 }, /* fabs fn */
1813 { 0xf06d, SETSF1 | USESF1 }, /* fsqrt fn */
1814 { 0xf07d, SETSSP | USESF1 }, /* ftst/nan fn */
1815 { 0xf08d, SETSF1 }, /* fldi0 fn */
1816 { 0xf09d, SETSF1 } /* fldi1 fn */
1817 };
1818
1819 static const struct sh_minor_opcode sh_opcodef[] =
1820 {
1821 { MAP (sh_opcodef0), 0xf00f },
1822 { MAP (sh_opcodef1), 0xf0ff }
1823 };
1824
1825 static const struct sh_major_opcode sh_opcodes[] =
1826 {
1827 { MAP (sh_opcode0) },
1828 { MAP (sh_opcode1) },
1829 { MAP (sh_opcode2) },
1830 { MAP (sh_opcode3) },
1831 { MAP (sh_opcode4) },
1832 { MAP (sh_opcode5) },
1833 { MAP (sh_opcode6) },
1834 { MAP (sh_opcode7) },
1835 { MAP (sh_opcode8) },
1836 { MAP (sh_opcode9) },
1837 { MAP (sh_opcodea) },
1838 { MAP (sh_opcodeb) },
1839 { MAP (sh_opcodec) },
1840 { MAP (sh_opcoded) },
1841 { MAP (sh_opcodee) },
1842 { MAP (sh_opcodef) }
1843 };
1844
1845 /* Given an instruction, return a pointer to the corresponding
1846 sh_opcode structure. Return NULL if the instruction is not
1847 recognized. */
1848
1849 static const struct sh_opcode *
1850 sh_insn_info (insn)
1851 unsigned int insn;
1852 {
1853 const struct sh_major_opcode *maj;
1854 const struct sh_minor_opcode *min, *minend;
1855
1856 maj = &sh_opcodes[(insn & 0xf000) >> 12];
1857 min = maj->minor_opcodes;
1858 minend = min + maj->count;
1859 for (; min < minend; min++)
1860 {
1861 unsigned int l;
1862 const struct sh_opcode *op, *opend;
1863
1864 l = insn & min->mask;
1865 op = min->opcodes;
1866 opend = op + min->count;
1867
1868 /* Since the opcodes tables are sorted, we could use a binary
1869 search here if the count were above some cutoff value. */
1870 for (; op < opend; op++)
1871 if (op->opcode == l)
1872 return op;
1873 }
1874
1875 return NULL;
1876 }
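
/* An illustrative sketch, not part of the backend proper, of how the
   two level table above resolves a raw instruction word.  The encoding
   used here is taken from the sh_opcode60 table.  */
#if 0
static void
sh_insn_info_example ()
{
  /* 0x6503 is "mov r0,r5": the major nibble 6 selects sh_opcode6,
     whose single minor entry masks the insn with 0xf00f, leaving
     0x6003, which matches the table entry flagged SETS1 | USES2.  */
  const struct sh_opcode *op = sh_insn_info (0x6503);

  if (op != NULL && (op->flags & SETS1) != 0)
    {
      /* The SETS1 register lives in bits 8-11 (r5 here) and the
         USES2 register in bits 4-7 (r0 here).  */
    }
}
#endif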
1877
1878 /* See whether an instruction uses or sets a general purpose register. */
1879
1880 static boolean
1881 sh_insn_uses_or_sets_reg (insn, op, reg)
1882 unsigned int insn;
1883 const struct sh_opcode *op;
1884 unsigned int reg;
1885 {
1886 if (sh_insn_uses_reg (insn, op, reg))
1887 return true;
1888
1889 return sh_insn_sets_reg (insn, op, reg);
1890 }
1891
1892 /* See whether an instruction uses a general purpose register. */
1893
1894 static boolean
1895 sh_insn_uses_reg (insn, op, reg)
1896 unsigned int insn;
1897 const struct sh_opcode *op;
1898 unsigned int reg;
1899 {
1900 unsigned int f;
1901
1902 f = op->flags;
1903
1904 if ((f & USES1) != 0
1905 && USES1_REG (insn) == reg)
1906 return true;
1907 if ((f & USES2) != 0
1908 && USES2_REG (insn) == reg)
1909 return true;
1910 if ((f & USESR0) != 0
1911 && reg == 0)
1912 return true;
1913
1914 return false;
1915 }

1916 /* See whether an instruction sets a general purpose register. */
1917
1918 static boolean
1919 sh_insn_sets_reg (insn, op, reg)
1920 unsigned int insn;
1921 const struct sh_opcode *op;
1922 unsigned int reg;
1923 {
1924 unsigned int f;
1925
1926 f = op->flags;
1927
1928 if ((f & SETS1) != 0
1929 && SETS1_REG (insn) == reg)
1930 return true;
1931 if ((f & SETS2) != 0
1932 && SETS2_REG (insn) == reg)
1933 return true;
1934 if ((f & SETSR0) != 0
1935 && reg == 0)
1936 return true;
1937
1938 return false;
1939 }
1940
1941 /* See whether an instruction uses or sets a floating point register. */
1942
1943 static boolean
1944 sh_insn_uses_or_sets_freg (insn, op, reg)
1945 unsigned int insn;
1946 const struct sh_opcode *op;
1947 unsigned int reg;
1948 {
1949 if (sh_insn_uses_freg (insn, op, reg))
1950 return true;
1951
1952 return sh_insn_sets_freg (insn, op, reg);
1953 }
1954
1955 /* See whether an instruction uses a floating point register. */
1956
1957 static boolean
1958 sh_insn_uses_freg (insn, op, freg)
1959 unsigned int insn;
1960 const struct sh_opcode *op;
1961 unsigned int freg;
1962 {
1963 unsigned int f;
1964
1965 f = op->flags;
1966
1967 /* We can't tell if this is a double-precision insn, so just play safe
1968 and assume that it might be. That means we must not only test FREG
1969 against itself, but also an even FREG against FREG+1 (in case the
1970 using insn uses only the low part of a double precision value) and
1971 an odd FREG against FREG-1 (in case the setting insn sets only the
1972 low part of a double precision value).
1973 What this all boils down to is that we have to ignore the lowest
1974 bit of the register number. */
1975
1976 if ((f & USESF1) != 0
1977 && (USESF1_REG (insn) & 0xe) == (freg & 0xe))
1978 return true;
1979 if ((f & USESF2) != 0
1980 && (USESF2_REG (insn) & 0xe) == (freg & 0xe))
1981 return true;
1982 if ((f & USESF0) != 0
1983 && freg == 0)
1984 return true;
1985
1986 return false;
1987 }
1988
1989 /* See whether an instruction sets a floating point register. */
1990
1991 static boolean
1992 sh_insn_sets_freg (insn, op, freg)
1993 unsigned int insn;
1994 const struct sh_opcode *op;
1995 unsigned int freg;
1996 {
1997 unsigned int f;
1998
1999 f = op->flags;
2000
2001 /* We can't tell if this is a double-precision insn, so just play safe
2002 and assume that it might be. That means we must not only test FREG
2003 against itself, but also an even FREG against FREG+1 (in case the
2004 using insn uses only the low part of a double precision value) and
2005 an odd FREG against FREG-1 (in case the setting insn sets only the
2006 low part of a double precision value).
2007 What this all boils down to is that we have to ignore the lowest
2008 bit of the register number. */
2009
2010 if ((f & SETSF1) != 0
2011 && (SETSF1_REG (insn) & 0xe) == (freg & 0xe))
2012 return true;
2013
2014 return false;
2015 }
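
/* An illustrative sketch, not part of the backend proper, of the
   masking used above: clearing the low bit groups each even/odd pair
   of floating point registers that can hold one double precision
   value.  */
#if 0
static void
sh_freg_pair_example ()
{
  unsigned int low = 6 & 0xe;    /* == 6: fr6, low half of a pair.  */
  unsigned int high = 7 & 0xe;   /* == 6: fr7 maps to the same pair.  */
  unsigned int other = 8 & 0xe;  /* == 8: fr8 belongs to another pair.  */

  /* So a use of fr7 is treated as possibly depending on a set of fr6,
     and vice versa, which is what the comparisons above implement.  */
}
#endif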
2016
2017 /* See whether instructions I1 and I2 conflict, assuming I1 comes
2018 before I2. OP1 and OP2 are the corresponding sh_opcode structures.
2019 This should return true if there is a conflict, or false if the
2020 instructions can be swapped safely. */
2021
2022 static boolean
2023 sh_insns_conflict (i1, op1, i2, op2)
2024 unsigned int i1;
2025 const struct sh_opcode *op1;
2026 unsigned int i2;
2027 const struct sh_opcode *op2;
2028 {
2029 unsigned int f1, f2;
2030
2031 f1 = op1->flags;
2032 f2 = op2->flags;
2033
2034 /* Load of fpscr conflicts with floating point operations.
2035 FIXME: shouldn't test raw opcodes here. */
2036 if (((i1 & 0xf0ff) == 0x4066 && (i2 & 0xf000) == 0xf000)
2037 || ((i2 & 0xf0ff) == 0x4066 && (i1 & 0xf000) == 0xf000))
2038 return true;
2039
2040 if ((f1 & (BRANCH | DELAY)) != 0
2041 || (f2 & (BRANCH | DELAY)) != 0)
2042 return true;
2043
2044 if (((f1 | f2) & SETSSP)
2045 && (f1 & (SETSSP | USESSP))
2046 && (f2 & (SETSSP | USESSP)))
2047 return true;
2048
2049 if ((f1 & SETS1) != 0
2050 && sh_insn_uses_or_sets_reg (i2, op2, SETS1_REG (i1)))
2051 return true;
2052 if ((f1 & SETS2) != 0
2053 && sh_insn_uses_or_sets_reg (i2, op2, SETS2_REG (i1)))
2054 return true;
2055 if ((f1 & SETSR0) != 0
2056 && sh_insn_uses_or_sets_reg (i2, op2, 0))
2057 return true;
2058 if ((f1 & SETSF1) != 0
2059 && sh_insn_uses_or_sets_freg (i2, op2, SETSF1_REG (i1)))
2060 return true;
2061
2062 if ((f2 & SETS1) != 0
2063 && sh_insn_uses_or_sets_reg (i1, op1, SETS1_REG (i2)))
2064 return true;
2065 if ((f2 & SETS2) != 0
2066 && sh_insn_uses_or_sets_reg (i1, op1, SETS2_REG (i2)))
2067 return true;
2068 if ((f2 & SETSR0) != 0
2069 && sh_insn_uses_or_sets_reg (i1, op1, 0))
2070 return true;
2071 if ((f2 & SETSF1) != 0
2072 && sh_insn_uses_or_sets_freg (i1, op1, SETSF1_REG (i2)))
2073 return true;
2074
2075 /* The instructions do not conflict. */
2076 return false;
2077 }
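
/* An illustrative sketch, not part of the backend proper: one pair of
   insns that the conflict test above rejects and one that it accepts.
   The encodings are built from the opcode tables above.  */
#if 0
static void
sh_insns_conflict_example ()
{
  unsigned int set_r1 = 0xe105;  /* mov #5,r1 - sets r1.  */
  unsigned int use_r1 = 0x6213;  /* mov r1,r2 - reads r1.  */
  unsigned int other = 0x6543;   /* mov r4,r5 - touches neither r1 nor r2.  */

  /* Conflict: the second insn reads the register the first one sets,
     so this pair must not be reordered.  */
  if (sh_insns_conflict (set_r1, sh_insn_info (set_r1),
                         use_r1, sh_insn_info (use_r1)))
    ;

  /* No conflict: disjoint registers, no branches, delay slots or
     special register traffic, so this pair may be swapped.  */
  if (! sh_insns_conflict (set_r1, sh_insn_info (set_r1),
                           other, sh_insn_info (other)))
    ;
}
#endif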
2078
2079 /* I1 is a load instruction, and I2 is some other instruction. Return
2080 true if I1 loads a register which I2 uses. */
2081
2082 static boolean
2083 sh_load_use (i1, op1, i2, op2)
2084 unsigned int i1;
2085 const struct sh_opcode *op1;
2086 unsigned int i2;
2087 const struct sh_opcode *op2;
2088 {
2089 unsigned int f1;
2090
2091 f1 = op1->flags;
2092
2093 if ((f1 & LOAD) == 0)
2094 return false;
2095
2096 /* If both SETS1 and SETSSP are set, that means a load to a special
2097 register using postincrement addressing mode, which we don't care
2098 about here. */
2099 if ((f1 & SETS1) != 0
2100 && (f1 & SETSSP) == 0
2101 && sh_insn_uses_reg (i2, op2, (i1 & 0x0f00) >> 8))
2102 return true;
2103
2104 if ((f1 & SETSR0) != 0
2105 && sh_insn_uses_reg (i2, op2, 0))
2106 return true;
2107
2108 if ((f1 & SETSF1) != 0
2109 && sh_insn_uses_freg (i2, op2, (i1 & 0x0f00) >> 8))
2110 return true;
2111
2112 return false;
2113 }
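
/* An illustrative sketch, not part of the backend proper, of the
   load/use delay the test above detects.  Encodings come from the
   opcode tables in this file.  */
#if 0
static void
sh_load_use_example ()
{
  unsigned int ld = 0x6102;   /* mov.l @r0,r1 - loads into r1.  */
  unsigned int use = 0x312c;  /* add r2,r1    - reads r1.  */

  /* The add would read r1 immediately after the load writes it, which
     costs a pipeline bubble, so sh_load_use returns true and the
     alignment code below avoids creating that ordering.  */
  if (sh_load_use (ld, sh_insn_info (ld), use, sh_insn_info (use)))
    ;
}
#endif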
2114
2115 /* Try to align loads and stores within a span of memory. This is
2116 called by both the ELF and the COFF sh targets. ABFD and SEC are
2117 the BFD and section we are examining. CONTENTS is the contents of
2118 the section. SWAP is the routine to call to swap two instructions.
2119 RELOCS is a pointer to the internal relocation information, to be
2120 passed to SWAP. PLABEL is a pointer to the current label in a
2121 sorted list of labels; LABEL_END is the end of the list. START and
2122 STOP are the range of memory to examine. If a swap is made,
2123 *PSWAPPED is set to true. */
2124
2125 boolean
2126 _bfd_sh_align_load_span (abfd, sec, contents, swap, relocs,
2127 plabel, label_end, start, stop, pswapped)
2128 bfd *abfd;
2129 asection *sec;
2130 bfd_byte *contents;
2131 boolean (*swap) PARAMS ((bfd *, asection *, PTR, bfd_byte *, bfd_vma));
2132 PTR relocs;
2133 bfd_vma **plabel;
2134 bfd_vma *label_end;
2135 bfd_vma start;
2136 bfd_vma stop;
2137 boolean *pswapped;
2138 {
2139 bfd_vma i;
2140
2141 /* Instructions should be aligned on 2 byte boundaries. */
2142 if ((start & 1) == 1)
2143 ++start;
2144
2145 /* Now look through the unaligned addresses. */
2146 i = start;
2147 if ((i & 2) == 0)
2148 i += 2;
2149 for (; i < stop; i += 4)
2150 {
2151 unsigned int insn;
2152 const struct sh_opcode *op;
2153 unsigned int prev_insn = 0;
2154 const struct sh_opcode *prev_op = NULL;
2155
2156 insn = bfd_get_16 (abfd, contents + i);
2157 op = sh_insn_info (insn);
2158 if (op == NULL
2159 || (op->flags & (LOAD | STORE)) == 0)
2160 continue;
2161
2162 /* This is a load or store which is not on a four byte boundary. */
2163
2164 while (*plabel < label_end && **plabel < i)
2165 ++*plabel;
2166
2167 if (i > start)
2168 {
2169 prev_insn = bfd_get_16 (abfd, contents + i - 2);
2170 prev_op = sh_insn_info (prev_insn);
2171
2172 /* If the load/store instruction is in a delay slot, we
2173 can't swap. */
2174 if (prev_op == NULL
2175 || (prev_op->flags & DELAY) != 0)
2176 continue;
2177 }
2178 if (i > start
2179 && (*plabel >= label_end || **plabel != i)
2180 && prev_op != NULL
2181 && (prev_op->flags & (LOAD | STORE)) == 0
2182 && ! sh_insns_conflict (prev_insn, prev_op, insn, op))
2183 {
2184 boolean ok;
2185
2186 /* The load/store instruction does not have a label, and
2187 there is a previous instruction; PREV_INSN is not
2188 itself a load/store instruction, and PREV_INSN and
2189 INSN do not conflict. */
2190
2191 ok = true;
2192
2193 if (i >= start + 4)
2194 {
2195 unsigned int prev2_insn;
2196 const struct sh_opcode *prev2_op;
2197
2198 prev2_insn = bfd_get_16 (abfd, contents + i - 4);
2199 prev2_op = sh_insn_info (prev2_insn);
2200
2201 /* If the instruction before PREV_INSN has a delay
2202 slot--that is, PREV_INSN is in a delay slot--we
2203 can not swap. */
2204 if (prev2_op == NULL
2205 || (prev2_op->flags & DELAY) != 0)
2206 ok = false;
2207
2208 /* If the instruction before PREV_INSN is a load,
2209 and it sets a register which INSN uses, then
2210 putting INSN immediately after PREV_INSN will
2211 cause a pipeline bubble, so there is no point in
2212 making the swap. */
2213 if (ok
2214 && (prev2_op->flags & LOAD) != 0
2215 && sh_load_use (prev2_insn, prev2_op, insn, op))
2216 ok = false;
2217 }
2218
2219 if (ok)
2220 {
2221 if (! (*swap) (abfd, sec, relocs, contents, i - 2))
2222 return false;
2223 *pswapped = true;
2224 continue;
2225 }
2226 }
2227
2228 while (*plabel < label_end && **plabel < i + 2)
2229 ++*plabel;
2230
2231 if (i + 2 < stop
2232 && (*plabel >= label_end || **plabel != i + 2))
2233 {
2234 unsigned int next_insn;
2235 const struct sh_opcode *next_op;
2236
2237 /* There is an instruction after the load/store
2238 instruction, and it does not have a label. */
2239 next_insn = bfd_get_16 (abfd, contents + i + 2);
2240 next_op = sh_insn_info (next_insn);
2241 if (next_op != NULL
2242 && (next_op->flags & (LOAD | STORE)) == 0
2243 && ! sh_insns_conflict (insn, op, next_insn, next_op))
2244 {
2245 boolean ok;
2246
2247 /* NEXT_INSN is not itself a load/store instruction,
2248 and it does not conflict with INSN. */
2249
2250 ok = true;
2251
2252 /* If PREV_INSN is a load, and it sets a register
2253 which NEXT_INSN uses, then putting NEXT_INSN
2254 immediately after PREV_INSN will cause a pipeline
2255 bubble, so there is no reason to make this swap. */
2256 if (prev_op != NULL
2257 && (prev_op->flags & LOAD) != 0
2258 && sh_load_use (prev_insn, prev_op, next_insn, next_op))
2259 ok = false;
2260
2261 /* If INSN is a load, and it sets a register which
2262 the insn after NEXT_INSN uses, then doing the
2263 swap will cause a pipeline bubble, so there is no
2264 reason to make the swap. However, if the insn
2265 after NEXT_INSN is itself a load or store
2266 instruction, then it is misaligned, so
2267 optimistically hope that it will be swapped
2268 itself, and just live with the pipeline bubble if
2269 it isn't. */
2270 if (ok
2271 && i + 4 < stop
2272 && (op->flags & LOAD) != 0)
2273 {
2274 unsigned int next2_insn;
2275 const struct sh_opcode *next2_op;
2276
2277 next2_insn = bfd_get_16 (abfd, contents + i + 4);
2278 next2_op = sh_insn_info (next2_insn);
2279 if (next2_op != NULL && (next2_op->flags & (LOAD | STORE)) == 0
2280 && sh_load_use (insn, op, next2_insn, next2_op))
2281 ok = false;
2282 }
2283
2284 if (ok)
2285 {
2286 if (! (*swap) (abfd, sec, relocs, contents, i))
2287 return false;
2288 *pswapped = true;
2289 continue;
2290 }
2291 }
2292 }
2293 }
2294
2295 return true;
2296 }
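
/* A worked example of the transformation above; the offsets and insns
   are illustrative only.  Suppose the section contains

     0x100: mov   #5,r1
     0x102: mov.l @r2,r3     <- load on a non four byte boundary
     0x104: ...

   The load at 0x102 has no label, is not in a delay slot, follows an
   insn which is neither a load nor a store, and the two insns touch
   disjoint registers, so the pair is swapped:

     0x100: mov.l @r2,r3     <- load now on a four byte boundary
     0x102: mov   #5,r1

   See the longer comment above sh_relax_section for why this
   alignment is desirable.  */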
2297
2298 /* Look for loads and stores which we can align to four byte
2299 boundaries. See the longer comment above sh_relax_section for why
2300 this is desirable. This sets *PSWAPPED if some instruction was
2301 swapped. */
2302
2303 static boolean
2304 sh_align_loads (abfd, sec, internal_relocs, contents, pswapped)
2305 bfd *abfd;
2306 asection *sec;
2307 struct internal_reloc *internal_relocs;
2308 bfd_byte *contents;
2309 boolean *pswapped;
2310 {
2311 struct internal_reloc *irel, *irelend;
2312 bfd_vma *labels = NULL;
2313 bfd_vma *label, *label_end;
2314
2315 *pswapped = false;
2316
2317 irelend = internal_relocs + sec->reloc_count;
2318
2319 /* Get all the addresses with labels on them. */
2320 labels = (bfd_vma *) bfd_malloc (sec->reloc_count * sizeof (bfd_vma));
2321 if (labels == NULL)
2322 goto error_return;
2323 label_end = labels;
2324 for (irel = internal_relocs; irel < irelend; irel++)
2325 {
2326 if (irel->r_type == R_SH_LABEL)
2327 {
2328 *label_end = irel->r_vaddr - sec->vma;
2329 ++label_end;
2330 }
2331 }
2332
2333 /* Note that the assembler currently always outputs relocs in
2334 address order. If that ever changes, this code will need to sort
2335 the label values and the relocs. */
2336
2337 label = labels;
2338
2339 for (irel = internal_relocs; irel < irelend; irel++)
2340 {
2341 bfd_vma start, stop;
2342
2343 if (irel->r_type != R_SH_CODE)
2344 continue;
2345
2346 start = irel->r_vaddr - sec->vma;
2347
2348 for (irel++; irel < irelend; irel++)
2349 if (irel->r_type == R_SH_DATA)
2350 break;
2351 if (irel < irelend)
2352 stop = irel->r_vaddr - sec->vma;
2353 else
2354 stop = sec->_cooked_size;
2355
2356 if (! _bfd_sh_align_load_span (abfd, sec, contents, sh_swap_insns,
2357 (PTR) internal_relocs, &label,
2358 label_end, start, stop, pswapped))
2359 goto error_return;
2360 }
2361
2362 free (labels);
2363
2364 return true;
2365
2366 error_return:
2367 if (labels != NULL)
2368 free (labels);
2369 return false;
2370 }
2371
2372 /* Swap two SH instructions. */
2373
2374 static boolean
2375 sh_swap_insns (abfd, sec, relocs, contents, addr)
2376 bfd *abfd;
2377 asection *sec;
2378 PTR relocs;
2379 bfd_byte *contents;
2380 bfd_vma addr;
2381 {
2382 struct internal_reloc *internal_relocs = (struct internal_reloc *) relocs;
2383 unsigned short i1, i2;
2384 struct internal_reloc *irel, *irelend;
2385
2386 /* Swap the instructions themselves. */
2387 i1 = bfd_get_16 (abfd, contents + addr);
2388 i2 = bfd_get_16 (abfd, contents + addr + 2);
2389 bfd_put_16 (abfd, i2, contents + addr);
2390 bfd_put_16 (abfd, i1, contents + addr + 2);
2391
2392 /* Adjust all reloc addresses. */
2393 irelend = internal_relocs + sec->reloc_count;
2394 for (irel = internal_relocs; irel < irelend; irel++)
2395 {
2396 int type, add;
2397
2398 /* There are a few special types of relocs that we don't want to
2399 adjust. These relocs do not apply to the instruction itself,
2400 but are only associated with the address. */
2401 type = irel->r_type;
2402 if (type == R_SH_ALIGN
2403 || type == R_SH_CODE
2404 || type == R_SH_DATA
2405 || type == R_SH_LABEL)
2406 continue;
2407
2408 /* If an R_SH_USES reloc points to one of the addresses being
2409 swapped, we must adjust it. It would be incorrect to do this
2410 for a jump, though, since we want to execute both
2411 instructions after the jump. (We have avoided swapping
2412 around a label, so the jump will not wind up executing an
2413 instruction it shouldn't). */
2414 if (type == R_SH_USES)
2415 {
2416 bfd_vma off;
2417
2418 off = irel->r_vaddr - sec->vma + 4 + irel->r_offset;
2419 if (off == addr)
2420 irel->r_offset += 2;
2421 else if (off == addr + 2)
2422 irel->r_offset -= 2;
2423 }
2424
2425 if (irel->r_vaddr - sec->vma == addr)
2426 {
2427 irel->r_vaddr += 2;
2428 add = -2;
2429 }
2430 else if (irel->r_vaddr - sec->vma == addr + 2)
2431 {
2432 irel->r_vaddr -= 2;
2433 add = 2;
2434 }
2435 else
2436 add = 0;
2437
2438 if (add != 0)
2439 {
2440 bfd_byte *loc;
2441 unsigned short insn, oinsn;
2442 boolean overflow;
2443
2444 loc = contents + irel->r_vaddr - sec->vma;
2445 overflow = false;
2446 switch (type)
2447 {
2448 default:
2449 break;
2450
2451 case R_SH_PCDISP8BY2:
2452 case R_SH_PCRELIMM8BY2:
2453 insn = bfd_get_16 (abfd, loc);
2454 oinsn = insn;
2455 insn += add / 2;
2456 if ((oinsn & 0xff00) != (insn & 0xff00))
2457 overflow = true;
2458 bfd_put_16 (abfd, insn, loc);
2459 break;
2460
2461 case R_SH_PCDISP:
2462 insn = bfd_get_16 (abfd, loc);
2463 oinsn = insn;
2464 insn += add / 2;
2465 if ((oinsn & 0xf000) != (insn & 0xf000))
2466 overflow = true;
2467 bfd_put_16 (abfd, insn, loc);
2468 break;
2469
2470 case R_SH_PCRELIMM8BY4:
2471 /* This reloc ignores the least significant two bits of
2472 the program counter before adding in the offset.
2473 This means that if ADDR is on a four byte boundary, the
2474 swap will not affect the offset. If ADDR is not on a
2475 four byte boundary, the instruction crosses a four byte
2476 boundary and must be adjusted. */
2477 if ((addr & 3) != 0)
2478 {
2479 insn = bfd_get_16 (abfd, loc);
2480 oinsn = insn;
2481 insn += add / 2;
2482 if ((oinsn & 0xff00) != (insn & 0xff00))
2483 overflow = true;
2484 bfd_put_16 (abfd, insn, loc);
2485 }
2486
2487 break;
2488 }
2489
2490 if (overflow)
2491 {
2492 ((*_bfd_error_handler)
2493 ("%s: 0x%lx: fatal: reloc overflow while relaxing",
2494 bfd_get_filename (abfd), (unsigned long) irel->r_vaddr));
2495 bfd_set_error (bfd_error_bad_value);
2496 return false;
2497 }
2498 }
2499 }
2500
2501 return true;
2502 }
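
/* A worked example of the adjustment above; the addresses are
   illustrative only.  Suppose the insn at ADDR carries an R_SH_PCDISP
   reloc (a pc-relative branch) and is swapped with the insn at
   ADDR + 2.  Its r_vaddr is bumped by 2 and ADD is -2, so ADD / 2 == -1
   is added to the displacement field, which counts in two byte units:
   the branch now sits two bytes later, so the displacement to any
   fixed target drops by one unit and the resolved target address is
   unchanged.  An insn moving down from ADDR + 2 to ADDR gets the
   opposite treatment.  */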
2503 \f
2504 /* This is a modification of _bfd_coff_generic_relocate_section, which
2505 will handle SH relaxing. */
2506
2507 static boolean
2508 sh_relocate_section (output_bfd, info, input_bfd, input_section, contents,
2509 relocs, syms, sections)
2510 bfd *output_bfd ATTRIBUTE_UNUSED;
2511 struct bfd_link_info *info;
2512 bfd *input_bfd;
2513 asection *input_section;
2514 bfd_byte *contents;
2515 struct internal_reloc *relocs;
2516 struct internal_syment *syms;
2517 asection **sections;
2518 {
2519 struct internal_reloc *rel;
2520 struct internal_reloc *relend;
2521
2522 rel = relocs;
2523 relend = rel + input_section->reloc_count;
2524 for (; rel < relend; rel++)
2525 {
2526 long symndx;
2527 struct coff_link_hash_entry *h;
2528 struct internal_syment *sym;
2529 bfd_vma addend;
2530 bfd_vma val;
2531 reloc_howto_type *howto;
2532 bfd_reloc_status_type rstat;
2533
2534 /* Almost all relocs have to do with relaxing. If any work must
2535 be done for them, it has been done in sh_relax_section. */
2536 if (rel->r_type != R_SH_IMM32
2537 && rel->r_type != R_SH_PCDISP)
2538 continue;
2539
2540 symndx = rel->r_symndx;
2541
2542 if (symndx == -1)
2543 {
2544 h = NULL;
2545 sym = NULL;
2546 }
2547 else
2548 {
2549 if (symndx < 0
2550 || (unsigned long) symndx >= obj_raw_syment_count (input_bfd))
2551 {
2552 (*_bfd_error_handler)
2553 ("%s: illegal symbol index %ld in relocs",
2554 bfd_get_filename (input_bfd), symndx);
2555 bfd_set_error (bfd_error_bad_value);
2556 return false;
2557 }
2558 h = obj_coff_sym_hashes (input_bfd)[symndx];
2559 sym = syms + symndx;
2560 }
2561
2562 if (sym != NULL && sym->n_scnum != 0)
2563 addend = - sym->n_value;
2564 else
2565 addend = 0;
2566
2567 if (rel->r_type == R_SH_PCDISP)
2568 addend -= 4;
2569
2570 if (rel->r_type >= SH_COFF_HOWTO_COUNT)
2571 howto = NULL;
2572 else
2573 howto = &sh_coff_howtos[rel->r_type];
2574
2575 if (howto == NULL)
2576 {
2577 bfd_set_error (bfd_error_bad_value);
2578 return false;
2579 }
2580
2581 val = 0;
2582
2583 if (h == NULL)
2584 {
2585 asection *sec;
2586
2587 /* There is nothing to do for an internal PCDISP reloc. */
2588 if (rel->r_type == R_SH_PCDISP)
2589 continue;
2590
2591 if (symndx == -1)
2592 {
2593 sec = bfd_abs_section_ptr;
2594 val = 0;
2595 }
2596 else
2597 {
2598 sec = sections[symndx];
2599 val = (sec->output_section->vma
2600 + sec->output_offset
2601 + sym->n_value
2602 - sec->vma);
2603 }
2604 }
2605 else
2606 {
2607 if (h->root.type == bfd_link_hash_defined
2608 || h->root.type == bfd_link_hash_defweak)
2609 {
2610 asection *sec;
2611
2612 sec = h->root.u.def.section;
2613 val = (h->root.u.def.value
2614 + sec->output_section->vma
2615 + sec->output_offset);
2616 }
2617 else if (! info->relocateable)
2618 {
2619 if (! ((*info->callbacks->undefined_symbol)
2620 (info, h->root.root.string, input_bfd, input_section,
2621 rel->r_vaddr - input_section->vma)))
2622 return false;
2623 }
2624 }
2625
2626 rstat = _bfd_final_link_relocate (howto, input_bfd, input_section,
2627 contents,
2628 rel->r_vaddr - input_section->vma,
2629 val, addend);
2630
2631 switch (rstat)
2632 {
2633 default:
2634 abort ();
2635 case bfd_reloc_ok:
2636 break;
2637 case bfd_reloc_overflow:
2638 {
2639 const char *name;
2640 char buf[SYMNMLEN + 1];
2641
2642 if (symndx == -1)
2643 name = "*ABS*";
2644 else if (h != NULL)
2645 name = h->root.root.string;
2646 else if (sym->_n._n_n._n_zeroes == 0
2647 && sym->_n._n_n._n_offset != 0)
2648 name = obj_coff_strings (input_bfd) + sym->_n._n_n._n_offset;
2649 else
2650 {
2651 strncpy (buf, sym->_n._n_name, SYMNMLEN);
2652 buf[SYMNMLEN] = '\0';
2653 name = buf;
2654 }
2655
2656 if (! ((*info->callbacks->reloc_overflow)
2657 (info, name, howto->name, (bfd_vma) 0, input_bfd,
2658 input_section, rel->r_vaddr - input_section->vma)))
2659 return false;
2660 }
2661 }
2662 }
2663
2664 return true;
2665 }
2666
2667 /* This is a version of bfd_generic_get_relocated_section_contents
2668 which uses sh_relocate_section. */
2669
2670 static bfd_byte *
2671 sh_coff_get_relocated_section_contents (output_bfd, link_info, link_order,
2672 data, relocateable, symbols)
2673 bfd *output_bfd;
2674 struct bfd_link_info *link_info;
2675 struct bfd_link_order *link_order;
2676 bfd_byte *data;
2677 boolean relocateable;
2678 asymbol **symbols;
2679 {
2680 asection *input_section = link_order->u.indirect.section;
2681 bfd *input_bfd = input_section->owner;
2682 asection **sections = NULL;
2683 struct internal_reloc *internal_relocs = NULL;
2684 struct internal_syment *internal_syms = NULL;
2685
2686 /* We only need to handle the case of relaxing, or of having a
2687 particular set of section contents, specially. */
2688 if (relocateable
2689 || coff_section_data (input_bfd, input_section) == NULL
2690 || coff_section_data (input_bfd, input_section)->contents == NULL)
2691 return bfd_generic_get_relocated_section_contents (output_bfd, link_info,
2692 link_order, data,
2693 relocateable,
2694 symbols);
2695
2696 memcpy (data, coff_section_data (input_bfd, input_section)->contents,
2697 input_section->_raw_size);
2698
2699 if ((input_section->flags & SEC_RELOC) != 0
2700 && input_section->reloc_count > 0)
2701 {
2702 bfd_size_type symesz = bfd_coff_symesz (input_bfd);
2703 bfd_byte *esym, *esymend;
2704 struct internal_syment *isymp;
2705 asection **secpp;
2706
2707 if (! _bfd_coff_get_external_symbols (input_bfd))
2708 goto error_return;
2709
2710 internal_relocs = (_bfd_coff_read_internal_relocs
2711 (input_bfd, input_section, false, (bfd_byte *) NULL,
2712 false, (struct internal_reloc *) NULL));
2713 if (internal_relocs == NULL)
2714 goto error_return;
2715
2716 internal_syms = ((struct internal_syment *)
2717 bfd_malloc (obj_raw_syment_count (input_bfd)
2718 * sizeof (struct internal_syment)));
2719 if (internal_syms == NULL)
2720 goto error_return;
2721
2722 sections = (asection **) bfd_malloc (obj_raw_syment_count (input_bfd)
2723 * sizeof (asection *));
2724 if (sections == NULL)
2725 goto error_return;
2726
2727 isymp = internal_syms;
2728 secpp = sections;
2729 esym = (bfd_byte *) obj_coff_external_syms (input_bfd);
2730 esymend = esym + obj_raw_syment_count (input_bfd) * symesz;
2731 while (esym < esymend)
2732 {
2733 bfd_coff_swap_sym_in (input_bfd, (PTR) esym, (PTR) isymp);
2734
2735 if (isymp->n_scnum != 0)
2736 *secpp = coff_section_from_bfd_index (input_bfd, isymp->n_scnum);
2737 else
2738 {
2739 if (isymp->n_value == 0)
2740 *secpp = bfd_und_section_ptr;
2741 else
2742 *secpp = bfd_com_section_ptr;
2743 }
2744
2745 esym += (isymp->n_numaux + 1) * symesz;
2746 secpp += isymp->n_numaux + 1;
2747 isymp += isymp->n_numaux + 1;
2748 }
2749
2750 if (! sh_relocate_section (output_bfd, link_info, input_bfd,
2751 input_section, data, internal_relocs,
2752 internal_syms, sections))
2753 goto error_return;
2754
2755 free (sections);
2756 sections = NULL;
2757 free (internal_syms);
2758 internal_syms = NULL;
2759 free (internal_relocs);
2760 internal_relocs = NULL;
2761 }
2762
2763 return data;
2764
2765 error_return:
2766 if (internal_relocs != NULL)
2767 free (internal_relocs);
2768 if (internal_syms != NULL)
2769 free (internal_syms);
2770 if (sections != NULL)
2771 free (sections);
2772 return NULL;
2773 }
2774
2775 /* The target vectors. */
2776
2777 CREATE_BIG_COFF_TARGET_VEC (shcoff_vec, "coff-sh", BFD_IS_RELAXABLE, 0, '_', NULL)
2778
2779 #ifdef TARGET_SHL_SYM
2780 #define TARGET_SYM TARGET_SHL_SYM
2781 #else
2782 #define TARGET_SYM shlcoff_vec
2783 #endif
2784
2785 #ifndef TARGET_SHL_NAME
2786 #define TARGET_SHL_NAME "coff-shl"
2787 #endif
2788
2789 CREATE_LITTLE_COFF_TARGET_VEC (TARGET_SYM, TARGET_SHL_NAME, BFD_IS_RELAXABLE, 0, '_', NULL)
2790
2791
2792 /* Some people want versions of the SH COFF target which do not align
2793 to 16 byte boundaries. We implement that by adding a couple of new
2794 target vectors. These are just like the ones above, but they
2795 change the default section alignment. To generate them in the
2796 assembler, use -small. To use them in the linker, use -b
2797 coff-sh{l}-small and -oformat coff-sh{l}-small.
2798
2799 Yes, this is a horrible hack. A general solution for setting
2800 section alignment in COFF is rather complex. ELF handles this
2801 correctly. */
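
/* For example (a sketch only; exact option spellings may vary between
   assembler and linker versions):

     as -small -o foo.o foo.s
     ld -b coff-sh-small -oformat coff-sh-small -o foo foo.o

   These invocations use only the options mentioned above.  */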
2802
2803 /* Only recognize the small versions if the target was not defaulted.
2804 Otherwise we won't recognize the non default endianness. */
2805
2806 static const bfd_target *
2807 coff_small_object_p (abfd)
2808 bfd *abfd;
2809 {
2810 if (abfd->target_defaulted)
2811 {
2812 bfd_set_error (bfd_error_wrong_format);
2813 return NULL;
2814 }
2815 return coff_object_p (abfd);
2816 }
2817
2818 /* Set the section alignment for the small versions. */
2819
2820 static boolean
2821 coff_small_new_section_hook (abfd, section)
2822 bfd *abfd;
2823 asection *section;
2824 {
2825 if (! coff_new_section_hook (abfd, section))
2826 return false;
2827
2828 /* We must align to at least a four byte boundary, because longword
2829 accesses must be on a four byte boundary. */
2830 if (section->alignment_power == COFF_DEFAULT_SECTION_ALIGNMENT_POWER)
2831 section->alignment_power = 2;
2832
2833 return true;
2834 }
2835
2836 /* This is copied from bfd_coff_std_swap_table so that we can change
2837 the default section alignment power. */
2838
2839 static const bfd_coff_backend_data bfd_coff_small_swap_table =
2840 {
2841 coff_swap_aux_in, coff_swap_sym_in, coff_swap_lineno_in,
2842 coff_swap_aux_out, coff_swap_sym_out,
2843 coff_swap_lineno_out, coff_swap_reloc_out,
2844 coff_swap_filehdr_out, coff_swap_aouthdr_out,
2845 coff_swap_scnhdr_out,
2846 FILHSZ, AOUTSZ, SCNHSZ, SYMESZ, AUXESZ, RELSZ, LINESZ, FILNMLEN,
2847 #ifdef COFF_LONG_FILENAMES
2848 true,
2849 #else
2850 false,
2851 #endif
2852 #ifdef COFF_LONG_SECTION_NAMES
2853 true,
2854 #else
2855 false,
2856 #endif
2857 2,
2858 coff_swap_filehdr_in, coff_swap_aouthdr_in, coff_swap_scnhdr_in,
2859 coff_swap_reloc_in, coff_bad_format_hook, coff_set_arch_mach_hook,
2860 coff_mkobject_hook, styp_to_sec_flags, coff_set_alignment_hook,
2861 coff_slurp_symbol_table, symname_in_debug_hook, coff_pointerize_aux_hook,
2862 coff_print_aux, coff_reloc16_extra_cases, coff_reloc16_estimate,
2863 coff_classify_symbol, coff_compute_section_file_positions,
2864 coff_start_final_link, coff_relocate_section, coff_rtype_to_howto,
2865 coff_adjust_symndx, coff_link_add_one_symbol,
2866 coff_link_output_has_begun, coff_final_link_postscript
2867 };
2868
2869 #define coff_small_close_and_cleanup \
2870 coff_close_and_cleanup
2871 #define coff_small_bfd_free_cached_info \
2872 coff_bfd_free_cached_info
2873 #define coff_small_get_section_contents \
2874 coff_get_section_contents
2875 #define coff_small_get_section_contents_in_window \
2876 coff_get_section_contents_in_window
2877
2878 extern const bfd_target shlcoff_small_vec;
2879
2880 const bfd_target shcoff_small_vec =
2881 {
2882 "coff-sh-small", /* name */
2883 bfd_target_coff_flavour,
2884 BFD_ENDIAN_BIG, /* data byte order is big */
2885 BFD_ENDIAN_BIG, /* header byte order is big */
2886
2887 (HAS_RELOC | EXEC_P | /* object flags */
2888 HAS_LINENO | HAS_DEBUG |
2889 HAS_SYMS | HAS_LOCALS | WP_TEXT | BFD_IS_RELAXABLE),
2890
2891 (SEC_HAS_CONTENTS | SEC_ALLOC | SEC_LOAD | SEC_RELOC),
2892 '_', /* leading symbol underscore */
2893 '/', /* ar_pad_char */
2894 15, /* ar_max_namelen */
2895 bfd_getb64, bfd_getb_signed_64, bfd_putb64,
2896 bfd_getb32, bfd_getb_signed_32, bfd_putb32,
2897 bfd_getb16, bfd_getb_signed_16, bfd_putb16, /* data */
2898 bfd_getb64, bfd_getb_signed_64, bfd_putb64,
2899 bfd_getb32, bfd_getb_signed_32, bfd_putb32,
2900 bfd_getb16, bfd_getb_signed_16, bfd_putb16, /* hdrs */
2901
2902 {_bfd_dummy_target, coff_small_object_p, /* bfd_check_format */
2903 bfd_generic_archive_p, _bfd_dummy_target},
2904 {bfd_false, coff_mkobject, _bfd_generic_mkarchive, /* bfd_set_format */
2905 bfd_false},
2906 {bfd_false, coff_write_object_contents, /* bfd_write_contents */
2907 _bfd_write_archive_contents, bfd_false},
2908
2909 BFD_JUMP_TABLE_GENERIC (coff_small),
2910 BFD_JUMP_TABLE_COPY (coff),
2911 BFD_JUMP_TABLE_CORE (_bfd_nocore),
2912 BFD_JUMP_TABLE_ARCHIVE (_bfd_archive_coff),
2913 BFD_JUMP_TABLE_SYMBOLS (coff),
2914 BFD_JUMP_TABLE_RELOCS (coff),
2915 BFD_JUMP_TABLE_WRITE (coff),
2916 BFD_JUMP_TABLE_LINK (coff),
2917 BFD_JUMP_TABLE_DYNAMIC (_bfd_nodynamic),
2918
2919 & shlcoff_small_vec,
2920
2921 (PTR) &bfd_coff_small_swap_table
2922 };
2923
2924 const bfd_target shlcoff_small_vec =
2925 {
2926 "coff-shl-small", /* name */
2927 bfd_target_coff_flavour,
2928 BFD_ENDIAN_LITTLE, /* data byte order is little */
2929 BFD_ENDIAN_LITTLE, /* header byte order is little */
2930
2931 (HAS_RELOC | EXEC_P | /* object flags */
2932 HAS_LINENO | HAS_DEBUG |
2933 HAS_SYMS | HAS_LOCALS | WP_TEXT | BFD_IS_RELAXABLE),
2934
2935 (SEC_HAS_CONTENTS | SEC_ALLOC | SEC_LOAD | SEC_RELOC),
2936 '_', /* leading symbol underscore */
2937 '/', /* ar_pad_char */
2938 15, /* ar_max_namelen */
2939 bfd_getl64, bfd_getl_signed_64, bfd_putl64,
2940 bfd_getl32, bfd_getl_signed_32, bfd_putl32,
2941 bfd_getl16, bfd_getl_signed_16, bfd_putl16, /* data */
2942 bfd_getl64, bfd_getl_signed_64, bfd_putl64,
2943 bfd_getl32, bfd_getl_signed_32, bfd_putl32,
2944 bfd_getl16, bfd_getl_signed_16, bfd_putl16, /* hdrs */
2945
2946 {_bfd_dummy_target, coff_small_object_p, /* bfd_check_format */
2947 bfd_generic_archive_p, _bfd_dummy_target},
2948 {bfd_false, coff_mkobject, _bfd_generic_mkarchive, /* bfd_set_format */
2949 bfd_false},
2950 {bfd_false, coff_write_object_contents, /* bfd_write_contents */
2951 _bfd_write_archive_contents, bfd_false},
2952
2953 BFD_JUMP_TABLE_GENERIC (coff_small),
2954 BFD_JUMP_TABLE_COPY (coff),
2955 BFD_JUMP_TABLE_CORE (_bfd_nocore),
2956 BFD_JUMP_TABLE_ARCHIVE (_bfd_archive_coff),
2957 BFD_JUMP_TABLE_SYMBOLS (coff),
2958 BFD_JUMP_TABLE_RELOCS (coff),
2959 BFD_JUMP_TABLE_WRITE (coff),
2960 BFD_JUMP_TABLE_LINK (coff),
2961 BFD_JUMP_TABLE_DYNAMIC (_bfd_nodynamic),
2962
2963 & shcoff_small_vec,
2964
2965 (PTR) &bfd_coff_small_swap_table
2966 };