1 /* Renesas RX specific support for 32-bit ELF.
2 Copyright (C) 2008, 2009, 2010
3 Free Software Foundation, Inc.
4
5 This file is part of BFD, the Binary File Descriptor library.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
20
21 #include "sysdep.h"
22 #include "bfd.h"
23 #include "libbfd.h"
24 #include "elf-bfd.h"
25 #include "elf/rx.h"
26 #include "libiberty.h"
27
28 #define RX_OPCODE_BIG_ENDIAN 0
29
30 #ifdef DEBUG
31 char * rx_get_reloc (long);
32 void rx_dump_symtab (bfd *, void *, void *);
33 #endif
34
35 #define RXREL(n,sz,bit,shift,complain,pcrel) \
36 HOWTO (R_RX_##n, shift, sz, bit, pcrel, 0, complain_overflow_ ## complain, \
37 bfd_elf_generic_reloc, "R_RX_" #n, FALSE, 0, ~0, FALSE)
38
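/* As a rough illustration of the macro above, RXREL (DIR32, 2, 32, 0,
   signed, FALSE) expands to approximately:

     HOWTO (R_RX_DIR32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
            bfd_elf_generic_reloc, "R_RX_DIR32", FALSE, 0, ~0, FALSE)

   i.e. no right shift, size code 2, a 32-bit field, not PC-relative,
   signed overflow checking, and a full destination mask.  */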
39 /* Note that the relocations around 0x7f are internal to this file;
40 feel free to move them as needed to avoid conflicts with published
41 relocation numbers. */
42
43 static reloc_howto_type rx_elf_howto_table [] =
44 {
45 RXREL (NONE, 0, 0, 0, dont, FALSE),
46 RXREL (DIR32, 2, 32, 0, signed, FALSE),
47 RXREL (DIR24S, 2, 24, 0, signed, FALSE),
48 RXREL (DIR16, 1, 16, 0, dont, FALSE),
49 RXREL (DIR16U, 1, 16, 0, unsigned, FALSE),
50 RXREL (DIR16S, 1, 16, 0, signed, FALSE),
51 RXREL (DIR8, 0, 8, 0, dont, FALSE),
52 RXREL (DIR8U, 0, 8, 0, unsigned, FALSE),
53 RXREL (DIR8S, 0, 8, 0, signed, FALSE),
54 RXREL (DIR24S_PCREL, 2, 24, 0, signed, TRUE),
55 RXREL (DIR16S_PCREL, 1, 16, 0, signed, TRUE),
56 RXREL (DIR8S_PCREL, 0, 8, 0, signed, TRUE),
57 RXREL (DIR16UL, 1, 16, 2, unsigned, FALSE),
58 RXREL (DIR16UW, 1, 16, 1, unsigned, FALSE),
59 RXREL (DIR8UL, 0, 8, 2, unsigned, FALSE),
60 RXREL (DIR8UW, 0, 8, 1, unsigned, FALSE),
61 RXREL (DIR32_REV, 1, 16, 0, dont, FALSE),
62 RXREL (DIR16_REV, 1, 16, 0, dont, FALSE),
63 RXREL (DIR3U_PCREL, 0, 3, 0, dont, TRUE),
64
65 EMPTY_HOWTO (0x13),
66 EMPTY_HOWTO (0x14),
67 EMPTY_HOWTO (0x15),
68 EMPTY_HOWTO (0x16),
69 EMPTY_HOWTO (0x17),
70 EMPTY_HOWTO (0x18),
71 EMPTY_HOWTO (0x19),
72 EMPTY_HOWTO (0x1a),
73 EMPTY_HOWTO (0x1b),
74 EMPTY_HOWTO (0x1c),
75 EMPTY_HOWTO (0x1d),
76 EMPTY_HOWTO (0x1e),
77 EMPTY_HOWTO (0x1f),
78
79 RXREL (RH_3_PCREL, 0, 3, 0, signed, TRUE),
80 RXREL (RH_16_OP, 1, 16, 0, signed, FALSE),
81 RXREL (RH_24_OP, 2, 24, 0, signed, FALSE),
82 RXREL (RH_32_OP, 2, 32, 0, signed, FALSE),
83 RXREL (RH_24_UNS, 2, 24, 0, unsigned, FALSE),
84 RXREL (RH_8_NEG, 0, 8, 0, signed, FALSE),
85 RXREL (RH_16_NEG, 1, 16, 0, signed, FALSE),
86 RXREL (RH_24_NEG, 2, 24, 0, signed, FALSE),
87 RXREL (RH_32_NEG, 2, 32, 0, signed, FALSE),
88 RXREL (RH_DIFF, 2, 32, 0, signed, FALSE),
89 RXREL (RH_GPRELB, 1, 16, 0, unsigned, FALSE),
90 RXREL (RH_GPRELW, 1, 16, 0, unsigned, FALSE),
91 RXREL (RH_GPRELL, 1, 16, 0, unsigned, FALSE),
92 RXREL (RH_RELAX, 0, 0, 0, dont, FALSE),
93
94 EMPTY_HOWTO (0x2e),
95 EMPTY_HOWTO (0x2f),
96 EMPTY_HOWTO (0x30),
97 EMPTY_HOWTO (0x31),
98 EMPTY_HOWTO (0x32),
99 EMPTY_HOWTO (0x33),
100 EMPTY_HOWTO (0x34),
101 EMPTY_HOWTO (0x35),
102 EMPTY_HOWTO (0x36),
103 EMPTY_HOWTO (0x37),
104 EMPTY_HOWTO (0x38),
105 EMPTY_HOWTO (0x39),
106 EMPTY_HOWTO (0x3a),
107 EMPTY_HOWTO (0x3b),
108 EMPTY_HOWTO (0x3c),
109 EMPTY_HOWTO (0x3d),
110 EMPTY_HOWTO (0x3e),
111 EMPTY_HOWTO (0x3f),
112 EMPTY_HOWTO (0x40),
113
114 RXREL (ABS32, 2, 32, 0, dont, FALSE),
115 RXREL (ABS24S, 2, 24, 0, signed, FALSE),
116 RXREL (ABS16, 1, 16, 0, dont, FALSE),
117 RXREL (ABS16U, 1, 16, 0, unsigned, FALSE),
118 RXREL (ABS16S, 1, 16, 0, signed, FALSE),
119 RXREL (ABS8, 0, 8, 0, dont, FALSE),
120 RXREL (ABS8U, 0, 8, 0, unsigned, FALSE),
121 RXREL (ABS8S, 0, 8, 0, signed, FALSE),
122 RXREL (ABS24S_PCREL, 2, 24, 0, signed, TRUE),
123 RXREL (ABS16S_PCREL, 1, 16, 0, signed, TRUE),
124 RXREL (ABS8S_PCREL, 0, 8, 0, signed, TRUE),
125 RXREL (ABS16UL, 1, 16, 0, unsigned, FALSE),
126 RXREL (ABS16UW, 1, 16, 0, unsigned, FALSE),
127 RXREL (ABS8UL, 0, 8, 0, unsigned, FALSE),
128 RXREL (ABS8UW, 0, 8, 0, unsigned, FALSE),
129 RXREL (ABS32_REV, 2, 32, 0, dont, FALSE),
130 RXREL (ABS16_REV, 1, 16, 0, dont, FALSE),
131
132 #define STACK_REL_P(x) ((x) <= R_RX_ABS16_REV && (x) >= R_RX_ABS32)
133
134 EMPTY_HOWTO (0x52),
135 EMPTY_HOWTO (0x53),
136 EMPTY_HOWTO (0x54),
137 EMPTY_HOWTO (0x55),
138 EMPTY_HOWTO (0x56),
139 EMPTY_HOWTO (0x57),
140 EMPTY_HOWTO (0x58),
141 EMPTY_HOWTO (0x59),
142 EMPTY_HOWTO (0x5a),
143 EMPTY_HOWTO (0x5b),
144 EMPTY_HOWTO (0x5c),
145 EMPTY_HOWTO (0x5d),
146 EMPTY_HOWTO (0x5e),
147 EMPTY_HOWTO (0x5f),
148 EMPTY_HOWTO (0x60),
149 EMPTY_HOWTO (0x61),
150 EMPTY_HOWTO (0x62),
151 EMPTY_HOWTO (0x63),
152 EMPTY_HOWTO (0x64),
153 EMPTY_HOWTO (0x65),
154 EMPTY_HOWTO (0x66),
155 EMPTY_HOWTO (0x67),
156 EMPTY_HOWTO (0x68),
157 EMPTY_HOWTO (0x69),
158 EMPTY_HOWTO (0x6a),
159 EMPTY_HOWTO (0x6b),
160 EMPTY_HOWTO (0x6c),
161 EMPTY_HOWTO (0x6d),
162 EMPTY_HOWTO (0x6e),
163 EMPTY_HOWTO (0x6f),
164 EMPTY_HOWTO (0x70),
165 EMPTY_HOWTO (0x71),
166 EMPTY_HOWTO (0x72),
167 EMPTY_HOWTO (0x73),
168 EMPTY_HOWTO (0x74),
169 EMPTY_HOWTO (0x75),
170 EMPTY_HOWTO (0x76),
171 EMPTY_HOWTO (0x77),
172
173 /* These are internal. */
174 /* A 5-bit unsigned displacement to a B/W/L address, at bit position 8/12. */
175 /* ---- ---- 4--- 3210. */
176 #define R_RX_RH_ABS5p8B 0x78
177 RXREL (RH_ABS5p8B, 0, 0, 0, dont, FALSE),
178 #define R_RX_RH_ABS5p8W 0x79
179 RXREL (RH_ABS5p8W, 0, 0, 0, dont, FALSE),
180 #define R_RX_RH_ABS5p8L 0x7a
181 RXREL (RH_ABS5p8L, 0, 0, 0, dont, FALSE),
182 /* A 5-bit unsigned displacement to a B/W/L address, at bit position 5/12. */
183 /* ---- -432 1--- 0---. */
184 #define R_RX_RH_ABS5p5B 0x7b
185 RXREL (RH_ABS5p5B, 0, 0, 0, dont, FALSE),
186 #define R_RX_RH_ABS5p5W 0x7c
187 RXREL (RH_ABS5p5W, 0, 0, 0, dont, FALSE),
188 #define R_RX_RH_ABS5p5L 0x7d
189 RXREL (RH_ABS5p5L, 0, 0, 0, dont, FALSE),
190 /* A 4-bit unsigned immediate at bit position 8. */
191 #define R_RX_RH_UIMM4p8 0x7e
192 RXREL (RH_UIMM4p8, 0, 0, 0, dont, FALSE),
193 /* A 4-bit negative unsigned immediate at bit position 8. */
194 #define R_RX_RH_UNEG4p8 0x7f
195 RXREL (RH_UNEG4p8, 0, 0, 0, dont, FALSE),
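/* As a rough worked example of the 5-bit packings above: for
   R_RX_RH_ABS5p8B the handler in rx_elf_relocate_section keeps bits
   6..4 of the addressed byte, moves bit 4 of the value into bit 7 and
   the low four value bits into bits 3..0, so a displacement of 0x15
   (binary 10101) patches the byte to 1xxx0101.  */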
196 /* End of internal relocs. */
197
198 RXREL (SYM, 2, 32, 0, dont, FALSE),
199 RXREL (OPneg, 2, 32, 0, dont, FALSE),
200 RXREL (OPadd, 2, 32, 0, dont, FALSE),
201 RXREL (OPsub, 2, 32, 0, dont, FALSE),
202 RXREL (OPmul, 2, 32, 0, dont, FALSE),
203 RXREL (OPdiv, 2, 32, 0, dont, FALSE),
204 RXREL (OPshla, 2, 32, 0, dont, FALSE),
205 RXREL (OPshra, 2, 32, 0, dont, FALSE),
206 RXREL (OPsctsize, 2, 32, 0, dont, FALSE),
207 RXREL (OPscttop, 2, 32, 0, dont, FALSE),
208 RXREL (OPand, 2, 32, 0, dont, FALSE),
209 RXREL (OPor, 2, 32, 0, dont, FALSE),
210 RXREL (OPxor, 2, 32, 0, dont, FALSE),
211 RXREL (OPnot, 2, 32, 0, dont, FALSE),
212 RXREL (OPmod, 2, 32, 0, dont, FALSE),
213 RXREL (OPromtop, 2, 32, 0, dont, FALSE),
214 RXREL (OPramtop, 2, 32, 0, dont, FALSE)
215 };
216 \f
217 /* Map BFD reloc types to RX ELF reloc types. */
218
219 struct rx_reloc_map
220 {
221 bfd_reloc_code_real_type bfd_reloc_val;
222 unsigned int rx_reloc_val;
223 };
224
225 static const struct rx_reloc_map rx_reloc_map [] =
226 {
227 { BFD_RELOC_NONE, R_RX_NONE },
228 { BFD_RELOC_8, R_RX_DIR8S },
229 { BFD_RELOC_16, R_RX_DIR16S },
230 { BFD_RELOC_24, R_RX_DIR24S },
231 { BFD_RELOC_32, R_RX_DIR32 },
232 { BFD_RELOC_RX_16_OP, R_RX_DIR16 },
233 { BFD_RELOC_RX_DIR3U_PCREL, R_RX_DIR3U_PCREL },
234 { BFD_RELOC_8_PCREL, R_RX_DIR8S_PCREL },
235 { BFD_RELOC_16_PCREL, R_RX_DIR16S_PCREL },
236 { BFD_RELOC_24_PCREL, R_RX_DIR24S_PCREL },
237 { BFD_RELOC_RX_8U, R_RX_DIR8U },
238 { BFD_RELOC_RX_16U, R_RX_DIR16U },
239 { BFD_RELOC_RX_24U, R_RX_RH_24_UNS },
240 { BFD_RELOC_RX_NEG8, R_RX_RH_8_NEG },
241 { BFD_RELOC_RX_NEG16, R_RX_RH_16_NEG },
242 { BFD_RELOC_RX_NEG24, R_RX_RH_24_NEG },
243 { BFD_RELOC_RX_NEG32, R_RX_RH_32_NEG },
244 { BFD_RELOC_RX_DIFF, R_RX_RH_DIFF },
245 { BFD_RELOC_RX_GPRELB, R_RX_RH_GPRELB },
246 { BFD_RELOC_RX_GPRELW, R_RX_RH_GPRELW },
247 { BFD_RELOC_RX_GPRELL, R_RX_RH_GPRELL },
248 { BFD_RELOC_RX_RELAX, R_RX_RH_RELAX },
249 { BFD_RELOC_RX_SYM, R_RX_SYM },
250 { BFD_RELOC_RX_OP_SUBTRACT, R_RX_OPsub },
251 { BFD_RELOC_RX_ABS8, R_RX_ABS8 },
252 { BFD_RELOC_RX_ABS16, R_RX_ABS16 },
253 { BFD_RELOC_RX_ABS32, R_RX_ABS32 },
254 { BFD_RELOC_RX_ABS16UL, R_RX_ABS16UL },
255 { BFD_RELOC_RX_ABS16UW, R_RX_ABS16UW },
256 { BFD_RELOC_RX_ABS16U, R_RX_ABS16U }
257 };
258
259 #define BIGE(abfd) ((abfd)->xvec->byteorder == BFD_ENDIAN_BIG)
260
261 static reloc_howto_type *
262 rx_reloc_type_lookup (bfd * abfd ATTRIBUTE_UNUSED,
263 bfd_reloc_code_real_type code)
264 {
265 unsigned int i;
266
267 if (code == BFD_RELOC_RX_32_OP)
268 return rx_elf_howto_table + R_RX_DIR32;
269
270 for (i = ARRAY_SIZE (rx_reloc_map); i--;)
271 if (rx_reloc_map [i].bfd_reloc_val == code)
272 return rx_elf_howto_table + rx_reloc_map[i].rx_reloc_val;
273
274 return NULL;
275 }
276
277 static reloc_howto_type *
278 rx_reloc_name_lookup (bfd * abfd ATTRIBUTE_UNUSED, const char * r_name)
279 {
280 unsigned int i;
281
282 for (i = 0; i < ARRAY_SIZE (rx_elf_howto_table); i++)
283 if (rx_elf_howto_table[i].name != NULL
284 && strcasecmp (rx_elf_howto_table[i].name, r_name) == 0)
285 return rx_elf_howto_table + i;
286
287 return NULL;
288 }
289
290 /* Set the howto pointer for an RX ELF reloc. */
291
292 static void
293 rx_info_to_howto_rela (bfd * abfd ATTRIBUTE_UNUSED,
294 arelent * cache_ptr,
295 Elf_Internal_Rela * dst)
296 {
297 unsigned int r_type;
298
299 r_type = ELF32_R_TYPE (dst->r_info);
300 BFD_ASSERT (r_type < (unsigned int) R_RX_max);
301 cache_ptr->howto = rx_elf_howto_table + r_type;
302 }
303 \f
304 static bfd_vma
305 get_symbol_value (const char * name,
306 bfd_reloc_status_type * status,
307 struct bfd_link_info * info,
308 bfd * input_bfd,
309 asection * input_section,
310 int offset)
311 {
312 bfd_vma value = 0;
313 struct bfd_link_hash_entry * h;
314
315 h = bfd_link_hash_lookup (info->hash, name, FALSE, FALSE, TRUE);
316
317 if (h == NULL
318 || (h->type != bfd_link_hash_defined
319 && h->type != bfd_link_hash_defweak))
320 * status = info->callbacks->undefined_symbol
321 (info, name, input_bfd, input_section, offset, TRUE);
322 else
323 value = (h->u.def.value
324 + h->u.def.section->output_section->vma
325 + h->u.def.section->output_offset);
326
327 return value;
328 }
329
330 static bfd_vma
331 get_gp (bfd_reloc_status_type * status,
332 struct bfd_link_info * info,
333 bfd * abfd,
334 asection * sec,
335 int offset)
336 {
337 static bfd_boolean cached = FALSE;
338 static bfd_vma cached_value = 0;
339
340 if (!cached)
341 {
342 cached_value = get_symbol_value ("__gp", status, info, abfd, sec, offset);
343 cached = TRUE;
344 }
345 return cached_value;
346 }
347
348 static bfd_vma
349 get_romstart (bfd_reloc_status_type * status,
350 struct bfd_link_info * info,
351 bfd * abfd,
352 asection * sec,
353 int offset)
354 {
355 static bfd_boolean cached = FALSE;
356 static bfd_vma cached_value = 0;
357
358 if (!cached)
359 {
360 cached_value = get_symbol_value ("_start", status, info, abfd, sec, offset);
361 cached = TRUE;
362 }
363 return cached_value;
364 }
365
366 static bfd_vma
367 get_ramstart (bfd_reloc_status_type * status,
368 struct bfd_link_info * info,
369 bfd * abfd,
370 asection * sec,
371 int offset)
372 {
373 static bfd_boolean cached = FALSE;
374 static bfd_vma cached_value = 0;
375
376 if (!cached)
377 {
378 cached_value = get_symbol_value ("__datastart", status, info, abfd, sec, offset);
379 cached = TRUE;
380 }
381 return cached_value;
382 }
383
384 #define NUM_STACK_ENTRIES 16
385 static int32_t rx_stack [ NUM_STACK_ENTRIES ];
386 static unsigned int rx_stack_top;
387
388 #define RX_STACK_PUSH(val) \
389 do \
390 { \
391 if (rx_stack_top < NUM_STACK_ENTRIES) \
392 rx_stack [rx_stack_top ++] = (val); \
393 else \
394 r = bfd_reloc_dangerous; \
395 } \
396 while (0)
397
398 #define RX_STACK_POP(dest) \
399 do \
400 { \
401 if (rx_stack_top > 0) \
402 (dest) = rx_stack [-- rx_stack_top]; \
403 else \
404 (dest) = 0, r = bfd_reloc_dangerous; \
405 } \
406 while (0)
407
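/* As a sketch of how these macros are driven by the complex relocs
   below (the symbol names here are purely illustrative): a link-time
   expression such as "_fred - _joe" typically arrives as a chain of
   relocations at the same offset,

     R_RX_SYM (_fred)   push the value of _fred
     R_RX_SYM (_joe)    push the value of _joe
     R_RX_OPsub         pop twice, push _fred - _joe
     R_RX_ABS32         pop the result and store it as 32 bits

   and stack overflow or underflow is reported as
   bfd_reloc_dangerous.  */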
408 /* Relocate an RX ELF section.
409 There is some attempt to make this function usable for many architectures,
410 both USE_REL and USE_RELA ['twould be nice if such a critter existed],
411 if only to serve as a learning tool.
412
413 The RELOCATE_SECTION function is called by the new ELF backend linker
414 to handle the relocations for a section.
415
416 The relocs are always passed as Rela structures; if the section
417 actually uses Rel structures, the r_addend field will always be
418 zero.
419
420 This function is responsible for adjusting the section contents as
421 necessary, and (if using Rela relocs and generating a relocatable
422 output file) adjusting the reloc addend as necessary.
423
424 This function does not have to worry about setting the reloc
425 address or the reloc symbol index.
426
427 LOCAL_SYMS is a pointer to the swapped in local symbols.
428
429 LOCAL_SECTIONS is an array giving the section in the input file
430 corresponding to the st_shndx field of each local symbol.
431
432 The global hash table entry for the global symbols can be found
433 via elf_sym_hashes (input_bfd).
434
435 When generating relocatable output, this function must handle
436 STB_LOCAL/STT_SECTION symbols specially. The output symbol is
437 going to be the section symbol corresponding to the output
438 section, which means that the addend must be adjusted
439 accordingly. */
440
441 static bfd_boolean
442 rx_elf_relocate_section
443 (bfd * output_bfd,
444 struct bfd_link_info * info,
445 bfd * input_bfd,
446 asection * input_section,
447 bfd_byte * contents,
448 Elf_Internal_Rela * relocs,
449 Elf_Internal_Sym * local_syms,
450 asection ** local_sections)
451 {
452 Elf_Internal_Shdr * symtab_hdr;
453 struct elf_link_hash_entry ** sym_hashes;
454 Elf_Internal_Rela * rel;
455 Elf_Internal_Rela * relend;
456
457 symtab_hdr = & elf_tdata (input_bfd)->symtab_hdr;
458 sym_hashes = elf_sym_hashes (input_bfd);
459 relend = relocs + input_section->reloc_count;
460 for (rel = relocs; rel < relend; rel ++)
461 {
462 reloc_howto_type * howto;
463 unsigned long r_symndx;
464 Elf_Internal_Sym * sym;
465 asection * sec;
466 struct elf_link_hash_entry * h;
467 bfd_vma relocation;
468 bfd_reloc_status_type r;
469 const char * name = NULL;
470 bfd_boolean unresolved_reloc = TRUE;
471 int r_type;
472
473 r_type = ELF32_R_TYPE (rel->r_info);
474 r_symndx = ELF32_R_SYM (rel->r_info);
475
476 howto = rx_elf_howto_table + ELF32_R_TYPE (rel->r_info);
477 h = NULL;
478 sym = NULL;
479 sec = NULL;
480 relocation = 0;
481
482 if (r_symndx < symtab_hdr->sh_info)
483 {
484 sym = local_syms + r_symndx;
485 sec = local_sections [r_symndx];
486 relocation = _bfd_elf_rela_local_sym (output_bfd, sym, & sec, rel);
487
488 name = bfd_elf_string_from_elf_section
489 (input_bfd, symtab_hdr->sh_link, sym->st_name);
490 name = (sym->st_name == 0) ? bfd_section_name (input_bfd, sec) : name;
491 }
492 else
493 {
494 bfd_boolean warned;
495
496 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
497 r_symndx, symtab_hdr, sym_hashes, h,
498 sec, relocation, unresolved_reloc,
499 warned);
500
501 name = h->root.root.string;
502 }
503
504 if (sec != NULL && elf_discarded_section (sec))
505 RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
506 rel, relend, howto, contents);
507
508 if (info->relocatable)
509 {
510 /* This is a relocatable link. We don't have to change
511 anything, unless the reloc is against a section symbol,
512 in which case we have to adjust according to where the
513 section symbol winds up in the output section. */
514 if (sym != NULL && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
515 rel->r_addend += sec->output_offset;
516 continue;
517 }
518
519 if (h != NULL && h->root.type == bfd_link_hash_undefweak)
520 /* If the symbol is undefined and weak
521 then the relocation resolves to zero. */
522 relocation = 0;
523 else
524 {
525 if (howto->pc_relative)
526 {
527 relocation -= (input_section->output_section->vma
528 + input_section->output_offset
529 + rel->r_offset);
530 if (r_type != R_RX_RH_3_PCREL
531 && r_type != R_RX_DIR3U_PCREL)
532 relocation ++;
533 }
534
535 relocation += rel->r_addend;
536 }
537
538 r = bfd_reloc_ok;
539
540 #define RANGE(a,b) if (a > (long) relocation || (long) relocation > b) r = bfd_reloc_overflow
541 #define ALIGN(m) if (relocation & m) r = bfd_reloc_other;
542 #define OP(i) (contents[rel->r_offset + (i)])
543 #define WARN_REDHAT(type) \
544 _bfd_error_handler (_("%B:%A: Warning: deprecated Red Hat reloc " type " detected against: %s."), \
545 input_bfd, input_section, name)
546
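/* For instance, with these macros the R_RX_DIR8U case below is
   effectively:

     if (0 > (long) relocation || (long) relocation > 255)
       r = bfd_reloc_overflow;
     contents[rel->r_offset] = relocation;

   and ALIGN (3) marks a value with either of its low two bits set
   (not 4-byte aligned) as bfd_reloc_other, which is reported further
   down as an unaligned small-data access.  */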
547 /* Opcode relocs are always big endian. Data relocs are bi-endian. */
548 switch (r_type)
549 {
550 case R_RX_NONE:
551 break;
552
553 case R_RX_RH_RELAX:
554 break;
555
556 case R_RX_RH_3_PCREL:
557 WARN_REDHAT ("RX_RH_3_PCREL");
558 RANGE (3, 10);
559 OP (0) &= 0xf8;
560 OP (0) |= relocation & 0x07;
561 break;
562
563 case R_RX_RH_8_NEG:
564 WARN_REDHAT ("RX_RH_8_NEG");
565 relocation = - relocation;
566 case R_RX_DIR8S_PCREL:
567 RANGE (-128, 127);
568 OP (0) = relocation;
569 break;
570
571 case R_RX_DIR8S:
572 RANGE (-128, 255);
573 OP (0) = relocation;
574 break;
575
576 case R_RX_DIR8U:
577 RANGE (0, 255);
578 OP (0) = relocation;
579 break;
580
581 case R_RX_RH_16_NEG:
582 WARN_REDHAT ("RX_RH_16_NEG");
583 relocation = - relocation;
584 case R_RX_DIR16S_PCREL:
585 RANGE (-32768, 32767);
586 #if RX_OPCODE_BIG_ENDIAN
OP (1) = relocation;
OP (0) = relocation >> 8;
587 #else
588 OP (0) = relocation;
589 OP (1) = relocation >> 8;
590 #endif
591 break;
592
593 case R_RX_RH_16_OP:
594 WARN_REDHAT ("RX_RH_16_OP");
595 RANGE (-32768, 32767);
596 #if RX_OPCODE_BIG_ENDIAN
597 OP (1) = relocation;
598 OP (0) = relocation >> 8;
599 #else
600 OP (0) = relocation;
601 OP (1) = relocation >> 8;
602 #endif
603 break;
604
605 case R_RX_DIR16S:
606 RANGE (-32768, 65535);
607 if (BIGE (output_bfd) && !(input_section->flags & SEC_CODE))
608 {
609 OP (1) = relocation;
610 OP (0) = relocation >> 8;
611 }
612 else
613 {
614 OP (0) = relocation;
615 OP (1) = relocation >> 8;
616 }
617 break;
618
619 case R_RX_DIR16U:
620 RANGE (0, 65536);
621 #if RX_OPCODE_BIG_ENDIAN
622 OP (1) = relocation;
623 OP (0) = relocation >> 8;
624 #else
625 OP (0) = relocation;
626 OP (1) = relocation >> 8;
627 #endif
628 break;
629
630 case R_RX_DIR16:
631 RANGE (-32768, 65536);
632 #if RX_OPCODE_BIG_ENDIAN
633 OP (1) = relocation;
634 OP (0) = relocation >> 8;
635 #else
636 OP (0) = relocation;
637 OP (1) = relocation >> 8;
638 #endif
639 break;
640
641 case R_RX_DIR16_REV:
642 RANGE (-32768, 65536);
643 #if RX_OPCODE_BIG_ENDIAN
644 OP (0) = relocation;
645 OP (1) = relocation >> 8;
646 #else
647 OP (1) = relocation;
648 OP (0) = relocation >> 8;
649 #endif
650 break;
651
652 case R_RX_DIR3U_PCREL:
653 RANGE (3, 10);
654 OP (0) &= 0xf8;
655 OP (0) |= relocation & 0x07;
656 break;
657
658 case R_RX_RH_24_NEG:
659 WARN_REDHAT ("RX_RH_24_NEG");
660 relocation = - relocation;
661 case R_RX_DIR24S_PCREL:
662 RANGE (-0x800000, 0x7fffff);
663 #if RX_OPCODE_BIG_ENDIAN
664 OP (2) = relocation;
665 OP (1) = relocation >> 8;
666 OP (0) = relocation >> 16;
667 #else
668 OP (0) = relocation;
669 OP (1) = relocation >> 8;
670 OP (2) = relocation >> 16;
671 #endif
672 break;
673
674 case R_RX_RH_24_OP:
675 WARN_REDHAT ("RX_RH_24_OP");
676 RANGE (-0x800000, 0x7fffff);
677 #if RX_OPCODE_BIG_ENDIAN
678 OP (2) = relocation;
679 OP (1) = relocation >> 8;
680 OP (0) = relocation >> 16;
681 #else
682 OP (0) = relocation;
683 OP (1) = relocation >> 8;
684 OP (2) = relocation >> 16;
685 #endif
686 break;
687
688 case R_RX_DIR24S:
689 RANGE (-0x800000, 0x7fffff);
690 if (BIGE (output_bfd) && !(input_section->flags & SEC_CODE))
691 {
692 OP (2) = relocation;
693 OP (1) = relocation >> 8;
694 OP (0) = relocation >> 16;
695 }
696 else
697 {
698 OP (0) = relocation;
699 OP (1) = relocation >> 8;
700 OP (2) = relocation >> 16;
701 }
702 break;
703
704 case R_RX_RH_24_UNS:
705 WARN_REDHAT ("RX_RH_24_UNS");
706 RANGE (0, 0xffffff);
707 #if RX_OPCODE_BIG_ENDIAN
708 OP (2) = relocation;
709 OP (1) = relocation >> 8;
710 OP (0) = relocation >> 16;
711 #else
712 OP (0) = relocation;
713 OP (1) = relocation >> 8;
714 OP (2) = relocation >> 16;
715 #endif
716 break;
717
718 case R_RX_RH_32_NEG:
719 WARN_REDHAT ("RX_RH_32_NEG");
720 relocation = - relocation;
721 #if RX_OPCODE_BIG_ENDIAN
722 OP (3) = relocation;
723 OP (2) = relocation >> 8;
724 OP (1) = relocation >> 16;
725 OP (0) = relocation >> 24;
726 #else
727 OP (0) = relocation;
728 OP (1) = relocation >> 8;
729 OP (2) = relocation >> 16;
730 OP (3) = relocation >> 24;
731 #endif
732 break;
733
734 case R_RX_RH_32_OP:
735 WARN_REDHAT ("RX_RH_32_OP");
736 #if RX_OPCODE_BIG_ENDIAN
737 OP (3) = relocation;
738 OP (2) = relocation >> 8;
739 OP (1) = relocation >> 16;
740 OP (0) = relocation >> 24;
741 #else
742 OP (0) = relocation;
743 OP (1) = relocation >> 8;
744 OP (2) = relocation >> 16;
745 OP (3) = relocation >> 24;
746 #endif
747 break;
748
749 case R_RX_DIR32:
750 if (BIGE (output_bfd) && !(input_section->flags & SEC_CODE))
751 {
752 OP (3) = relocation;
753 OP (2) = relocation >> 8;
754 OP (1) = relocation >> 16;
755 OP (0) = relocation >> 24;
756 }
757 else
758 {
759 OP (0) = relocation;
760 OP (1) = relocation >> 8;
761 OP (2) = relocation >> 16;
762 OP (3) = relocation >> 24;
763 }
764 break;
765
766 case R_RX_DIR32_REV:
767 if (BIGE (output_bfd))
768 {
769 OP (0) = relocation;
770 OP (1) = relocation >> 8;
771 OP (2) = relocation >> 16;
772 OP (3) = relocation >> 24;
773 }
774 else
775 {
776 OP (3) = relocation;
777 OP (2) = relocation >> 8;
778 OP (1) = relocation >> 16;
779 OP (0) = relocation >> 24;
780 }
781 break;
782
783 case R_RX_RH_DIFF:
784 {
785 bfd_vma val;
786 WARN_REDHAT ("RX_RH_DIFF");
787 val = bfd_get_32 (output_bfd, & OP (0));
788 val -= relocation;
789 bfd_put_32 (output_bfd, val, & OP (0));
790 }
791 break;
792
793 case R_RX_RH_GPRELB:
794 WARN_REDHAT ("RX_RH_GPRELB");
795 relocation -= get_gp (&r, info, input_bfd, input_section, rel->r_offset);
796 RANGE (0, 65535);
797 #if RX_OPCODE_BIG_ENDIAN
798 OP (1) = relocation;
799 OP (0) = relocation >> 8;
800 #else
801 OP (0) = relocation;
802 OP (1) = relocation >> 8;
803 #endif
804 break;
805
806 case R_RX_RH_GPRELW:
807 WARN_REDHAT ("RX_RH_GPRELW");
808 relocation -= get_gp (&r, info, input_bfd, input_section, rel->r_offset);
809 ALIGN (1);
810 relocation >>= 1;
811 RANGE (0, 65535);
812 #if RX_OPCODE_BIG_ENDIAN
813 OP (1) = relocation;
814 OP (0) = relocation >> 8;
815 #else
816 OP (0) = relocation;
817 OP (1) = relocation >> 8;
818 #endif
819 break;
820
821 case R_RX_RH_GPRELL:
822 WARN_REDHAT ("RX_RH_GPRELL");
823 relocation -= get_gp (&r, info, input_bfd, input_section, rel->r_offset);
824 ALIGN (3);
825 relocation >>= 2;
826 RANGE (0, 65535);
827 #if RX_OPCODE_BIG_ENDIAN
828 OP (1) = relocation;
829 OP (0) = relocation >> 8;
830 #else
831 OP (0) = relocation;
832 OP (1) = relocation >> 8;
833 #endif
834 break;
835
836 /* Internal relocations just for relaxation: */
837 case R_RX_RH_ABS5p5B:
838 RX_STACK_POP (relocation);
839 RANGE (0, 31);
840 OP (0) &= 0xf8;
841 OP (0) |= relocation >> 2;
842 OP (1) &= 0x77;
843 OP (1) |= (relocation << 6) & 0x80;
844 OP (1) |= (relocation << 3) & 0x08;
845 break;
846
847 case R_RX_RH_ABS5p5W:
848 RX_STACK_POP (relocation);
849 RANGE (0, 62);
850 ALIGN (1);
851 relocation >>= 1;
852 OP (0) &= 0xf8;
853 OP (0) |= relocation >> 2;
854 OP (1) &= 0x77;
855 OP (1) |= (relocation << 6) & 0x80;
856 OP (1) |= (relocation << 3) & 0x08;
857 break;
858
859 case R_RX_RH_ABS5p5L:
860 RX_STACK_POP (relocation);
861 RANGE (0, 124);
862 ALIGN (3);
863 relocation >>= 2;
864 OP (0) &= 0xf8;
865 OP (0) |= relocation >> 2;
866 OP (1) &= 0x77;
867 OP (1) |= (relocation << 6) & 0x80;
868 OP (1) |= (relocation << 3) & 0x08;
869 break;
870
871 case R_RX_RH_ABS5p8B:
872 RX_STACK_POP (relocation);
873 RANGE (0, 31);
874 OP (0) &= 0x70;
875 OP (0) |= (relocation << 3) & 0x80;
876 OP (0) |= relocation & 0x0f;
877 break;
878
879 case R_RX_RH_ABS5p8W:
880 RX_STACK_POP (relocation);
881 RANGE (0, 62);
882 ALIGN (1);
883 relocation >>= 1;
884 OP (0) &= 0x70;
885 OP (0) |= (relocation << 3) & 0x80;
886 OP (0) |= relocation & 0x0f;
887 break;
888
889 case R_RX_RH_ABS5p8L:
890 RX_STACK_POP (relocation);
891 RANGE (0, 124);
892 ALIGN (3);
893 relocation >>= 2;
894 OP (0) &= 0x70;
895 OP (0) |= (relocation << 3) & 0x80;
896 OP (0) |= relocation & 0x0f;
897 break;
898
899 case R_RX_RH_UIMM4p8:
900 RANGE (0, 15);
901 OP (0) &= 0x0f;
902 OP (0) |= relocation << 4;
903 break;
904
905 case R_RX_RH_UNEG4p8:
906 RANGE (-15, 0);
907 OP (0) &= 0x0f;
908 OP (0) |= (-relocation) << 4;
909 break;
910
911 /* Complex reloc handling: */
912
913 case R_RX_ABS32:
914 RX_STACK_POP (relocation);
915 #if RX_OPCODE_BIG_ENDIAN
916 OP (3) = relocation;
917 OP (2) = relocation >> 8;
918 OP (1) = relocation >> 16;
919 OP (0) = relocation >> 24;
920 #else
921 OP (0) = relocation;
922 OP (1) = relocation >> 8;
923 OP (2) = relocation >> 16;
924 OP (3) = relocation >> 24;
925 #endif
926 break;
927
928 case R_RX_ABS32_REV:
929 RX_STACK_POP (relocation);
930 #if RX_OPCODE_BIG_ENDIAN
931 OP (0) = relocation;
932 OP (1) = relocation >> 8;
933 OP (2) = relocation >> 16;
934 OP (3) = relocation >> 24;
935 #else
936 OP (3) = relocation;
937 OP (2) = relocation >> 8;
938 OP (1) = relocation >> 16;
939 OP (0) = relocation >> 24;
940 #endif
941 break;
942
943 case R_RX_ABS24S_PCREL:
944 case R_RX_ABS24S:
945 RX_STACK_POP (relocation);
946 RANGE (-0x800000, 0x7fffff);
947 if (BIGE (output_bfd) && !(input_section->flags & SEC_CODE))
948 {
949 OP (2) = relocation;
950 OP (1) = relocation >> 8;
951 OP (0) = relocation >> 16;
952 }
953 else
954 {
955 OP (0) = relocation;
956 OP (1) = relocation >> 8;
957 OP (2) = relocation >> 16;
958 }
959 break;
960
961 case R_RX_ABS16:
962 RX_STACK_POP (relocation);
963 RANGE (-32768, 65535);
964 #if RX_OPCODE_BIG_ENDIAN
965 OP (1) = relocation;
966 OP (0) = relocation >> 8;
967 #else
968 OP (0) = relocation;
969 OP (1) = relocation >> 8;
970 #endif
971 break;
972
973 case R_RX_ABS16_REV:
974 RX_STACK_POP (relocation);
975 RANGE (-32768, 65535);
976 #if RX_OPCODE_BIG_ENDIAN
977 OP (0) = relocation;
978 OP (1) = relocation >> 8;
979 #else
980 OP (1) = relocation;
981 OP (0) = relocation >> 8;
982 #endif
983 break;
984
985 case R_RX_ABS16S_PCREL:
986 case R_RX_ABS16S:
987 RX_STACK_POP (relocation);
988 RANGE (-32768, 32767);
989 if (BIGE (output_bfd) && !(input_section->flags & SEC_CODE))
990 {
991 OP (1) = relocation;
992 OP (0) = relocation >> 8;
993 }
994 else
995 {
996 OP (0) = relocation;
997 OP (1) = relocation >> 8;
998 }
999 break;
1000
1001 case R_RX_ABS16U:
1002 RX_STACK_POP (relocation);
1003 RANGE (0, 65536);
1004 #if RX_OPCODE_BIG_ENDIAN
1005 OP (1) = relocation;
1006 OP (0) = relocation >> 8;
1007 #else
1008 OP (0) = relocation;
1009 OP (1) = relocation >> 8;
1010 #endif
1011 break;
1012
1013 case R_RX_ABS16UL:
1014 RX_STACK_POP (relocation);
1015 relocation >>= 2;
1016 RANGE (0, 65536);
1017 #if RX_OPCODE_BIG_ENDIAN
1018 OP (1) = relocation;
1019 OP (0) = relocation >> 8;
1020 #else
1021 OP (0) = relocation;
1022 OP (1) = relocation >> 8;
1023 #endif
1024 break;
1025
1026 case R_RX_ABS16UW:
1027 RX_STACK_POP (relocation);
1028 relocation >>= 1;
1029 RANGE (0, 65536);
1030 #if RX_OPCODE_BIG_ENDIAN
1031 OP (1) = relocation;
1032 OP (0) = relocation >> 8;
1033 #else
1034 OP (0) = relocation;
1035 OP (1) = relocation >> 8;
1036 #endif
1037 break;
1038
1039 case R_RX_ABS8:
1040 RX_STACK_POP (relocation);
1041 RANGE (-128, 255);
1042 OP (0) = relocation;
1043 break;
1044
1045 case R_RX_ABS8U:
1046 RX_STACK_POP (relocation);
1047 RANGE (0, 255);
1048 OP (0) = relocation;
1049 break;
1050
1051 case R_RX_ABS8UL:
1052 RX_STACK_POP (relocation);
1053 relocation >>= 2;
1054 RANGE (0, 255);
1055 OP (0) = relocation;
1056 break;
1057
1058 case R_RX_ABS8UW:
1059 RX_STACK_POP (relocation);
1060 relocation >>= 1;
1061 RANGE (0, 255);
1062 OP (0) = relocation;
1063 break;
1064
1065 case R_RX_ABS8S_PCREL:
1066 case R_RX_ABS8S:
1067 RX_STACK_POP (relocation);
1068 RANGE (-128, 127);
1069 OP (0) = relocation;
1070 break;
1071
1072 case R_RX_SYM:
1073 if (r_symndx < symtab_hdr->sh_info)
1074 RX_STACK_PUSH (sec->output_section->vma
1075 + sec->output_offset
1076 + sym->st_value);
1077 else
1078 {
1079 if (h != NULL
1080 && (h->root.type == bfd_link_hash_defined
1081 || h->root.type == bfd_link_hash_defweak))
1082 RX_STACK_PUSH (h->root.u.def.value
1083 + sec->output_section->vma
1084 + sec->output_offset);
1085 else
1086 _bfd_error_handler (_("Warning: RX_SYM reloc with an unknown symbol"));
1087 }
1088 break;
1089
1090 case R_RX_OPneg:
1091 {
1092 int32_t tmp;
1093
1094 RX_STACK_POP (tmp);
1095 tmp = - tmp;
1096 RX_STACK_PUSH (tmp);
1097 }
1098 break;
1099
1100 case R_RX_OPadd:
1101 {
1102 int32_t tmp1, tmp2;
1103
1104 RX_STACK_POP (tmp1);
1105 RX_STACK_POP (tmp2);
1106 tmp1 += tmp2;
1107 RX_STACK_PUSH (tmp1);
1108 }
1109 break;
1110
1111 case R_RX_OPsub:
1112 {
1113 int32_t tmp1, tmp2;
1114
1115 RX_STACK_POP (tmp1);
1116 RX_STACK_POP (tmp2);
1117 tmp2 -= tmp1;
1118 RX_STACK_PUSH (tmp2);
1119 }
1120 break;
1121
1122 case R_RX_OPmul:
1123 {
1124 int32_t tmp1, tmp2;
1125
1126 RX_STACK_POP (tmp1);
1127 RX_STACK_POP (tmp2);
1128 tmp1 *= tmp2;
1129 RX_STACK_PUSH (tmp1);
1130 }
1131 break;
1132
1133 case R_RX_OPdiv:
1134 {
1135 int32_t tmp1, tmp2;
1136
1137 RX_STACK_POP (tmp1);
1138 RX_STACK_POP (tmp2);
1139 tmp1 /= tmp2;
1140 RX_STACK_PUSH (tmp1);
1141 }
1142 break;
1143
1144 case R_RX_OPshla:
1145 {
1146 int32_t tmp1, tmp2;
1147
1148 RX_STACK_POP (tmp1);
1149 RX_STACK_POP (tmp2);
1150 tmp1 <<= tmp2;
1151 RX_STACK_PUSH (tmp1);
1152 }
1153 break;
1154
1155 case R_RX_OPshra:
1156 {
1157 int32_t tmp1, tmp2;
1158
1159 RX_STACK_POP (tmp1);
1160 RX_STACK_POP (tmp2);
1161 tmp1 >>= tmp2;
1162 RX_STACK_PUSH (tmp1);
1163 }
1164 break;
1165
1166 case R_RX_OPsctsize:
1167 RX_STACK_PUSH (input_section->size);
1168 break;
1169
1170 case R_RX_OPscttop:
1171 RX_STACK_PUSH (input_section->output_section->vma);
1172 break;
1173
1174 case R_RX_OPand:
1175 {
1176 int32_t tmp1, tmp2;
1177
1178 RX_STACK_POP (tmp1);
1179 RX_STACK_POP (tmp2);
1180 tmp1 &= tmp2;
1181 RX_STACK_PUSH (tmp1);
1182 }
1183 break;
1184
1185 case R_RX_OPor:
1186 {
1187 int32_t tmp1, tmp2;
1188
1189 RX_STACK_POP (tmp1);
1190 RX_STACK_POP (tmp2);
1191 tmp1 |= tmp2;
1192 RX_STACK_PUSH (tmp1);
1193 }
1194 break;
1195
1196 case R_RX_OPxor:
1197 {
1198 int32_t tmp1, tmp2;
1199
1200 RX_STACK_POP (tmp1);
1201 RX_STACK_POP (tmp2);
1202 tmp1 ^= tmp2;
1203 RX_STACK_PUSH (tmp1);
1204 }
1205 break;
1206
1207 case R_RX_OPnot:
1208 {
1209 int32_t tmp;
1210
1211 RX_STACK_POP (tmp);
1212 tmp = ~ tmp;
1213 RX_STACK_PUSH (tmp);
1214 }
1215 break;
1216
1217 case R_RX_OPmod:
1218 {
1219 int32_t tmp1, tmp2;
1220
1221 RX_STACK_POP (tmp1);
1222 RX_STACK_POP (tmp2);
1223 tmp1 %= tmp2;
1224 RX_STACK_PUSH (tmp1);
1225 }
1226 break;
1227
1228 case R_RX_OPromtop:
1229 RX_STACK_PUSH (get_romstart (&r, info, input_bfd, input_section, rel->r_offset));
1230 break;
1231
1232 case R_RX_OPramtop:
1233 RX_STACK_PUSH (get_ramstart (&r, info, input_bfd, input_section, rel->r_offset));
1234 break;
1235
1236 default:
1237 r = bfd_reloc_notsupported;
1238 break;
1239 }
1240
1241 if (r != bfd_reloc_ok)
1242 {
1243 const char * msg = NULL;
1244
1245 switch (r)
1246 {
1247 case bfd_reloc_overflow:
1248 /* Catch the case of a missing function declaration
1249 and emit a more helpful error message. */
1250 if (r_type == R_RX_DIR24S_PCREL)
1251 msg = _("%B(%A): error: call to undefined function '%s'");
1252 else
1253 r = info->callbacks->reloc_overflow
1254 (info, (h ? &h->root : NULL), name, howto->name, (bfd_vma) 0,
1255 input_bfd, input_section, rel->r_offset);
1256 break;
1257
1258 case bfd_reloc_undefined:
1259 r = info->callbacks->undefined_symbol
1260 (info, name, input_bfd, input_section, rel->r_offset,
1261 TRUE);
1262 break;
1263
1264 case bfd_reloc_other:
1265 msg = _("%B(%A): warning: unaligned access to symbol '%s' in the small data area");
1266 break;
1267
1268 case bfd_reloc_outofrange:
1269 msg = _("%B(%A): internal error: out of range error");
1270 break;
1271
1272 case bfd_reloc_notsupported:
1273 msg = _("%B(%A): internal error: unsupported relocation error");
1274 break;
1275
1276 case bfd_reloc_dangerous:
1277 msg = _("%B(%A): internal error: dangerous relocation");
1278 break;
1279
1280 default:
1281 msg = _("%B(%A): internal error: unknown error");
1282 break;
1283 }
1284
1285 if (msg)
1286 _bfd_error_handler (msg, input_bfd, input_section, name);
1287
1288 if (! r)
1289 return FALSE;
1290 }
1291 }
1292
1293 return TRUE;
1294 }
1295 \f
1296 /* Relaxation Support. */
1297
1298 /* Progression of relocations from largest operand size to smallest
1299 operand size. */
1300
1301 static int
1302 next_smaller_reloc (int r)
1303 {
1304 switch (r)
1305 {
1306 case R_RX_DIR32: return R_RX_DIR24S;
1307 case R_RX_DIR24S: return R_RX_DIR16S;
1308 case R_RX_DIR16S: return R_RX_DIR8S;
1309 case R_RX_DIR8S: return R_RX_NONE;
1310
1311 case R_RX_DIR16: return R_RX_DIR8;
1312 case R_RX_DIR8: return R_RX_NONE;
1313
1314 case R_RX_DIR16U: return R_RX_DIR8U;
1315 case R_RX_DIR8U: return R_RX_NONE;
1316
1317 case R_RX_DIR24S_PCREL: return R_RX_DIR16S_PCREL;
1318 case R_RX_DIR16S_PCREL: return R_RX_DIR8S_PCREL;
1319 case R_RX_DIR8S_PCREL: return R_RX_DIR3U_PCREL;
1320
1321 case R_RX_DIR16UL: return R_RX_DIR8UL;
1322 case R_RX_DIR8UL: return R_RX_NONE;
1323 case R_RX_DIR16UW: return R_RX_DIR8UW;
1324 case R_RX_DIR8UW: return R_RX_NONE;
1325
1326 case R_RX_RH_32_OP: return R_RX_RH_24_OP;
1327 case R_RX_RH_24_OP: return R_RX_RH_16_OP;
1328 case R_RX_RH_16_OP: return R_RX_DIR8;
1329
1330 case R_RX_ABS32: return R_RX_ABS24S;
1331 case R_RX_ABS24S: return R_RX_ABS16S;
1332 case R_RX_ABS16: return R_RX_ABS8;
1333 case R_RX_ABS16U: return R_RX_ABS8U;
1334 case R_RX_ABS16S: return R_RX_ABS8S;
1335 case R_RX_ABS8: return R_RX_NONE;
1336 case R_RX_ABS8U: return R_RX_NONE;
1337 case R_RX_ABS8S: return R_RX_NONE;
1338 case R_RX_ABS24S_PCREL: return R_RX_ABS16S_PCREL;
1339 case R_RX_ABS16S_PCREL: return R_RX_ABS8S_PCREL;
1340 case R_RX_ABS8S_PCREL: return R_RX_NONE;
1341 case R_RX_ABS16UL: return R_RX_ABS8UL;
1342 case R_RX_ABS16UW: return R_RX_ABS8UW;
1343 case R_RX_ABS8UL: return R_RX_NONE;
1344 case R_RX_ABS8UW: return R_RX_NONE;
1345 }
1346 return r;
1347 };
1348
1349 /* Delete some bytes from a section while relaxing. */
1350
1351 static bfd_boolean
1352 elf32_rx_relax_delete_bytes (bfd *abfd, asection *sec, bfd_vma addr, int count,
1353 Elf_Internal_Rela *alignment_rel, int force_snip)
1354 {
1355 Elf_Internal_Shdr * symtab_hdr;
1356 unsigned int sec_shndx;
1357 bfd_byte * contents;
1358 Elf_Internal_Rela * irel;
1359 Elf_Internal_Rela * irelend;
1360 Elf_Internal_Sym * isym;
1361 Elf_Internal_Sym * isymend;
1362 bfd_vma toaddr;
1363 unsigned int symcount;
1364 struct elf_link_hash_entry ** sym_hashes;
1365 struct elf_link_hash_entry ** end_hashes;
1366
1367 if (!alignment_rel)
1368 force_snip = 1;
1369
1370 sec_shndx = _bfd_elf_section_from_bfd_section (abfd, sec);
1371
1372 contents = elf_section_data (sec)->this_hdr.contents;
1373
1374 /* The deletion must stop at the next alignment boundary, if
1375 ALIGNMENT_REL is non-NULL. */
1376 toaddr = sec->size;
1377 if (alignment_rel)
1378 toaddr = alignment_rel->r_offset;
1379
1380 irel = elf_section_data (sec)->relocs;
1381 irelend = irel + sec->reloc_count;
1382
1383 /* Actually delete the bytes. */
1384 memmove (contents + addr, contents + addr + count,
1385 (size_t) (toaddr - addr - count));
1386
1387 /* If we don't have an alignment marker to worry about, we can just
1388 shrink the section. Otherwise, we have to fill in the newly
1389 created gap with NOP insns (0x03). */
1390 if (force_snip)
1391 sec->size -= count;
1392 else
1393 memset (contents + toaddr - count, 0x03, count);
1394
1395 /* Adjust all the relocs. */
1396 for (irel = elf_section_data (sec)->relocs; irel < irelend; irel++)
1397 {
1398 /* Get the new reloc address. */
1399 if (irel->r_offset > addr
1400 && (irel->r_offset < toaddr
1401 || (force_snip && irel->r_offset == toaddr)))
1402 irel->r_offset -= count;
1403
1404 /* If we see an ALIGN marker at the end of the gap, we move it
1405 to the beginning of the gap, since marking these gaps is what
1406 they're for. */
1407 if (irel->r_offset == toaddr
1408 && ELF32_R_TYPE (irel->r_info) == R_RX_RH_RELAX
1409 && irel->r_addend & RX_RELAXA_ALIGN)
1410 irel->r_offset -= count;
1411 }
1412
1413 /* Adjust the local symbols defined in this section. */
1414 symtab_hdr = &elf_tdata (abfd)->symtab_hdr;
1415 isym = (Elf_Internal_Sym *) symtab_hdr->contents;
1416 isymend = isym + symtab_hdr->sh_info;
1417
1418 for (; isym < isymend; isym++)
1419 {
1420 /* If the symbol is in the range of memory we just moved, we
1421 have to adjust its value. */
1422 if (isym->st_shndx == sec_shndx
1423 && isym->st_value > addr
1424 && isym->st_value < toaddr)
1425 isym->st_value -= count;
1426
1427 /* If the symbol *spans* the bytes we just deleted (i.e. its
1428 *end* is in the moved bytes but its *start* isn't), then we
1429 must adjust its size. */
1430 if (isym->st_shndx == sec_shndx
1431 && isym->st_value < addr
1432 && isym->st_value + isym->st_size > addr
1433 && isym->st_value + isym->st_size < toaddr)
1434 isym->st_size -= count;
1435 }
1436
1437 /* Now adjust the global symbols defined in this section. */
1438 symcount = (symtab_hdr->sh_size / sizeof (Elf32_External_Sym)
1439 - symtab_hdr->sh_info);
1440 sym_hashes = elf_sym_hashes (abfd);
1441 end_hashes = sym_hashes + symcount;
1442
1443 for (; sym_hashes < end_hashes; sym_hashes++)
1444 {
1445 struct elf_link_hash_entry *sym_hash = *sym_hashes;
1446
1447 if ((sym_hash->root.type == bfd_link_hash_defined
1448 || sym_hash->root.type == bfd_link_hash_defweak)
1449 && sym_hash->root.u.def.section == sec)
1450 {
1451 /* As above, adjust the value if needed. */
1452 if (sym_hash->root.u.def.value > addr
1453 && sym_hash->root.u.def.value < toaddr)
1454 sym_hash->root.u.def.value -= count;
1455
1456 /* As above, adjust the size if needed. */
1457 if (sym_hash->root.u.def.value < addr
1458 && sym_hash->root.u.def.value + sym_hash->size > addr
1459 && sym_hash->root.u.def.value + sym_hash->size < toaddr)
1460 sym_hash->size -= count;
1461 }
1462 }
1463
1464 return TRUE;
1465 }
1466
1467 /* Used to sort relocs by address. If relocs have the same address,
1468 we maintain their relative order, except that R_RX_RH_RELAX
1469 alignment relocs must be the first reloc for any given address. */
1470
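/* For instance, if an R_RX_RH_RELAX alignment marker, an R_RX_SYM and
   an operand reloc all share one r_offset, the sort below floats the
   alignment marker to the front and keeps the others in their
   original relative order, which is what the relaxation pass relies
   on.  */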
1471 static void
1472 reloc_bubblesort (Elf_Internal_Rela * r, int count)
1473 {
1474 int i;
1475 bfd_boolean again;
1476 bfd_boolean swappit;
1477
1478 /* This is almost a classic bubblesort. It's the slowest sort, but
1479 we're taking advantage of the fact that the relocations are
1480 mostly in order already (the assembler emits them that way) and
1481 we need relocs with the same address to remain in the same
1482 relative order. */
1483 again = TRUE;
1484 while (again)
1485 {
1486 again = FALSE;
1487 for (i = 0; i < count - 1; i ++)
1488 {
1489 if (r[i].r_offset > r[i + 1].r_offset)
1490 swappit = TRUE;
1491 else if (r[i].r_offset < r[i + 1].r_offset)
1492 swappit = FALSE;
1493 else if (ELF32_R_TYPE (r[i + 1].r_info) == R_RX_RH_RELAX
1494 && (r[i + 1].r_addend & RX_RELAXA_ALIGN))
1495 swappit = TRUE;
1496 else if (ELF32_R_TYPE (r[i + 1].r_info) == R_RX_RH_RELAX
1497 && (r[i + 1].r_addend & RX_RELAXA_ELIGN)
1498 && !(ELF32_R_TYPE (r[i].r_info) == R_RX_RH_RELAX
1499 && (r[i].r_addend & RX_RELAXA_ALIGN)))
1500 swappit = TRUE;
1501 else
1502 swappit = FALSE;
1503
1504 if (swappit)
1505 {
1506 Elf_Internal_Rela tmp;
1507
1508 tmp = r[i];
1509 r[i] = r[i + 1];
1510 r[i + 1] = tmp;
1511 /* If we do move a reloc back, re-scan to see if it
1512 needs to be moved even further back. This avoids
1513 most of the O(n^2) behavior for our cases. */
1514 if (i > 0)
1515 i -= 2;
1516 again = TRUE;
1517 }
1518 }
1519 }
1520 }
1521
1522
1523 #define OFFSET_FOR_RELOC(rel, lrel, scale) \
1524 rx_offset_for_reloc (abfd, rel + 1, symtab_hdr, shndx_buf, intsyms, \
1525 lrel, abfd, sec, link_info, scale)
1526
1527 static bfd_vma
1528 rx_offset_for_reloc (bfd * abfd,
1529 Elf_Internal_Rela * rel,
1530 Elf_Internal_Shdr * symtab_hdr,
1531 Elf_External_Sym_Shndx * shndx_buf ATTRIBUTE_UNUSED,
1532 Elf_Internal_Sym * intsyms,
1533 Elf_Internal_Rela ** lrel,
1534 bfd * input_bfd,
1535 asection * input_section,
1536 struct bfd_link_info * info,
1537 int * scale)
1538 {
1539 bfd_vma symval;
1540 bfd_reloc_status_type r;
1541
1542 *scale = 1;
1543
1544 /* REL is the first of 1..N relocations. We compute the symbol
1545 value for each relocation, then combine them if needed. LREL
1546 gets a pointer to the last relocation used. */
1547 while (1)
1548 {
1549 int32_t tmp1, tmp2;
1550
1551 /* Get the value of the symbol referred to by the reloc. */
1552 if (ELF32_R_SYM (rel->r_info) < symtab_hdr->sh_info)
1553 {
1554 /* A local symbol. */
1555 Elf_Internal_Sym *isym;
1556 asection *ssec;
1557
1558 isym = intsyms + ELF32_R_SYM (rel->r_info);
1559
1560 if (isym->st_shndx == SHN_UNDEF)
1561 ssec = bfd_und_section_ptr;
1562 else if (isym->st_shndx == SHN_ABS)
1563 ssec = bfd_abs_section_ptr;
1564 else if (isym->st_shndx == SHN_COMMON)
1565 ssec = bfd_com_section_ptr;
1566 else
1567 ssec = bfd_section_from_elf_index (abfd,
1568 isym->st_shndx);
1569
1570 /* Initial symbol value. */
1571 symval = isym->st_value;
1572
1573 /* GAS may have made this symbol relative to a section, in
1574 which case, we have to add the addend to find the
1575 symbol. */
1576 if (ELF_ST_TYPE (isym->st_info) == STT_SECTION)
1577 symval += rel->r_addend;
1578
1579 if (ssec)
1580 {
1581 if ((ssec->flags & SEC_MERGE)
1582 && ssec->sec_info_type == ELF_INFO_TYPE_MERGE)
1583 symval = _bfd_merged_section_offset (abfd, & ssec,
1584 elf_section_data (ssec)->sec_info,
1585 symval);
1586 }
1587
1588 /* Now make the offset relative to where the linker is putting it. */
1589 if (ssec)
1590 symval +=
1591 ssec->output_section->vma + ssec->output_offset;
1592
1593 symval += rel->r_addend;
1594 }
1595 else
1596 {
1597 unsigned long indx;
1598 struct elf_link_hash_entry * h;
1599
1600 /* An external symbol. */
1601 indx = ELF32_R_SYM (rel->r_info) - symtab_hdr->sh_info;
1602 h = elf_sym_hashes (abfd)[indx];
1603 BFD_ASSERT (h != NULL);
1604
1605 if (h->root.type != bfd_link_hash_defined
1606 && h->root.type != bfd_link_hash_defweak)
1607 {
1608 /* This appears to be a reference to an undefined
1609 symbol. Just ignore it--it will be caught by the
1610 regular reloc processing. */
1611 if (lrel)
1612 *lrel = rel;
1613 return 0;
1614 }
1615
1616 symval = (h->root.u.def.value
1617 + h->root.u.def.section->output_section->vma
1618 + h->root.u.def.section->output_offset);
1619
1620 symval += rel->r_addend;
1621 }
1622
1623 switch (ELF32_R_TYPE (rel->r_info))
1624 {
1625 case R_RX_SYM:
1626 RX_STACK_PUSH (symval);
1627 break;
1628
1629 case R_RX_OPneg:
1630 RX_STACK_POP (tmp1);
1631 tmp1 = - tmp1;
1632 RX_STACK_PUSH (tmp1);
1633 break;
1634
1635 case R_RX_OPadd:
1636 RX_STACK_POP (tmp1);
1637 RX_STACK_POP (tmp2);
1638 tmp1 += tmp2;
1639 RX_STACK_PUSH (tmp1);
1640 break;
1641
1642 case R_RX_OPsub:
1643 RX_STACK_POP (tmp1);
1644 RX_STACK_POP (tmp2);
1645 tmp2 -= tmp1;
1646 RX_STACK_PUSH (tmp2);
1647 break;
1648
1649 case R_RX_OPmul:
1650 RX_STACK_POP (tmp1);
1651 RX_STACK_POP (tmp2);
1652 tmp1 *= tmp2;
1653 RX_STACK_PUSH (tmp1);
1654 break;
1655
1656 case R_RX_OPdiv:
1657 RX_STACK_POP (tmp1);
1658 RX_STACK_POP (tmp2);
1659 tmp1 /= tmp2;
1660 RX_STACK_PUSH (tmp1);
1661 break;
1662
1663 case R_RX_OPshla:
1664 RX_STACK_POP (tmp1);
1665 RX_STACK_POP (tmp2);
1666 tmp1 <<= tmp2;
1667 RX_STACK_PUSH (tmp1);
1668 break;
1669
1670 case R_RX_OPshra:
1671 RX_STACK_POP (tmp1);
1672 RX_STACK_POP (tmp2);
1673 tmp1 >>= tmp2;
1674 RX_STACK_PUSH (tmp1);
1675 break;
1676
1677 case R_RX_OPsctsize:
1678 RX_STACK_PUSH (input_section->size);
1679 break;
1680
1681 case R_RX_OPscttop:
1682 RX_STACK_PUSH (input_section->output_section->vma);
1683 break;
1684
1685 case R_RX_OPand:
1686 RX_STACK_POP (tmp1);
1687 RX_STACK_POP (tmp2);
1688 tmp1 &= tmp2;
1689 RX_STACK_PUSH (tmp1);
1690 break;
1691
1692 case R_RX_OPor:
1693 RX_STACK_POP (tmp1);
1694 RX_STACK_POP (tmp2);
1695 tmp1 |= tmp2;
1696 RX_STACK_PUSH (tmp1);
1697 break;
1698
1699 case R_RX_OPxor:
1700 RX_STACK_POP (tmp1);
1701 RX_STACK_POP (tmp2);
1702 tmp1 ^= tmp2;
1703 RX_STACK_PUSH (tmp1);
1704 break;
1705
1706 case R_RX_OPnot:
1707 RX_STACK_POP (tmp1);
1708 tmp1 = ~ tmp1;
1709 RX_STACK_PUSH (tmp1);
1710 break;
1711
1712 case R_RX_OPmod:
1713 RX_STACK_POP (tmp1);
1714 RX_STACK_POP (tmp2);
1715 tmp1 %= tmp2;
1716 RX_STACK_PUSH (tmp1);
1717 break;
1718
1719 case R_RX_OPromtop:
1720 RX_STACK_PUSH (get_romstart (&r, info, input_bfd, input_section, rel->r_offset));
1721 break;
1722
1723 case R_RX_OPramtop:
1724 RX_STACK_PUSH (get_ramstart (&r, info, input_bfd, input_section, rel->r_offset));
1725 break;
1726
1727 case R_RX_DIR16UL:
1728 case R_RX_DIR8UL:
1729 case R_RX_ABS16UL:
1730 case R_RX_ABS8UL:
1731 if (rx_stack_top)
1732 RX_STACK_POP (symval);
1733 if (lrel)
1734 *lrel = rel;
1735 *scale = 4;
1736 return symval;
1737
1738 case R_RX_DIR16UW:
1739 case R_RX_DIR8UW:
1740 case R_RX_ABS16UW:
1741 case R_RX_ABS8UW:
1742 if (rx_stack_top)
1743 RX_STACK_POP (symval);
1744 if (lrel)
1745 *lrel = rel;
1746 *scale = 2;
1747 return symval;
1748
1749 default:
1750 if (rx_stack_top)
1751 RX_STACK_POP (symval);
1752 if (lrel)
1753 *lrel = rel;
1754 return symval;
1755 }
1756
1757 rel ++;
1758 }
1759 }
1760
1761 static void
1762 move_reloc (Elf_Internal_Rela * irel, Elf_Internal_Rela * srel, int delta)
1763 {
1764 bfd_vma old_offset = srel->r_offset;
1765
1766 irel ++;
1767 while (irel <= srel)
1768 {
1769 if (irel->r_offset == old_offset)
1770 irel->r_offset += delta;
1771 irel ++;
1772 }
1773 }
1774
1775 /* Relax one section. */
1776
1777 static bfd_boolean
1778 elf32_rx_relax_section (bfd * abfd,
1779 asection * sec,
1780 struct bfd_link_info * link_info,
1781 bfd_boolean * again,
1782 bfd_boolean allow_pcrel3)
1783 {
1784 Elf_Internal_Shdr * symtab_hdr;
1785 Elf_Internal_Shdr * shndx_hdr;
1786 Elf_Internal_Rela * internal_relocs;
1787 Elf_Internal_Rela * free_relocs = NULL;
1788 Elf_Internal_Rela * irel;
1789 Elf_Internal_Rela * srel;
1790 Elf_Internal_Rela * irelend;
1791 Elf_Internal_Rela * next_alignment;
1792 Elf_Internal_Rela * prev_alignment;
1793 bfd_byte * contents = NULL;
1794 bfd_byte * free_contents = NULL;
1795 Elf_Internal_Sym * intsyms = NULL;
1796 Elf_Internal_Sym * free_intsyms = NULL;
1797 Elf_External_Sym_Shndx * shndx_buf = NULL;
1798 bfd_vma pc;
1799 bfd_vma sec_start;
1800 bfd_vma symval = 0;
1801 int pcrel = 0;
1802 int code = 0;
1803 int section_alignment_glue;
1804 /* How much to scale the relocation by - 1, 2, or 4. */
1805 int scale;
1806
1807 /* Assume nothing changes. */
1808 *again = FALSE;
1809
1810 /* We don't have to do anything for a relocatable link, if
1811 this section does not have relocs, or if this is not a
1812 code section. */
1813 if (link_info->relocatable
1814 || (sec->flags & SEC_RELOC) == 0
1815 || sec->reloc_count == 0
1816 || (sec->flags & SEC_CODE) == 0)
1817 return TRUE;
1818
1819 symtab_hdr = &elf_tdata (abfd)->symtab_hdr;
1820 shndx_hdr = &elf_tdata (abfd)->symtab_shndx_hdr;
1821
1822 sec_start = sec->output_section->vma + sec->output_offset;
1823
1824 /* Get the section contents. */
1825 if (elf_section_data (sec)->this_hdr.contents != NULL)
1826 contents = elf_section_data (sec)->this_hdr.contents;
1827 /* Go get them off disk. */
1828 else
1829 {
1830 if (! bfd_malloc_and_get_section (abfd, sec, &contents))
1831 goto error_return;
1832 elf_section_data (sec)->this_hdr.contents = contents;
1833 }
1834
1835 /* Read this BFD's symbols. */
1836 /* Get cached copy if it exists. */
1837 if (symtab_hdr->contents != NULL)
1838 intsyms = (Elf_Internal_Sym *) symtab_hdr->contents;
1839 else
1840 {
1841 intsyms = bfd_elf_get_elf_syms (abfd, symtab_hdr, symtab_hdr->sh_info, 0, NULL, NULL, NULL);
1842 symtab_hdr->contents = (bfd_byte *) intsyms;
1843 }
1844
1845 if (shndx_hdr->sh_size != 0)
1846 {
1847 bfd_size_type amt;
1848
1849 amt = symtab_hdr->sh_info;
1850 amt *= sizeof (Elf_External_Sym_Shndx);
1851 shndx_buf = (Elf_External_Sym_Shndx *) bfd_malloc (amt);
1852 if (shndx_buf == NULL)
1853 goto error_return;
1854 if (bfd_seek (abfd, shndx_hdr->sh_offset, SEEK_SET) != 0
1855 || bfd_bread ((PTR) shndx_buf, amt, abfd) != amt)
1856 goto error_return;
1857 shndx_hdr->contents = (bfd_byte *) shndx_buf;
1858 }
1859
1860 /* Get a copy of the native relocations. */
1861 internal_relocs = (_bfd_elf_link_read_relocs
1862 (abfd, sec, (PTR) NULL, (Elf_Internal_Rela *) NULL,
1863 link_info->keep_memory));
1864 if (internal_relocs == NULL)
1865 goto error_return;
1866 if (! link_info->keep_memory)
1867 free_relocs = internal_relocs;
1868
1869 /* The RL_ relocs must be just before the operand relocs they go
1870 with, so we must sort them to guarantee this. We use bubblesort
1871 instead of qsort so we can guarantee that relocs with the same
1872 address remain in the same relative order. */
1873 reloc_bubblesort (internal_relocs, sec->reloc_count);
1874
1875 /* Walk through them looking for relaxing opportunities. */
1876 irelend = internal_relocs + sec->reloc_count;
1877
1878 /* This will either be NULL or a pointer to the next alignment
1879 relocation. */
1880 next_alignment = internal_relocs;
1881 /* This will be the previous alignment, although at first it points
1882 to the first real relocation. */
1883 prev_alignment = internal_relocs;
1884
1885 /* We calculate worst case shrinkage caused by alignment directives.
1886 Not fool-proof, but better than either ignoring the problem or
1887 doing heavy duty analysis of all the alignment markers in all
1888 input sections. */
1889 section_alignment_glue = 0;
1890 for (irel = internal_relocs; irel < irelend; irel++)
1891 if (ELF32_R_TYPE (irel->r_info) == R_RX_RH_RELAX
1892 && irel->r_addend & RX_RELAXA_ALIGN)
1893 {
1894 int this_glue = 1 << (irel->r_addend & RX_RELAXA_ANUM);
1895
1896 if (section_alignment_glue < this_glue)
1897 section_alignment_glue = this_glue;
1898 }
1899 /* Worst case is all 0..N alignments, in order, causing 2*N-1 byte
1900 shrinkage. */
1901 section_alignment_glue *= 2;
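/* For example, a section whose largest alignment marker asks for 2^3
   (8 byte) alignment ends up with section_alignment_glue = 16, i.e.
   we budget sixteen bytes of worst-case padding growth for it.  */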
1902
1903 for (irel = internal_relocs; irel < irelend; irel++)
1904 {
1905 unsigned char *insn;
1906 int nrelocs;
1907
1908 /* The insns we care about are all marked with one of these. */
1909 if (ELF32_R_TYPE (irel->r_info) != R_RX_RH_RELAX)
1910 continue;
1911
1912 if (irel->r_addend & RX_RELAXA_ALIGN
1913 || next_alignment == internal_relocs)
1914 {
1915 /* When we delete bytes, we need to maintain all the alignments
1916 indicated. In addition, we need to be careful about relaxing
1917 jumps across alignment boundaries - these displacements
1918 *grow* when we delete bytes. For now, don't shrink
1919 displacements across an alignment boundary, just in case.
1920 Note that this only affects relocations to the same
1921 section. */
1922 prev_alignment = next_alignment;
1923 next_alignment += 2;
1924 while (next_alignment < irelend
1925 && (ELF32_R_TYPE (next_alignment->r_info) != R_RX_RH_RELAX
1926 || !(next_alignment->r_addend & RX_RELAXA_ELIGN)))
1927 next_alignment ++;
1928 if (next_alignment >= irelend || next_alignment->r_offset == 0)
1929 next_alignment = NULL;
1930 }
1931
1932 /* When we hit alignment markers, see if we've shrunk enough
1933 before them to reduce the gap without violating the alignment
1934 requirements. */
1935 if (irel->r_addend & RX_RELAXA_ALIGN)
1936 {
1937 /* At this point, the next relocation *should* be the ELIGN
1938 end marker. */
1939 Elf_Internal_Rela *erel = irel + 1;
1940 unsigned int alignment, nbytes;
1941
1942 if (ELF32_R_TYPE (erel->r_info) != R_RX_RH_RELAX)
1943 continue;
1944 if (!(erel->r_addend & RX_RELAXA_ELIGN))
1945 continue;
1946
1947 alignment = 1 << (irel->r_addend & RX_RELAXA_ANUM);
1948
1949 if (erel->r_offset - irel->r_offset < alignment)
1950 continue;
1951
1952 nbytes = erel->r_offset - irel->r_offset;
1953 nbytes /= alignment;
1954 nbytes *= alignment;
1955
1956 elf32_rx_relax_delete_bytes (abfd, sec, erel->r_offset-nbytes, nbytes, next_alignment,
1957 erel->r_offset == sec->size);
1958 *again = TRUE;
1959
1960 continue;
1961 }
1962
1963 if (irel->r_addend & RX_RELAXA_ELIGN)
1964 continue;
1965
1966 insn = contents + irel->r_offset;
1967
1968 nrelocs = irel->r_addend & RX_RELAXA_RNUM;
1969
1970 /* At this point, we have an insn that is a candidate for linker
1971 relaxation. There are NRELOCS relocs following that may be
1972 relaxed, although each reloc may be made of more than one
1973 reloc entry (such as gp-rel symbols). */
1974
1975 /* Get the value of the symbol referred to by the reloc. Just
1976 in case this is the last reloc in the list, use the RL's
1977 addend to choose between this reloc (no addend) or the next
1978 (yes addend, which means at least one following reloc). */
1979
1980 /* srel points to the "current" relocation for this insn -
1981 actually the last reloc for a given operand, which is the one
1982 we need to update. We check the relaxations in the same
1983 order that the relocations happen, so we'll just push it
1984 along as we go. */
1985 srel = irel;
1986
1987 pc = sec->output_section->vma + sec->output_offset
1988 + srel->r_offset;
1989
1990 #define GET_RELOC \
1991 symval = OFFSET_FOR_RELOC (srel, &srel, &scale); \
1992 pcrel = symval - pc + srel->r_addend; \
1993 nrelocs --;
1994
1995 #define SNIPNR(offset, nbytes) \
1996 elf32_rx_relax_delete_bytes (abfd, sec, (insn - contents) + offset, nbytes, next_alignment, 0);
1997 #define SNIP(offset, nbytes, newtype) \
1998 SNIPNR (offset, nbytes); \
1999 srel->r_info = ELF32_R_INFO (ELF32_R_SYM (srel->r_info), newtype)
2000
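/* Roughly: GET_RELOC evaluates the operand's symbol (possibly a whole
   R_RX_SYM/R_RX_OP* chain) into SYMVAL and its PC-relative distance
   into PCREL, while, say, SNIP (2, 1, R_RX_DIR8S) would delete one
   byte two bytes into the insn and retype the operand's reloc to
   R_RX_DIR8S; the R_RX_DIR8S argument is only an illustration.  */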
2001 /* The order of these bit tests must match the order that the
2002 relocs appear in. Since we sorted those by offset, we can
2003 predict them. */
2004
2005 /* Note that the numbers in, say, DSP6 are the bit offsets of
2006 the code fields that describe the operand. Bits number 0 for
2007 the MSB of insn[0]. */
2008
2009 /* DSP* codes:
2010 0 00 [reg]
2011 1 01 dsp:8[reg]
2012 2 10 dsp:16[reg]
2013 3 11 reg */
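/* For example, a dsp:16[reg] operand (code 2) whose displacement
   turns out to fit in eight bits is rewritten below to code 1
   (dsp:8[reg]), the now-unused displacement byte is snipped and the
   operand reloc is stepped down to the next smaller type; a dsp:8
   operand whose displacement is zero degrades further to plain
   [reg].  */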
2014 if (irel->r_addend & RX_RELAXA_DSP6)
2015 {
2016 GET_RELOC;
2017
2018 code = insn[0] & 3;
2019 if (code == 2 && symval/scale <= 255)
2020 {
2021 unsigned int newrel = ELF32_R_TYPE (srel->r_info);
2022 insn[0] &= 0xfc;
2023 insn[0] |= 0x01;
2024 newrel = next_smaller_reloc (ELF32_R_TYPE (srel->r_info));
2025 if (newrel != ELF32_R_TYPE (srel->r_info))
2026 {
2027 SNIP (3, 1, newrel);
2028 *again = TRUE;
2029 }
2030 }
2031
2032 else if (code == 1 && symval == 0)
2033 {
2034 insn[0] &= 0xfc;
2035 SNIP (2, 1, R_RX_NONE);
2036 *again = TRUE;
2037 }
2038
2039 /* Special case DSP:5 format: MOV.bwl dsp:5[Rsrc],Rdst. */
2040 else if (code == 1 && symval/scale <= 31
2041 /* Decodable bits. */
2042 && (insn[0] & 0xcc) == 0xcc
2043 /* Width. */
2044 && (insn[0] & 0x30) != 3
2045 /* Register MSBs. */
2046 && (insn[1] & 0x88) == 0x00)
2047 {
2048 int newrel = 0;
2049
2050 insn[0] = 0x88 | (insn[0] & 0x30);
2051 /* The register fields are in the right place already. */
2052
2053 /* We can't relax this new opcode. */
2054 irel->r_addend = 0;
2055
2056 switch ((insn[0] & 0x30) >> 4)
2057 {
2058 case 0:
2059 newrel = R_RX_RH_ABS5p5B;
2060 break;
2061 case 1:
2062 newrel = R_RX_RH_ABS5p5W;
2063 break;
2064 case 2:
2065 newrel = R_RX_RH_ABS5p5L;
2066 break;
2067 }
2068
2069 move_reloc (irel, srel, -2);
2070 SNIP (2, 1, newrel);
2071 }
2072
2073 /* Special case DSP:5 format: MOVU.bw dsp:5[Rsrc],Rdst. */
2074 else if (code == 1 && symval/scale <= 31
2075 /* Decodable bits. */
2076 && (insn[0] & 0xf8) == 0x58
2077 /* Register MSBs. */
2078 && (insn[1] & 0x88) == 0x00)
2079 {
2080 int newrel = 0;
2081
2082 insn[0] = 0xb0 | ((insn[0] & 0x04) << 1);
2083 /* The register fields are in the right place already. */
2084
2085 /* We can't relax this new opcode. */
2086 irel->r_addend = 0;
2087
2088 switch ((insn[0] & 0x08) >> 3)
2089 {
2090 case 0:
2091 newrel = R_RX_RH_ABS5p5B;
2092 break;
2093 case 1:
2094 newrel = R_RX_RH_ABS5p5W;
2095 break;
2096 }
2097
2098 move_reloc (irel, srel, -2);
2099 SNIP (2, 1, newrel);
2100 }
2101 }
2102
2103 /* A DSP4 operand always follows a DSP6 operand, even if there's
2104 no relocation for it. We have to read the code out of the
2105 opcode to calculate the offset of the operand. */
2106 if (irel->r_addend & RX_RELAXA_DSP4)
2107 {
2108 int code6, offset = 0;
2109
2110 GET_RELOC;
2111
2112 code6 = insn[0] & 0x03;
2113 switch (code6)
2114 {
2115 case 0: offset = 2; break;
2116 case 1: offset = 3; break;
2117 case 2: offset = 4; break;
2118 case 3: offset = 2; break;
2119 }
2120
2121 code = (insn[0] & 0x0c) >> 2;
2122
2123 if (code == 2 && symval / scale <= 255)
2124 {
2125 unsigned int newrel = ELF32_R_TYPE (srel->r_info);
2126
2127 insn[0] &= 0xf3;
2128 insn[0] |= 0x04;
2129 newrel = next_smaller_reloc (ELF32_R_TYPE (srel->r_info));
2130 if (newrel != ELF32_R_TYPE (srel->r_info))
2131 {
2132 SNIP (offset+1, 1, newrel);
2133 *again = TRUE;
2134 }
2135 }
2136
2137 else if (code == 1 && symval == 0)
2138 {
2139 insn[0] &= 0xf3;
2140 SNIP (offset, 1, R_RX_NONE);
2141 *again = TRUE;
2142 }
2143 /* Special case DSP:5 format: MOV.bwl Rsrc,dsp:5[Rdst] */
2144 else if (code == 1 && symval/scale <= 31
2145 /* Decodable bits. */
2146 && (insn[0] & 0xc3) == 0xc3
2147 /* Width. */
2148 && (insn[0] & 0x30) != 0x30
2149 /* Register MSBs. */
2150 && (insn[1] & 0x88) == 0x00)
2151 {
2152 int newrel = 0;
2153
2154 insn[0] = 0x80 | (insn[0] & 0x30);
2155 /* The register fields are in the right place already. */
2156
2157 /* We can't relax this new opcode. */
2158 irel->r_addend = 0;
2159
2160 switch ((insn[0] & 0x30) >> 4)
2161 {
2162 case 0:
2163 newrel = R_RX_RH_ABS5p5B;
2164 break;
2165 case 1:
2166 newrel = R_RX_RH_ABS5p5W;
2167 break;
2168 case 2:
2169 newrel = R_RX_RH_ABS5p5L;
2170 break;
2171 }
2172
2173 move_reloc (irel, srel, -2);
2174 SNIP (2, 1, newrel);
2175 }
2176 }
2177
2178 /* These always occur alone, but the offset depends on whether
2179 it's a MEMEX opcode (0x06) or not. */
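 /* That is: for MEMEX the displacement bytes start at offset 3,
    otherwise at offset 4, and the size code sits in the low two bits
    of insn[1].  */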
2180 if (irel->r_addend & RX_RELAXA_DSP14)
2181 {
2182 int offset;
2183 GET_RELOC;
2184
2185 if (insn[0] == 0x06)
2186 offset = 3;
2187 else
2188 offset = 4;
2189
2190 code = insn[1] & 3;
2191
2192 if (code == 2 && symval / scale <= 255)
2193 {
2194 unsigned int newrel = ELF32_R_TYPE (srel->r_info);
2195
2196 insn[1] &= 0xfc;
2197 insn[1] |= 0x01;
2198 newrel = next_smaller_reloc (ELF32_R_TYPE (srel->r_info));
2199 if (newrel != ELF32_R_TYPE (srel->r_info))
2200 {
2201 SNIP (offset, 1, newrel);
2202 *again = TRUE;
2203 }
2204 }
2205 else if (code == 1 && symval == 0)
2206 {
2207 insn[1] &= 0xfc;
2208 SNIP (offset, 1, R_RX_NONE);
2209 *again = TRUE;
2210 }
2211 }
2212
2213 /* IMM* codes:
2214 0 00 imm:32
2215 1 01 simm:8
2216 2 10 simm:16
2217 3 11 simm:24. */
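 /* For example, an imm:32 operand (code 0) that fits in a signed
    24-bit value is rewritten below to simm:24 (code 3), simm:24
    shrinks to simm:16, and simm:16 to simm:8, each step snipping one
    byte from the insn.  */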
2218
2219 /* These always occur alone. */
2220 if (irel->r_addend & RX_RELAXA_IMM6)
2221 {
2222 long ssymval;
2223
2224 GET_RELOC;
2225
2226 /* These relocations sign-extend, so we must do signed compares. */
2227 ssymval = (long) symval;
2228
2229 code = insn[0] & 0x03;
2230
2231 if (code == 0 && ssymval <= 8388607 && ssymval >= -8388608)
2232 {
2233 unsigned int newrel = ELF32_R_TYPE (srel->r_info);
2234
2235 insn[0] &= 0xfc;
2236 insn[0] |= 0x03;
2237 newrel = next_smaller_reloc (ELF32_R_TYPE (srel->r_info));
2238 if (newrel != ELF32_R_TYPE (srel->r_info))
2239 {
2240 SNIP (2, 1, newrel);
2241 *again = TRUE;
2242 }
2243 }
2244
2245 else if (code == 3 && ssymval <= 32767 && ssymval >= -32768)
2246 {
2247 unsigned int newrel = ELF32_R_TYPE (srel->r_info);
2248
2249 insn[0] &= 0xfc;
2250 insn[0] |= 0x02;
2251 newrel = next_smaller_reloc (ELF32_R_TYPE (srel->r_info));
2252 if (newrel != ELF32_R_TYPE (srel->r_info))
2253 {
2254 SNIP (2, 1, newrel);
2255 *again = TRUE;
2256 }
2257 }
2258
2259 /* Special case UIMM8 format: CMP #uimm8,Rdst. */
2260 else if (code == 2 && ssymval <= 255 && ssymval >= 16
2261 /* Decodable bits. */
2262 && (insn[0] & 0xfc) == 0x74
2263 /* Decodable bits. */
2264 && ((insn[1] & 0xf0) == 0x00))
2265 {
2266 int newrel;
2267
2268 insn[0] = 0x75;
2269 insn[1] = 0x50 | (insn[1] & 0x0f);
2270
2271 /* We can't relax this new opcode. */
2272 irel->r_addend = 0;
2273
2274 if (STACK_REL_P (ELF32_R_TYPE (srel->r_info)))
2275 newrel = R_RX_ABS8U;
2276 else
2277 newrel = R_RX_DIR8U;
2278
2279 SNIP (2, 1, newrel);
2280 *again = TRUE;
2281 }
2282
2283 else if (code == 2 && ssymval <= 127 && ssymval >= -128)
2284 {
2285 unsigned int newrel = ELF32_R_TYPE (srel->r_info);
2286
2287 insn[0] &= 0xfc;
2288 insn[0] |= 0x01;
2289 newrel = next_smaller_reloc (ELF32_R_TYPE (srel->r_info));
2290 if (newrel != ELF32_R_TYPE (srel->r_info))
2291 {
2292 SNIP (2, 1, newrel);
2293 *again = TRUE;
2294 }
2295 }
2296
2297 /* Special case UIMM4 format: CMP, MUL, AND, OR. */
2298 else if (code == 1 && ssymval <= 15 && ssymval >= 0
2299 /* Decodable bits and immediate type. */
2300 && insn[0] == 0x75
2301 /* Decodable bits. */
2302 && (insn[1] & 0xc0) == 0x00)
2303 {
2304 static const int newop[4] = { 1, 3, 4, 5 };
2305
2306 insn[0] = 0x60 | newop[insn[1] >> 4];
2307 /* The register number doesn't move. */
2308
2309 /* We can't relax this new opcode. */
2310 irel->r_addend = 0;
2311
2312 move_reloc (irel, srel, -1);
2313
2314 SNIP (2, 1, R_RX_RH_UIMM4p8);
2315 *again = TRUE;
2316 }
2317
2318 /* Special case UIMM4 format: ADD -> ADD/SUB. */
2319 else if (code == 1 && ssymval <= 15 && ssymval >= -15
2320 /* Decodable bits and immediate type. */
2321 && insn[0] == 0x71
2322 /* Same register for source and destination. */
2323 && ((insn[1] >> 4) == (insn[1] & 0x0f)))
2324 {
2325 int newrel;
2326
2327 /* Note that we can't turn "add #0,Rs" into a NOP
2328 because the flags need to be set right. */
2329
2330 if (ssymval < 0)
2331 {
2332 insn[0] = 0x60; /* Subtract. */
2333 newrel = R_RX_RH_UNEG4p8;
2334 }
2335 else
2336 {
2337 insn[0] = 0x62; /* Add. */
2338 newrel = R_RX_RH_UIMM4p8;
2339 }
2340
2341 /* The register number is in the right place. */
2342
2343 /* We can't relax this new opcode. */
2344 irel->r_addend = 0;
2345
2346 move_reloc (irel, srel, -1);
2347
2348 SNIP (2, 1, newrel);
2349 *again = TRUE;
2350 }
2351 }
2352
2353 /* These are either matched with a DSP6 (2-byte base) or an id24
2354 (3-byte base). */
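 /* First find where the immediate starts: opcodes prefixed with 0xfc
    carry a one-byte operand before it, otherwise the DSP6 code in
    the low two bits of insn[0] gives a 2..4 byte base; the
    immediate's size code is in bits 2-3 of insn[1].  */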
2355 if (irel->r_addend & RX_RELAXA_IMM12)
2356 {
2357 int dspcode, offset = 0;
2358 long ssymval;
2359
2360 GET_RELOC;
2361
2362 if ((insn[0] & 0xfc) == 0xfc)
2363 dspcode = 1; /* Just something with one byte operand. */
2364 else
2365 dspcode = insn[0] & 3;
2366 switch (dspcode)
2367 {
2368 case 0: offset = 2; break;
2369 case 1: offset = 3; break;
2370 case 2: offset = 4; break;
2371 case 3: offset = 2; break;
2372 }
2373
2374 /* These relocations sign-extend, so we must do signed compares. */
2375 ssymval = (long) symval;
2376
2377 code = (insn[1] >> 2) & 3;
2378 if (code == 0 && ssymval <= 8388607 && ssymval >= -8388608)
2379 {
2380 unsigned int newrel = ELF32_R_TYPE (srel->r_info);
2381
2382 insn[1] &= 0xf3;
2383 insn[1] |= 0x0c;
2384 newrel = next_smaller_reloc (ELF32_R_TYPE (srel->r_info));
2385 if (newrel != ELF32_R_TYPE (srel->r_info))
2386 {
2387 SNIP (offset, 1, newrel);
2388 *again = TRUE;
2389 }
2390 }
2391
2392 else if (code == 3 && ssymval <= 32767 && ssymval >= -32768)
2393 {
2394 unsigned int newrel = ELF32_R_TYPE (srel->r_info);
2395
2396 insn[1] &= 0xf3;
2397 insn[1] |= 0x08;
2398 newrel = next_smaller_reloc (ELF32_R_TYPE (srel->r_info));
2399 if (newrel != ELF32_R_TYPE (srel->r_info))
2400 {
2401 SNIP (offset, 1, newrel);
2402 *again = TRUE;
2403 }
2404 }
2405
2406 /* Special case UIMM8 format: MOV #uimm8,Rdst. */
2407 else if (code == 2 && ssymval <= 255 && ssymval >= 16
2408 /* Decodable bits. */
2409 && insn[0] == 0xfb
2410 /* Decodable bits. */
2411 && ((insn[1] & 0x03) == 0x02))
2412 {
2413 int newrel;
2414
2415 insn[0] = 0x75;
2416 insn[1] = 0x40 | (insn[1] >> 4);
2417
2418 /* We can't relax this new opcode. */
2419 irel->r_addend = 0;
2420
2421 if (STACK_REL_P (ELF32_R_TYPE (srel->r_info)))
2422 newrel = R_RX_ABS8U;
2423 else
2424 newrel = R_RX_DIR8U;
2425
2426 SNIP (2, 1, newrel);
2427 *again = TRUE;
2428 }
2429
2430 else if (code == 2 && ssymval <= 127 && ssymval >= -128)
2431 {
2432 unsigned int newrel = ELF32_R_TYPE(srel->r_info);
2433
2434 insn[1] &= 0xf3;
2435 insn[1] |= 0x04;
2436 newrel = next_smaller_reloc (ELF32_R_TYPE (srel->r_info));
2437 if (newrel != ELF32_R_TYPE(srel->r_info))
2438 {
2439 SNIP (offset, 1, newrel);
2440 *again = TRUE;
2441 }
2442 }
2443
2444 /* Special case UIMM4 format: MOV #uimm4,Rdst. */
2445 else if (code == 1 && ssymval <= 15 && ssymval >= 0
2446 /* Decodable bits. */
2447 && insn[0] == 0xfb
2448 /* Decodable bits. */
2449 && ((insn[1] & 0x03) == 0x02))
2450 {
2451 insn[0] = 0x66;
2452 insn[1] = insn[1] >> 4;
2453
2454 /* We can't relax this new opcode. */
2455 irel->r_addend = 0;
2456
2457 move_reloc (irel, srel, -1);
2458
2459 SNIP (2, 1, R_RX_RH_UIMM4p8);
2460 *again = TRUE;
2461 }
2462 }
2463
2464 if (irel->r_addend & RX_RELAXA_BRA)
2465 {
2466 unsigned int newrel = ELF32_R_TYPE (srel->r_info);
2467 int max_pcrel3 = 4;
2468 int alignment_glue = 0;
2469
2470 GET_RELOC;
2471
2472 /* Branches over alignment chunks are problematic, as
2473 deleting bytes here makes the branch *further* away. We
2474 can be aggressive with branches within this alignment
2475 block, but not branches outside it. */
2476 if ((prev_alignment == NULL
2477 || symval < (bfd_vma)(sec_start + prev_alignment->r_offset))
2478 && (next_alignment == NULL
2479 || symval > (bfd_vma)(sec_start + next_alignment->r_offset)))
2480 alignment_glue = section_alignment_glue;
2481
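 /* If the very next reloc is itself a relaxable branch that lies
    between this insn and its target, be a little more conservative
    about using the 3-bit form, since that branch may shrink too.  */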
2482 if (ELF32_R_TYPE(srel[1].r_info) == R_RX_RH_RELAX
2483 && srel[1].r_addend & RX_RELAXA_BRA
2484 && srel[1].r_offset < irel->r_offset + pcrel)
2485 max_pcrel3 ++;
2486
2487 newrel = next_smaller_reloc (ELF32_R_TYPE (srel->r_info));
2488
2489 /* The values we compare PCREL with are not what you'd
2490 expect; they're off by a little to compensate for (1)
2491 where the reloc is relative to the insn, and (2) how much
2492 the insn is going to change when we relax it. */
2493
2494 /* These we have to decode. */
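 /* Roughly: BRA pcdsp:24 -> pcdsp:16 -> pcdsp:8 -> BRA.S pcdsp:3,
    BSR pcdsp:24 -> pcdsp:16, BEQ/BNE .W -> .B -> .S, and the
    synthetic BCnd-over-BRA sequences collapse into real conditional
    branches; the 3-bit forms are only used when ALLOW_PCREL3 is set
    (the final extra pass).  */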
2495 switch (insn[0])
2496 {
2497 case 0x04: /* BRA pcdsp:24 */
2498 if (-32768 + alignment_glue <= pcrel
2499 && pcrel <= 32765 - alignment_glue)
2500 {
2501 insn[0] = 0x38;
2502 SNIP (3, 1, newrel);
2503 *again = TRUE;
2504 }
2505 break;
2506
2507 case 0x38: /* BRA pcdsp:16 */
2508 if (-128 + alignment_glue <= pcrel
2509 && pcrel <= 127 - alignment_glue)
2510 {
2511 insn[0] = 0x2e;
2512 SNIP (2, 1, newrel);
2513 *again = TRUE;
2514 }
2515 break;
2516
2517 case 0x2e: /* BRA pcdsp:8 */
2518 /* Note that there's a risk here of shortening things so
2519 much that we no longer fit this reloc; it *should*
2520 only happen when you branch across a branch, and that
2521 branch also devolves into BRA.S. "Real" code should
2522 be OK. */
2523 if (max_pcrel3 + alignment_glue <= pcrel
2524 && pcrel <= 10 - alignment_glue
2525 && allow_pcrel3)
2526 {
2527 insn[0] = 0x08;
2528 SNIP (1, 1, newrel);
2529 move_reloc (irel, srel, -1);
2530 *again = TRUE;
2531 }
2532 break;
2533
2534 case 0x05: /* BSR pcdsp:24 */
2535 if (-32768 + alignment_glue <= pcrel
2536 && pcrel <= 32765 - alignment_glue)
2537 {
2538 insn[0] = 0x39;
2539 SNIP (1, 1, newrel);
2540 *again = TRUE;
2541 }
2542 break;
2543
2544 case 0x3a: /* BEQ.W pcdsp:16 */
2545 case 0x3b: /* BNE.W pcdsp:16 */
2546 if (-128 + alignment_glue <= pcrel
2547 && pcrel <= 127 - alignment_glue)
2548 {
2549 insn[0] = 0x20 | (insn[0] & 1);
2550 SNIP (1, 1, newrel);
2551 *again = TRUE;
2552 }
2553 break;
2554
2555 case 0x20: /* BEQ.B pcdsp:8 */
2556 case 0x21: /* BNE.B pcdsp:8 */
2557 if (max_pcrel3 + alignment_glue <= pcrel
2558 && pcrel <= 10 - alignment_glue
2559 && allow_pcrel3)
2560 {
2561 insn[0] = 0x10 | ((insn[0] & 1) << 3);
2562 SNIP (1, 1, newrel);
2563 move_reloc (irel, srel, -1);
2564 *again = TRUE;
2565 }
2566 break;
2567
2568 case 0x16: /* synthetic BNE dsp24 */
2569 case 0x1e: /* synthetic BEQ dsp24 */
2570 if (-32767 + alignment_glue <= pcrel
2571 && pcrel <= 32766 - alignment_glue
2572 && insn[1] == 0x04)
2573 {
2574 if (insn[0] == 0x16)
2575 insn[0] = 0x3b;
2576 else
2577 insn[0] = 0x3a;
2578 /* We snip out the bytes at the end, otherwise the reloc
2579 would also get moved, and by too much. */
2580 SNIP (3, 2, newrel);
2581 move_reloc (irel, srel, -1);
2582 *again = TRUE;
2583 }
2584 break;
2585 }
2586
2587 /* Special case - synthetic conditional branches, pcrel24.
2588 Note that EQ and NE have been handled above. */
2589 if ((insn[0] & 0xf0) == 0x20
2590 && insn[1] == 0x06
2591 && insn[2] == 0x04
2592 && srel->r_offset != irel->r_offset + 1
2593 && -32767 + alignment_glue <= pcrel
2594 && pcrel <= 32766 - alignment_glue)
2595 {
2596 insn[1] = 0x05;
2597 insn[2] = 0x38;
2598 SNIP (5, 1, newrel);
2599 *again = TRUE;
2600 }
2601
2602 /* Special case - synthetic conditional branches, pcrel16 */
2603 if ((insn[0] & 0xf0) == 0x20
2604 && insn[1] == 0x05
2605 && insn[2] == 0x38
2606 && srel->r_offset != irel->r_offset + 1
2607 && -127 + alignment_glue <= pcrel
2608 && pcrel <= 126 - alignment_glue)
2609 {
2610 int cond = (insn[0] & 0x0f) ^ 0x01;
2611
2612 insn[0] = 0x20 | cond;
2613 /* By moving the reloc first, we avoid having
2614 delete_bytes move it also. */
2615 move_reloc (irel, srel, -2);
2616 SNIP (2, 3, newrel);
2617 *again = TRUE;
2618 }
2619 }
2620
2621 BFD_ASSERT (nrelocs == 0);
2622
2623 /* Special case - check MOV.bwl #IMM, dsp[reg] and see if we can
2624 use the short MOV.bwl #uimm:8, dsp:5[Rdst] (Rdst = r0..r7) format.
2625 This is tricky because it may have one or two relocations. */
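 /* In the code below: DCODE is the displacement size code (low two
    bits of insn[0]), ICODE the immediate size code (bits 2-3 of
    insn[1]), REG the destination base register (high nibble of
    insn[1]), and IOFF the offset of the immediate within the insn.
    The displacement and the immediate may each come from a reloc or
    be read straight out of the insn.  */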
2626 if ((insn[0] & 0xfc) == 0xf8
2627 && (insn[1] & 0x80) == 0x00
2628 && (insn[0] & 0x03) != 0x03)
2629 {
2630 int dcode, icode, reg, ioff, dscale, ilen;
2631 bfd_vma disp_val = 0;
2632 long imm_val = 0;
2633 Elf_Internal_Rela * disp_rel = 0;
2634 Elf_Internal_Rela * imm_rel = 0;
2635
2636 /* Reset this. */
2637 srel = irel;
2638
2639 dcode = insn[0] & 0x03;
2640 icode = (insn[1] >> 2) & 0x03;
2641 reg = (insn[1] >> 4) & 0x0f;
2642
2643 ioff = dcode == 1 ? 3 : dcode == 2 ? 4 : 2;
2644
2645 /* Figure out what the displacement is. */
2646 if (dcode == 1 || dcode == 2)
2647 {
2648 /* There's a displacement. See if there's a reloc for it. */
2649 if (srel[1].r_offset == irel->r_offset + 2)
2650 {
2651 GET_RELOC;
2652 disp_val = symval;
2653 disp_rel = srel;
2654 }
2655 else
2656 {
2657 if (dcode == 1)
2658 disp_val = insn[2];
2659 else
2660 {
2661 #if RX_OPCODE_BIG_ENDIAN
2662 disp_val = insn[2] * 256 + insn[3];
2663 #else
2664 disp_val = insn[2] + insn[3] * 256;
2665 #endif
2666 }
2667 switch (insn[1] & 3)
2668 {
2669 case 1:
2670 disp_val *= 2;
2671 scale = 2;
2672 break;
2673 case 2:
2674 disp_val *= 4;
2675 scale = 4;
2676 break;
2677 }
2678 }
2679 }
2680
2681 dscale = scale;
2682
2683 /* Figure out what the immediate is. */
2684 if (srel[1].r_offset == irel->r_offset + ioff)
2685 {
2686 GET_RELOC;
2687 imm_val = (long) symval;
2688 imm_rel = srel;
2689 }
2690 else
2691 {
2692 unsigned char * ip = insn + ioff;
2693
2694 switch (icode)
2695 {
2696 case 1:
2697 /* For byte writes, we don't sign extend. Makes the math easier later. */
2698 if (scale == 1)
2699 imm_val = ip[0];
2700 else
2701 imm_val = (char) ip[0];
2702 break;
2703 case 2:
2704 #if RX_OPCODE_BIG_ENDIAN
2705 imm_val = ((char) ip[0] << 8) | ip[1];
2706 #else
2707 imm_val = ((char) ip[1] << 8) | ip[0];
2708 #endif
2709 break;
2710 case 3:
2711 #if RX_OPCODE_BIG_ENDIAN
2712 imm_val = ((char) ip[0] << 16) | (ip[1] << 8) | ip[2];
2713 #else
2714 imm_val = ((char) ip[2] << 16) | (ip[1] << 8) | ip[0];
2715 #endif
2716 break;
2717 case 0:
2718 #if RX_OPCODE_BIG_ENDIAN
2719 imm_val = (ip[0] << 24) | (ip[1] << 16) | (ip[2] << 8) | ip[3];
2720 #else
2721 imm_val = (ip[3] << 24) | (ip[2] << 16) | (ip[1] << 8) | ip[0];
2722 #endif
2723 break;
2724 }
2725 }
2726
2727 ilen = 2;
2728
2729 switch (dcode)
2730 {
2731 case 1:
2732 ilen += 1;
2733 break;
2734 case 2:
2735 ilen += 2;
2736 break;
2737 }
2738
2739 switch (icode)
2740 {
2741 case 1:
2742 ilen += 1;
2743 break;
2744 case 2:
2745 ilen += 2;
2746 break;
2747 case 3:
2748 ilen += 3;
2749 break;
2750 case 4:
2751 ilen += 4;
2752 break;
2753 }
2754
2755 /* The shortcut happens when the immediate is 0..255,
2756 register r0 to r7, and displacement (scaled) 0..31. */
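 /* When it applies, the first three bytes become the short form
    (opcode, dsp:5 + reg, uimm:8) and the rest of the insn is
    deleted; a displacement reloc is retyped to the matching
    R_RX_RH_ABS5p8* flavour and an immediate reloc to R_RX_DIR8U.  */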
2757
2758 if (0 <= imm_val && imm_val <= 255
2759 && 0 <= reg && reg <= 7
2760 && disp_val / dscale <= 31)
2761 {
2762 insn[0] = 0x3c | (insn[1] & 0x03);
2763 insn[1] = (((disp_val / dscale) << 3) & 0x80) | (reg << 4) | ((disp_val/dscale) & 0x0f);
2764 insn[2] = imm_val;
2765
2766 if (disp_rel)
2767 {
2768 int newrel = R_RX_NONE;
2769
2770 switch (dscale)
2771 {
2772 case 1:
2773 newrel = R_RX_RH_ABS5p8B;
2774 break;
2775 case 2:
2776 newrel = R_RX_RH_ABS5p8W;
2777 break;
2778 case 4:
2779 newrel = R_RX_RH_ABS5p8L;
2780 break;
2781 }
2782 disp_rel->r_info = ELF32_R_INFO (ELF32_R_SYM (disp_rel->r_info), newrel);
2783 move_reloc (irel, disp_rel, -1);
2784 }
2785 if (imm_rel)
2786 {
2787 imm_rel->r_info = ELF32_R_INFO (ELF32_R_SYM (imm_rel->r_info), R_RX_DIR8U);
2788 move_reloc (disp_rel ? disp_rel : irel,
2789 imm_rel,
2790 irel->r_offset - imm_rel->r_offset + 2);
2791 }
2792
2793 SNIPNR (3, ilen - 3);
2794 *again = TRUE;
2795
2796 /* We can't relax this new opcode. */
2797 irel->r_addend = 0;
2798 }
2799 }
2800 }
2801
2802 /* We can't reliably relax branches to DIR3U_PCREL unless we know
2803 whatever they're branching over won't shrink any more. If we're
2804 basically done here, do one more pass just for branches - but
2805 don't request a pass after that one! */
2806 if (!*again && !allow_pcrel3)
2807 {
2808 bfd_boolean ignored;
2809
2810 elf32_rx_relax_section (abfd, sec, link_info, &ignored, TRUE);
2811 }
2812
2813 return TRUE;
2814
2815 error_return:
2816 if (free_relocs != NULL)
2817 free (free_relocs);
2818
2819 if (free_contents != NULL)
2820 free (free_contents);
2821
2822 if (shndx_buf != NULL)
2823 {
2824 shndx_hdr->contents = NULL;
2825 free (shndx_buf);
2826 }
2827
2828 if (free_intsyms != NULL)
2829 free (free_intsyms);
2830
2831 return FALSE;
2832 }
2833
2834 static bfd_boolean
2835 elf32_rx_relax_section_wrapper (bfd * abfd,
2836 asection * sec,
2837 struct bfd_link_info * link_info,
2838 bfd_boolean * again)
2839 {
2840 return elf32_rx_relax_section (abfd, sec, link_info, again, FALSE);
2841 }
2842 \f
2843 /* Function to set the ELF flag bits. */
2844
2845 static bfd_boolean
2846 rx_elf_set_private_flags (bfd * abfd, flagword flags)
2847 {
2848 elf_elfheader (abfd)->e_flags = flags;
2849 elf_flags_init (abfd) = TRUE;
2850 return TRUE;
2851 }
2852
2853 static bfd_boolean no_warn_mismatch = FALSE;
2854
2855 void bfd_elf32_rx_set_target_flags (bfd_boolean);
2856
2857 void
2858 bfd_elf32_rx_set_target_flags (bfd_boolean user_no_warn_mismatch)
2859 {
2860 no_warn_mismatch = user_no_warn_mismatch;
2861 }
2862
2863 /* Merge backend specific data from an object file to the output
2864 object file when linking. */
2865
2866 static bfd_boolean
2867 rx_elf_merge_private_bfd_data (bfd * ibfd, bfd * obfd)
2868 {
2869 flagword old_flags;
2870 flagword new_flags;
2871 bfd_boolean error = FALSE;
2872
2873 new_flags = elf_elfheader (ibfd)->e_flags;
2874 old_flags = elf_elfheader (obfd)->e_flags;
2875
2876 if (!elf_flags_init (obfd))
2877 {
2878 /* First call, no flags set. */
2879 elf_flags_init (obfd) = TRUE;
2880 elf_elfheader (obfd)->e_flags = new_flags;
2881 }
2882 else if (old_flags != new_flags)
2883 {
2884 flagword known_flags = E_FLAG_RX_64BIT_DOUBLES | E_FLAG_RX_DSP;
2885
2886 if ((old_flags ^ new_flags) & known_flags)
2887 {
2888 /* Only complain if flag bits we care about do not match.
2889 Other bits may be set, since older binaries did use some
2890 deprecated flags. */
2891 if (no_warn_mismatch)
2892 {
2893 elf_elfheader (obfd)->e_flags = (new_flags | old_flags) & known_flags;
2894 }
2895 else
2896 {
2897 (*_bfd_error_handler)
2898 ("ELF header flags mismatch: old_flags = 0x%.8lx, new_flags = 0x%.8lx, filename = %s",
2899 old_flags, new_flags, bfd_get_filename (ibfd));
2900 error = TRUE;
2901 }
2902 }
2903 else
2904 elf_elfheader (obfd)->e_flags = new_flags & known_flags;
2905 }
2906
2907 if (error)
2908 bfd_set_error (bfd_error_bad_value);
2909
2910 return !error;
2911 }
2912 \f
2913 static bfd_boolean
2914 rx_elf_print_private_bfd_data (bfd * abfd, void * ptr)
2915 {
2916 FILE * file = (FILE *) ptr;
2917 flagword flags;
2918
2919 BFD_ASSERT (abfd != NULL && ptr != NULL);
2920
2921 /* Print normal ELF private data. */
2922 _bfd_elf_print_private_bfd_data (abfd, ptr);
2923
2924 flags = elf_elfheader (abfd)->e_flags;
2925 fprintf (file, _("private flags = 0x%lx:"), (long) flags);
2926
2927 if (flags & E_FLAG_RX_64BIT_DOUBLES)
2928 fprintf (file, _(" [64-bit doubles]"));
2929 if (flags & E_FLAG_RX_DSP)
2930 fprintf (file, _(" [dsp]"));
2931
2932 fputc ('\n', file);
2933 return TRUE;
2934 }
2935
2936 /* Return the MACH for an e_flags value. */
2937
2938 static int
2939 elf32_rx_machine (bfd * abfd)
2940 {
2941 if ((elf_elfheader (abfd)->e_flags & EF_RX_CPU_MASK) == EF_RX_CPU_RX)
2942 return bfd_mach_rx;
2943
2944 return 0;
2945 }
2946
2947 static bfd_boolean
2948 rx_elf_object_p (bfd * abfd)
2949 {
2950 bfd_default_set_arch_mach (abfd, bfd_arch_rx,
2951 elf32_rx_machine (abfd));
2952 return TRUE;
2953 }
2954 \f
2955
2956 #ifdef DEBUG
2957 void
2958 rx_dump_symtab (bfd * abfd, void * internal_syms, void * external_syms)
2959 {
2960 size_t locsymcount;
2961 Elf_Internal_Sym * isymbuf;
2962 Elf_Internal_Sym * isymend;
2963 Elf_Internal_Sym * isym;
2964 Elf_Internal_Shdr * symtab_hdr;
2965 bfd_boolean free_internal = FALSE, free_external = FALSE;
2966 char * st_info_str;
2967 char * st_info_stb_str;
2968 char * st_other_str;
2969 char * st_shndx_str;
2970
2971 if (! internal_syms)
2972 {
2973 internal_syms = bfd_malloc (1000);
2974 free_internal = 1;
2975 }
2976 if (! external_syms)
2977 {
2978 external_syms = bfd_malloc (1000);
2979 free_external = 1;
2980 }
2981
2982 symtab_hdr = &elf_tdata (abfd)->symtab_hdr;
2983 locsymcount = symtab_hdr->sh_size / get_elf_backend_data (abfd)->s->sizeof_sym;
2984 if (free_internal)
2985 isymbuf = bfd_elf_get_elf_syms (abfd, symtab_hdr,
2986 symtab_hdr->sh_info, 0,
2987 internal_syms, external_syms, NULL);
2988 else
2989 isymbuf = internal_syms;
2990 isymend = isymbuf + locsymcount;
2991
2992 for (isym = isymbuf ; isym < isymend ; isym++)
2993 {
2994 switch (ELF_ST_TYPE (isym->st_info))
2995 {
2996 case STT_FUNC: st_info_str = "STT_FUNC"; break;
2997 case STT_SECTION: st_info_str = "STT_SECTION"; break;
2998 case STT_FILE: st_info_str = "STT_FILE"; break;
2999 case STT_OBJECT: st_info_str = "STT_OBJECT"; break;
3000 case STT_TLS: st_info_str = "STT_TLS"; break;
3001 default: st_info_str = "";
3002 }
3003 switch (ELF_ST_BIND (isym->st_info))
3004 {
3005 case STB_LOCAL: st_info_stb_str = "STB_LOCAL"; break;
3006 case STB_GLOBAL: st_info_stb_str = "STB_GLOBAL"; break;
3007 default: st_info_stb_str = "";
3008 }
3009 switch (ELF_ST_VISIBILITY (isym->st_other))
3010 {
3011 case STV_DEFAULT: st_other_str = "STV_DEFAULT"; break;
3012 case STV_INTERNAL: st_other_str = "STV_INTERNAL"; break;
3013 case STV_PROTECTED: st_other_str = "STV_PROTECTED"; break;
3014 default: st_other_str = "";
3015 }
3016 switch (isym->st_shndx)
3017 {
3018 case SHN_ABS: st_shndx_str = "SHN_ABS"; break;
3019 case SHN_COMMON: st_shndx_str = "SHN_COMMON"; break;
3020 case SHN_UNDEF: st_shndx_str = "SHN_UNDEF"; break;
3021 default: st_shndx_str = "";
3022 }
3023
3024 printf ("isym = %p st_value = %lx st_size = %lx st_name = (%lu) %s "
3025 "st_info = (%d) %s %s st_other = (%d) %s st_shndx = (%d) %s\n",
3026 isym,
3027 (unsigned long) isym->st_value,
3028 (unsigned long) isym->st_size,
3029 isym->st_name,
3030 bfd_elf_string_from_elf_section (abfd, symtab_hdr->sh_link,
3031 isym->st_name),
3032 isym->st_info, st_info_str, st_info_stb_str,
3033 isym->st_other, st_other_str,
3034 isym->st_shndx, st_shndx_str);
3035 }
3036 if (free_internal)
3037 free (internal_syms);
3038 if (free_external)
3039 free (external_syms);
3040 }
3041
3042 char *
3043 rx_get_reloc (long reloc)
3044 {
3045 if (0 <= reloc && reloc < R_RX_max)
3046 return rx_elf_howto_table[reloc].name;
3047 return "";
3048 }
3049 #endif /* DEBUG */
3050
3051 \f
3052 /* We must take care to keep the on-disk copy of any fully linked
3053 code sections word-swapped if the target is big endian, to match
3054 the Renesas tools. */
3055
3056 /* The rule is: big endian objects that are final-link executables
3057 have code sections stored with 32-bit words swapped relative to
3058 what you'd get by default. */
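
/* Concretely: a code word whose bytes in the memory image are
   b0 b1 b2 b3 (b0 at the lowest address) is stored on disk as
   b3 b2 b1 b0.  rx_get_section_contents undoes the swap on read and
   rx_set_section_contents applies it on write, taking care of any
   unaligned head and tail bytes.  */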
3059
3060 static bfd_boolean
3061 rx_get_section_contents (bfd * abfd,
3062 sec_ptr section,
3063 void * location,
3064 file_ptr offset,
3065 bfd_size_type count)
3066 {
3067 int exec = (abfd->flags & EXEC_P) ? 1 : 0;
3068 int s_code = (section->flags & SEC_CODE) ? 1 : 0;
3069 bfd_boolean rv;
3070
3071 #ifdef DJDEBUG
3072 fprintf (stderr, "dj: get %ld %ld from %s %s e%d sc%d %08lx:%08lx\n",
3073 (long) offset, (long) count, section->name,
3074 bfd_big_endian(abfd) ? "be" : "le",
3075 exec, s_code, (long unsigned) section->filepos,
3076 (long unsigned) offset);
3077 #endif
3078
3079 if (exec && s_code && bfd_big_endian (abfd))
3080 {
3081 char * cloc = (char *) location;
3082 bfd_size_type cnt, end_cnt;
3083
3084 rv = TRUE;
3085
3086 /* Fetch and swap unaligned bytes at the beginning. */
3087 if (offset % 4)
3088 {
3089 char buf[4];
3090
3091 rv = _bfd_generic_get_section_contents (abfd, section, buf,
3092 (offset & -4), 4);
3093 if (!rv)
3094 return FALSE;
3095
3096 bfd_putb32 (bfd_getl32 (buf), buf);
3097
3098 cnt = 4 - (offset % 4);
3099 if (cnt > count)
3100 cnt = count;
3101
3102 memcpy (location, buf + (offset % 4), cnt);
3103
3104 count -= cnt;
3105 offset += cnt;
3106 cloc += cnt;
3107 }
3108
3109 end_cnt = count % 4;
3110
3111 /* Fetch and swap the middle bytes. */
3112 if (count >= 4)
3113 {
3114 rv = _bfd_generic_get_section_contents (abfd, section, cloc, offset,
3115 count - end_cnt);
3116 if (!rv)
3117 return FALSE;
3118
3119 for (cnt = count; cnt >= 4; cnt -= 4, cloc += 4)
3120 bfd_putb32 (bfd_getl32 (cloc), cloc);
3121 }
3122
3123 /* Fetch and swap the end bytes. */
3124 if (end_cnt > 0)
3125 {
3126 char buf[4];
3127
3128 /* Fetch the end bytes. */
3129 rv = _bfd_generic_get_section_contents (abfd, section, buf,
3130 offset + count - end_cnt, 4);
3131 if (!rv)
3132 return FALSE;
3133
3134 bfd_putb32 (bfd_getl32 (buf), buf);
3135 memcpy (cloc, buf, end_cnt);
3136 }
3137 }
3138 else
3139 rv = _bfd_generic_get_section_contents (abfd, section, location, offset, count);
3140
3141 return rv;
3142 }
3143
3144 #ifdef DJDEBUG
3145 static bfd_boolean
3146 rx2_set_section_contents (bfd * abfd,
3147 sec_ptr section,
3148 const void * location,
3149 file_ptr offset,
3150 bfd_size_type count)
3151 {
3152 bfd_size_type i;
3153
3154 fprintf (stderr, " set sec %s %08x loc %p offset %#x count %#x\n",
3155 section->name, (unsigned) section->vma, location, (int) offset, (int) count);
3156 for (i = 0; i < count; i++)
3157 {
3158 if (i % 16 == 0 && i > 0)
3159 fprintf (stderr, "\n");
3160
3161 if (i % 16 && i % 4 == 0)
3162 fprintf (stderr, " ");
3163
3164 if (i % 16 == 0)
3165 fprintf (stderr, " %08x:", (int) (section->vma + offset + i));
3166
3167 fprintf (stderr, " %02x", ((unsigned char *) location)[i]);
3168 }
3169 fprintf (stderr, "\n");
3170
3171 return _bfd_elf_set_section_contents (abfd, section, location, offset, count);
3172 }
3173 #define _bfd_elf_set_section_contents rx2_set_section_contents
3174 #endif
3175
3176 static bfd_boolean
3177 rx_set_section_contents (bfd * abfd,
3178 sec_ptr section,
3179 const void * location,
3180 file_ptr offset,
3181 bfd_size_type count)
3182 {
3183 bfd_boolean exec = (abfd->flags & EXEC_P) ? TRUE : FALSE;
3184 bfd_boolean s_code = (section->flags & SEC_CODE) ? TRUE : FALSE;
3185 bfd_boolean rv;
3186 char * swapped_data = NULL;
3187 bfd_size_type i;
3188 bfd_vma caddr = section->vma + offset;
3189 file_ptr faddr = 0;
3190 bfd_size_type scount;
3191
3192 #ifdef DJDEBUG
3194
3195 fprintf (stderr, "\ndj: set %ld %ld to %s %s e%d sc%d\n",
3196 (long) offset, (long) count, section->name,
3197 bfd_big_endian (abfd) ? "be" : "le",
3198 exec, s_code);
3199
3200 for (i = 0; i < count; i++)
3201 {
3202 int a = section->vma + offset + i;
3203
3204 if (a % 16 == 0 && a > 0)
3205 fprintf (stderr, "\n");
3206
3207 if (a % 16 && a % 4 == 0)
3208 fprintf (stderr, " ");
3209
3210 if (a % 16 == 0 || i == 0)
3211 fprintf (stderr, " %08x:", (int) (section->vma + offset + i));
3212
3213 fprintf (stderr, " %02x", ((unsigned char *) location)[i]);
3214 }
3215
3216 fprintf (stderr, "\n");
3217 #endif
3218
3219 if (! exec || ! s_code || ! bfd_big_endian (abfd))
3220 return _bfd_elf_set_section_contents (abfd, section, location, offset, count);
3221
3222 while (count > 0 && caddr > 0 && caddr % 4)
3223 {
3224 switch (caddr % 4)
3225 {
3226 case 0: faddr = offset + 3; break;
3227 case 1: faddr = offset + 1; break;
3228 case 2: faddr = offset - 1; break;
3229 case 3: faddr = offset - 3; break;
3230 }
3231
3232 rv = _bfd_elf_set_section_contents (abfd, section, location, faddr, 1);
3233 if (! rv)
3234 return rv;
3235
3236 location ++;
3237 offset ++;
3238 count --;
3239 caddr ++;
3240 }
3241
3242 scount = (int)(count / 4) * 4;
3243 if (scount > 0)
3244 {
3245 char * cloc = (char *) location;
3246
3247 swapped_data = (char *) bfd_alloc (abfd, count);
3248
3249 for (i = 0; i < count; i += 4)
3250 {
3251 bfd_vma v = bfd_getl32 (cloc + i);
3252 bfd_putb32 (v, swapped_data + i);
3253 }
3254
3255 rv = _bfd_elf_set_section_contents (abfd, section, swapped_data, offset, scount);
3256
3257 if (!rv)
3258 return rv;
3259 }
3260
3261 count -= scount;
3262 location += scount;
3263 offset += scount;
3264
3265 if (count > 0)
3266 {
3267 caddr = section->vma + offset;
3268 while (count > 0)
3269 {
3270 switch (caddr % 4)
3271 {
3272 case 0: faddr = offset + 3; break;
3273 case 1: faddr = offset + 1; break;
3274 case 2: faddr = offset - 1; break;
3275 case 3: faddr = offset - 3; break;
3276 }
3277 rv = _bfd_elf_set_section_contents (abfd, section, location, faddr, 1);
3278 if (! rv)
3279 return rv;
3280
3281 location ++;
3282 offset ++;
3283 count --;
3284 caddr ++;
3285 }
3286 }
3287
3288 return TRUE;
3289 }
3290
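/* Pad big-endian code sections to a multiple of four bytes at final
   link time; the on-disk word swapping in rx_set_section_contents
   works on whole 32-bit words.  */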
3291 static bfd_boolean
3292 rx_final_link (bfd * abfd, struct bfd_link_info * info)
3293 {
3294 asection * o;
3295
3296 for (o = abfd->sections; o != NULL; o = o->next)
3297 {
3298 #ifdef DJDEBUG
3299 fprintf (stderr, "sec %s fl %x vma %lx lma %lx size %lx raw %lx\n",
3300 o->name, o->flags, o->vma, o->lma, o->size, o->rawsize);
3301 #endif
3302 if (o->flags & SEC_CODE
3303 && bfd_big_endian (abfd)
3304 && (o->size % 4 || o->rawsize % 4))
3305 {
3306 #ifdef DJDEBUG
3307 fprintf (stderr, "adjusting...\n");
3308 #endif
3309 o->size += 4 - (o->size % 4);
3310 o->rawsize += 4 - (o->rawsize % 4);
3311 }
3312 }
3313
3314 return bfd_elf_final_link (abfd, info);
3315 }
3316
3317 static bfd_boolean
3318 elf32_rx_modify_program_headers (bfd * abfd ATTRIBUTE_UNUSED,
3319 struct bfd_link_info * info ATTRIBUTE_UNUSED)
3320 {
3321 const struct elf_backend_data * bed;
3322 struct elf_obj_tdata * tdata;
3323 Elf_Internal_Phdr * phdr;
3324 unsigned int count;
3325 unsigned int i;
3326
3327 bed = get_elf_backend_data (abfd);
3328 tdata = elf_tdata (abfd);
3329 phdr = tdata->phdr;
3330 count = tdata->program_header_size / bed->s->sizeof_phdr;
3331
3332 for (i = count; i-- != 0; )
3333 if (phdr[i].p_type == PT_LOAD)
3334 {
3335 /* The Renesas tools expect p_paddr to be zero. However,
3336 there is no other way to store the writable data in ROM for
3337 startup initialization. So, we let the linker *think*
3338 we're using paddr and vaddr the "usual" way, but at the
3339 last minute we move the paddr into the vaddr (which is what
3340 the simulator uses) and zero out paddr. Note that this
3341 does not affect the section headers, just the program
3342 headers. We hope. */
3343 phdr[i].p_vaddr = phdr[i].p_paddr;
3344 /* If we zero out p_paddr, then the LMA in the section table
3345 becomes wrong. */
3346 /*phdr[i].p_paddr = 0;*/
3347 }
3348
3349 return TRUE;
3350 }
3351 \f
3352 #define ELF_ARCH bfd_arch_rx
3353 #define ELF_MACHINE_CODE EM_RX
3354 #define ELF_MAXPAGESIZE 0x1000
3355
3356 #define TARGET_BIG_SYM bfd_elf32_rx_be_vec
3357 #define TARGET_BIG_NAME "elf32-rx-be"
3358
3359 #define TARGET_LITTLE_SYM bfd_elf32_rx_le_vec
3360 #define TARGET_LITTLE_NAME "elf32-rx-le"
3361
3362 #define elf_info_to_howto_rel NULL
3363 #define elf_info_to_howto rx_info_to_howto_rela
3364 #define elf_backend_object_p rx_elf_object_p
3365 #define elf_backend_relocate_section rx_elf_relocate_section
3366 #define elf_symbol_leading_char ('_')
3367 #define elf_backend_can_gc_sections 1
3368 #define elf_backend_modify_program_headers elf32_rx_modify_program_headers
3369
3370 #define bfd_elf32_bfd_reloc_type_lookup rx_reloc_type_lookup
3371 #define bfd_elf32_bfd_reloc_name_lookup rx_reloc_name_lookup
3372 #define bfd_elf32_bfd_set_private_flags rx_elf_set_private_flags
3373 #define bfd_elf32_bfd_merge_private_bfd_data rx_elf_merge_private_bfd_data
3374 #define bfd_elf32_bfd_print_private_bfd_data rx_elf_print_private_bfd_data
3375 #define bfd_elf32_get_section_contents rx_get_section_contents
3376 #define bfd_elf32_set_section_contents rx_set_section_contents
3377 #define bfd_elf32_bfd_final_link rx_final_link
3378 #define bfd_elf32_bfd_relax_section elf32_rx_relax_section_wrapper
3379
3380 #include "elf32-target.h"