]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blob - bfd/elfnn-kvx.c
kvx: New port.
[thirdparty/binutils-gdb.git] / bfd / elfnn-kvx.c
1 /* KVX-specific support for NN-bit ELF.
2 Copyright (C) 2009-2023 Free Software Foundation, Inc.
3 Contributed by Kalray SA.
4
5 This file is part of BFD, the Binary File Descriptor library.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21 #include "sysdep.h"
22 #include "bfd.h"
23 #include "libiberty.h"
24 #include "libbfd.h"
25 #include "elf-bfd.h"
26 #include "bfdlink.h"
27 #include "objalloc.h"
28 #include "elf/kvx.h"
29 #include "elfxx-kvx.h"
30
31 #define ARCH_SIZE NN
32
33 #if ARCH_SIZE == 64
34 #define LOG_FILE_ALIGN 3
35 #endif
36
37 #if ARCH_SIZE == 32
38 #define LOG_FILE_ALIGN 2
39 #endif
40
41 #define IS_KVX_TLS_RELOC(R_TYPE) \
42 ((R_TYPE) == BFD_RELOC_KVX_S37_TLS_LE_LO10 \
43 || (R_TYPE) == BFD_RELOC_KVX_S37_TLS_LE_UP27 \
44 || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_LE_LO10 \
45 || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_LE_UP27 \
46 || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_LE_EX6 \
47 || (R_TYPE) == BFD_RELOC_KVX_S37_TLS_DTPOFF_LO10 \
48 || (R_TYPE) == BFD_RELOC_KVX_S37_TLS_DTPOFF_UP27 \
49 || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_DTPOFF_LO10 \
50 || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_DTPOFF_UP27 \
51 || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_DTPOFF_EX6 \
52 || (R_TYPE) == BFD_RELOC_KVX_S37_TLS_IE_LO10 \
53 || (R_TYPE) == BFD_RELOC_KVX_S37_TLS_IE_UP27 \
54 || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_IE_LO10 \
55 || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_IE_UP27 \
56 || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_IE_EX6 \
57 || (R_TYPE) == BFD_RELOC_KVX_S37_TLS_GD_LO10 \
58 || (R_TYPE) == BFD_RELOC_KVX_S37_TLS_GD_UP27 \
59 || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_GD_LO10 \
60 || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_GD_UP27 \
61 || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_GD_EX6 \
62 || (R_TYPE) == BFD_RELOC_KVX_S37_TLS_LD_LO10 \
63 || (R_TYPE) == BFD_RELOC_KVX_S37_TLS_LD_UP27 \
64 || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_LD_LO10 \
65 || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_LD_UP27 \
66 || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_LD_EX6 \
67 )
68
69 #define IS_KVX_TLS_RELAX_RELOC(R_TYPE) 0
70
71 #define ELIMINATE_COPY_RELOCS 0
72
73 /* Return size of a relocation entry. HTAB is the bfd's
74 elf_kvx_link_hash_entry. */
75 #define RELOC_SIZE(HTAB) (sizeof (ElfNN_External_Rela))
76
77 /* GOT Entry size - 8 bytes in ELF64 and 4 bytes in ELF32. */
78 #define GOT_ENTRY_SIZE (ARCH_SIZE / 8)
79 #define PLT_ENTRY_SIZE (32)
80
81 #define PLT_SMALL_ENTRY_SIZE (4*4)
82
83 /* Encoding of the nop instruction */
84 #define INSN_NOP 0x00f0037f
85
86 #define kvx_compute_jump_table_size(htab) \
87 (((htab)->root.srelplt == NULL) ? 0 \
88 : (htab)->root.srelplt->reloc_count * GOT_ENTRY_SIZE)
89
/* Template for the initial (PLT0) entry of the procedure linkage
   table.  The KVX port does not use a PLT header yet, so this is a
   zero-filled placeholder of PLT_ENTRY_SIZE bytes.  */
static const bfd_byte elfNN_kvx_small_plt0_entry[PLT_ENTRY_SIZE] =
{
  /* FIXME KVX: no first entry, not used yet.  */
  0
};
95
/* Per function entry in a procedure linkage table looks like this
   if the distance between the PLTGOT and the PLT is < 4GB use
   these PLT entries.  Each entry loads the target address from its
   GOT slot into $r16 and jumps to it; the word-sized load differs
   between ELF32 and ELF64.  */
static const bfd_byte elfNN_kvx_small_plt_entry[PLT_SMALL_ENTRY_SIZE] =
{
  0x10, 0x00, 0xc4, 0x0f,	/* get $r16 = $pc ;; */
#if ARCH_SIZE == 32
  0x10, 0x00, 0x40, 0xb0,	/* lwz $r16 = 0[$r16] ;; */
#else
  0x10, 0x00, 0x40, 0xb8,	/* ld $r16 = 0[$r16] ;; */
#endif
  0x00, 0x00, 0x00, 0x18,	/* upper 27 bits for LSU */
  0x10, 0x00, 0xd8, 0x0f,	/* igoto $r16 ;; */
};
110
/* Long-branch stub template: uses the 43-bit immediate form of the
   make instruction to load the destination into $r16, then jumps
   through it.  The immediate fields are filled in by kvx_relocate
   when the stub is built.  */
static const uint32_t elfNN_kvx_long_branch_stub[] =
{
  0xe0400000,	/* make $r16 = LO10<imm43> EX6<imm43> */
  0x00000000,	/* UP27<imm43> ;; */
  0x0fd80010,	/* igoto $r16 ;; */
};
118
119 #define elf_info_to_howto elfNN_kvx_info_to_howto
120 #define elf_info_to_howto_rel elfNN_kvx_info_to_howto
121
122 #define KVX_ELF_ABI_VERSION 0
123
124 /* In case we're on a 32-bit machine, construct a 64-bit "-1" value. */
125 #define ALL_ONES (~ (bfd_vma) 0)
126
/* Indexed by the bfd internal reloc enumerators.
   Therefore, the table needs to be synced with BFD_RELOC_KVX_*
   in reloc.c.  */
130
131 #define KVX_KV3_V1_KV3_V2_KV4_V1
132 #include "elfxx-kvx-relocs.h"
133 #undef KVX_KV3_V1_KV3_V2_KV4_V1
134
135 /* Given HOWTO, return the bfd internal relocation enumerator. */
136
137 static bfd_reloc_code_real_type
138 elfNN_kvx_bfd_reloc_from_howto (reloc_howto_type *howto)
139 {
140 const int size = (int) ARRAY_SIZE (elf_kvx_howto_table);
141 const ptrdiff_t offset = howto - elf_kvx_howto_table;
142
143 if (offset >= 0 && offset < size)
144 return BFD_RELOC_KVX_RELOC_START + offset + 1;
145
146 return BFD_RELOC_KVX_RELOC_START + 1;
147 }
148
/* Given R_TYPE, return the bfd internal relocation enumerator.  */

static bfd_reloc_code_real_type
elfNN_kvx_bfd_reloc_from_type (bfd *abfd ATTRIBUTE_UNUSED, unsigned int r_type)
{
  /* Reverse map from ELF r_type to howto-table index, built lazily on
     the first call.  Like most of BFD this is not thread-safe.  */
  static bool initialized_p = false;
  /* Indexed by R_TYPE, values are offsets in the howto_table.  */
  static unsigned int offsets[R_KVX_end];

  if (!initialized_p)
    {
      unsigned int i;

      for (i = 0; i < ARRAY_SIZE (elf_kvx_howto_table); ++i)
	offsets[elf_kvx_howto_table[i].type] = i;

      initialized_p = true;
    }

  /* PR 17512: file: b371e70a.  Guard against out-of-range reloc
     numbers from corrupt input before indexing OFFSETS.  */
  if (r_type >= R_KVX_end)
    {
      bfd_set_error (bfd_error_bad_value);
      return BFD_RELOC_KVX_RELOC_END;
    }

  return (BFD_RELOC_KVX_RELOC_START + 1) + offsets[r_type];
}
177
178 struct elf_kvx_reloc_map
179 {
180 bfd_reloc_code_real_type from;
181 bfd_reloc_code_real_type to;
182 };
183
184 /* Map bfd generic reloc to KVX-specific reloc. */
185 static const struct elf_kvx_reloc_map elf_kvx_reloc_map[] =
186 {
187 {BFD_RELOC_NONE, BFD_RELOC_KVX_NONE},
188
189 /* Basic data relocations. */
190 {BFD_RELOC_CTOR, BFD_RELOC_KVX_NN},
191 {BFD_RELOC_64, BFD_RELOC_KVX_64},
192 {BFD_RELOC_32, BFD_RELOC_KVX_32},
193 {BFD_RELOC_16, BFD_RELOC_KVX_16},
194 {BFD_RELOC_8, BFD_RELOC_KVX_8},
195
196 {BFD_RELOC_64_PCREL, BFD_RELOC_KVX_64_PCREL},
197 {BFD_RELOC_32_PCREL, BFD_RELOC_KVX_32_PCREL},
198 };
199
200 /* Given the bfd internal relocation enumerator in CODE, return the
201 corresponding howto entry. */
202
203 static reloc_howto_type *
204 elfNN_kvx_howto_from_bfd_reloc (bfd_reloc_code_real_type code)
205 {
206 unsigned int i;
207
208 /* Convert bfd generic reloc to KVX-specific reloc. */
209 if (code < BFD_RELOC_KVX_RELOC_START || code > BFD_RELOC_KVX_RELOC_END)
210 for (i = 0; i < ARRAY_SIZE (elf_kvx_reloc_map) ; i++)
211 if (elf_kvx_reloc_map[i].from == code)
212 {
213 code = elf_kvx_reloc_map[i].to;
214 break;
215 }
216
217 if (code > BFD_RELOC_KVX_RELOC_START && code < BFD_RELOC_KVX_RELOC_END)
218 return &elf_kvx_howto_table[code - (BFD_RELOC_KVX_RELOC_START + 1)];
219
220 return NULL;
221 }
222
223 static reloc_howto_type *
224 elfNN_kvx_howto_from_type (bfd *abfd, unsigned int r_type)
225 {
226 bfd_reloc_code_real_type val;
227 reloc_howto_type *howto;
228
229 #if ARCH_SIZE == 32
230 if (r_type > 256)
231 {
232 bfd_set_error (bfd_error_bad_value);
233 return NULL;
234 }
235 #endif
236
237 val = elfNN_kvx_bfd_reloc_from_type (abfd, r_type);
238 howto = elfNN_kvx_howto_from_bfd_reloc (val);
239
240 if (howto != NULL)
241 return howto;
242
243 bfd_set_error (bfd_error_bad_value);
244 return NULL;
245 }
246
/* Fill in BFD_RELOC->howto from the ELF relocation ELF_RELOC.
   Returns false and reports an error for unsupported relocation
   types.  Wired up as elf_info_to_howto / elf_info_to_howto_rel.  */

static bool
elfNN_kvx_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED, arelent *bfd_reloc,
			 Elf_Internal_Rela *elf_reloc)
{
  unsigned int r_type;

  r_type = ELFNN_R_TYPE (elf_reloc->r_info);
  bfd_reloc->howto = elfNN_kvx_howto_from_type (abfd, r_type);

  if (bfd_reloc->howto == NULL)
    {
      /* xgettext:c-format */
      _bfd_error_handler (_("%pB: unsupported relocation type %#x"), abfd, r_type);
      return false;
    }
  return true;
}
264
265 static reloc_howto_type *
266 elfNN_kvx_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
267 bfd_reloc_code_real_type code)
268 {
269 reloc_howto_type *howto = elfNN_kvx_howto_from_bfd_reloc (code);
270
271 if (howto != NULL)
272 return howto;
273
274 bfd_set_error (bfd_error_bad_value);
275 return NULL;
276 }
277
278 static reloc_howto_type *
279 elfNN_kvx_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
280 const char *r_name)
281 {
282 unsigned int i;
283
284 for (i = 0; i < ARRAY_SIZE (elf_kvx_howto_table); ++i)
285 if (elf_kvx_howto_table[i].name != NULL
286 && strcasecmp (elf_kvx_howto_table[i].name, r_name) == 0)
287 return &elf_kvx_howto_table[i];
288
289 return NULL;
290 }
291
292 #define TARGET_LITTLE_SYM kvx_elfNN_vec
293 #define TARGET_LITTLE_NAME "elfNN-kvx"
294
295 /* The linker script knows the section names for placement.
296 The entry_names are used to do simple name mangling on the stubs.
297 Given a function name, and its type, the stub can be found. The
298 name can be changed. The only requirement is the %s be present. */
299 #define STUB_ENTRY_NAME "__%s_veneer"
300
301 /* The name of the dynamic interpreter. This is put in the .interp
302 section. */
303 #define ELF_DYNAMIC_INTERPRETER "/lib/ld.so.1"
304
305
306 /* PCREL 27 is signed-extended and scaled by 4 */
307 #define KVX_MAX_FWD_CALL_OFFSET \
308 (((1 << 26) - 1) << 2)
309 #define KVX_MAX_BWD_CALL_OFFSET \
310 (-((1 << 26) << 2))
311
312 /* Check that the destination of the call is within the PCREL27
313 range. */
314 static int
315 kvx_valid_call_p (bfd_vma value, bfd_vma place)
316 {
317 bfd_signed_vma offset = (bfd_signed_vma) (value - place);
318 return (offset <= KVX_MAX_FWD_CALL_OFFSET
319 && offset >= KVX_MAX_BWD_CALL_OFFSET);
320 }
321
322 /* Section name for stubs is the associated section name plus this
323 string. */
324 #define STUB_SUFFIX ".stub"
325
/* The kinds of veneer (stub) a call site may need.  */
enum elf_kvx_stub_type
{
  kvx_stub_none,	/* No stub required.  */
  kvx_stub_long_branch,	/* Target out of PCREL27 range: make/igoto stub.  */
};
331
/* An entry in the stub hash table: one linker-generated veneer,
   where it lives, and what it branches to.  */
struct elf_kvx_stub_hash_entry
{
  /* Base hash table entry structure.  */
  struct bfd_hash_entry root;

  /* The stub section.  */
  asection *stub_sec;

  /* Offset within stub_sec of the beginning of this stub.  */
  bfd_vma stub_offset;

  /* Given the symbol's value and its section we can determine its final
     value when building the stubs (so the stub knows where to jump).  */
  bfd_vma target_value;
  asection *target_section;

  /* Which veneer template to emit for this entry.  */
  enum elf_kvx_stub_type stub_type;

  /* The symbol table entry, if any, that this was derived from.  */
  struct elf_kvx_link_hash_entry *h;

  /* Destination symbol type (STT_*).  */
  unsigned char st_type;

  /* Where this stub is being called from, or, in the case of combined
     stub sections, the first input section in the group.  */
  asection *id_sec;

  /* The name for the local symbol at the start of this stub.  The
     stub name in the hash table has to be unique; this does not, so
     it can be friendlier.  */
  char *output_name;
};
365
/* Used to build a map of a section.  This is required for mixed-endian
   code/data.  */

typedef struct elf_elf_section_map
{
  /* Start address of the mapped region.  */
  bfd_vma vma;
  /* NOTE(review): presumably the mapping-symbol kind at VMA —
     confirm against the code that fills this map.  */
  char type;
}
elf_kvx_section_map;
375
376
/* KVX-specific per-section data, extending the generic ELF section
   data with the section map above.  */
typedef struct _kvx_elf_section_data
{
  /* Generic ELF section data; must be first.  */
  struct bfd_elf_section_data elf;
  /* Number of entries used in MAP.  */
  unsigned int mapcount;
  /* Allocated capacity of MAP — TODO confirm with the code that
     grows the array.  */
  unsigned int mapsize;
  /* Array of section map entries.  */
  elf_kvx_section_map *map;
}
_kvx_elf_section_data;
385
386 #define elf_kvx_section_data(sec) \
387 ((_kvx_elf_section_data *) elf_section_data (sec))
388
/* GOT bookkeeping for a local (non-hash-table) symbol.  */
struct elf_kvx_local_symbol
{
  /* Bit mask of GOT_* entry kinds required by this symbol.  */
  unsigned int got_type;
  /* Number of GOT references counted during relocation scanning.  */
  bfd_signed_vma got_refcount;
  /* Assigned offset of this symbol's entry in the GOT.  */
  bfd_vma got_offset;
};
395
/* KVX-specific per-BFD object data, extending the generic ELF
   tdata.  Accessed through elf_kvx_tdata ().  */
struct elf_kvx_obj_tdata
{
  /* Generic ELF object data; must be first.  */
  struct elf_obj_tdata root;

  /* Local symbol descriptors, one per local symbol (see
     elf_kvx_locals).  */
  struct elf_kvx_local_symbol *locals;

  /* Zero to warn when linking objects with incompatible enum sizes.  */
  int no_enum_size_warning;

  /* Zero to warn when linking objects with incompatible wchar_t sizes.  */
  int no_wchar_size_warning;
};
409
410 #define elf_kvx_tdata(bfd) \
411 ((struct elf_kvx_obj_tdata *) (bfd)->tdata.any)
412
413 #define elf_kvx_locals(bfd) (elf_kvx_tdata (bfd)->locals)
414
415 #define is_kvx_elf(bfd) \
416 (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
417 && elf_tdata (bfd) != NULL \
418 && elf_object_id (bfd) == KVX_ELF_DATA)
419
420 static bool
421 elfNN_kvx_mkobject (bfd *abfd)
422 {
423 return bfd_elf_allocate_object (abfd, sizeof (struct elf_kvx_obj_tdata),
424 KVX_ELF_DATA);
425 }
426
427 #define elf_kvx_hash_entry(ent) \
428 ((struct elf_kvx_link_hash_entry *)(ent))
429
430 #define GOT_UNKNOWN 0
431 #define GOT_NORMAL 1
432
433 #define GOT_TLS_GD 2
434 #define GOT_TLS_IE 4
435 #define GOT_TLS_LD 8
436
/* KVX ELF linker hash entry, extending the generic ELF entry with
   GOT/PLT bookkeeping and a stub cache.  */
struct elf_kvx_link_hash_entry
{
  /* Generic ELF linker hash entry; must be first.  */
  struct elf_link_hash_entry root;

  /* Since PLT entries have variable size, we need to record the
     index into .got.plt instead of recomputing it from the PLT
     offset.  */
  bfd_signed_vma plt_got_offset;

  /* Bit mask representing the type of GOT entry(s) if any required by
     this symbol (GOT_NORMAL, GOT_TLS_*).  */
  unsigned int got_type;

  /* A pointer to the most recently used stub hash entry against this
     symbol.  */
  struct elf_kvx_stub_hash_entry *stub_cache;
};
455
456 /* Get the KVX elf linker hash table from a link_info structure. */
457 #define elf_kvx_hash_table(info) \
458 ((struct elf_kvx_link_hash_table *) ((info)->hash))
459
460 #define kvx_stub_hash_lookup(table, string, create, copy) \
461 ((struct elf_kvx_stub_hash_entry *) \
462 bfd_hash_lookup ((table), (string), (create), (copy)))
463
/* KVX ELF linker hash table.  */
struct elf_kvx_link_hash_table
{
  /* The main hash table; must be first.  */
  struct elf_link_hash_table root;

  /* Nonzero to force PIC branch veneers.  */
  int pic_veneer;

  /* The number of bytes in the initial entry in the PLT.  */
  bfd_size_type plt_header_size;

  /* The number of bytes in the subsequent PLT entries.  */
  bfd_size_type plt_entry_size;

  /* The bytes of the subsequent PLT entry.  */
  const bfd_byte *plt_entry;

  /* Short-cuts to get to dynamic linker sections.  */
  asection *sdynbss;
  asection *srelbss;

  /* Small local sym cache.  */
  struct sym_cache sym_cache;

  /* For convenience in allocate_dynrelocs.  */
  bfd *obfd;

  /* The amount of space used by the reserved portion of the sgotplt
     section, plus whatever space is used by the jump slots.  */
  bfd_vma sgotplt_jump_table_size;

  /* The stub hash table, keyed by the names built in
     elfNN_kvx_stub_name.  */
  struct bfd_hash_table stub_hash_table;

  /* Linker stub bfd.  */
  bfd *stub_bfd;

  /* Linker call-backs.  */
  asection *(*add_stub_section) (const char *, asection *);
  void (*layout_sections_again) (void);

  /* Array to keep track of which stub sections have been created, and
     information on stub grouping.  Indexed by input section id.  */
  struct map_stub
  {
    /* This is the section to which stubs in the group will be
       attached.  */
    asection *link_sec;
    /* The stub section.  */
    asection *stub_sec;
  } *stub_group;

  /* Assorted information used by elfNN_kvx_size_stubs.  */
  unsigned int bfd_count;
  unsigned int top_index;
  asection **input_list;
};
522
/* Create an entry in an KVX ELF linker hash table.  Allocates the
   KVX-sized entry if needed, chains to the generic ELF newfunc, then
   initializes the KVX-specific fields.  */

static struct bfd_hash_entry *
elfNN_kvx_link_hash_newfunc (struct bfd_hash_entry *entry,
			     struct bfd_hash_table *table,
			     const char *string)
{
  struct elf_kvx_link_hash_entry *ret =
    (struct elf_kvx_link_hash_entry *) entry;

  /* Allocate the structure if it has not already been allocated by a
     subclass.  */
  if (ret == NULL)
    ret = bfd_hash_allocate (table,
			     sizeof (struct elf_kvx_link_hash_entry));
  if (ret == NULL)
    return (struct bfd_hash_entry *) ret;

  /* Call the allocation method of the superclass.  */
  ret = ((struct elf_kvx_link_hash_entry *)
	 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret,
				     table, string));
  if (ret != NULL)
    {
      /* Initialize the KVX-specific part of the entry.  */
      ret->got_type = GOT_UNKNOWN;
      ret->plt_got_offset = (bfd_vma) - 1;
      ret->stub_cache = NULL;
    }

  return (struct bfd_hash_entry *) ret;
}
554
/* Initialize an entry in the stub hash table.  */

static struct bfd_hash_entry *
stub_hash_newfunc (struct bfd_hash_entry *entry,
		   struct bfd_hash_table *table, const char *string)
{
  /* Allocate the structure if it has not already been allocated by a
     subclass.  */
  if (entry == NULL)
    {
      entry = bfd_hash_allocate (table,
				 sizeof (struct
					 elf_kvx_stub_hash_entry));
      if (entry == NULL)
	return entry;
    }

  /* Call the allocation method of the superclass.  */
  entry = bfd_hash_newfunc (entry, table, string);
  if (entry != NULL)
    {
      struct elf_kvx_stub_hash_entry *eh;

      /* Initialize the local fields.  NOTE(review): st_type and
	 output_name are not initialized here — presumably every
	 caller sets them before use; confirm.  */
      eh = (struct elf_kvx_stub_hash_entry *) entry;
      eh->stub_sec = NULL;
      eh->stub_offset = 0;
      eh->target_value = 0;
      eh->target_section = NULL;
      eh->stub_type = kvx_stub_none;
      eh->h = NULL;
      eh->id_sec = NULL;
    }

  return entry;
}
591
/* Copy the extra info we tack onto an elf_link_hash_entry.  Called
   when symbol IND is made indirect to (or a weak alias of) DIR.  */

static void
elfNN_kvx_copy_indirect_symbol (struct bfd_link_info *info,
				struct elf_link_hash_entry *dir,
				struct elf_link_hash_entry *ind)
{
  struct elf_kvx_link_hash_entry *edir, *eind;

  edir = (struct elf_kvx_link_hash_entry *) dir;
  eind = (struct elf_kvx_link_hash_entry *) ind;

  if (ind->root.type == bfd_link_hash_indirect)
    {
      /* Copy over the GOT entry type, but only if the direct symbol
	 has no GOT references of its own yet.  */
      if (dir->got.refcount <= 0)
	{
	  edir->got_type = eind->got_type;
	  eind->got_type = GOT_UNKNOWN;
	}
    }

  /* Let the generic code copy the rest.  */
  _bfd_elf_link_hash_copy_indirect (info, dir, ind);
}
616
617 /* Destroy a KVX elf linker hash table. */
618
619 static void
620 elfNN_kvx_link_hash_table_free (bfd *obfd)
621 {
622 struct elf_kvx_link_hash_table *ret
623 = (struct elf_kvx_link_hash_table *) obfd->link.hash;
624
625 bfd_hash_table_free (&ret->stub_hash_table);
626 _bfd_elf_link_hash_table_free (obfd);
627 }
628
/* Create a KVX elf linker hash table.  Returns the embedded generic
   hash table, or NULL on allocation failure.  */

static struct bfd_link_hash_table *
elfNN_kvx_link_hash_table_create (bfd *abfd)
{
  struct elf_kvx_link_hash_table *ret;
  bfd_size_type amt = sizeof (struct elf_kvx_link_hash_table);

  ret = bfd_zmalloc (amt);
  if (ret == NULL)
    return NULL;

  if (!_bfd_elf_link_hash_table_init
      (&ret->root, abfd, elfNN_kvx_link_hash_newfunc,
       sizeof (struct elf_kvx_link_hash_entry), KVX_ELF_DATA))
    {
      free (ret);
      return NULL;
    }

  /* Only the small PLT model is implemented.  */
  ret->plt_header_size = PLT_ENTRY_SIZE;
  ret->plt_entry_size = PLT_SMALL_ENTRY_SIZE;
  ret->plt_entry = elfNN_kvx_small_plt_entry;

  ret->obfd = abfd;

  if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc,
			    sizeof (struct elf_kvx_stub_hash_entry)))
    {
      _bfd_elf_link_hash_table_free (abfd);
      return NULL;
    }

  /* Install our destructor so the stub table is freed too.  */
  ret->root.root.hash_table_free = elfNN_kvx_link_hash_table_free;

  return &ret->root.root;
}
666
/* Apply relocation R_TYPE at OFFSET within INPUT_SECTION's contents,
   resolving it against VALUE.  Used when emitting stub instructions.
   NOTE(review): assumes R_TYPE is a valid KVX relocation — a bad
   type would make elfNN_kvx_howto_from_type return NULL and
   _bfd_kvx_elf_put_addend would then be handed a NULL howto.  */

static bfd_reloc_status_type
kvx_relocate (unsigned int r_type, bfd *input_bfd, asection *input_section,
	      bfd_vma offset, bfd_vma value)
{
  reloc_howto_type *howto;
  bfd_vma place;

  howto = elfNN_kvx_howto_from_type (input_bfd, r_type);
  /* Address being relocated, in the output image.  */
  place = (input_section->output_section->vma + input_section->output_offset
	   + offset);

  r_type = elfNN_kvx_bfd_reloc_from_type (input_bfd, r_type);
  value = _bfd_kvx_elf_resolve_relocation (r_type, place, value, 0, false);
  return _bfd_kvx_elf_put_addend (input_bfd,
				  input_section->contents + offset, r_type,
				  howto, value);
}
684
/* Determine the type of stub needed, if any, for a call.  REL is the
   relocation at the call site in INPUT_SEC; the call target is
   DESTINATION, a symbol of type ST_TYPE defined in SYM_SEC.  */

static enum elf_kvx_stub_type
kvx_type_of_stub (asection *input_sec,
		  const Elf_Internal_Rela *rel,
		  asection *sym_sec,
		  unsigned char st_type,
		  bfd_vma destination)
{
  bfd_vma location;
  bfd_signed_vma branch_offset;
  unsigned int r_type;
  enum elf_kvx_stub_type stub_type = kvx_stub_none;

  /* NOTE(review): this only rules out non-function targets in the
     *same* section; cross-section non-function targets still get
     range-checked below — confirm this asymmetry is intended.  */
  if (st_type != STT_FUNC
      && (sym_sec == input_sec))
    return stub_type;

  /* Determine where the call point is.  */
  location = (input_sec->output_offset
	      + input_sec->output_section->vma + rel->r_offset);

  branch_offset = (bfd_signed_vma) (destination - location);

  r_type = ELFNN_R_TYPE (rel->r_info);

  /* We don't want to redirect any old unconditional jump in this way,
     only one which is being used for a sibcall, where it is
     acceptable for the R16 and R17 registers to be clobbered.  */
  if (r_type == R_KVX_PCREL27
      && (branch_offset > KVX_MAX_FWD_CALL_OFFSET
	  || branch_offset < KVX_MAX_BWD_CALL_OFFSET))
    {
      stub_type = kvx_stub_long_branch;
    }

  return stub_type;
}
723
724 /* Build a name for an entry in the stub hash table. */
725
726 static char *
727 elfNN_kvx_stub_name (const asection *input_section,
728 const asection *sym_sec,
729 const struct elf_kvx_link_hash_entry *hash,
730 const Elf_Internal_Rela *rel)
731 {
732 char *stub_name;
733 bfd_size_type len;
734
735 if (hash)
736 {
737 len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 16 + 1;
738 stub_name = bfd_malloc (len);
739 if (stub_name != NULL)
740 snprintf (stub_name, len, "%08x_%s+%" PRIx64 "x",
741 (unsigned int) input_section->id,
742 hash->root.root.root.string,
743 rel->r_addend);
744 }
745 else
746 {
747 len = 8 + 1 + 8 + 1 + 8 + 1 + 16 + 1;
748 stub_name = bfd_malloc (len);
749 if (stub_name != NULL)
750 snprintf (stub_name, len, "%08x_%x:%x+%" PRIx64 "x",
751 (unsigned int) input_section->id,
752 (unsigned int) sym_sec->id,
753 (unsigned int) ELFNN_R_SYM (rel->r_info),
754 rel->r_addend);
755 }
756
757 return stub_name;
758 }
759
760 /* Return true if symbol H should be hashed in the `.gnu.hash' section. For
761 executable PLT slots where the executable never takes the address of those
762 functions, the function symbols are not added to the hash table. */
763
764 static bool
765 elf_kvx_hash_symbol (struct elf_link_hash_entry *h)
766 {
767 if (h->plt.offset != (bfd_vma) -1
768 && !h->def_regular
769 && !h->pointer_equality_needed)
770 return false;
771
772 return _bfd_elf_hash_symbol (h);
773 }
774
775
/* Look up an entry in the stub hash.  Stub entries are cached because
   creating the stub name takes a bit of time.  Returns NULL when
   INPUT_SECTION is not code or no matching stub exists.  */

static struct elf_kvx_stub_hash_entry *
elfNN_kvx_get_stub_entry (const asection *input_section,
			  const asection *sym_sec,
			  struct elf_link_hash_entry *hash,
			  const Elf_Internal_Rela *rel,
			  struct elf_kvx_link_hash_table *htab)
{
  struct elf_kvx_stub_hash_entry *stub_entry;
  struct elf_kvx_link_hash_entry *h =
    (struct elf_kvx_link_hash_entry *) hash;
  const asection *id_sec;

  /* Only code sections can contain call sites needing stubs.  */
  if ((input_section->flags & SEC_CODE) == 0)
    return NULL;

  /* If this input section is part of a group of sections sharing one
     stub section, then use the id of the first section in the group.
     Stub names need to include a section id, as there may well be
     more than one stub used to reach say, printf, and we need to
     distinguish between them.  */
  id_sec = htab->stub_group[input_section->id].link_sec;

  /* Fast path: the per-symbol one-entry cache.  */
  if (h != NULL && h->stub_cache != NULL
      && h->stub_cache->h == h && h->stub_cache->id_sec == id_sec)
    {
      stub_entry = h->stub_cache;
    }
  else
    {
      char *stub_name;

      stub_name = elfNN_kvx_stub_name (id_sec, sym_sec, h, rel);
      if (stub_name == NULL)
	return NULL;

      /* Look up without creating; refresh the cache on success.  */
      stub_entry = kvx_stub_hash_lookup (&htab->stub_hash_table,
					 stub_name, false, false);
      if (h != NULL)
	h->stub_cache = stub_entry;

      free (stub_name);
    }

  return stub_entry;
}
824
825
826 /* Create a stub section. */
827
828 static asection *
829 _bfd_kvx_create_stub_section (asection *section,
830 struct elf_kvx_link_hash_table *htab)
831
832 {
833 size_t namelen;
834 bfd_size_type len;
835 char *s_name;
836
837 namelen = strlen (section->name);
838 len = namelen + sizeof (STUB_SUFFIX);
839 s_name = bfd_alloc (htab->stub_bfd, len);
840 if (s_name == NULL)
841 return NULL;
842
843 memcpy (s_name, section->name, namelen);
844 memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
845 return (*htab->add_stub_section) (s_name, section);
846 }
847
848
849 /* Find or create a stub section for a link section.
850
851 Fix or create the stub section used to collect stubs attached to
852 the specified link section. */
853
854 static asection *
855 _bfd_kvx_get_stub_for_link_section (asection *link_section,
856 struct elf_kvx_link_hash_table *htab)
857 {
858 if (htab->stub_group[link_section->id].stub_sec == NULL)
859 htab->stub_group[link_section->id].stub_sec
860 = _bfd_kvx_create_stub_section (link_section, htab);
861 return htab->stub_group[link_section->id].stub_sec;
862 }
863
864
865 /* Find or create a stub section in the stub group for an input
866 section. */
867
868 static asection *
869 _bfd_kvx_create_or_find_stub_sec (asection *section,
870 struct elf_kvx_link_hash_table *htab)
871 {
872 asection *link_sec = htab->stub_group[section->id].link_sec;
873 return _bfd_kvx_get_stub_for_link_section (link_sec, htab);
874 }
875
876
/* Add a new stub entry in the stub group associated with an input
   section to the stub hash.  Not all fields of the new stub entry are
   initialised — the caller fills in stub_type, target_*, etc.
   Returns NULL (after reporting an error) on failure.  */

static struct elf_kvx_stub_hash_entry *
_bfd_kvx_add_stub_entry_in_group (const char *stub_name,
				  asection *section,
				  struct elf_kvx_link_hash_table *htab)
{
  asection *link_sec;
  asection *stub_sec;
  struct elf_kvx_stub_hash_entry *stub_entry;

  link_sec = htab->stub_group[section->id].link_sec;
  stub_sec = _bfd_kvx_create_or_find_stub_sec (section, htab);

  /* Enter this entry into the linker stub hash table.  */
  stub_entry = kvx_stub_hash_lookup (&htab->stub_hash_table, stub_name,
				     true, false);
  if (stub_entry == NULL)
    {
      /* xgettext:c-format */
      _bfd_error_handler (_("%pB: cannot create stub entry %s"),
			  section->owner, stub_name);
      return NULL;
    }

  stub_entry->stub_sec = stub_sec;
  stub_entry->stub_offset = 0;
  stub_entry->id_sec = link_sec;

  return stub_entry;
}
910
/* bfd_hash_traverse callback: emit the instructions for one stub into
   its stub section and resolve the stub's relocations against the
   final target address.  IN_ARG is the bfd_link_info.  Always
   returns true so the traversal continues.  */

static bool
kvx_build_one_stub (struct bfd_hash_entry *gen_entry,
		    void *in_arg)
{
  struct elf_kvx_stub_hash_entry *stub_entry;
  asection *stub_sec;
  bfd *stub_bfd;
  bfd_byte *loc;
  bfd_vma sym_value;
  unsigned int template_size;
  const uint32_t *template;
  unsigned int i;
  struct bfd_link_info *info;

  /* Massage our args to the form they really have.  */
  stub_entry = (struct elf_kvx_stub_hash_entry *) gen_entry;

  info = (struct bfd_link_info *) in_arg;

  /* Fail if the target section could not be assigned to an output
     section.  The user should fix his linker script.  */
  if (stub_entry->target_section->output_section == NULL
      && info->non_contiguous_regions)
    info->callbacks->einfo (_("%F%P: Could not assign '%pA' to an output section. "
			      "Retry without "
			      "--enable-non-contiguous-regions.\n"),
			    stub_entry->target_section);

  stub_sec = stub_entry->stub_sec;

  /* Make a note of the offset within the stubs for this entry.  */
  stub_entry->stub_offset = stub_sec->size;
  loc = stub_sec->contents + stub_entry->stub_offset;

  stub_bfd = stub_sec->owner;

  /* This is the address of the stub destination.  */
  sym_value = (stub_entry->target_value
	       + stub_entry->target_section->output_offset
	       + stub_entry->target_section->output_section->vma);

  /* Select the instruction template for this stub type.  */
  switch (stub_entry->stub_type)
    {
    case kvx_stub_long_branch:
      template = elfNN_kvx_long_branch_stub;
      template_size = sizeof (elfNN_kvx_long_branch_stub);
      break;
    default:
      abort ();
    }

  /* Copy the template words out little-endian.  */
  for (i = 0; i < (template_size / sizeof template[0]); i++)
    {
      bfd_putl32 (template[i], loc);
      loc += 4;
    }

  stub_sec->size += template_size;

  /* Now patch the immediate fields of the emitted instructions.  */
  switch (stub_entry->stub_type)
    {
    case kvx_stub_long_branch:
      /*
	The stub uses a make insn with 43bits immediate.
	We need to apply 3 relocations:
	BFD_RELOC_KVX_S43_LO10,
	BFD_RELOC_KVX_S43_UP27,
	BFD_RELOC_KVX_S43_EX6.
	LO10 and EX6 land in the first word, UP27 in the second.  */
      if (kvx_relocate (R_KVX_S43_LO10, stub_bfd, stub_sec,
			stub_entry->stub_offset , sym_value) != bfd_reloc_ok)
	BFD_FAIL ();
      if (kvx_relocate (R_KVX_S43_EX6, stub_bfd, stub_sec,
			stub_entry->stub_offset , sym_value) != bfd_reloc_ok)
	BFD_FAIL ();
      if (kvx_relocate (R_KVX_S43_UP27, stub_bfd, stub_sec,
			stub_entry->stub_offset + 4, sym_value) != bfd_reloc_ok)
	BFD_FAIL ();
      break;
    default:
      abort ();
    }

  return true;
}
996
997 /* As above, but don't actually build the stub. Just bump offset so
998 we know stub section sizes. */
999
1000 static bool
1001 kvx_size_one_stub (struct bfd_hash_entry *gen_entry, void *in_arg ATTRIBUTE_UNUSED)
1002 {
1003 struct elf_kvx_stub_hash_entry *stub_entry;
1004 int size;
1005
1006 /* Massage our args to the form they really have. */
1007 stub_entry = (struct elf_kvx_stub_hash_entry *) gen_entry;
1008
1009 switch (stub_entry->stub_type)
1010 {
1011 case kvx_stub_long_branch:
1012 size = sizeof (elfNN_kvx_long_branch_stub);
1013 break;
1014 default:
1015 abort ();
1016 }
1017
1018 stub_entry->stub_sec->size += size;
1019 return true;
1020 }
1021
1022 /* External entry points for sizing and building linker stubs. */
1023
/* Set up various things so that we can make a list of input sections
   for each output section included in the link.  Returns -1 on error,
   0 when no stubs will be needed, and 1 on success.  */

int
elfNN_kvx_setup_section_lists (bfd *output_bfd,
			       struct bfd_link_info *info)
{
  bfd *input_bfd;
  unsigned int bfd_count;
  unsigned int top_id, top_index;
  asection *section;
  asection **input_list, **list;
  bfd_size_type amt;
  struct elf_kvx_link_hash_table *htab =
    elf_kvx_hash_table (info);

  if (!is_elf_hash_table ((const struct bfd_link_hash_table *)htab))
    return 0;

  /* Count the number of input BFDs and find the top input section id.  */
  for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0;
       input_bfd != NULL; input_bfd = input_bfd->link.next)
    {
      bfd_count += 1;
      for (section = input_bfd->sections;
	   section != NULL; section = section->next)
	{
	  if (top_id < section->id)
	    top_id = section->id;
	}
    }
  htab->bfd_count = bfd_count;

  /* One stub-group slot per input section id; zeroed so link_sec and
     stub_sec start out NULL.  */
  amt = sizeof (struct map_stub) * (top_id + 1);
  htab->stub_group = bfd_zmalloc (amt);
  if (htab->stub_group == NULL)
    return -1;

  /* We can't use output_bfd->section_count here to find the top output
     section index as some sections may have been removed, and
     _bfd_strip_section_from_output doesn't renumber the indices.  */
  for (section = output_bfd->sections, top_index = 0;
       section != NULL; section = section->next)
    {
      if (top_index < section->index)
	top_index = section->index;
    }

  htab->top_index = top_index;
  amt = sizeof (asection *) * (top_index + 1);
  input_list = bfd_malloc (amt);
  htab->input_list = input_list;
  if (input_list == NULL)
    return -1;

  /* For sections we aren't interested in, mark their entries with a
     value we can check later.  */
  list = input_list + top_index;
  do
    *list = bfd_abs_section_ptr;
  while (list-- != input_list);

  /* Code output sections get a NULL marker: their lists are filled in
     by elfNN_kvx_next_input_section.  */
  for (section = output_bfd->sections;
       section != NULL; section = section->next)
    {
      if ((section->flags & SEC_CODE) != 0)
	input_list[section->index] = NULL;
    }

  return 1;
}
1096
1097 /* Used by elfNN_kvx_next_input_section and group_sections. */
1098 #define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
1099
/* The linker repeatedly calls this function for each input section,
   in the order that input sections are linked into output sections.
   Build lists of input sections to determine groupings between which
   we may insert linker stubs.

   ISEC is pushed onto the per-output-section list set up by
   elfNN_kvx_setup_section_lists; the list is threaded through the
   otherwise-unused stub_group[...].link_sec field (see PREV_SEC).  */

void
elfNN_kvx_next_input_section (struct bfd_link_info *info, asection *isec)
{
  struct elf_kvx_link_hash_table *htab =
    elf_kvx_hash_table (info);

  if (isec->output_section->index <= htab->top_index)
    {
      asection **list = htab->input_list + isec->output_section->index;

      /* bfd_abs_section_ptr marks output sections we don't stub
	 (non-code); skip those.  */
      if (*list != bfd_abs_section_ptr)
	{
	  /* Steal the link_sec pointer for our list.  */
	  /* This happens to make the list in reverse order,
	     which is what we want.  */
	  PREV_SEC (isec) = *list;
	  *list = isec;
	}
    }
}
1125
/* See whether we can group stub sections together.  Grouping stub
   sections may result in fewer stubs.  More importantly, we need to
   put all .init* and .fini* stubs at the beginning of the .init or
   .fini output sections respectively, because glibc splits the
   _init and _fini functions into multiple parts.  Putting a stub in
   the middle of a function is not a good idea.

   NOTE(review): the caller passes a flag named
   stubs_always_before_branch for the stubs_always_after_branch
   parameter — confirm which orientation is really meant (the aarch64
   port has the same shape).  */

static void
group_sections (struct elf_kvx_link_hash_table *htab,
		bfd_size_type stub_group_size,
		bool stubs_always_after_branch)
{
  asection **list = htab->input_list;

  /* Walk every per-output-section list built by
     elfNN_kvx_next_input_section.  */
  do
    {
      asection *tail = *list;
      asection *head;

      /* Sentinel for output sections that take no stubs.  */
      if (tail == bfd_abs_section_ptr)
	continue;

      /* Reverse the list: we must avoid placing stubs at the
	 beginning of the section because the beginning of the text
	 section may be required for an interrupt vector in bare metal
	 code.  */
#define NEXT_SEC PREV_SEC
      head = NULL;
      while (tail != NULL)
	{
	  /* Pop from tail.  */
	  asection *item = tail;
	  tail = PREV_SEC (item);

	  /* Push on head.  */
	  NEXT_SEC (item) = head;
	  head = item;
	}

      /* Now carve the (forward-ordered) list into groups no larger
	 than stub_group_size, pointing every member's link_sec at the
	 group's designated stub-carrying section CURR.  */
      while (head != NULL)
	{
	  asection *curr;
	  asection *next;
	  bfd_vma stub_group_start = head->output_offset;
	  bfd_vma end_of_next;

	  curr = head;
	  while (NEXT_SEC (curr) != NULL)
	    {
	      next = NEXT_SEC (curr);
	      end_of_next = next->output_offset + next->size;
	      if (end_of_next - stub_group_start >= stub_group_size)
		/* End of NEXT is too far from start, so stop.  */
		break;
	      /* Add NEXT to the group.  */
	      curr = next;
	    }

	  /* OK, the size from the start to the start of CURR is less
	     than stub_group_size and thus can be handled by one stub
	     section.  (Or the head section is itself larger than
	     stub_group_size, in which case we may be toast.)
	     We should really be keeping track of the total size of
	     stubs added here, as stubs contribute to the final output
	     section size.  */
	  do
	    {
	      next = NEXT_SEC (head);
	      /* Set up this stub group.  */
	      htab->stub_group[head->id].link_sec = curr;
	    }
	  while (head != curr && (head = next) != NULL);

	  /* But wait, there's more!  Input sections up to stub_group_size
	     bytes after the stub section can be handled by it too.  */
	  if (!stubs_always_after_branch)
	    {
	      stub_group_start = curr->output_offset + curr->size;

	      while (next != NULL)
		{
		  end_of_next = next->output_offset + next->size;
		  if (end_of_next - stub_group_start >= stub_group_size)
		    /* End of NEXT is too far from stubs, so stop.  */
		    break;
		  /* Add NEXT to the stub group.  */
		  head = next;
		  next = NEXT_SEC (head);
		  htab->stub_group[head->id].link_sec = curr;
		}
	    }
	  head = next;
	}
    }
  while (list++ != htab->input_list + htab->top_index);

  /* The per-output-section lists are no longer needed.  */
  free (htab->input_list);
}
1224
/* Recompute the sizes of all stub sections after stubs have been
   added or changed: zero each stub section's size, then let
   kvx_size_one_stub re-accumulate sizes over the stub hash table.  */

static void
_bfd_kvx_resize_stubs (struct elf_kvx_link_hash_table *htab)
{
  asection *section;

  /* OK, we've added some stubs.  Find out the new size of the
     stub sections.  */
  for (section = htab->stub_bfd->sections;
       section != NULL; section = section->next)
    {
      /* Ignore non-stub sections.  */
      if (!strstr (section->name, STUB_SUFFIX))
	continue;
      section->size = 0;
    }

  bfd_hash_traverse (&htab->stub_hash_table, kvx_size_one_stub, htab);
}
1243
/* Satisfy the ELF linker by filling in some fields in our fake bfd.
   Returns false only when the KVX hash table is missing from INFO.  */

bool
kvx_elfNN_init_stub_bfd (struct bfd_link_info *info,
			 bfd *stub_bfd)
{
  struct elf_kvx_link_hash_table *htab;

  /* Mark the stub bfd with the right ELF class (32 or 64 bit).  */
  elf_elfheader (stub_bfd)->e_ident[EI_CLASS] = ELFCLASSNN;

  /* Always hook our dynamic sections into the first bfd, which is the
     linker created stub bfd.  This ensures that the GOT header is at
     the start of the output TOC section.  */
  htab = elf_kvx_hash_table (info);
  if (htab == NULL)
    return false;

  return true;
}
1263
/* Determine and set the size of the stub section for a final link.

   The basic idea here is to examine all the relocations looking for
   PC-relative calls to a target that is unreachable with a 27-bit
   immediate (as found in call and goto instructions).

   Iterates until a fixed point: each pass scans every R_KVX_PCREL27
   reloc in every input code section, creates/updates stub entries as
   needed, then re-sizes stubs and re-runs section layout.  */

bool
elfNN_kvx_size_stubs (bfd *output_bfd,
		      bfd *stub_bfd,
		      struct bfd_link_info *info,
		      bfd_signed_vma group_size,
		      asection * (*add_stub_section) (const char *,
						      asection *),
		      void (*layout_sections_again) (void))
{
  bfd_size_type stub_group_size;
  bool stubs_always_before_branch;
  bool stub_changed = false;
  struct elf_kvx_link_hash_table *htab = elf_kvx_hash_table (info);

  /* Propagate mach to stub bfd, because it may not have been
     finalized when we created stub_bfd.  */
  bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
		     bfd_get_mach (output_bfd));

  /* Stash our params away.  */
  htab->stub_bfd = stub_bfd;
  htab->add_stub_section = add_stub_section;
  htab->layout_sections_again = layout_sections_again;
  /* A negative group_size means "place stubs before branches" and its
     magnitude is the group size.  */
  stubs_always_before_branch = group_size < 0;
  if (group_size < 0)
    stub_group_size = -group_size;
  else
    stub_group_size = group_size;

  if (stub_group_size == 1)
    {
      /* Default values.  */
      /* KVX branch range is +-256MB. The value used is 1MB less.  */
      stub_group_size = 255 * 1024 * 1024;
    }

  group_sections (htab, stub_group_size, stubs_always_before_branch);

  (*htab->layout_sections_again) ();

  /* Fixed-point loop: keep adding stubs and re-laying-out until a
     pass adds nothing new.  */
  while (1)
    {
      bfd *input_bfd;

      for (input_bfd = info->input_bfds;
	   input_bfd != NULL; input_bfd = input_bfd->link.next)
	{
	  Elf_Internal_Shdr *symtab_hdr;
	  asection *section;
	  Elf_Internal_Sym *local_syms = NULL;

	  /* Skip non-KVX and linker-created bfds (e.g. the stub bfd
	     itself).  */
	  if (!is_kvx_elf (input_bfd)
	      || (input_bfd->flags & BFD_LINKER_CREATED) != 0)
	    continue;

	  /* We'll need the symbol table in a second.  */
	  symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
	  if (symtab_hdr->sh_info == 0)
	    continue;

	  /* Walk over each section attached to the input bfd.  */
	  for (section = input_bfd->sections;
	       section != NULL; section = section->next)
	    {
	      Elf_Internal_Rela *internal_relocs, *irelaend, *irela;

	      /* If there aren't any relocs, then there's nothing more
		 to do.  */
	      if ((section->flags & SEC_RELOC) == 0
		  || section->reloc_count == 0
		  || (section->flags & SEC_CODE) == 0)
		continue;

	      /* If this section is a link-once section that will be
		 discarded, then don't create any stubs.  */
	      if (section->output_section == NULL
		  || section->output_section->owner != output_bfd)
		continue;

	      /* Get the relocs.  */
	      internal_relocs
		= _bfd_elf_link_read_relocs (input_bfd, section, NULL,
					     NULL, info->keep_memory);
	      if (internal_relocs == NULL)
		goto error_ret_free_local;

	      /* Now examine each relocation.  */
	      irela = internal_relocs;
	      irelaend = irela + section->reloc_count;
	      for (; irela < irelaend; irela++)
		{
		  unsigned int r_type, r_indx;
		  enum elf_kvx_stub_type stub_type;
		  struct elf_kvx_stub_hash_entry *stub_entry;
		  asection *sym_sec;
		  bfd_vma sym_value;
		  bfd_vma destination;
		  struct elf_kvx_link_hash_entry *hash;
		  const char *sym_name;
		  char *stub_name;
		  const asection *id_sec;
		  unsigned char st_type;
		  bfd_size_type len;

		  r_type = ELFNN_R_TYPE (irela->r_info);
		  r_indx = ELFNN_R_SYM (irela->r_info);

		  if (r_type >= (unsigned int) R_KVX_end)
		    {
		      bfd_set_error (bfd_error_bad_value);
		    error_ret_free_internal:
		      /* Relocs are only ours to free if the section
			 data isn't caching them.  */
		      if (elf_section_data (section)->relocs == NULL)
			free (internal_relocs);
		      goto error_ret_free_local;
		    }

		  /* Only look for stubs on unconditional branch and
		     branch and link instructions.  */
		  /* This catches CALL and GOTO insn */
		  if (r_type != (unsigned int) R_KVX_PCREL27)
		    continue;

		  /* Now determine the call target, its name, value,
		     section.  */
		  sym_sec = NULL;
		  sym_value = 0;
		  destination = 0;
		  hash = NULL;
		  sym_name = NULL;
		  if (r_indx < symtab_hdr->sh_info)
		    {
		      /* It's a local symbol.  */
		      Elf_Internal_Sym *sym;
		      Elf_Internal_Shdr *hdr;

		      if (local_syms == NULL)
			{
			  local_syms
			    = (Elf_Internal_Sym *) symtab_hdr->contents;
			  if (local_syms == NULL)
			    local_syms
			      = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
						      symtab_hdr->sh_info, 0,
						      NULL, NULL, NULL);
			  if (local_syms == NULL)
			    goto error_ret_free_internal;
			}

		      sym = local_syms + r_indx;
		      hdr = elf_elfsections (input_bfd)[sym->st_shndx];
		      sym_sec = hdr->bfd_section;
		      if (!sym_sec)
			/* This is an undefined symbol.  It can never
			   be resolved.  */
			continue;

		      /* For STT_SECTION symbols the addend alone
			 locates the target within the section.  */
		      if (ELF_ST_TYPE (sym->st_info) != STT_SECTION)
			sym_value = sym->st_value;
		      destination = (sym_value + irela->r_addend
				     + sym_sec->output_offset
				     + sym_sec->output_section->vma);
		      st_type = ELF_ST_TYPE (sym->st_info);
		      sym_name
			= bfd_elf_string_from_elf_section (input_bfd,
							   symtab_hdr->sh_link,
							   sym->st_name);
		    }
		  else
		    {
		      /* Global symbol: follow indirections/warnings to
			 the real hash entry.  */
		      int e_indx;

		      e_indx = r_indx - symtab_hdr->sh_info;
		      hash = ((struct elf_kvx_link_hash_entry *)
			      elf_sym_hashes (input_bfd)[e_indx]);

		      while (hash->root.root.type == bfd_link_hash_indirect
			     || hash->root.root.type == bfd_link_hash_warning)
			hash = ((struct elf_kvx_link_hash_entry *)
				hash->root.root.u.i.link);

		      if (hash->root.root.type == bfd_link_hash_defined
			  || hash->root.root.type == bfd_link_hash_defweak)
			{
			  struct elf_kvx_link_hash_table *globals =
			    elf_kvx_hash_table (info);
			  sym_sec = hash->root.root.u.def.section;
			  sym_value = hash->root.root.u.def.value;
			  /* For a destination in a shared library,
			     use the PLT stub as target address to
			     decide whether a branch stub is
			     needed.  */
			  if (globals->root.splt != NULL && hash != NULL
			      && hash->root.plt.offset != (bfd_vma) - 1)
			    {
			      sym_sec = globals->root.splt;
			      sym_value = hash->root.plt.offset;
			      if (sym_sec->output_section != NULL)
				destination = (sym_value
					       + sym_sec->output_offset
					       +
					       sym_sec->output_section->vma);
			    }
			  else if (sym_sec->output_section != NULL)
			    destination = (sym_value + irela->r_addend
					   + sym_sec->output_offset
					   + sym_sec->output_section->vma);
			}
		      else if (hash->root.root.type == bfd_link_hash_undefined
			       || (hash->root.root.type
				   == bfd_link_hash_undefweak))
			{
			  /* For a shared library, use the PLT stub as
			     target address to decide whether a long
			     branch stub is needed.
			     For absolute code, they cannot be handled.  */
			  struct elf_kvx_link_hash_table *globals =
			    elf_kvx_hash_table (info);

			  if (globals->root.splt != NULL && hash != NULL
			      && hash->root.plt.offset != (bfd_vma) - 1)
			    {
			      sym_sec = globals->root.splt;
			      sym_value = hash->root.plt.offset;
			      if (sym_sec->output_section != NULL)
				destination = (sym_value
					       + sym_sec->output_offset
					       +
					       sym_sec->output_section->vma);
			    }
			  else
			    continue;
			}
		      else
			{
			  bfd_set_error (bfd_error_bad_value);
			  goto error_ret_free_internal;
			}
		      st_type = ELF_ST_TYPE (hash->root.type);
		      sym_name = hash->root.root.root.string;
		    }

		  /* Determine what (if any) linker stub is needed.  */
		  stub_type = kvx_type_of_stub (section, irela, sym_sec,
						st_type, destination);
		  if (stub_type == kvx_stub_none)
		    continue;

		  /* Support for grouping stub sections.  */
		  id_sec = htab->stub_group[section->id].link_sec;

		  /* Get the name of this stub.  */
		  stub_name = elfNN_kvx_stub_name (id_sec, sym_sec, hash,
						   irela);
		  if (!stub_name)
		    goto error_ret_free_internal;

		  stub_entry =
		    kvx_stub_hash_lookup (&htab->stub_hash_table,
					  stub_name, false, false);
		  if (stub_entry != NULL)
		    {
		      /* The proper stub has already been created.  */
		      free (stub_name);
		      /* Always update this stub's target since it may have
			 changed after layout.  */
		      stub_entry->target_value = sym_value + irela->r_addend;
		      continue;
		    }

		  stub_entry = _bfd_kvx_add_stub_entry_in_group
		    (stub_name, section, htab);
		  if (stub_entry == NULL)
		    {
		      free (stub_name);
		      goto error_ret_free_internal;
		    }

		  stub_entry->target_value = sym_value + irela->r_addend;
		  stub_entry->target_section = sym_sec;
		  stub_entry->stub_type = stub_type;
		  stub_entry->h = hash;
		  stub_entry->st_type = st_type;

		  if (sym_name == NULL)
		    sym_name = "unnamed";
		  len = sizeof (STUB_ENTRY_NAME) + strlen (sym_name);
		  stub_entry->output_name = bfd_alloc (htab->stub_bfd, len);
		  if (stub_entry->output_name == NULL)
		    {
		      free (stub_name);
		      goto error_ret_free_internal;
		    }

		  snprintf (stub_entry->output_name, len, STUB_ENTRY_NAME,
			    sym_name);

		  stub_changed = true;
		}

	      /* We're done with the internal relocs, free them.  */
	      if (elf_section_data (section)->relocs == NULL)
		free (internal_relocs);
	    }

	  /* NOTE(review): when bfd_elf_get_elf_syms allocated
	     local_syms above, it is neither cached back into
	     symtab_hdr->contents nor freed here, which looks like a
	     per-bfd leak — compare elfnn-aarch64.c, which does both.
	     Confirm against upstream before changing.  */
	}

      if (!stub_changed)
	break;

      _bfd_kvx_resize_stubs (htab);

      /* Ask the linker to do its stuff.  */
      (*htab->layout_sections_again) ();
      stub_changed = false;
    }

  return true;

 error_ret_free_local:
  /* NOTE(review): despite its name, this label frees nothing;
     local_syms is scoped to the per-bfd loop above.  */
  return false;

}
1591
1592 /* Build all the stubs associated with the current output file. The
1593 stubs are kept in a hash table attached to the main linker hash
1594 table. We also set up the .plt entries for statically linked PIC
1595 functions here. This function is called via kvx_elf_finish in the
1596 linker. */
1597
1598 bool
1599 elfNN_kvx_build_stubs (struct bfd_link_info *info)
1600 {
1601 asection *stub_sec;
1602 struct bfd_hash_table *table;
1603 struct elf_kvx_link_hash_table *htab;
1604
1605 htab = elf_kvx_hash_table (info);
1606
1607 for (stub_sec = htab->stub_bfd->sections;
1608 stub_sec != NULL; stub_sec = stub_sec->next)
1609 {
1610 bfd_size_type size;
1611
1612 /* Ignore non-stub sections. */
1613 if (!strstr (stub_sec->name, STUB_SUFFIX))
1614 continue;
1615
1616 /* Allocate memory to hold the linker stubs. */
1617 size = stub_sec->size;
1618 stub_sec->contents = bfd_zalloc (htab->stub_bfd, size);
1619 if (stub_sec->contents == NULL && size != 0)
1620 return false;
1621 stub_sec->size = 0;
1622 }
1623
1624 /* Build the stubs as directed by the stub hash table. */
1625 table = &htab->stub_hash_table;
1626 bfd_hash_traverse (table, kvx_build_one_stub, info);
1627
1628 return true;
1629 }
1630
/* Return the GOT offset for symbol H, initializing its GOT slot with
   VALUE when this link resolves the symbol statically.  Returns
   (bfd_vma) -1 when H is NULL.  The low bit of h->got.offset is used
   as an "already initialized" marker, so real offsets keep it clear
   (offsets are multiples of the word size).  On takes-dynamic-reloc
   paths, *UNRESOLVED_RELOC_P is cleared instead.  */

static bfd_vma
kvx_calculate_got_entry_vma (struct elf_link_hash_entry *h,
			     struct elf_kvx_link_hash_table
			     *globals, struct bfd_link_info *info,
			     bfd_vma value, bfd *output_bfd,
			     bool *unresolved_reloc_p)
{
  bfd_vma off = (bfd_vma) - 1;
  asection *basegot = globals->root.sgot;
  bool dyn = globals->root.dynamic_sections_created;

  if (h != NULL)
    {
      BFD_ASSERT (basegot != NULL);
      off = h->got.offset;
      BFD_ASSERT (off != (bfd_vma) - 1);
      /* Note the third clause tests visibility as a truth value,
	 i.e. any non-STV_DEFAULT visibility on an undefweak.  */
      if (!WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, bfd_link_pic (info), h)
	  || (bfd_link_pic (info)
	      && SYMBOL_REFERENCES_LOCAL (info, h))
	  || (ELF_ST_VISIBILITY (h->other)
	      && h->root.type == bfd_link_hash_undefweak))
	{
	  /* This is actually a static link, or it is a -Bsymbolic link
	     and the symbol is defined locally.  We must initialize this
	     entry in the global offset table.  Since the offset must
	     always be a multiple of 8 (4 in the case of ILP32), we use
	     the least significant bit to record whether we have
	     initialized it already.
	     When doing a dynamic link, we create a .rel(a).got relocation
	     entry to initialize the value.  This is done in the
	     finish_dynamic_symbol routine.  */
	  if ((off & 1) != 0)
	    off &= ~1;
	  else
	    {
	      bfd_put_NN (output_bfd, value, basegot->contents + off);
	      h->got.offset |= 1;
	    }
	}
      else
	*unresolved_reloc_p = false;
    }

  return off;
}
1676
1677 static unsigned int
1678 kvx_reloc_got_type (bfd_reloc_code_real_type r_type)
1679 {
1680 switch (r_type)
1681 {
1682 /* Extracted with:
1683 awk 'match ($0, /HOWTO.*R_(KVX.*_GOT(OFF)?(64)?_.*),/,ary) {print "case BFD_RELOC_" ary[1] ":";}' elfxx-kvxc.def
1684 */
1685 case BFD_RELOC_KVX_S37_GOTOFF_LO10:
1686 case BFD_RELOC_KVX_S37_GOTOFF_UP27:
1687
1688 case BFD_RELOC_KVX_S37_GOT_LO10:
1689 case BFD_RELOC_KVX_S37_GOT_UP27:
1690
1691 case BFD_RELOC_KVX_S43_GOTOFF_LO10:
1692 case BFD_RELOC_KVX_S43_GOTOFF_UP27:
1693 case BFD_RELOC_KVX_S43_GOTOFF_EX6:
1694
1695 case BFD_RELOC_KVX_S43_GOT_LO10:
1696 case BFD_RELOC_KVX_S43_GOT_UP27:
1697 case BFD_RELOC_KVX_S43_GOT_EX6:
1698 return GOT_NORMAL;
1699
1700 case BFD_RELOC_KVX_S37_TLS_GD_LO10:
1701 case BFD_RELOC_KVX_S37_TLS_GD_UP27:
1702 case BFD_RELOC_KVX_S43_TLS_GD_LO10:
1703 case BFD_RELOC_KVX_S43_TLS_GD_UP27:
1704 case BFD_RELOC_KVX_S43_TLS_GD_EX6:
1705 return GOT_TLS_GD;
1706
1707 case BFD_RELOC_KVX_S37_TLS_LD_LO10:
1708 case BFD_RELOC_KVX_S37_TLS_LD_UP27:
1709 case BFD_RELOC_KVX_S43_TLS_LD_LO10:
1710 case BFD_RELOC_KVX_S43_TLS_LD_UP27:
1711 case BFD_RELOC_KVX_S43_TLS_LD_EX6:
1712 return GOT_TLS_LD;
1713
1714 case BFD_RELOC_KVX_S37_TLS_IE_LO10:
1715 case BFD_RELOC_KVX_S37_TLS_IE_UP27:
1716 case BFD_RELOC_KVX_S43_TLS_IE_LO10:
1717 case BFD_RELOC_KVX_S43_TLS_IE_UP27:
1718 case BFD_RELOC_KVX_S43_TLS_IE_EX6:
1719 return GOT_TLS_IE;
1720
1721 default:
1722 break;
1723 }
1724 return GOT_UNKNOWN;
1725 }
1726
1727 static bool
1728 kvx_can_relax_tls (bfd *input_bfd ATTRIBUTE_UNUSED,
1729 struct bfd_link_info *info ATTRIBUTE_UNUSED,
1730 bfd_reloc_code_real_type r_type ATTRIBUTE_UNUSED,
1731 struct elf_link_hash_entry *h ATTRIBUTE_UNUSED,
1732 unsigned long r_symndx ATTRIBUTE_UNUSED)
1733 {
1734 if (! IS_KVX_TLS_RELAX_RELOC (r_type))
1735 return false;
1736
1737 /* Relaxing hook. Disabled on KVX. */
1738 /* See elfnn-aarch64.c */
1739 return true;
1740 }
1741
1742 /* Given the relocation code R_TYPE, return the relaxed bfd reloc
1743 enumerator. */
1744
1745 static bfd_reloc_code_real_type
1746 kvx_tls_transition (bfd *input_bfd,
1747 struct bfd_link_info *info,
1748 unsigned int r_type,
1749 struct elf_link_hash_entry *h,
1750 unsigned long r_symndx)
1751 {
1752 bfd_reloc_code_real_type bfd_r_type
1753 = elfNN_kvx_bfd_reloc_from_type (input_bfd, r_type);
1754
1755 if (! kvx_can_relax_tls (input_bfd, info, bfd_r_type, h, r_symndx))
1756 return bfd_r_type;
1757
1758 return bfd_r_type;
1759 }
1760
1761 /* Return the base VMA address which should be subtracted from real addresses
1762 when resolving R_KVX_*_TLS_GD_* and R_KVX_*_TLS_LD_* relocation. */
1763
1764 static bfd_vma
1765 dtpoff_base (struct bfd_link_info *info)
1766 {
1767 /* If tls_sec is NULL, we should have signalled an error already. */
1768 BFD_ASSERT (elf_hash_table (info)->tls_sec != NULL);
1769 return elf_hash_table (info)->tls_sec->vma;
1770 }
1771
/* Return the base VMA address which should be subtracted from real addresses
   when resolving R_KVX_*_TLS_IE_* and R_KVX_*_TLS_LE_* relocations.  */

static bfd_vma
tpoff_base (struct bfd_link_info *info)
{
  struct elf_link_hash_table *htab = elf_hash_table (info);

  /* If tls_sec is NULL, we should have signalled an error already.  */
  BFD_ASSERT (htab->tls_sec != NULL);

  /* NOTE(review): align_power of 0 yields 0 for any alignment, so
     BASE is always zero here and this reduces to tls_sec->vma — the
     alignment term presumably mirrors other ports; confirm intent.  */
  bfd_vma base = align_power ((bfd_vma) 0,
			      htab->tls_sec->alignment_power);
  return htab->tls_sec->vma - base;
}
1787
1788 static bfd_vma *
1789 symbol_got_offset_ref (bfd *input_bfd, struct elf_link_hash_entry *h,
1790 unsigned long r_symndx)
1791 {
1792 /* Calculate the address of the GOT entry for symbol
1793 referred to in h. */
1794 if (h != NULL)
1795 return &h->got.offset;
1796 else
1797 {
1798 /* local symbol */
1799 struct elf_kvx_local_symbol *l;
1800
1801 l = elf_kvx_locals (input_bfd);
1802 return &l[r_symndx].got_offset;
1803 }
1804 }
1805
1806 static void
1807 symbol_got_offset_mark (bfd *input_bfd, struct elf_link_hash_entry *h,
1808 unsigned long r_symndx)
1809 {
1810 bfd_vma *p;
1811 p = symbol_got_offset_ref (input_bfd, h, r_symndx);
1812 *p |= 1;
1813 }
1814
1815 static int
1816 symbol_got_offset_mark_p (bfd *input_bfd, struct elf_link_hash_entry *h,
1817 unsigned long r_symndx)
1818 {
1819 bfd_vma value;
1820 value = * symbol_got_offset_ref (input_bfd, h, r_symndx);
1821 return value & 1;
1822 }
1823
1824 static bfd_vma
1825 symbol_got_offset (bfd *input_bfd, struct elf_link_hash_entry *h,
1826 unsigned long r_symndx)
1827 {
1828 bfd_vma value;
1829 value = * symbol_got_offset_ref (input_bfd, h, r_symndx);
1830 value &= ~1;
1831 return value;
1832 }
1833
1834 /* N_ONES produces N one bits, without overflowing machine arithmetic. */
1835 #define N_ONES(n) (((((bfd_vma) 1 << ((n) -1)) - 1) << 1) | 1)
1836
/*
 * This is a copy/paste + modification from
 * reloc.c:_bfd_relocate_contents.  Relocations are applied to 32bits
 * words, so all overflow checks will overflow for values above
 * 32bits.
 */
/* Check whether RELOCATION fits in the signed field implied by
   BFD_R_TYPE, returning bfd_reloc_ok or bfd_reloc_overflow.  Relocs
   not in the handled set are accepted unchecked.  */
static bfd_reloc_status_type
check_signed_overflow (enum complain_overflow complain_on_overflow,
		       bfd_reloc_code_real_type bfd_r_type, bfd *input_bfd,
		       bfd_vma relocation)
{
  bfd_reloc_status_type flag = bfd_reloc_ok;
  bfd_vma addrmask, fieldmask, signmask, ss;
  bfd_vma a, b, sum;
  /* X would be the existing field contents in _bfd_relocate_contents;
     here it is fixed at 0, so B below is always 0 as well.  */
  bfd_vma x = 0;

  /* These usually come from howto struct.  As we don't check for
   * values fitting in bitfields or in subpart of words, we set all
   * these to values to check as if the field is starting from first
   * bit.
   */
  unsigned int rightshift = 0;
  unsigned int bitpos = 0;
  unsigned int bitsize = 0;
  bfd_vma src_mask = -1;

  /* Only regular symbol relocations are checked here.  Others
     relocations (GOT, TLS) could be checked if the need is
     confirmed.  At the moment, we keep previous behavior
     (ie. unchecked) for those.  */
  switch (bfd_r_type)
    {
    case BFD_RELOC_KVX_S37_LO10:
    case BFD_RELOC_KVX_S37_UP27:
      bitsize = 37;
      break;

    case BFD_RELOC_KVX_S32_LO5:
    case BFD_RELOC_KVX_S32_UP27:
      bitsize = 32;
      break;

    case BFD_RELOC_KVX_S43_LO10:
    case BFD_RELOC_KVX_S43_UP27:
    case BFD_RELOC_KVX_S43_EX6:
      bitsize = 43;
      break;

    case BFD_RELOC_KVX_S64_LO10:
    case BFD_RELOC_KVX_S64_UP27:
    case BFD_RELOC_KVX_S64_EX27:
      bitsize = 64;
      break;

    default:
      /* Unhandled reloc kinds are deliberately left unchecked.  */
      return bfd_reloc_ok;
    }

  /* direct copy/paste from reloc.c below */

  /* Get the values to be added together.  For signed and unsigned
     relocations, we assume that all values should be truncated to
     the size of an address.  For bitfields, all the bits matter.
     See also bfd_check_overflow.  */
  fieldmask = N_ONES (bitsize);
  signmask = ~fieldmask;
  addrmask = (N_ONES (bfd_arch_bits_per_address (input_bfd))
	      | (fieldmask << rightshift));
  a = (relocation & addrmask) >> rightshift;
  b = (x & src_mask & addrmask) >> bitpos;
  addrmask >>= rightshift;

  switch (complain_on_overflow)
    {
    case complain_overflow_signed:
      /* If any sign bits are set, all sign bits must be set.
	 That is, A must be a valid negative address after
	 shifting.  */
      signmask = ~(fieldmask >> 1);
      /* Fall thru */

    case complain_overflow_bitfield:
      /* Much like the signed check, but for a field one bit
	 wider.  We allow a bitfield to represent numbers in the
	 range -2**n to 2**n-1, where n is the number of bits in the
	 field.  Note that when bfd_vma is 32 bits, a 32-bit reloc
	 can't overflow, which is exactly what we want.  */
      ss = a & signmask;
      if (ss != 0 && ss != (addrmask & signmask))
	flag = bfd_reloc_overflow;

      /* We only need this next bit of code if the sign bit of B
	 is below the sign bit of A.  This would only happen if
	 SRC_MASK had fewer bits than BITSIZE.  Note that if
	 SRC_MASK has more bits than BITSIZE, we can get into
	 trouble; we would need to verify that B is in range, as
	 we do for A above.  */
      ss = ((~src_mask) >> 1) & src_mask;
      ss >>= bitpos;

      /* Set all the bits above the sign bit.  */
      b = (b ^ ss) - ss;

      /* Now we can do the addition.  */
      sum = a + b;

      /* See if the result has the correct sign.  Bits above the
	 sign bit are junk now; ignore them.  If the sum is
	 positive, make sure we did not have all negative inputs;
	 if the sum is negative, make sure we did not have all
	 positive inputs.  The test below looks only at the sign
	 bits, and it really just
	 SIGN (A) == SIGN (B) && SIGN (A) != SIGN (SUM)

	 We mask with addrmask here to explicitly allow an address
	 wrap-around.  The Linux kernel relies on it, and it is
	 the only way to write assembler code which can run when
	 loaded at a location 0x80000000 away from the location at
	 which it is linked.  */
      if (((~(a ^ b)) & (a ^ sum)) & signmask & addrmask)
	flag = bfd_reloc_overflow;
      break;

    case complain_overflow_unsigned:
      /* Checking for an unsigned overflow is relatively easy:
	 trim the addresses and add, and trim the result as well.
	 Overflow is normally indicated when the result does not
	 fit in the field.  However, we also need to consider the
	 case when, e.g., fieldmask is 0x7fffffff or smaller, an
	 input is 0x80000000, and bfd_vma is only 32 bits; then we
	 will get sum == 0, but there is an overflow, since the
	 inputs did not fit in the field.  Instead of doing a
	 separate test, we can check for this by or-ing in the
	 operands when testing for the sum overflowing its final
	 field.  */
      sum = (a + b) & addrmask;
      if ((a | b | sum) & signmask)
	flag = bfd_reloc_overflow;
      break;

    default:
      abort ();
    }
  return flag;
}
1982
1983 /* Perform a relocation as part of a final link. */
1984 static bfd_reloc_status_type
1985 elfNN_kvx_final_link_relocate (reloc_howto_type *howto,
1986 bfd *input_bfd,
1987 bfd *output_bfd,
1988 asection *input_section,
1989 bfd_byte *contents,
1990 Elf_Internal_Rela *rel,
1991 bfd_vma value,
1992 struct bfd_link_info *info,
1993 asection *sym_sec,
1994 struct elf_link_hash_entry *h,
1995 bool *unresolved_reloc_p,
1996 bool save_addend,
1997 bfd_vma *saved_addend,
1998 Elf_Internal_Sym *sym)
1999 {
2000 Elf_Internal_Shdr *symtab_hdr;
2001 unsigned int r_type = howto->type;
2002 bfd_reloc_code_real_type bfd_r_type
2003 = elfNN_kvx_bfd_reloc_from_howto (howto);
2004 bfd_reloc_code_real_type new_bfd_r_type;
2005 unsigned long r_symndx;
2006 bfd_byte *hit_data = contents + rel->r_offset;
2007 bfd_vma place, off;
2008 bfd_signed_vma signed_addend;
2009 struct elf_kvx_link_hash_table *globals;
2010 bool weak_undef_p;
2011 asection *base_got;
2012 bfd_reloc_status_type rret = bfd_reloc_ok;
2013 bool resolved_to_zero;
2014 globals = elf_kvx_hash_table (info);
2015
2016 symtab_hdr = &elf_symtab_hdr (input_bfd);
2017
2018 BFD_ASSERT (is_kvx_elf (input_bfd));
2019
2020 r_symndx = ELFNN_R_SYM (rel->r_info);
2021
2022 /* It is possible to have linker relaxations on some TLS access
2023 models. Update our information here. */
2024 new_bfd_r_type = kvx_tls_transition (input_bfd, info, r_type, h, r_symndx);
2025 if (new_bfd_r_type != bfd_r_type)
2026 {
2027 bfd_r_type = new_bfd_r_type;
2028 howto = elfNN_kvx_howto_from_bfd_reloc (bfd_r_type);
2029 BFD_ASSERT (howto != NULL);
2030 r_type = howto->type;
2031 }
2032
2033 place = input_section->output_section->vma
2034 + input_section->output_offset + rel->r_offset;
2035
2036 /* Get addend, accumulating the addend for consecutive relocs
2037 which refer to the same offset. */
2038 signed_addend = saved_addend ? *saved_addend : 0;
2039 signed_addend += rel->r_addend;
2040
2041 weak_undef_p = (h ? h->root.type == bfd_link_hash_undefweak
2042 : bfd_is_und_section (sym_sec));
2043 resolved_to_zero = (h != NULL
2044 && UNDEFWEAK_NO_DYNAMIC_RELOC (info, h));
2045
2046 switch (bfd_r_type)
2047 {
2048 case BFD_RELOC_KVX_NN:
2049 #if ARCH_SIZE == 64
2050 case BFD_RELOC_KVX_32:
2051 #endif
2052 case BFD_RELOC_KVX_S37_LO10:
2053 case BFD_RELOC_KVX_S37_UP27:
2054
2055 case BFD_RELOC_KVX_S32_LO5:
2056 case BFD_RELOC_KVX_S32_UP27:
2057
2058 case BFD_RELOC_KVX_S43_LO10:
2059 case BFD_RELOC_KVX_S43_UP27:
2060 case BFD_RELOC_KVX_S43_EX6:
2061
2062 case BFD_RELOC_KVX_S64_LO10:
2063 case BFD_RELOC_KVX_S64_UP27:
2064 case BFD_RELOC_KVX_S64_EX27:
2065 /* When generating a shared object or relocatable executable, these
2066 relocations are copied into the output file to be resolved at
2067 run time. */
2068 if (((bfd_link_pic (info) == true)
2069 || globals->root.is_relocatable_executable)
2070 && (input_section->flags & SEC_ALLOC)
2071 && (h == NULL
2072 || (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT && !resolved_to_zero)
2073 || h->root.type != bfd_link_hash_undefweak))
2074 {
2075 Elf_Internal_Rela outrel;
2076 bfd_byte *loc;
2077 bool skip, relocate;
2078 asection *sreloc;
2079
2080 *unresolved_reloc_p = false;
2081
2082 skip = false;
2083 relocate = false;
2084
2085 outrel.r_addend = signed_addend;
2086 outrel.r_offset =
2087 _bfd_elf_section_offset (output_bfd, info, input_section,
2088 rel->r_offset);
2089 if (outrel.r_offset == (bfd_vma) - 1)
2090 skip = true;
2091 else if (outrel.r_offset == (bfd_vma) - 2)
2092 {
2093 skip = true;
2094 relocate = true;
2095 }
2096
2097 outrel.r_offset += (input_section->output_section->vma
2098 + input_section->output_offset);
2099
2100 if (skip)
2101 memset (&outrel, 0, sizeof outrel);
2102 else if (h != NULL
2103 && h->dynindx != -1
2104 && (!bfd_link_pic (info) || !info->symbolic || !h->def_regular))
2105 outrel.r_info = ELFNN_R_INFO (h->dynindx, r_type);
2106 else if (bfd_r_type == BFD_RELOC_KVX_32
2107 || bfd_r_type == BFD_RELOC_KVX_64)
2108 {
2109 int symbol;
2110
2111 /* On SVR4-ish systems, the dynamic loader cannot
2112 relocate the text and data segments independently,
2113 so the symbol does not matter. */
2114 symbol = 0;
2115 outrel.r_info = ELFNN_R_INFO (symbol, R_KVX_RELATIVE);
2116 outrel.r_addend += value;
2117 }
2118 else if (bfd_link_pic (info) && info->symbolic)
2119 {
2120 goto skip_because_pic;
2121 }
2122 else
2123 {
2124 /* We may endup here from bad input code trying to
2125 insert relocation on symbols within code. We do not
2126 want that currently, and such code should use GOT +
2127 KVX_32/64 reloc that translate in KVX_RELATIVE
2128 */
2129 const char *name;
2130 if (h && h->root.root.string)
2131 name = h->root.root.string;
2132 else
2133 name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym,
2134 NULL);
2135
2136 (*_bfd_error_handler)
2137 /* xgettext:c-format */
2138 (_("%pB(%pA+%#" PRIx64 "): "
2139 "unresolvable %s relocation in section `%s'"),
2140 input_bfd, input_section, (uint64_t) rel->r_offset, howto->name,
2141 name);
2142 return bfd_reloc_notsupported;
2143 }
2144
2145 sreloc = elf_section_data (input_section)->sreloc;
2146 if (sreloc == NULL || sreloc->contents == NULL)
2147 return bfd_reloc_notsupported;
2148
2149 loc = sreloc->contents + sreloc->reloc_count++ * RELOC_SIZE (globals);
2150 bfd_elfNN_swap_reloca_out (output_bfd, &outrel, loc);
2151
2152 if (sreloc->reloc_count * RELOC_SIZE (globals) > sreloc->size)
2153 {
2154 /* Sanity to check that we have previously allocated
2155 sufficient space in the relocation section for the
2156 number of relocations we actually want to emit. */
2157 abort ();
2158 }
2159
2160 /* If this reloc is against an external symbol, we do not want to
2161 fiddle with the addend. Otherwise, we need to include the symbol
2162 value so that it becomes an addend for the dynamic reloc. */
2163 if (!relocate)
2164 return bfd_reloc_ok;
2165
2166 rret = check_signed_overflow (complain_overflow_signed, bfd_r_type,
2167 input_bfd, value + signed_addend);
2168 if (rret != bfd_reloc_ok)
2169 return rret;
2170
2171 return _bfd_final_link_relocate (howto, input_bfd, input_section,
2172 contents, rel->r_offset, value,
2173 signed_addend);
2174 }
2175
2176 skip_because_pic:
2177 rret = check_signed_overflow (complain_overflow_signed, bfd_r_type,
2178 input_bfd, value + signed_addend);
2179 if (rret != bfd_reloc_ok)
2180 return rret;
2181
2182 return _bfd_final_link_relocate (howto, input_bfd, input_section,
2183 contents, rel->r_offset, value,
2184 signed_addend);
2185 break;
2186
2187 case BFD_RELOC_KVX_PCREL17:
2188 case BFD_RELOC_KVX_PCREL27:
2189 {
2190 /*
2191 * BCU insn are always first in a bundle, so there is no need
2192 * to correct the address using offset within bundle
2193 */
2194
2195 asection *splt = globals->root.splt;
2196 bool via_plt_p =
2197 splt != NULL && h != NULL && h->plt.offset != (bfd_vma) - 1;
2198
2199 /* A call to an undefined weak symbol is converted to a jump to
2200 the next instruction unless a PLT entry will be created.
2201 The jump to the next instruction is optimized as a NOP.
2202 Do the same for local undefined symbols. */
2203 if (weak_undef_p && ! via_plt_p)
2204 {
2205 bfd_putl32 (INSN_NOP, hit_data);
2206 return bfd_reloc_ok;
2207 }
2208
2209 /* If the call goes through a PLT entry, make sure to
2210 check distance to the right destination address. */
2211 if (via_plt_p)
2212 value = (splt->output_section->vma
2213 + splt->output_offset + h->plt.offset);
2214
2215 /* Check if a stub has to be inserted because the destination
2216 is too far away. */
2217 struct elf_kvx_stub_hash_entry *stub_entry = NULL;
2218
2219 /* If the target symbol is global and marked as a function the
2220 relocation applies a function call or a tail call. In this
2221 situation we can veneer out of range branches. The veneers
2222 use R16 and R17 hence cannot be used arbitrary out of range
2223 branches that occur within the body of a function. */
2224
2225 /* Check if a stub has to be inserted because the destination
2226 is too far away. */
2227 if (! kvx_valid_call_p (value, place))
2228 {
2229 /* The target is out of reach, so redirect the branch to
2230 the local stub for this function. */
2231 stub_entry = elfNN_kvx_get_stub_entry (input_section,
2232 sym_sec, h,
2233 rel, globals);
2234 if (stub_entry != NULL)
2235 value = (stub_entry->stub_offset
2236 + stub_entry->stub_sec->output_offset
2237 + stub_entry->stub_sec->output_section->vma);
2238 /* We have redirected the destination to stub entry address,
2239 so ignore any addend record in the original rela entry. */
2240 signed_addend = 0;
2241 }
2242 }
2243 *unresolved_reloc_p = false;
2244
2245 /* FALLTHROUGH */
2246
2247 /* PCREL 32 are used in dwarf2 table for exception handling */
2248 case BFD_RELOC_KVX_32_PCREL:
2249 case BFD_RELOC_KVX_S64_PCREL_LO10:
2250 case BFD_RELOC_KVX_S64_PCREL_UP27:
2251 case BFD_RELOC_KVX_S64_PCREL_EX27:
2252 case BFD_RELOC_KVX_S37_PCREL_LO10:
2253 case BFD_RELOC_KVX_S37_PCREL_UP27:
2254 case BFD_RELOC_KVX_S43_PCREL_LO10:
2255 case BFD_RELOC_KVX_S43_PCREL_UP27:
2256 case BFD_RELOC_KVX_S43_PCREL_EX6:
2257 return _bfd_final_link_relocate (howto, input_bfd, input_section,
2258 contents, rel->r_offset, value,
2259 signed_addend);
2260 break;
2261
2262 case BFD_RELOC_KVX_S37_TLS_LE_LO10:
2263 case BFD_RELOC_KVX_S37_TLS_LE_UP27:
2264
2265 case BFD_RELOC_KVX_S43_TLS_LE_LO10:
2266 case BFD_RELOC_KVX_S43_TLS_LE_UP27:
2267 case BFD_RELOC_KVX_S43_TLS_LE_EX6:
2268 return _bfd_final_link_relocate (howto, input_bfd, input_section,
2269 contents, rel->r_offset, value - tpoff_base (info),
2270 signed_addend);
2271 break;
2272
2273 case BFD_RELOC_KVX_S37_TLS_DTPOFF_LO10:
2274 case BFD_RELOC_KVX_S37_TLS_DTPOFF_UP27:
2275
2276 case BFD_RELOC_KVX_S43_TLS_DTPOFF_LO10:
2277 case BFD_RELOC_KVX_S43_TLS_DTPOFF_UP27:
2278 case BFD_RELOC_KVX_S43_TLS_DTPOFF_EX6:
2279 return _bfd_final_link_relocate (howto, input_bfd, input_section,
2280 contents, rel->r_offset, value - dtpoff_base (info),
2281 signed_addend);
2282
2283 case BFD_RELOC_KVX_S37_TLS_GD_UP27:
2284 case BFD_RELOC_KVX_S37_TLS_GD_LO10:
2285
2286 case BFD_RELOC_KVX_S43_TLS_GD_UP27:
2287 case BFD_RELOC_KVX_S43_TLS_GD_EX6:
2288 case BFD_RELOC_KVX_S43_TLS_GD_LO10:
2289
2290 case BFD_RELOC_KVX_S37_TLS_IE_UP27:
2291 case BFD_RELOC_KVX_S37_TLS_IE_LO10:
2292
2293 case BFD_RELOC_KVX_S43_TLS_IE_UP27:
2294 case BFD_RELOC_KVX_S43_TLS_IE_EX6:
2295 case BFD_RELOC_KVX_S43_TLS_IE_LO10:
2296
2297 case BFD_RELOC_KVX_S37_TLS_LD_UP27:
2298 case BFD_RELOC_KVX_S37_TLS_LD_LO10:
2299
2300 case BFD_RELOC_KVX_S43_TLS_LD_UP27:
2301 case BFD_RELOC_KVX_S43_TLS_LD_EX6:
2302 case BFD_RELOC_KVX_S43_TLS_LD_LO10:
2303
2304 if (globals->root.sgot == NULL)
2305 return bfd_reloc_notsupported;
2306 value = symbol_got_offset (input_bfd, h, r_symndx);
2307
2308 _bfd_final_link_relocate (howto, input_bfd, input_section,
2309 contents, rel->r_offset, value,
2310 signed_addend);
2311 *unresolved_reloc_p = false;
2312 break;
2313
2314 case BFD_RELOC_KVX_S37_GOTADDR_UP27:
2315 case BFD_RELOC_KVX_S37_GOTADDR_LO10:
2316
2317 case BFD_RELOC_KVX_S43_GOTADDR_UP27:
2318 case BFD_RELOC_KVX_S43_GOTADDR_EX6:
2319 case BFD_RELOC_KVX_S43_GOTADDR_LO10:
2320
2321 case BFD_RELOC_KVX_S64_GOTADDR_UP27:
2322 case BFD_RELOC_KVX_S64_GOTADDR_EX27:
2323 case BFD_RELOC_KVX_S64_GOTADDR_LO10:
2324 {
2325 if (globals->root.sgot == NULL)
2326 BFD_ASSERT (h != NULL);
2327
2328 value = globals->root.sgot->output_section->vma
2329 + globals->root.sgot->output_offset;
2330
2331 return _bfd_final_link_relocate (howto, input_bfd, input_section,
2332 contents, rel->r_offset, value,
2333 signed_addend);
2334 }
2335 break;
2336
2337 case BFD_RELOC_KVX_S37_GOTOFF_LO10:
2338 case BFD_RELOC_KVX_S37_GOTOFF_UP27:
2339
2340 case BFD_RELOC_KVX_32_GOTOFF:
2341 case BFD_RELOC_KVX_64_GOTOFF:
2342
2343 case BFD_RELOC_KVX_S43_GOTOFF_LO10:
2344 case BFD_RELOC_KVX_S43_GOTOFF_UP27:
2345 case BFD_RELOC_KVX_S43_GOTOFF_EX6:
2346
2347 {
2348 asection *basegot = globals->root.sgot;
2349 /* BFD_ASSERT(h == NULL); */
2350 BFD_ASSERT(globals->root.sgot != NULL);
2351 value -= basegot->output_section->vma + basegot->output_offset;
2352 return _bfd_final_link_relocate (howto, input_bfd, input_section,
2353 contents, rel->r_offset, value,
2354 signed_addend);
2355 }
2356 break;
2357
2358 case BFD_RELOC_KVX_S37_GOT_LO10:
2359 case BFD_RELOC_KVX_S37_GOT_UP27:
2360
2361 case BFD_RELOC_KVX_32_GOT:
2362 case BFD_RELOC_KVX_64_GOT:
2363
2364 case BFD_RELOC_KVX_S43_GOT_LO10:
2365 case BFD_RELOC_KVX_S43_GOT_UP27:
2366 case BFD_RELOC_KVX_S43_GOT_EX6:
2367
2368 if (globals->root.sgot == NULL)
2369 BFD_ASSERT (h != NULL);
2370
2371 if (h != NULL)
2372 {
2373 value = kvx_calculate_got_entry_vma (h, globals, info, value,
2374 output_bfd,
2375 unresolved_reloc_p);
2376 #ifdef UGLY_DEBUG
2377 printf("GOT_LO/HI for %s, value %x\n", h->root.root.string, value);
2378 #endif
2379
2380 /* value = _bfd_kvx_elf_resolve_relocation (bfd_r_type, place, value, */
2381 /* 0, weak_undef_p); */
2382 return _bfd_final_link_relocate (howto, input_bfd, input_section,
2383 contents, rel->r_offset, value,
2384 signed_addend);
2385 }
2386 else
2387 {
2388 #ifdef UGLY_DEBUG
2389 printf("GOT_LO/HI with h NULL, initial value %x\n", value);
2390 #endif
2391
2392 bfd_vma addend = 0;
2393 struct elf_kvx_local_symbol *locals
2394 = elf_kvx_locals (input_bfd);
2395
2396 if (locals == NULL)
2397 {
2398 int howto_index = bfd_r_type - BFD_RELOC_KVX_RELOC_START;
2399 _bfd_error_handler
2400 /* xgettext:c-format */
2401 (_("%pB: local symbol descriptor table be NULL when applying "
2402 "relocation %s against local symbol"),
2403 input_bfd, elf_kvx_howto_table[howto_index].name);
2404 abort ();
2405 }
2406
2407 off = symbol_got_offset (input_bfd, h, r_symndx);
2408 base_got = globals->root.sgot;
2409 bfd_vma got_entry_addr = (base_got->output_section->vma
2410 + base_got->output_offset + off);
2411
2412 if (!symbol_got_offset_mark_p (input_bfd, h, r_symndx))
2413 {
2414 bfd_put_64 (output_bfd, value, base_got->contents + off);
2415
2416 if (bfd_link_pic (info))
2417 {
2418 asection *s;
2419 Elf_Internal_Rela outrel;
2420
2421 /* For local symbol, we have done absolute relocation in static
2422 linking stageh. While for share library, we need to update
2423 the content of GOT entry according to the share objects
2424 loading base address. So we need to generate a
2425 R_AARCH64_RELATIVE reloc for dynamic linker. */
2426 s = globals->root.srelgot;
2427 if (s == NULL)
2428 abort ();
2429
2430 outrel.r_offset = got_entry_addr;
2431 outrel.r_info = ELFNN_R_INFO (0, R_KVX_RELATIVE);
2432 outrel.r_addend = value;
2433 elf_append_rela (output_bfd, s, &outrel);
2434 }
2435
2436 symbol_got_offset_mark (input_bfd, h, r_symndx);
2437 }
2438
2439 /* Update the relocation value to GOT entry addr as we have transformed
2440 the direct data access into indirect data access through GOT. */
2441 value = got_entry_addr;
2442
2443 return _bfd_final_link_relocate (howto, input_bfd, input_section,
2444 contents, rel->r_offset, off,
2445 addend);
2446 }
2447 break;
2448
2449 default:
2450 return bfd_reloc_notsupported;
2451 }
2452
2453 if (saved_addend)
2454 *saved_addend = value;
2455
2456 /* Only apply the final relocation in a sequence. */
2457 if (save_addend)
2458 return bfd_reloc_continue;
2459
2460 return _bfd_kvx_elf_put_addend (input_bfd, hit_data, bfd_r_type,
2461 howto, value);
2462 }
2463
2464
2465
2466 /* Relocate a KVX ELF section. */
2467
2468 static int
2469 elfNN_kvx_relocate_section (bfd *output_bfd,
2470 struct bfd_link_info *info,
2471 bfd *input_bfd,
2472 asection *input_section,
2473 bfd_byte *contents,
2474 Elf_Internal_Rela *relocs,
2475 Elf_Internal_Sym *local_syms,
2476 asection **local_sections)
2477 {
2478 Elf_Internal_Shdr *symtab_hdr;
2479 struct elf_link_hash_entry **sym_hashes;
2480 Elf_Internal_Rela *rel;
2481 Elf_Internal_Rela *relend;
2482 const char *name;
2483 struct elf_kvx_link_hash_table *globals;
2484 bool save_addend = false;
2485 bfd_vma addend = 0;
2486
2487 globals = elf_kvx_hash_table (info);
2488
2489 symtab_hdr = &elf_symtab_hdr (input_bfd);
2490 sym_hashes = elf_sym_hashes (input_bfd);
2491
2492 rel = relocs;
2493 relend = relocs + input_section->reloc_count;
2494 for (; rel < relend; rel++)
2495 {
2496 unsigned int r_type;
2497 bfd_reloc_code_real_type bfd_r_type;
2498 reloc_howto_type *howto;
2499 unsigned long r_symndx;
2500 Elf_Internal_Sym *sym;
2501 asection *sec;
2502 struct elf_link_hash_entry *h;
2503 bfd_vma relocation;
2504 bfd_reloc_status_type r;
2505 arelent bfd_reloc;
2506 char sym_type;
2507 bool unresolved_reloc = false;
2508 char *error_message = NULL;
2509
2510 r_symndx = ELFNN_R_SYM (rel->r_info);
2511 r_type = ELFNN_R_TYPE (rel->r_info);
2512
2513 bfd_reloc.howto = elfNN_kvx_howto_from_type (input_bfd, r_type);
2514 howto = bfd_reloc.howto;
2515
2516 if (howto == NULL)
2517 return _bfd_unrecognized_reloc (input_bfd, input_section, r_type);
2518
2519 bfd_r_type = elfNN_kvx_bfd_reloc_from_howto (howto);
2520
2521 h = NULL;
2522 sym = NULL;
2523 sec = NULL;
2524
2525 if (r_symndx < symtab_hdr->sh_info) /* A local symbol. */
2526 {
2527 sym = local_syms + r_symndx;
2528 sym_type = ELFNN_ST_TYPE (sym->st_info);
2529 sec = local_sections[r_symndx];
2530
2531 /* An object file might have a reference to a local
2532 undefined symbol. This is a draft object file, but we
2533 should at least do something about it. */
2534 if (r_type != R_KVX_NONE
2535 && r_type != R_KVX_S37_GOTADDR_LO10
2536 && r_type != R_KVX_S37_GOTADDR_UP27
2537 && r_type != R_KVX_S64_GOTADDR_LO10
2538 && r_type != R_KVX_S64_GOTADDR_UP27
2539 && r_type != R_KVX_S64_GOTADDR_EX27
2540 && r_type != R_KVX_S43_GOTADDR_LO10
2541 && r_type != R_KVX_S43_GOTADDR_UP27
2542 && r_type != R_KVX_S43_GOTADDR_EX6
2543 && bfd_is_und_section (sec)
2544 && ELF_ST_BIND (sym->st_info) != STB_WEAK)
2545 (*info->callbacks->undefined_symbol)
2546 (info, bfd_elf_string_from_elf_section
2547 (input_bfd, symtab_hdr->sh_link, sym->st_name),
2548 input_bfd, input_section, rel->r_offset, true);
2549
2550 relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
2551 }
2552 else
2553 {
2554 bool warned, ignored;
2555
2556 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
2557 r_symndx, symtab_hdr, sym_hashes,
2558 h, sec, relocation,
2559 unresolved_reloc, warned, ignored);
2560
2561 sym_type = h->type;
2562 }
2563
2564 if (sec != NULL && discarded_section (sec))
2565 RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
2566 rel, 1, relend, howto, 0, contents);
2567
2568 if (bfd_link_relocatable (info))
2569 continue;
2570
2571 if (h != NULL)
2572 name = h->root.root.string;
2573 else
2574 {
2575 name = (bfd_elf_string_from_elf_section
2576 (input_bfd, symtab_hdr->sh_link, sym->st_name));
2577 if (name == NULL || *name == '\0')
2578 name = bfd_section_name (sec);
2579 }
2580
2581 if (r_symndx != 0
2582 && r_type != R_KVX_NONE
2583 && (h == NULL
2584 || h->root.type == bfd_link_hash_defined
2585 || h->root.type == bfd_link_hash_defweak)
2586 && IS_KVX_TLS_RELOC (bfd_r_type) != (sym_type == STT_TLS))
2587 {
2588 (*_bfd_error_handler)
2589 ((sym_type == STT_TLS
2590 /* xgettext:c-format */
2591 ? _("%pB(%pA+%#lx): %s used with TLS symbol %s")
2592 /* xgettext:c-format */
2593 : _("%pB(%pA+%#lx): %s used with non-TLS symbol %s")),
2594 input_bfd,
2595 input_section, (long) rel->r_offset, howto->name, name);
2596 }
2597
2598 /* Original aarch64 has relaxation handling for TLS here. */
2599 r = bfd_reloc_continue;
2600
2601 /* There may be multiple consecutive relocations for the
2602 same offset. In that case we are supposed to treat the
2603 output of each relocation as the addend for the next. */
2604 if (rel + 1 < relend
2605 && rel->r_offset == rel[1].r_offset
2606 && ELFNN_R_TYPE (rel[1].r_info) != R_KVX_NONE)
2607
2608 save_addend = true;
2609 else
2610 save_addend = false;
2611
2612 if (r == bfd_reloc_continue)
2613 r = elfNN_kvx_final_link_relocate (howto, input_bfd, output_bfd,
2614 input_section, contents, rel,
2615 relocation, info, sec,
2616 h, &unresolved_reloc,
2617 save_addend, &addend, sym);
2618
2619 switch (elfNN_kvx_bfd_reloc_from_type (input_bfd, r_type))
2620 {
2621 case BFD_RELOC_KVX_S37_TLS_GD_LO10:
2622 case BFD_RELOC_KVX_S37_TLS_GD_UP27:
2623
2624 case BFD_RELOC_KVX_S43_TLS_GD_LO10:
2625 case BFD_RELOC_KVX_S43_TLS_GD_UP27:
2626 case BFD_RELOC_KVX_S43_TLS_GD_EX6:
2627
2628 case BFD_RELOC_KVX_S37_TLS_LD_LO10:
2629 case BFD_RELOC_KVX_S37_TLS_LD_UP27:
2630
2631 case BFD_RELOC_KVX_S43_TLS_LD_LO10:
2632 case BFD_RELOC_KVX_S43_TLS_LD_UP27:
2633 case BFD_RELOC_KVX_S43_TLS_LD_EX6:
2634
2635 if (! symbol_got_offset_mark_p (input_bfd, h, r_symndx))
2636 {
2637 bool need_relocs = false;
2638 bfd_byte *loc;
2639 int indx;
2640 bfd_vma off;
2641
2642 off = symbol_got_offset (input_bfd, h, r_symndx);
2643 indx = h && h->dynindx != -1 ? h->dynindx : 0;
2644
2645 need_relocs =
2646 (bfd_link_pic (info) || indx != 0) &&
2647 (h == NULL
2648 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
2649 || h->root.type != bfd_link_hash_undefweak);
2650
2651 BFD_ASSERT (globals->root.srelgot != NULL);
2652
2653 if (need_relocs)
2654 {
2655 Elf_Internal_Rela rela;
2656 rela.r_info = ELFNN_R_INFO (indx, R_KVX_64_DTPMOD);
2657 rela.r_addend = 0;
2658 rela.r_offset = globals->root.sgot->output_section->vma +
2659 globals->root.sgot->output_offset + off;
2660
2661 loc = globals->root.srelgot->contents;
2662 loc += globals->root.srelgot->reloc_count++
2663 * RELOC_SIZE (htab);
2664 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
2665
2666 bfd_reloc_code_real_type real_type =
2667 elfNN_kvx_bfd_reloc_from_type (input_bfd, r_type);
2668
2669 if (real_type == BFD_RELOC_KVX_S37_TLS_LD_LO10
2670 || real_type == BFD_RELOC_KVX_S37_TLS_LD_UP27
2671 || real_type == BFD_RELOC_KVX_S43_TLS_LD_LO10
2672 || real_type == BFD_RELOC_KVX_S43_TLS_LD_UP27
2673 || real_type == BFD_RELOC_KVX_S43_TLS_LD_EX6)
2674 {
2675 /* For local dynamic, don't generate DTPOFF in any case.
2676 Initialize the DTPOFF slot into zero, so we get module
2677 base address when invoke runtime TLS resolver. */
2678 bfd_put_NN (output_bfd, 0,
2679 globals->root.sgot->contents + off
2680 + GOT_ENTRY_SIZE);
2681 }
2682 else if (indx == 0)
2683 {
2684 bfd_put_NN (output_bfd,
2685 relocation - dtpoff_base (info),
2686 globals->root.sgot->contents + off
2687 + GOT_ENTRY_SIZE);
2688 }
2689 else
2690 {
2691 /* This TLS symbol is global. We emit a
2692 relocation to fixup the tls offset at load
2693 time. */
2694 rela.r_info =
2695 ELFNN_R_INFO (indx, R_KVX_64_DTPOFF);
2696 rela.r_addend = 0;
2697 rela.r_offset =
2698 (globals->root.sgot->output_section->vma
2699 + globals->root.sgot->output_offset + off
2700 + GOT_ENTRY_SIZE);
2701
2702 loc = globals->root.srelgot->contents;
2703 loc += globals->root.srelgot->reloc_count++
2704 * RELOC_SIZE (globals);
2705 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
2706 bfd_put_NN (output_bfd, (bfd_vma) 0,
2707 globals->root.sgot->contents + off
2708 + GOT_ENTRY_SIZE);
2709 }
2710 }
2711 else
2712 {
2713 bfd_put_NN (output_bfd, (bfd_vma) 1,
2714 globals->root.sgot->contents + off);
2715 bfd_put_NN (output_bfd,
2716 relocation - dtpoff_base (info),
2717 globals->root.sgot->contents + off
2718 + GOT_ENTRY_SIZE);
2719 }
2720
2721 symbol_got_offset_mark (input_bfd, h, r_symndx);
2722 }
2723 break;
2724
2725 case BFD_RELOC_KVX_S37_TLS_IE_LO10:
2726 case BFD_RELOC_KVX_S37_TLS_IE_UP27:
2727
2728 case BFD_RELOC_KVX_S43_TLS_IE_LO10:
2729 case BFD_RELOC_KVX_S43_TLS_IE_UP27:
2730 case BFD_RELOC_KVX_S43_TLS_IE_EX6:
2731 if (! symbol_got_offset_mark_p (input_bfd, h, r_symndx))
2732 {
2733 bool need_relocs = false;
2734 bfd_byte *loc;
2735 int indx;
2736 bfd_vma off;
2737
2738 off = symbol_got_offset (input_bfd, h, r_symndx);
2739
2740 indx = h && h->dynindx != -1 ? h->dynindx : 0;
2741
2742 need_relocs =
2743 (bfd_link_pic (info) || indx != 0) &&
2744 (h == NULL
2745 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
2746 || h->root.type != bfd_link_hash_undefweak);
2747
2748 BFD_ASSERT (globals->root.srelgot != NULL);
2749
2750 if (need_relocs)
2751 {
2752 Elf_Internal_Rela rela;
2753
2754 if (indx == 0)
2755 rela.r_addend = relocation - dtpoff_base (info);
2756 else
2757 rela.r_addend = 0;
2758
2759 rela.r_info = ELFNN_R_INFO (indx, R_KVX_64_TPOFF);
2760 rela.r_offset = globals->root.sgot->output_section->vma +
2761 globals->root.sgot->output_offset + off;
2762
2763 loc = globals->root.srelgot->contents;
2764 loc += globals->root.srelgot->reloc_count++
2765 * RELOC_SIZE (htab);
2766
2767 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
2768
2769 bfd_put_NN (output_bfd, rela.r_addend,
2770 globals->root.sgot->contents + off);
2771 }
2772 else
2773 bfd_put_NN (output_bfd, relocation - tpoff_base (info),
2774 globals->root.sgot->contents + off);
2775
2776 symbol_got_offset_mark (input_bfd, h, r_symndx);
2777 }
2778 break;
2779
2780 default:
2781 break;
2782 }
2783
2784 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
2785 because such sections are not SEC_ALLOC and thus ld.so will
2786 not process them. */
2787 if (unresolved_reloc
2788 && !((input_section->flags & SEC_DEBUGGING) != 0
2789 && h->def_dynamic)
2790 && _bfd_elf_section_offset (output_bfd, info, input_section,
2791 +rel->r_offset) != (bfd_vma) - 1)
2792 {
2793 (*_bfd_error_handler)
2794 /* xgettext:c-format */
2795 (_("%pB(%pA+%#" PRIx64 "): "
2796 "unresolvable %s relocation against symbol `%s'"),
2797 input_bfd, input_section, (uint64_t) rel->r_offset, howto->name,
2798 h->root.root.string);
2799 return false;
2800 }
2801
2802 if (r != bfd_reloc_ok && r != bfd_reloc_continue)
2803 {
2804 switch (r)
2805 {
2806 case bfd_reloc_overflow:
2807 (*info->callbacks->reloc_overflow)
2808 (info, (h ? &h->root : NULL), name, howto->name, (bfd_vma) 0,
2809 input_bfd, input_section, rel->r_offset);
2810
2811 /* Original aarch64 code had a check for alignement correctness */
2812 break;
2813
2814 case bfd_reloc_undefined:
2815 (*info->callbacks->undefined_symbol)
2816 (info, name, input_bfd, input_section, rel->r_offset, true);
2817 break;
2818
2819 case bfd_reloc_outofrange:
2820 error_message = _("out of range");
2821 goto common_error;
2822
2823 case bfd_reloc_notsupported:
2824 error_message = _("unsupported relocation");
2825 goto common_error;
2826
2827 case bfd_reloc_dangerous:
2828 /* error_message should already be set. */
2829 goto common_error;
2830
2831 default:
2832 error_message = _("unknown error");
2833 /* Fall through. */
2834
2835 common_error:
2836 BFD_ASSERT (error_message != NULL);
2837 (*info->callbacks->reloc_dangerous)
2838 (info, error_message, input_bfd, input_section, rel->r_offset);
2839 break;
2840 }
2841 }
2842
2843 if (!save_addend)
2844 addend = 0;
2845 }
2846
2847 return true;
2848 }
2849
2850 /* Set the right machine number. */
2851
2852 static bool
2853 elfNN_kvx_object_p (bfd *abfd)
2854 {
2855 /* must be coherent with default arch in cpu-kvx.c */
2856 int e_set = bfd_mach_kv3_1;
2857
2858 if (elf_elfheader (abfd)->e_machine == EM_KVX)
2859 {
2860 int e_core = elf_elfheader (abfd)->e_flags & ELF_KVX_CORE_MASK;
2861 switch(e_core)
2862 {
2863 #if ARCH_SIZE == 64
2864 case ELF_KVX_CORE_KV3_1 : e_set = bfd_mach_kv3_1_64; break;
2865 case ELF_KVX_CORE_KV3_2 : e_set = bfd_mach_kv3_2_64; break;
2866 case ELF_KVX_CORE_KV4_1 : e_set = bfd_mach_kv4_1_64; break;
2867 #else
2868 case ELF_KVX_CORE_KV3_1 : e_set = bfd_mach_kv3_1; break;
2869 case ELF_KVX_CORE_KV3_2 : e_set = bfd_mach_kv3_2; break;
2870 case ELF_KVX_CORE_KV4_1 : e_set = bfd_mach_kv4_1; break;
2871 #endif
2872 default:
2873 (*_bfd_error_handler)(_("%s: Bad ELF id: `%d'"),
2874 abfd->filename, e_core);
2875 }
2876 }
2877 return bfd_default_set_arch_mach (abfd, bfd_arch_kvx, e_set);
2878
2879 }
2880
2881 /* Function to keep KVX specific flags in the ELF header. */
2882
2883 static bool
2884 elfNN_kvx_set_private_flags (bfd *abfd, flagword flags)
2885 {
2886 if (elf_flags_init (abfd) && elf_elfheader (abfd)->e_flags != flags)
2887 {
2888 }
2889 else
2890 {
2891 elf_elfheader (abfd)->e_flags = flags;
2892 elf_flags_init (abfd) = true;
2893 }
2894
2895 return true;
2896 }
2897
/* Merge backend specific data from an object file to the output
   object file when linking.  Rejects mixed 32/64-bit inputs; flag
   words themselves are only propagated, never diagnosed as
   incompatible.  */

static bool
elfNN_kvx_merge_private_bfd_data (bfd *ibfd, struct bfd_link_info *info)
{
  bfd *obfd = info->output_bfd;
  flagword out_flags;
  flagword in_flags;
  bool flags_compatible = true;	/* NOTE(review): never cleared below, so
				   the final return is always true.  */
  asection *sec;

  /* Check if we have the same endianness.  */
  if (!_bfd_generic_verify_endian_match (ibfd, info))
    return false;

  /* Non-KVX inputs (e.g. linker-created bfds) need no merging.  */
  if (!is_kvx_elf (ibfd) || !is_kvx_elf (obfd))
    return true;

  /* The input BFD must have had its flags initialised.  */
  /* The following seems bogus to me -- The flags are initialized in
     the assembler but I don't think an elf_flags_init field is
     written into the object.  */
  /* BFD_ASSERT (elf_flags_init (ibfd)); */

  /* Mixing 32-bit and 64-bit objects is a hard error.  */
  if (bfd_get_arch_size (ibfd) != bfd_get_arch_size (obfd))
    {
      const char *msg;

      if (bfd_get_arch_size (ibfd) == 32
	  && bfd_get_arch_size (obfd) == 64)
	msg = _("%s: compiled as 32-bit object and %s is 64-bit");
      else if (bfd_get_arch_size (ibfd) == 64
	       && bfd_get_arch_size (obfd) == 32)
	msg = _("%s: compiled as 64-bit object and %s is 32-bit");
      else
	msg = _("%s: object size does not match that of target %s");

      (*_bfd_error_handler) (msg, bfd_get_filename (ibfd),
			     bfd_get_filename (obfd));
      bfd_set_error (bfd_error_wrong_format);
      return false;
    }

  in_flags = elf_elfheader (ibfd)->e_flags;
  out_flags = elf_elfheader (obfd)->e_flags;

  if (!elf_flags_init (obfd))
    {
      /* If the input is the default architecture and had the default
	 flags then do not bother setting the flags for the output
	 architecture, instead allow future merges to do this.  If no
	 future merges ever set these flags then they will retain their
	 uninitialised values, which surprise surprise, correspond
	 to the default values.  */
      if (bfd_get_arch_info (ibfd)->the_default
	  && elf_elfheader (ibfd)->e_flags == 0)
	return true;

      elf_flags_init (obfd) = true;
      elf_elfheader (obfd)->e_flags = in_flags;

      if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
	  && bfd_get_arch_info (obfd)->the_default)
	return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd),
				  bfd_get_mach (ibfd));

      return true;
    }

  /* Identical flags must be compatible.  */
  if (in_flags == out_flags)
    return true;

  /* Check to see if the input BFD actually contains any sections.  If
     not, its flags may not have been initialised either, but it
     cannot actually cause any incompatibility.  Do not short-circuit
     dynamic objects; their section list may be emptied by
     elf_link_add_object_symbols.

     Also check to see if there are no code sections in the input.
     In this case there is no need to check for code specific flags.
     XXX - do we need to worry about floating-point format compatibility
     in data sections ?  */
  if (!(ibfd->flags & DYNAMIC))
    {
      bool null_input_bfd = true;
      bool only_data_sections = true;

      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	{
	  if ((bfd_section_flags (sec)
	       & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
	      == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
	    only_data_sections = false;

	  null_input_bfd = false;
	  /* NOTE(review): this unconditional break means only the
	     FIRST section is examined; inherited from the ARM/AArch64
	     ports — confirm whether scanning all sections was
	     intended.  */
	  break;
	}

      if (null_input_bfd || only_data_sections)
	return true;
    }
  return flags_compatible;
}
3003
3004 /* Display the flags field. */
3005
3006 static bool
3007 elfNN_kvx_print_private_bfd_data (bfd *abfd, void *ptr)
3008 {
3009 FILE *file = (FILE *) ptr;
3010 unsigned long flags;
3011
3012 BFD_ASSERT (abfd != NULL && ptr != NULL);
3013
3014 /* Print normal ELF private data. */
3015 _bfd_elf_print_private_bfd_data (abfd, ptr);
3016
3017 flags = elf_elfheader (abfd)->e_flags;
3018 /* Ignore init flag - it may not be set, despite the flags field
3019 containing valid data. */
3020
3021 /* xgettext:c-format */
3022 fprintf (file, _("Private flags = 0x%lx : "), elf_elfheader (abfd)->e_flags);
3023 if((flags & ELF_KVX_ABI_64B_ADDR_BIT) == ELF_KVX_ABI_64B_ADDR_BIT)
3024 {
3025 if (ELF_KVX_CHECK_CORE(flags,ELF_KVX_CORE_KV3_1))
3026 fprintf (file, _("Coolidge (kv3) V1 64 bits"));
3027 else if (ELF_KVX_CHECK_CORE(flags,ELF_KVX_CORE_KV3_2))
3028 fprintf (file, _("Coolidge (kv3) V2 64 bits"));
3029 else if (ELF_KVX_CHECK_CORE(flags,ELF_KVX_CORE_KV4_1))
3030 fprintf (file, _("Coolidge (kv4) V1 64 bits"));
3031 }
3032 else
3033 {
3034 if (ELF_KVX_CHECK_CORE(flags,ELF_KVX_CORE_KV3_1))
3035 fprintf (file, _("Coolidge (kv3) V1 32 bits"));
3036 else if (ELF_KVX_CHECK_CORE(flags,ELF_KVX_CORE_KV3_2))
3037 fprintf (file, _("Coolidge (kv3) V2 32 bits"));
3038 else if (ELF_KVX_CHECK_CORE(flags,ELF_KVX_CORE_KV4_1))
3039 fprintf (file, _("Coolidge (kv4) V1 32 bits"));
3040 }
3041
3042 fputc ('\n', file);
3043
3044 return true;
3045 }
3046
/* Adjust a symbol defined by a dynamic object and referenced by a
   regular object.  The current definition is in some section of the
   dynamic object, but we're not including those sections.  We have to
   change the definition to something the rest of the link can
   understand.  */

static bool
elfNN_kvx_adjust_dynamic_symbol (struct bfd_link_info *info,
				 struct elf_link_hash_entry *h)
{
  struct elf_kvx_link_hash_table *htab;
  asection *s;

  /* If this is a function, put it in the procedure linkage table.  We
     will fill in the contents of the procedure linkage table later,
     when we know the address of the .got section.  */
  if (h->type == STT_FUNC || h->needs_plt)
    {
      /* A PLT entry is not needed when every call resolves locally,
	 or for an undefined weak symbol with non-default
	 visibility.  */
      if (h->plt.refcount <= 0
	  || ((SYMBOL_CALLS_LOCAL (info, h)
	       || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
		   && h->root.type == bfd_link_hash_undefweak))))
	{
	  /* This case can occur if we saw a CALL26 reloc in
	     an input file, but the symbol wasn't referred to
	     by a dynamic object or all references were
	     garbage collected.  In which case we can end up
	     resolving.  */
	  h->plt.offset = (bfd_vma) - 1;
	  h->needs_plt = 0;
	}

      /* Function symbols never need the copy-reloc handling below.  */
      return true;
    }
  else
    /* Otherwise, reset to -1.  */
    h->plt.offset = (bfd_vma) - 1;


  /* If this is a weak symbol, and there is a real definition, the
     processor independent code will have arranged for us to see the
     real definition first, and we can just use the same value.  */
  if (h->is_weakalias)
    {
      struct elf_link_hash_entry *def = weakdef (h);
      BFD_ASSERT (def->root.type == bfd_link_hash_defined);
      h->root.u.def.section = def->root.u.def.section;
      h->root.u.def.value = def->root.u.def.value;
      if (ELIMINATE_COPY_RELOCS || info->nocopyreloc)
	h->non_got_ref = def->non_got_ref;
      return true;
    }

  /* If we are creating a shared library, we must presume that the
     only references to the symbol are via the global offset table.
     For such cases we need not do anything here; the relocations will
     be handled correctly by relocate_section.  */
  if (bfd_link_pic (info))
    return true;

  /* If there are no references to this symbol that do not use the
     GOT, we don't need to generate a copy reloc.  */
  if (!h->non_got_ref)
    return true;

  /* If -z nocopyreloc was given, we won't generate them either.  */
  if (info->nocopyreloc)
    {
      h->non_got_ref = 0;
      return true;
    }

  /* We must allocate the symbol in our .dynbss section, which will
     become part of the .bss section of the executable.  There will be
     an entry for this symbol in the .dynsym section.  The dynamic
     object will contain position independent code, so all references
     from the dynamic object to this symbol will go through the global
     offset table.  The dynamic linker will use the .dynsym entry to
     determine the address it must put in the global offset table, so
     both the dynamic object and the regular object will refer to the
     same memory location for the variable.  */

  htab = elf_kvx_hash_table (info);

  /* We must generate a R_KVX_COPY reloc to tell the dynamic linker
     to copy the initial value out of the dynamic object and into the
     runtime process image.  */
  if ((h->root.u.def.section->flags & SEC_ALLOC) != 0 && h->size != 0)
    {
      htab->srelbss->size += RELOC_SIZE (htab);
      h->needs_copy = 1;
    }

  s = htab->sdynbss;

  return _bfd_elf_adjust_dynamic_copy (info, h, s);

}
3145
3146 static bool
3147 elfNN_kvx_allocate_local_symbols (bfd *abfd, unsigned number)
3148 {
3149 struct elf_kvx_local_symbol *locals;
3150 locals = elf_kvx_locals (abfd);
3151 if (locals == NULL)
3152 {
3153 locals = (struct elf_kvx_local_symbol *)
3154 bfd_zalloc (abfd, number * sizeof (struct elf_kvx_local_symbol));
3155 if (locals == NULL)
3156 return false;
3157 elf_kvx_locals (abfd) = locals;
3158 }
3159 return true;
3160 }
3161
/* Create the .got section to hold the global offset table, along with
   its companion relocation section and (optionally) .got.plt.  Also
   defines _GLOBAL_OFFSET_TABLE_ when the backend wants it.  Returns
   false on section-creation failure.  */

static bool
kvx_elf_create_got_section (bfd *abfd, struct bfd_link_info *info)
{
  const struct elf_backend_data *bed = get_elf_backend_data (abfd);
  flagword flags;
  asection *s;
  struct elf_link_hash_entry *h;
  struct elf_link_hash_table *htab = elf_hash_table (info);

  /* This function may be called more than once.  An existing .got
     linker section means all the work below was already done.  */
  s = bfd_get_linker_section (abfd, ".got");
  if (s != NULL)
    return true;

  flags = bed->dynamic_sec_flags;

  /* Relocation section for the GOT; read-only since the dynamic
     linker only consumes it.  */
  s = bfd_make_section_anyway_with_flags (abfd,
					  (bed->rela_plts_and_copies_p
					   ? ".rela.got" : ".rel.got"),
					  (bed->dynamic_sec_flags
					   | SEC_READONLY));
  if (s == NULL
      || !bfd_set_section_alignment (s, bed->s->log_file_align))

    return false;
  htab->srelgot = s;

  s = bfd_make_section_anyway_with_flags (abfd, ".got", flags);
  if (s == NULL
      || !bfd_set_section_alignment (s, bed->s->log_file_align))
    return false;
  htab->sgot = s;
  /* Reserve the first .got slot up front.  */
  htab->sgot->size += GOT_ENTRY_SIZE;

  if (bed->want_got_sym)
    {
      /* Define the symbol _GLOBAL_OFFSET_TABLE_ at the start of the .got
	 (or .got.plt) section.  We don't do this in the linker script
	 because we don't want to define the symbol if we are not creating
	 a global offset table.  */
      h = _bfd_elf_define_linkage_sym (abfd, info, s,
				       "_GLOBAL_OFFSET_TABLE_");
      elf_hash_table (info)->hgot = h;
      if (h == NULL)
	return false;
    }

  if (bed->want_got_plt)
    {
      s = bfd_make_section_anyway_with_flags (abfd, ".got.plt", flags);
      if (s == NULL
	  || !bfd_set_section_alignment (s,
					 bed->s->log_file_align))
	return false;
      htab->sgotplt = s;
    }

  /* The first bit of the global offset table is the header.
     NOTE(review): at this point S is .got.plt when want_got_plt,
     otherwise still .got — the header is charged to whichever was
     created last, mirroring other ELF backends.  */
  s->size += bed->got_header_size;

  /* we still need to handle got content when doing static link with PIC */
  if (bfd_link_executable (info) && !bfd_link_pic (info)) {
    htab->dynobj = abfd;
  }

  return true;
}
3231
/* Look through the relocs for a section during the first phase.

   For each relocation in SEC of ABFD, record the bookkeeping needed
   later by the sizing/allocation hooks: bump PLT/GOT refcounts, note
   non-GOT references, track GOT entry types (normal vs TLS flavours),
   and reserve dynamic-reloc space for absolute references in shared
   objects.  Returns false on a malformed symbol index or allocation
   failure.  */

static bool
elfNN_kvx_check_relocs (bfd *abfd, struct bfd_link_info *info,
			asection *sec, const Elf_Internal_Rela *relocs)
{
  Elf_Internal_Shdr *symtab_hdr;
  struct elf_link_hash_entry **sym_hashes;
  const Elf_Internal_Rela *rel;
  const Elf_Internal_Rela *rel_end;
  asection *sreloc;

  struct elf_kvx_link_hash_table *htab;

  /* Nothing to record for ld -r; relocs are simply copied through.  */
  if (bfd_link_relocatable (info))
    return true;

  BFD_ASSERT (is_kvx_elf (abfd));

  htab = elf_kvx_hash_table (info);
  sreloc = NULL;

  symtab_hdr = &elf_symtab_hdr (abfd);
  sym_hashes = elf_sym_hashes (abfd);

  rel_end = relocs + sec->reloc_count;
  for (rel = relocs; rel < rel_end; rel++)
    {
      struct elf_link_hash_entry *h;
      unsigned int r_symndx;
      unsigned int r_type;
      bfd_reloc_code_real_type bfd_r_type;
      Elf_Internal_Sym *isym;

      r_symndx = ELFNN_R_SYM (rel->r_info);
      r_type = ELFNN_R_TYPE (rel->r_info);

      if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr))
	{
	  /* xgettext:c-format */
	  _bfd_error_handler (_("%pB: bad symbol index: %d"), abfd, r_symndx);
	  return false;
	}

      if (r_symndx < symtab_hdr->sh_info)
	{
	  /* A local symbol.  */
	  isym = bfd_sym_from_r_symndx (&htab->sym_cache,
					abfd, r_symndx);
	  if (isym == NULL)
	    return false;

	  h = NULL;
	}
      else
	{
	  /* Global symbol: resolve through indirect/warning links to
	     the real hash entry.  */
	  h = sym_hashes[r_symndx - symtab_hdr->sh_info];
	  while (h->root.type == bfd_link_hash_indirect
		 || h->root.type == bfd_link_hash_warning)
	    h = (struct elf_link_hash_entry *) h->root.u.i.link;
	}

      /* Could be done earlier, if h were already available.  */
      bfd_r_type = kvx_tls_transition (abfd, info, r_type, h, r_symndx);

      if (h != NULL)
	{
	  /* Create the ifunc sections for static executables.  If we
	     never see an indirect function symbol nor we are building
	     a static executable, those sections will be empty and
	     won't appear in output.
	     NOTE(review): this switch is currently vestigial — no
	     case is handled; ifunc support is not wired up.  */
	  switch (bfd_r_type)
	    {
	    default:
	      break;
	    }

	  /* It is referenced by a non-shared object.  */
	  h->ref_regular = 1;
	}

      switch (bfd_r_type)
	{
	/* Absolute or symbol-addressed relocs: may need dynamic reloc
	   space when building a shared object.  */
	case BFD_RELOC_KVX_S43_LO10:
	case BFD_RELOC_KVX_S43_UP27:
	case BFD_RELOC_KVX_S43_EX6:

	case BFD_RELOC_KVX_S37_LO10:
	case BFD_RELOC_KVX_S37_UP27:

	case BFD_RELOC_KVX_S64_LO10:
	case BFD_RELOC_KVX_S64_UP27:
	case BFD_RELOC_KVX_S64_EX27:

	case BFD_RELOC_KVX_32:
	case BFD_RELOC_KVX_64:

	  /* We don't need to handle relocs into sections not going into
	     the "real" output.  */
	  if ((sec->flags & SEC_ALLOC) == 0)
	    break;

	  if (h != NULL)
	    {
	      if (!bfd_link_pic (info))
		h->non_got_ref = 1;

	      h->plt.refcount += 1;
	      h->pointer_equality_needed = 1;
	    }

	  /* No need to do anything if we're not creating a shared
	     object.  */
	  if (! bfd_link_pic (info))
	    break;

	  {
	    struct elf_dyn_relocs *p;
	    struct elf_dyn_relocs **head;

	    /* We must copy these reloc types into the output file.
	       Create a reloc section in dynobj and make room for
	       this reloc.  */
	    if (sreloc == NULL)
	      {
		if (htab->root.dynobj == NULL)
		  htab->root.dynobj = abfd;

		sreloc = _bfd_elf_make_dynamic_reloc_section
		  (sec, htab->root.dynobj, LOG_FILE_ALIGN, abfd, /*rela? */ true);

		if (sreloc == NULL)
		  return false;
	      }

	    /* If this is a global symbol, we count the number of
	       relocations we need for this symbol.  */
	    if (h != NULL)
	      {
		head = &h->dyn_relocs;
	      }
	    else
	      {
		/* Track dynamic relocs needed for local syms too.
		   We really need local syms available to do this
		   easily.  Oh well.  */

		asection *s;
		void **vpp;

		isym = bfd_sym_from_r_symndx (&htab->sym_cache,
					      abfd, r_symndx);
		if (isym == NULL)
		  return false;

		s = bfd_section_from_elf_index (abfd, isym->st_shndx);
		if (s == NULL)
		  s = sec;

		/* Beware of type punned pointers vs strict aliasing
		   rules.  */
		vpp = &(elf_section_data (s)->local_dynrel);
		head = (struct elf_dyn_relocs **) vpp;
	      }

	    /* The list is most-recent-section first; extend the head
	       entry when it matches SEC, otherwise prepend a new one.  */
	    p = *head;
	    if (p == NULL || p->sec != sec)
	      {
		bfd_size_type amt = sizeof *p;
		p = ((struct elf_dyn_relocs *)
		     bfd_zalloc (htab->root.dynobj, amt));
		if (p == NULL)
		  return false;
		p->next = *head;
		*head = p;
		p->sec = sec;
	      }

	    p->count += 1;

	  }
	  break;

	/* All GOT-relative and TLS GOT-based relocs: record the GOT
	   entry type needed and make sure the .got machinery exists.  */
	case BFD_RELOC_KVX_S37_GOT_LO10:
	case BFD_RELOC_KVX_S37_GOT_UP27:

	case BFD_RELOC_KVX_S37_GOTOFF_LO10:
	case BFD_RELOC_KVX_S37_GOTOFF_UP27:

	case BFD_RELOC_KVX_S43_GOT_LO10:
	case BFD_RELOC_KVX_S43_GOT_UP27:
	case BFD_RELOC_KVX_S43_GOT_EX6:

	case BFD_RELOC_KVX_S43_GOTOFF_LO10:
	case BFD_RELOC_KVX_S43_GOTOFF_UP27:
	case BFD_RELOC_KVX_S43_GOTOFF_EX6:

	case BFD_RELOC_KVX_S37_TLS_GD_LO10:
	case BFD_RELOC_KVX_S37_TLS_GD_UP27:

	case BFD_RELOC_KVX_S43_TLS_GD_LO10:
	case BFD_RELOC_KVX_S43_TLS_GD_UP27:
	case BFD_RELOC_KVX_S43_TLS_GD_EX6:

	case BFD_RELOC_KVX_S37_TLS_IE_LO10:
	case BFD_RELOC_KVX_S37_TLS_IE_UP27:

	case BFD_RELOC_KVX_S43_TLS_IE_LO10:
	case BFD_RELOC_KVX_S43_TLS_IE_UP27:
	case BFD_RELOC_KVX_S43_TLS_IE_EX6:

	case BFD_RELOC_KVX_S37_TLS_LD_LO10:
	case BFD_RELOC_KVX_S37_TLS_LD_UP27:

	case BFD_RELOC_KVX_S43_TLS_LD_LO10:
	case BFD_RELOC_KVX_S43_TLS_LD_UP27:
	case BFD_RELOC_KVX_S43_TLS_LD_EX6:
	  {
	    unsigned got_type;
	    unsigned old_got_type;

	    got_type = kvx_reloc_got_type (bfd_r_type);

	    if (h)
	      {
		h->got.refcount += 1;
		old_got_type = elf_kvx_hash_entry (h)->got_type;
	      }
	    else
	      {
		struct elf_kvx_local_symbol *locals;

		if (!elfNN_kvx_allocate_local_symbols
		    (abfd, symtab_hdr->sh_info))
		  return false;

		locals = elf_kvx_locals (abfd);
		BFD_ASSERT (r_symndx < symtab_hdr->sh_info);
		locals[r_symndx].got_refcount += 1;
		old_got_type = locals[r_symndx].got_type;
	      }

	    /* We will already have issued an error message if there
	       is a TLS/non-TLS mismatch, based on the symbol type.
	       So just combine any TLS types needed.  */
	    if (old_got_type != GOT_UNKNOWN && old_got_type != GOT_NORMAL
		&& got_type != GOT_NORMAL)
	      got_type |= old_got_type;

	    /* If the symbol is accessed by both IE and GD methods, we
	       are able to relax.  Turn off the GD flag, without
	       messing up with any other kind of TLS types that may be
	       involved.  */
	    /* Disabled untested and unused TLS */
	    /* if ((got_type & GOT_TLS_IE) && GOT_TLS_GD_ANY_P (got_type)) */
	    /* got_type &= ~ (GOT_TLSDESC_GD | GOT_TLS_GD); */

	    if (old_got_type != got_type)
	      {
		if (h != NULL)
		  elf_kvx_hash_entry (h)->got_type = got_type;
		else
		  {
		    struct elf_kvx_local_symbol *locals;
		    locals = elf_kvx_locals (abfd);
		    BFD_ASSERT (r_symndx < symtab_hdr->sh_info);
		    locals[r_symndx].got_type = got_type;
		  }
	      }

	    if (htab->root.dynobj == NULL)
	      htab->root.dynobj = abfd;
	    if (! kvx_elf_create_got_section (htab->root.dynobj, info))
	      return false;
	    break;
	  }

	/* GOT-address relocs only need the .got section to exist;
	   no per-symbol entry is reserved.  */
	case BFD_RELOC_KVX_S64_GOTADDR_LO10:
	case BFD_RELOC_KVX_S64_GOTADDR_UP27:
	case BFD_RELOC_KVX_S64_GOTADDR_EX27:

	case BFD_RELOC_KVX_S43_GOTADDR_LO10:
	case BFD_RELOC_KVX_S43_GOTADDR_UP27:
	case BFD_RELOC_KVX_S43_GOTADDR_EX6:

	case BFD_RELOC_KVX_S37_GOTADDR_LO10:
	case BFD_RELOC_KVX_S37_GOTADDR_UP27:

	  if (htab->root.dynobj == NULL)
	    htab->root.dynobj = abfd;
	  if (! kvx_elf_create_got_section (htab->root.dynobj, info))
	    return false;
	  break;

	case BFD_RELOC_KVX_PCREL27:
	case BFD_RELOC_KVX_PCREL17:
	  /* If this is a local symbol then we resolve it
	     directly without creating a PLT entry.  */
	  if (h == NULL)
	    continue;

	  h->needs_plt = 1;
	  if (h->plt.refcount <= 0)
	    h->plt.refcount = 1;
	  else
	    h->plt.refcount += 1;
	  break;

	default:
	  break;
	}
    }

  return true;
}
3548
3549 static bool
3550 elfNN_kvx_init_file_header (bfd *abfd, struct bfd_link_info *link_info)
3551 {
3552 Elf_Internal_Ehdr *i_ehdrp; /* ELF file header, internal form. */
3553
3554 if (!_bfd_elf_init_file_header (abfd, link_info))
3555 return false;
3556
3557 i_ehdrp = elf_elfheader (abfd);
3558 i_ehdrp->e_ident[EI_ABIVERSION] = KVX_ELF_ABI_VERSION;
3559 return true;
3560 }
3561
3562 static enum elf_reloc_type_class
3563 elfNN_kvx_reloc_type_class (const struct bfd_link_info *info ATTRIBUTE_UNUSED,
3564 const asection *rel_sec ATTRIBUTE_UNUSED,
3565 const Elf_Internal_Rela *rela)
3566 {
3567 switch ((int) ELFNN_R_TYPE (rela->r_info))
3568 {
3569 case R_KVX_RELATIVE:
3570 return reloc_class_relative;
3571 case R_KVX_JMP_SLOT:
3572 return reloc_class_plt;
3573 case R_KVX_COPY:
3574 return reloc_class_copy;
3575 default:
3576 return reloc_class_normal;
3577 }
3578 }
3579
/* A structure used to record a list of sections, independently
   of the next and prev fields in the asection structure.  */
typedef struct section_list
{
  asection *sec;		/* The section this node refers to.  */
  struct section_list *next;	/* Next node in the doubly-linked list.  */
  struct section_list *prev;	/* Previous node in the doubly-linked list.  */
}
section_list;
3589
/* Context passed around while emitting linker-generated local symbols
   (stub symbols, PLT mapping symbols).  */
typedef struct
{
  void *finfo;			/* Opaque state forwarded to FUNC.  */
  struct bfd_link_info *info;	/* The link in progress.  */
  asection *sec;		/* Section currently being processed.  */
  int sec_shndx;		/* Output section index for SEC.  */
  /* Callback that actually emits one symbol; returns 1 on success.  */
  int (*func) (void *, const char *, Elf_Internal_Sym *,
	       asection *, struct elf_link_hash_entry *);
} output_arch_syminfo;
3599
3600 /* Output a single local symbol for a generated stub. */
3601
3602 static bool
3603 elfNN_kvx_output_stub_sym (output_arch_syminfo *osi, const char *name,
3604 bfd_vma offset, bfd_vma size)
3605 {
3606 Elf_Internal_Sym sym;
3607
3608 sym.st_value = (osi->sec->output_section->vma
3609 + osi->sec->output_offset + offset);
3610 sym.st_size = size;
3611 sym.st_other = 0;
3612 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
3613 sym.st_shndx = osi->sec_shndx;
3614 return osi->func (osi->finfo, name, &sym, osi->sec, NULL) == 1;
3615 }
3616
3617 static bool
3618 kvx_map_one_stub (struct bfd_hash_entry *gen_entry, void *in_arg)
3619 {
3620 struct elf_kvx_stub_hash_entry *stub_entry;
3621 asection *stub_sec;
3622 bfd_vma addr;
3623 char *stub_name;
3624 output_arch_syminfo *osi;
3625
3626 /* Massage our args to the form they really have. */
3627 stub_entry = (struct elf_kvx_stub_hash_entry *) gen_entry;
3628 osi = (output_arch_syminfo *) in_arg;
3629
3630 stub_sec = stub_entry->stub_sec;
3631
3632 /* Ensure this stub is attached to the current section being
3633 processed. */
3634 if (stub_sec != osi->sec)
3635 return true;
3636
3637 addr = (bfd_vma) stub_entry->stub_offset;
3638
3639 stub_name = stub_entry->output_name;
3640
3641 switch (stub_entry->stub_type)
3642 {
3643 case kvx_stub_long_branch:
3644 if (!elfNN_kvx_output_stub_sym
3645 (osi, stub_name, addr, sizeof (elfNN_kvx_long_branch_stub)))
3646 return false;
3647 break;
3648
3649 default:
3650 abort ();
3651 }
3652
3653 return true;
3654 }
3655
/* Output mapping symbols for linker generated sections.

   Walks the stub bfd's sections and emits a local symbol for each
   long-branch stub via kvx_map_one_stub.  Always returns true.  */

static bool
elfNN_kvx_output_arch_local_syms (bfd *output_bfd,
				  struct bfd_link_info *info,
				  void *finfo,
				  int (*func) (void *, const char *,
					       Elf_Internal_Sym *,
					       asection *,
					       struct elf_link_hash_entry
					       *))
{
  output_arch_syminfo osi;
  struct elf_kvx_link_hash_table *htab;

  htab = elf_kvx_hash_table (info);

  osi.finfo = finfo;
  osi.info = info;
  osi.func = func;

  /* Long calls stubs.  */
  if (htab->stub_bfd && htab->stub_bfd->sections)
    {
      asection *stub_sec;

      for (stub_sec = htab->stub_bfd->sections;
	   stub_sec != NULL; stub_sec = stub_sec->next)
	{
	  /* Ignore non-stub sections.  */
	  if (!strstr (stub_sec->name, STUB_SUFFIX))
	    continue;

	  osi.sec = stub_sec;

	  osi.sec_shndx = _bfd_elf_section_from_bfd_section
	    (output_bfd, osi.sec->output_section);

	  /* Emit one symbol per stub attached to this section.  */
	  bfd_hash_traverse (&htab->stub_hash_table, kvx_map_one_stub,
			     &osi);
	}
    }

  /* Finally, output mapping symbols for the PLT.  */
  if (!htab->root.splt || htab->root.splt->size == 0)
    return true;

  /* NOTE(review): the assignments below are vestigial — OSI is set up
     for the PLT but no symbol is actually emitted before returning.  */
  osi.sec_shndx = _bfd_elf_section_from_bfd_section
    (output_bfd, htab->root.splt->output_section);
  osi.sec = htab->root.splt;

  return true;

}
3710
3711 /* Allocate target specific section data. */
3712
3713 static bool
3714 elfNN_kvx_new_section_hook (bfd *abfd, asection *sec)
3715 {
3716 if (!sec->used_by_bfd)
3717 {
3718 _kvx_elf_section_data *sdata;
3719 bfd_size_type amt = sizeof (*sdata);
3720
3721 sdata = bfd_zalloc (abfd, amt);
3722 if (sdata == NULL)
3723 return false;
3724 sec->used_by_bfd = sdata;
3725 }
3726
3727 return _bfd_elf_new_section_hook (abfd, sec);
3728 }
3729
3730 /* Create dynamic sections. This is different from the ARM backend in that
3731 the got, plt, gotplt and their relocation sections are all created in the
3732 standard part of the bfd elf backend. */
3733
3734 static bool
3735 elfNN_kvx_create_dynamic_sections (bfd *dynobj,
3736 struct bfd_link_info *info)
3737 {
3738 struct elf_kvx_link_hash_table *htab;
3739
3740 /* We need to create .got section. */
3741 if (!kvx_elf_create_got_section (dynobj, info))
3742 return false;
3743
3744 if (!_bfd_elf_create_dynamic_sections (dynobj, info))
3745 return false;
3746
3747 htab = elf_kvx_hash_table (info);
3748 htab->sdynbss = bfd_get_linker_section (dynobj, ".dynbss");
3749 if (!bfd_link_pic (info))
3750 htab->srelbss = bfd_get_linker_section (dynobj, ".rela.bss");
3751
3752 if (!htab->sdynbss || (!bfd_link_pic (info) && !htab->srelbss))
3753 abort ();
3754
3755 return true;
3756 }
3757
3758
/* Allocate space in .plt, .got and associated reloc sections for
   dynamic relocs.

   Hash-table traversal callback invoked once per global symbol H,
   with INF being the bfd_link_info.  Converts the refcounts gathered
   by check_relocs into concrete section sizes and entry offsets.
   Returns false on failure to record a dynamic symbol or on a bad
   GOT type.  */

static bool
elfNN_kvx_allocate_dynrelocs (struct elf_link_hash_entry *h, void *inf)
{
  struct bfd_link_info *info;
  struct elf_kvx_link_hash_table *htab;
  struct elf_dyn_relocs *p;

  /* An example of a bfd_link_hash_indirect symbol is versioned
     symbol. For example: __gxx_personality_v0(bfd_link_hash_indirect)
     -> __gxx_personality_v0(bfd_link_hash_defined)

     There is no need to process bfd_link_hash_indirect symbols here
     because we will also be presented with the concrete instance of
     the symbol and elfNN_kvx_copy_indirect_symbol () will have been
     called to copy all relevant data from the generic to the concrete
     symbol instance.
   */
  if (h->root.type == bfd_link_hash_indirect)
    return true;

  if (h->root.type == bfd_link_hash_warning)
    h = (struct elf_link_hash_entry *) h->root.u.i.link;

  info = (struct bfd_link_info *) inf;
  htab = elf_kvx_hash_table (info);

  /* --- PLT sizing --- */
  if (htab->root.dynamic_sections_created && h->plt.refcount > 0)
    {
      /* Make sure this symbol is output as a dynamic symbol.
	 Undefined weak syms won't yet be marked as dynamic.  */
      if (h->dynindx == -1 && !h->forced_local)
	{
	  if (!bfd_elf_link_record_dynamic_symbol (info, h))
	    return false;
	}

      if (bfd_link_pic (info) || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
	{
	  asection *s = htab->root.splt;

	  /* If this is the first .plt entry, make room for the special
	     first entry.  */
	  if (s->size == 0)
	    s->size += htab->plt_header_size;

	  h->plt.offset = s->size;

	  /* If this symbol is not defined in a regular file, and we are
	     not generating a shared library, then set the symbol to this
	     location in the .plt.  This is required to make function
	     pointers compare as equal between the normal executable and
	     the shared library.  */
	  if (!bfd_link_pic (info) && !h->def_regular)
	    {
	      h->root.u.def.section = s;
	      h->root.u.def.value = h->plt.offset;
	    }

	  /* Make room for this entry. For now we only create the
	     small model PLT entries. We later need to find a way
	     of relaxing into these from the large model PLT entries.  */
	  s->size += PLT_SMALL_ENTRY_SIZE;

	  /* We also need to make an entry in the .got.plt section, which
	     will be placed in the .got section by the linker script.  */
	  htab->root.sgotplt->size += GOT_ENTRY_SIZE;

	  /* We also need to make an entry in the .rela.plt section.  */
	  htab->root.srelplt->size += RELOC_SIZE (htab);

	  /* We need to ensure that all GOT entries that serve the PLT
	     are consecutive with the special GOT slots [0] [1] and
	     [2]. Any addtional relocations must be placed after the
	     PLT related entries.  We abuse the reloc_count such that
	     during sizing we adjust reloc_count to indicate the
	     number of PLT related reserved entries. In subsequent
	     phases when filling in the contents of the reloc entries,
	     PLT related entries are placed by computing their PLT
	     index (0 .. reloc_count). While other none PLT relocs are
	     placed at the slot indicated by reloc_count and
	     reloc_count is updated.  */

	  htab->root.srelplt->reloc_count++;
	}
      else
	{
	  /* No PLT after all; mark the slot unused.  */
	  h->plt.offset = (bfd_vma) - 1;
	  h->needs_plt = 0;
	}
    }
  else
    {
      h->plt.offset = (bfd_vma) - 1;
      h->needs_plt = 0;
    }

  /* --- GOT sizing --- */
  if (h->got.refcount > 0)
    {
      bool dyn;
      unsigned got_type = elf_kvx_hash_entry (h)->got_type;

      h->got.offset = (bfd_vma) - 1;

      dyn = htab->root.dynamic_sections_created;

      /* Make sure this symbol is output as a dynamic symbol.
	 Undefined weak syms won't yet be marked as dynamic.  */
      if (dyn && h->dynindx == -1 && !h->forced_local)
	{
	  if (!bfd_elf_link_record_dynamic_symbol (info, h))
	    return false;
	}

      if (got_type == GOT_UNKNOWN)
	{
	  /* NOTE(review): H is always non-NULL here, so the ternary
	     below is redundant; message also carries a stray trailing
	     space before the closing quote.  */
	  (*_bfd_error_handler)
	    (_("relocation against `%s' has faulty GOT type "),
	     (h) ? h->root.root.string : "a local symbol");
	  bfd_set_error (bfd_error_bad_value);
	  return false;
	}
      else if (got_type == GOT_NORMAL)
	{
	  /* One plain GOT slot, plus a dynamic reloc when the entry
	     cannot be fully resolved at link time.  */
	  h->got.offset = htab->root.sgot->size;
	  htab->root.sgot->size += GOT_ENTRY_SIZE;
	  if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
	       || h->root.type != bfd_link_hash_undefweak)
	      && (bfd_link_pic (info)
		  || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
	    {
	      htab->root.srelgot->size += RELOC_SIZE (htab);
	    }
	}
      else
	{
	  int indx;

	  /* Any of these will require 2 GOT slots because
	   * they use __tls_get_addr() */
	  if (got_type & (GOT_TLS_GD | GOT_TLS_LD))
	    {
	      h->got.offset = htab->root.sgot->size;
	      htab->root.sgot->size += GOT_ENTRY_SIZE * 2;
	    }

	  if (got_type & GOT_TLS_IE)
	    {
	      h->got.offset = htab->root.sgot->size;
	      htab->root.sgot->size += GOT_ENTRY_SIZE;
	    }

	  indx = h && h->dynindx != -1 ? h->dynindx : 0;
	  if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
	       || h->root.type != bfd_link_hash_undefweak)
	      && (bfd_link_pic (info)
		  || indx != 0
		  || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
	    {
	      /* Only the GD case requires 2 relocations.  */
	      if (got_type & GOT_TLS_GD)
		htab->root.srelgot->size += RELOC_SIZE (htab) * 2;

	      /* LD needs a DTPMOD reloc, IE needs a DTPOFF.  */
	      if (got_type & (GOT_TLS_LD | GOT_TLS_IE))
		htab->root.srelgot->size += RELOC_SIZE (htab);
	    }
	}
    }
  else
    {
      h->got.offset = (bfd_vma) - 1;
    }

  if (h->dyn_relocs == NULL)
    return true;

  /* In the shared -Bsymbolic case, discard space allocated for
     dynamic pc-relative relocs against symbols which turn out to be
     defined in regular objects.  For the normal shared case, discard
     space for pc-relative relocs that have become local due to symbol
     visibility changes.  */

  if (bfd_link_pic (info))
    {
      /* Relocs that use pc_count are those that appear on a call
	 insn, or certain REL relocs that can generated via assembly.
	 We want calls to protected symbols to resolve directly to the
	 function rather than going via the plt. If people want
	 function pointer comparisons to work as expected then they
	 should avoid writing weird assembly.  */
      if (SYMBOL_CALLS_LOCAL (info, h))
	{
	  struct elf_dyn_relocs **pp;

	  for (pp = &h->dyn_relocs; (p = *pp) != NULL;)
	    {
	      p->count -= p->pc_count;
	      p->pc_count = 0;
	      if (p->count == 0)
		*pp = p->next;
	      else
		pp = &p->next;
	    }
	}

      /* Also discard relocs on undefined weak syms with non-default
	 visibility.  */
      if (h->dyn_relocs != NULL && h->root.type == bfd_link_hash_undefweak)
	{
	  if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
	      || UNDEFWEAK_NO_DYNAMIC_RELOC (info, h))
	    h->dyn_relocs = NULL;

	  /* Make sure undefined weak symbols are output as a dynamic
	     symbol in PIEs.  */
	  else if (h->dynindx == -1
		   && !h->forced_local
		   && !bfd_elf_link_record_dynamic_symbol (info, h))
	    return false;
	}

    }
  else if (ELIMINATE_COPY_RELOCS)
    {
      /* For the non-shared case, discard space for relocs against
	 symbols which turn out to need copy relocs or are not
	 dynamic.  */

      if (!h->non_got_ref
	  && ((h->def_dynamic
	       && !h->def_regular)
	      || (htab->root.dynamic_sections_created
		  && (h->root.type == bfd_link_hash_undefweak
		      || h->root.type == bfd_link_hash_undefined))))
	{
	  /* Make sure this symbol is output as a dynamic symbol.
	     Undefined weak syms won't yet be marked as dynamic.  */
	  if (h->dynindx == -1
	      && !h->forced_local
	      && !bfd_elf_link_record_dynamic_symbol (info, h))
	    return false;

	  /* If that succeeded, we know we'll be keeping all the
	     relocs.  */
	  if (h->dynindx != -1)
	    goto keep;
	}

      h->dyn_relocs = NULL;

    keep:;
    }

  /* Finally, allocate space.  */
  for (p = h->dyn_relocs; p != NULL; p = p->next)
    {
      asection *sreloc;

      sreloc = elf_section_data (p->sec)->sreloc;

      BFD_ASSERT (sreloc != NULL);

      sreloc->size += p->count * RELOC_SIZE (htab);
    }

  return true;
}
4029
4030 /* Find any dynamic relocs that apply to read-only sections. */
4031
4032 static bool
4033 kvx_readonly_dynrelocs (struct elf_link_hash_entry * h, void * inf)
4034 {
4035 struct elf_dyn_relocs * p;
4036
4037 for (p = h->dyn_relocs; p != NULL; p = p->next)
4038 {
4039 asection *s = p->sec;
4040
4041 if (s != NULL && (s->flags & SEC_READONLY) != 0)
4042 {
4043 struct bfd_link_info *info = (struct bfd_link_info *) inf;
4044
4045 info->flags |= DF_TEXTREL;
4046 info->callbacks->minfo (_("%pB: dynamic relocation against `%pT' in "
4047 "read-only section `%pA'\n"),
4048 s->owner, h->root.root.string, s);
4049
4050 /* Not an error, just cut short the traversal. */
4051 return false;
4052 }
4053 }
4054 return true;
4055 }
4056
/* This is the most important function of all . Innocuosly named
   though !

   Backend size_dynamic_sections hook: fixes .interp, assigns GOT
   offsets and dynamic-reloc space for local symbols, traverses
   globals via elfNN_kvx_allocate_dynrelocs, allocates the contents
   of all linker-created dynamic sections, and emits the needed
   DT_* dynamic entries.  */
static bool
elfNN_kvx_size_dynamic_sections (bfd *output_bfd ATTRIBUTE_UNUSED,
				 struct bfd_link_info *info)
{
  struct elf_kvx_link_hash_table *htab;
  bfd *dynobj;
  asection *s;
  bool relocs;
  bfd *ibfd;

  htab = elf_kvx_hash_table ((info));
  dynobj = htab->root.dynobj;

  BFD_ASSERT (dynobj != NULL);

  if (htab->root.dynamic_sections_created)
    {
      /* Dynamic executables get an .interp naming the dynamic
	 loader (unless --no-dynamic-linker).  */
      if (bfd_link_executable (info) && !info->nointerp)
	{
	  s = bfd_get_linker_section (dynobj, ".interp");
	  if (s == NULL)
	    abort ();
	  s->size = sizeof ELF_DYNAMIC_INTERPRETER;
	  s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER;
	}
    }

  /* Set up .got offsets for local syms, and space for local dynamic
     relocs.  */
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
    {
      struct elf_kvx_local_symbol *locals = NULL;
      Elf_Internal_Shdr *symtab_hdr;
      asection *srel;
      unsigned int i;

      if (!is_kvx_elf (ibfd))
	continue;

      for (s = ibfd->sections; s != NULL; s = s->next)
	{
	  struct elf_dyn_relocs *p;

	  for (p = (struct elf_dyn_relocs *)
	       (elf_section_data (s)->local_dynrel); p != NULL; p = p->next)
	    {
	      if (!bfd_is_abs_section (p->sec)
		  && bfd_is_abs_section (p->sec->output_section))
		{
		  /* Input section has been discarded, either because
		     it is a copy of a linkonce section or due to
		     linker script /DISCARD/, so we'll be discarding
		     the relocs too.  */
		}
	      else if (p->count != 0)
		{
		  srel = elf_section_data (p->sec)->sreloc;
		  srel->size += p->count * RELOC_SIZE (htab);
		  if ((p->sec->output_section->flags & SEC_READONLY) != 0)
		    info->flags |= DF_TEXTREL;
		}
	    }
	}

      locals = elf_kvx_locals (ibfd);
      if (!locals)
	continue;

      symtab_hdr = &elf_symtab_hdr (ibfd);
      srel = htab->root.srelgot;
      /* Walk this bfd's local symbols and hand out GOT offsets.  */
      for (i = 0; i < symtab_hdr->sh_info; i++)
	{
	  locals[i].got_offset = (bfd_vma) - 1;
	  if (locals[i].got_refcount > 0)
	    {
	      unsigned got_type = locals[i].got_type;
	      /* GD/LD TLS entries take two consecutive GOT slots.  */
	      if (got_type & (GOT_TLS_GD | GOT_TLS_LD))
		{
		  locals[i].got_offset = htab->root.sgot->size;
		  htab->root.sgot->size += GOT_ENTRY_SIZE * 2;
		}

	      if (got_type & (GOT_NORMAL | GOT_TLS_IE ))
		{
		  locals[i].got_offset = htab->root.sgot->size;
		  htab->root.sgot->size += GOT_ENTRY_SIZE;
		}

	      /* NOTE(review): GOT_UNKNOWN is silently tolerated for
		 locals (globals error out in allocate_dynrelocs).  */
	      if (got_type == GOT_UNKNOWN)
		{
		}

	      /* PIC links need a dynamic reloc per GOT slot.  */
	      if (bfd_link_pic (info))
		{
		  if (got_type & GOT_TLS_GD)
		    htab->root.srelgot->size += RELOC_SIZE (htab) * 2;

		  if (got_type & GOT_TLS_IE
		      || got_type & GOT_TLS_LD
		      || got_type & GOT_NORMAL)
		    htab->root.srelgot->size += RELOC_SIZE (htab);
		}
	    }
	  else
	    {
	      locals[i].got_refcount = (bfd_vma) - 1;
	    }
	}
    }


  /* Allocate global sym .plt and .got entries, and space for global
     sym dynamic relocs.  */
  elf_link_hash_traverse (&htab->root, elfNN_kvx_allocate_dynrelocs,
			  info);

  /* For every jump slot reserved in the sgotplt, reloc_count is
     incremented.  However, when we reserve space for TLS descriptors,
     it's not incremented, so in order to compute the space reserved
     for them, it suffices to multiply the reloc count by the jump
     slot size.  */

  if (htab->root.srelplt)
    htab->sgotplt_jump_table_size = kvx_compute_jump_table_size (htab);

  /* We now have determined the sizes of the various dynamic sections.
     Allocate memory for them.  */
  relocs = false;
  for (s = dynobj->sections; s != NULL; s = s->next)
    {
      if ((s->flags & SEC_LINKER_CREATED) == 0)
	continue;

      if (s == htab->root.splt
	  || s == htab->root.sgot
	  || s == htab->root.sgotplt
	  || s == htab->root.iplt
	  || s == htab->root.igotplt || s == htab->sdynbss)
	{
	  /* Strip this section if we don't need it; see the
	     comment below.  */
	}
      else if (startswith (bfd_section_name (s), ".rela"))
	{
	  if (s->size != 0 && s != htab->root.srelplt)
	    relocs = true;

	  /* We use the reloc_count field as a counter if we need
	     to copy relocs into the output file.  */
	  if (s != htab->root.srelplt)
	    s->reloc_count = 0;
	}
      else
	{
	  /* It's not one of our sections, so don't allocate space.  */
	  continue;
	}

      if (s->size == 0)
	{
	  /* If we don't need this section, strip it from the
	     output file.  This is mostly to handle .rela.bss and
	     .rela.plt.  We must create both sections in
	     create_dynamic_sections, because they must be created
	     before the linker maps input sections to output
	     sections.  The linker does that before
	     adjust_dynamic_symbol is called, and it is that
	     function which decides whether anything needs to go
	     into these sections.  */

	  s->flags |= SEC_EXCLUDE;
	  continue;
	}

      if ((s->flags & SEC_HAS_CONTENTS) == 0)
	continue;

      /* Allocate memory for the section contents.  We use bfd_zalloc
	 here in case unused entries are not reclaimed before the
	 section's contents are written out.  This should not happen,
	 but this way if it does, we get a R_KVX_NONE reloc instead
	 of garbage.  */
      s->contents = (bfd_byte *) bfd_zalloc (dynobj, s->size);
      if (s->contents == NULL)
	return false;
    }

  if (htab->root.dynamic_sections_created)
    {
      /* Add some entries to the .dynamic section.  We fill in the
	 values later, in elfNN_kvx_finish_dynamic_sections, but we
	 must add the entries now so that we get the correct size for
	 the .dynamic section.  The DT_DEBUG entry is filled in by the
	 dynamic linker and used by the debugger.  */
#define add_dynamic_entry(TAG, VAL)			\
      _bfd_elf_add_dynamic_entry (info, TAG, VAL)

      if (bfd_link_executable (info))
	{
	  if (!add_dynamic_entry (DT_DEBUG, 0))
	    return false;
	}

      if (htab->root.splt->size != 0)
	{
	  if (!add_dynamic_entry (DT_PLTGOT, 0)
	      || !add_dynamic_entry (DT_PLTRELSZ, 0)
	      || !add_dynamic_entry (DT_PLTREL, DT_RELA)
	      || !add_dynamic_entry (DT_JMPREL, 0))
	    return false;
	}

      if (relocs)
	{
	  if (!add_dynamic_entry (DT_RELA, 0)
	      || !add_dynamic_entry (DT_RELASZ, 0)
	      || !add_dynamic_entry (DT_RELAENT, RELOC_SIZE (htab)))
	    return false;

	  /* If any dynamic relocs apply to a read-only section,
	     then we need a DT_TEXTREL entry.  */
	  if ((info->flags & DF_TEXTREL) == 0)
	    elf_link_hash_traverse (&htab->root, kvx_readonly_dynrelocs,
				    info);

	  if ((info->flags & DF_TEXTREL) != 0)
	    {
	      if (!add_dynamic_entry (DT_TEXTREL, 0))
		return false;
	    }
	}
    }
#undef add_dynamic_entry

  return true;
}
4295
4296 static inline void
4297 elf_kvx_update_plt_entry (bfd *output_bfd,
4298 bfd_reloc_code_real_type r_type,
4299 bfd_byte *plt_entry, bfd_vma value)
4300 {
4301 reloc_howto_type *howto = elfNN_kvx_howto_from_bfd_reloc (r_type);
4302 BFD_ASSERT(howto != NULL);
4303 _bfd_kvx_elf_put_addend (output_bfd, plt_entry, r_type, howto, value);
4304 }
4305
/* Fill in the PLTn stub for symbol H: copy the stub template, patch in
   the PC-relative offset to the symbol's .got.plt slot, and emit the
   matching R_KVX_JMP_SLOT relocation into .rela.plt.  */

static void
elfNN_kvx_create_small_pltn_entry (struct elf_link_hash_entry *h,
				   struct elf_kvx_link_hash_table
				   *htab, bfd *output_bfd)
{
  bfd_byte *plt_entry;
  bfd_vma plt_index;
  bfd_vma got_offset;
  bfd_vma gotplt_entry_address;
  bfd_vma plt_entry_address;
  Elf_Internal_Rela rela;
  bfd_byte *loc;
  asection *plt, *gotplt, *relplt;

  plt = htab->root.splt;
  gotplt = htab->root.sgotplt;
  relplt = htab->root.srelplt;

  /* Get the index in the procedure linkage table which
     corresponds to this symbol.  This is the index of this symbol
     in all the symbols for which we are making plt entries.  The
     first entry in the procedure linkage table is reserved.

     Get the offset into the .got table of the entry that
     corresponds to this function.  Each .got entry is GOT_ENTRY_SIZE
     bytes.  The first three are reserved for the dynamic linker.

     For static executables, we don't reserve anything.  */

  /* NOTE(review): PLT was just assigned htab->root.splt above, so this
     condition is always true here; the else arm looks like provision
     for an IPLT-style secondary table — confirm intent.  */
  if (plt == htab->root.splt)
    {
      plt_index = (h->plt.offset - htab->plt_header_size) / htab->plt_entry_size;
      got_offset = (plt_index + 3) * GOT_ENTRY_SIZE;
    }
  else
    {
      plt_index = h->plt.offset / htab->plt_entry_size;
      got_offset = plt_index * GOT_ENTRY_SIZE;
    }

  plt_entry = plt->contents + h->plt.offset;
  plt_entry_address = plt->output_section->vma
    + plt->output_offset + h->plt.offset;
  gotplt_entry_address = gotplt->output_section->vma +
    gotplt->output_offset + got_offset;

  /* Copy in the boiler-plate for the PLTn entry.  */
  memcpy (plt_entry, elfNN_kvx_small_plt_entry, PLT_SMALL_ENTRY_SIZE);

  /* Patch the loading of the GOT entry, relative to the PLT entry
     address.  */

  /* Use 37bits offset for both 32 and 64bits mode.  */
  /* Fill the LO10 of lw $r9 = 0[$r14].  */
  elf_kvx_update_plt_entry(output_bfd, BFD_RELOC_KVX_S37_LO10,
			   plt_entry+4,
			   gotplt_entry_address - plt_entry_address);

  /* Fill the UP27 of lw $r9 = 0[$r14].  */
  elf_kvx_update_plt_entry(output_bfd, BFD_RELOC_KVX_S37_UP27,
			   plt_entry+8,
			   gotplt_entry_address - plt_entry_address);

  /* The dynamic reloc points at the .got.plt slot this stub loads.  */
  rela.r_offset = gotplt_entry_address;

  /* Fill in the entry in the .rela.plt section.  */
  rela.r_info = ELFNN_R_INFO (h->dynindx, R_KVX_JMP_SLOT);
  rela.r_addend = 0;

  /* Compute the relocation entry to used based on PLT index and do
     not adjust reloc_count.  The reloc_count has already been adjusted
     to account for this entry.  */
  loc = relplt->contents + plt_index * RELOC_SIZE (htab);
  bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
}
4382
4383 /* Size sections even though they're not dynamic. We use it to setup
4384 _TLS_MODULE_BASE_, if needed. */
4385
4386 static bool
4387 elfNN_kvx_always_size_sections (bfd *output_bfd,
4388 struct bfd_link_info *info)
4389 {
4390 asection *tls_sec;
4391
4392 if (bfd_link_relocatable (info))
4393 return true;
4394
4395 tls_sec = elf_hash_table (info)->tls_sec;
4396
4397 if (tls_sec)
4398 {
4399 struct elf_link_hash_entry *tlsbase;
4400
4401 tlsbase = elf_link_hash_lookup (elf_hash_table (info),
4402 "_TLS_MODULE_BASE_", true, true, false);
4403
4404 if (tlsbase)
4405 {
4406 struct bfd_link_hash_entry *h = NULL;
4407 const struct elf_backend_data *bed =
4408 get_elf_backend_data (output_bfd);
4409
4410 if (!(_bfd_generic_link_add_one_symbol
4411 (info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL,
4412 tls_sec, 0, NULL, false, bed->collect, &h)))
4413 return false;
4414
4415 tlsbase->type = STT_TLS;
4416 tlsbase = (struct elf_link_hash_entry *) h;
4417 tlsbase->def_regular = 1;
4418 tlsbase->other = STV_HIDDEN;
4419 (*bed->elf_backend_hide_symbol) (info, tlsbase, true);
4420 }
4421 }
4422
4423 return true;
4424 }
4425
/* Finish up dynamic symbol handling.  We set the contents of various
   dynamic sections here: the PLT stub, the GOT slot and its dynamic
   relocation, and any COPY relocation the symbol requires.  */
static bool
elfNN_kvx_finish_dynamic_symbol (bfd *output_bfd,
				 struct bfd_link_info *info,
				 struct elf_link_hash_entry *h,
				 Elf_Internal_Sym *sym)
{
  struct elf_kvx_link_hash_table *htab;
  htab = elf_kvx_hash_table (info);

  if (h->plt.offset != (bfd_vma) - 1)
    {
      asection *plt = NULL, *gotplt = NULL, *relplt = NULL;

      /* This symbol has an entry in the procedure linkage table.  Set
	 it up.  */

      if (htab->root.splt != NULL)
	{
	  plt = htab->root.splt;
	  gotplt = htab->root.sgotplt;
	  relplt = htab->root.srelplt;
	}

      /* This symbol has an entry in the procedure linkage table.  Set
	 it up.  */
      if ((h->dynindx == -1
	   && !((h->forced_local || bfd_link_executable (info))
		&& h->def_regular
		&& h->type == STT_GNU_IFUNC))
	  || plt == NULL
	  || gotplt == NULL
	  || relplt == NULL)
	abort ();

      elfNN_kvx_create_small_pltn_entry (h, htab, output_bfd);
      if (!h->def_regular)
	{
	  /* Mark the symbol as undefined, rather than as defined in
	     the .plt section.  */
	  sym->st_shndx = SHN_UNDEF;
	  /* If the symbol is weak we need to clear the value.
	     Otherwise, the PLT entry would provide a definition for
	     the symbol even if the symbol wasn't defined anywhere,
	     and so the symbol would never be NULL.  Leave the value if
	     there were any relocations where pointer equality matters
	     (this is a clue for the dynamic linker, to make function
	     pointer comparisons work between an application and shared
	     library).  */
	  if (!h->ref_regular_nonweak || !h->pointer_equality_needed)
	    sym->st_value = 0;
	}
    }

  if (h->got.offset != (bfd_vma) - 1
      && elf_kvx_hash_entry (h)->got_type == GOT_NORMAL)
    {
      Elf_Internal_Rela rela;
      bfd_byte *loc;

      /* This symbol has an entry in the global offset table.  Set it
	 up.  */
      if (htab->root.sgot == NULL || htab->root.srelgot == NULL)
	abort ();

      /* The low bit of got.offset is used as a flag elsewhere, so it
	 is masked off to obtain the actual slot offset.  */
      rela.r_offset = (htab->root.sgot->output_section->vma
		       + htab->root.sgot->output_offset
		       + (h->got.offset & ~(bfd_vma) 1));

#ifdef UGLY_DEBUG
      /* NOTE(review): the %x conversions assume int-sized values, but
	 r_offset/vma/got.offset are bfd_vma (64-bit on ELF64) — fix the
	 format specifiers before enabling this debug output.  */
      printf("setting rela at offset 0x%x(0x%x + 0x%x + 0x%x) for %s\n",
	     rela.r_offset,
	     htab->root.sgot->output_section->vma,
	     htab->root.sgot->output_offset,
	     h->got.offset,
	     h->root.root.string);
#endif

      if (bfd_link_pic (info) && SYMBOL_REFERENCES_LOCAL (info, h))
	{
	  if (!h->def_regular)
	    return false;

	  /* in case of PLT related GOT entry, it is not clear who is
	     supposed to set the LSB of GOT entry...
	     kvx_calculate_got_entry_vma() would be a good candidate,
	     but it is not called currently
	     So we are commenting it ATM
	  */
	  // BFD_ASSERT ((h->got.offset & 1) != 0);
	  /* Locally-resolved symbol in a PIC link: a RELATIVE reloc with
	     the symbol's link-time address as addend is sufficient.  */
	  rela.r_info = ELFNN_R_INFO (0, R_KVX_RELATIVE);
	  rela.r_addend = (h->root.u.def.value
			   + h->root.u.def.section->output_section->vma
			   + h->root.u.def.section->output_offset);
	}
      else
	{
	  BFD_ASSERT ((h->got.offset & 1) == 0);
	  /* Zero the slot; the dynamic linker fills it through the
	     GLOB_DAT relocation emitted below.  */
	  bfd_put_NN (output_bfd, (bfd_vma) 0,
		      htab->root.sgot->contents + h->got.offset);
	  rela.r_info = ELFNN_R_INFO (h->dynindx, R_KVX_GLOB_DAT);
	  rela.r_addend = 0;
	}

      /* Append the relocation to .rela.got, bumping its counter.  */
      loc = htab->root.srelgot->contents;
      loc += htab->root.srelgot->reloc_count++ * RELOC_SIZE (htab);
      bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
    }

  if (h->needs_copy)
    {
      Elf_Internal_Rela rela;
      bfd_byte *loc;

      /* This symbol needs a copy reloc.  Set it up.  */

      if (h->dynindx == -1
	  || (h->root.type != bfd_link_hash_defined
	      && h->root.type != bfd_link_hash_defweak)
	  || htab->srelbss == NULL)
	abort ();

      rela.r_offset = (h->root.u.def.value
		       + h->root.u.def.section->output_section->vma
		       + h->root.u.def.section->output_offset);
      rela.r_info = ELFNN_R_INFO (h->dynindx, R_KVX_COPY);
      rela.r_addend = 0;
      loc = htab->srelbss->contents;
      loc += htab->srelbss->reloc_count++ * RELOC_SIZE (htab);
      bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
    }

  /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute.  SYM may
     be NULL for local symbols.  */
  if (sym != NULL
      && (h == elf_hash_table (info)->hdynamic
	  || h == elf_hash_table (info)->hgot))
    sym->st_shndx = SHN_ABS;

  return true;
}
4568
4569 static void
4570 elfNN_kvx_init_small_plt0_entry (bfd *output_bfd ATTRIBUTE_UNUSED,
4571 struct elf_kvx_link_hash_table
4572 *htab)
4573 {
4574 memcpy (htab->root.splt->contents, elfNN_kvx_small_plt0_entry,
4575 PLT_ENTRY_SIZE);
4576 elf_section_data (htab->root.splt->output_section)->this_hdr.sh_entsize =
4577 PLT_ENTRY_SIZE;
4578 }
4579
/* Final pass over the dynamic sections: patch the address/size values
   in .dynamic, write the PLT0 header, and initialize the reserved
   .got.plt and .got header slots.  */
static bool
elfNN_kvx_finish_dynamic_sections (bfd *output_bfd,
				   struct bfd_link_info *info)
{
  struct elf_kvx_link_hash_table *htab;
  bfd *dynobj;
  asection *sdyn;

  htab = elf_kvx_hash_table (info);
  dynobj = htab->root.dynobj;
  sdyn = bfd_get_linker_section (dynobj, ".dynamic");

  if (htab->root.dynamic_sections_created)
    {
      ElfNN_External_Dyn *dyncon, *dynconend;

      if (sdyn == NULL || htab->root.sgot == NULL)
	abort ();

      /* Walk each .dynamic entry and fill in the values that were
	 unknown when the entries were added during sizing.  */
      dyncon = (ElfNN_External_Dyn *) sdyn->contents;
      dynconend = (ElfNN_External_Dyn *) (sdyn->contents + sdyn->size);
      for (; dyncon < dynconend; dyncon++)
	{
	  Elf_Internal_Dyn dyn;
	  asection *s;

	  bfd_elfNN_swap_dyn_in (dynobj, dyncon, &dyn);

	  switch (dyn.d_tag)
	    {
	    default:
	      continue;

	    case DT_PLTGOT:
	      s = htab->root.sgotplt;
	      dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
	      break;

	    case DT_JMPREL:
	      /* NOTE(review): only the output section's vma is used;
		 most ports add srelplt->output_offset too.  Verify that
		 .rela.plt always begins its output section here.  */
	      dyn.d_un.d_ptr = htab->root.srelplt->output_section->vma;
	      break;

	    case DT_PLTRELSZ:
	      s = htab->root.srelplt;
	      dyn.d_un.d_val = s->size;
	      break;

	    case DT_RELASZ:
	      /* The procedure linkage table relocs (DT_JMPREL) should
		 not be included in the overall relocs (DT_RELA).
		 Therefore, we override the DT_RELASZ entry here to
		 make it not include the JMPREL relocs.  Since the
		 linker script arranges for .rela.plt to follow all
		 other relocation sections, we don't have to worry
		 about changing the DT_RELA entry.  */
	      if (htab->root.srelplt != NULL)
		{
		  s = htab->root.srelplt;
		  dyn.d_un.d_val -= s->size;
		}
	      break;
	    }

	  bfd_elfNN_swap_dyn_out (output_bfd, &dyn, dyncon);
	}

    }

  /* Fill in the special first entry in the procedure linkage table.  */
  if (htab->root.splt && htab->root.splt->size > 0)
    {
      elfNN_kvx_init_small_plt0_entry (output_bfd, htab);

      elf_section_data (htab->root.splt->output_section)->
	this_hdr.sh_entsize = htab->plt_entry_size;
    }

  if (htab->root.sgotplt)
    {
      if (bfd_is_abs_section (htab->root.sgotplt->output_section))
	{
	  (*_bfd_error_handler)
	    (_("discarded output section: `%pA'"), htab->root.sgotplt);
	  return false;
	}

      /* Fill in the first three entries in the global offset table.  */
      if (htab->root.sgotplt->size > 0)
	{
	  bfd_put_NN (output_bfd, (bfd_vma) 0, htab->root.sgotplt->contents);

	  /* Write GOT[1] and GOT[2], needed for the dynamic linker.  */
	  bfd_put_NN (output_bfd,
		      (bfd_vma) 0,
		      htab->root.sgotplt->contents + GOT_ENTRY_SIZE);
	  bfd_put_NN (output_bfd,
		      (bfd_vma) 0,
		      htab->root.sgotplt->contents + GOT_ENTRY_SIZE * 2);
	}

      if (htab->root.sgot)
	{
	  if (htab->root.sgot->size > 0)
	    {
	      /* GOT[0] holds the address of the .dynamic section (or 0
		 when there is none).  */
	      bfd_vma addr =
		sdyn ? sdyn->output_section->vma + sdyn->output_offset : 0;
	      bfd_put_NN (output_bfd, addr, htab->root.sgot->contents);
	    }
	}

      elf_section_data (htab->root.sgotplt->output_section)->
	this_hdr.sh_entsize = GOT_ENTRY_SIZE;
    }

  if (htab->root.sgot && htab->root.sgot->size > 0)
    elf_section_data (htab->root.sgot->output_section)->this_hdr.sh_entsize
      = GOT_ENTRY_SIZE;

  return true;
}
4700
4701 /* Return address for Ith PLT stub in section PLT, for relocation REL
4702 or (bfd_vma) -1 if it should not be included. */
4703
4704 static bfd_vma
4705 elfNN_kvx_plt_sym_val (bfd_vma i, const asection *plt,
4706 const arelent *rel ATTRIBUTE_UNUSED)
4707 {
4708 return plt->vma + PLT_ENTRY_SIZE + i * PLT_SMALL_ENTRY_SIZE;
4709 }
4710
/* Target identification and page-size parameters consumed by
   elfNN-target.h.  */
#define ELF_ARCH bfd_arch_kvx
#define ELF_MACHINE_CODE EM_KVX
#define ELF_MAXPAGESIZE 0x10000
#define ELF_MINPAGESIZE 0x1000
#define ELF_COMMONPAGESIZE 0x1000

/* Generic BFD vector entry points implemented by this port.  */
#define bfd_elfNN_bfd_link_hash_table_create \
  elfNN_kvx_link_hash_table_create

#define bfd_elfNN_bfd_merge_private_bfd_data \
  elfNN_kvx_merge_private_bfd_data

#define bfd_elfNN_bfd_print_private_bfd_data \
  elfNN_kvx_print_private_bfd_data

#define bfd_elfNN_bfd_reloc_type_lookup \
  elfNN_kvx_reloc_type_lookup

#define bfd_elfNN_bfd_reloc_name_lookup \
  elfNN_kvx_reloc_name_lookup

#define bfd_elfNN_bfd_set_private_flags \
  elfNN_kvx_set_private_flags

#define bfd_elfNN_mkobject \
  elfNN_kvx_mkobject

#define bfd_elfNN_new_section_hook \
  elfNN_kvx_new_section_hook

/* ELF backend hooks.  */
#define elf_backend_adjust_dynamic_symbol \
  elfNN_kvx_adjust_dynamic_symbol

#define elf_backend_always_size_sections \
  elfNN_kvx_always_size_sections

#define elf_backend_check_relocs \
  elfNN_kvx_check_relocs

#define elf_backend_copy_indirect_symbol \
  elfNN_kvx_copy_indirect_symbol

/* Create .dynbss, and .rela.bss sections in DYNOBJ, and set up shortcuts
   to them in our hash.  */
#define elf_backend_create_dynamic_sections \
  elfNN_kvx_create_dynamic_sections

#define elf_backend_init_index_section \
  _bfd_elf_init_2_index_sections

#define elf_backend_finish_dynamic_sections \
  elfNN_kvx_finish_dynamic_sections

#define elf_backend_finish_dynamic_symbol \
  elfNN_kvx_finish_dynamic_symbol

#define elf_backend_object_p \
  elfNN_kvx_object_p

#define elf_backend_output_arch_local_syms \
  elfNN_kvx_output_arch_local_syms

#define elf_backend_plt_sym_val \
  elfNN_kvx_plt_sym_val

#define elf_backend_init_file_header \
  elfNN_kvx_init_file_header

#define elf_backend_init_process_headers \
  elfNN_kvx_init_process_headers

#define elf_backend_relocate_section \
  elfNN_kvx_relocate_section

#define elf_backend_reloc_type_class \
  elfNN_kvx_reloc_type_class

#define elf_backend_size_dynamic_sections \
  elfNN_kvx_size_dynamic_sections

/* Backend capability flags: RELA-only relocations, read-only PLT,
   separate .got.plt, no PLT symbols in the output symtab.  */
#define elf_backend_can_refcount 1
#define elf_backend_can_gc_sections 1
#define elf_backend_plt_readonly 1
#define elf_backend_want_got_plt 1
#define elf_backend_want_plt_sym 0
#define elf_backend_may_use_rel_p 0
#define elf_backend_may_use_rela_p 1
#define elf_backend_default_use_rela_p 1
#define elf_backend_rela_normal 1
/* Three reserved entries at the start of .got.plt (see the GOT setup in
   elfNN_kvx_finish_dynamic_sections).  */
#define elf_backend_got_header_size (GOT_ENTRY_SIZE * 3)
#define elf_backend_default_execstack 0
#define elf_backend_extern_protected_data 1
#define elf_backend_hash_symbol elf_kvx_hash_symbol

#include "elfNN-target.h"