/* Source: glibc elf/dynamic-link.h, as mirrored at git.ipfire.org,
   at the commit "Fix potential problem with skipping relocations".  */
/* Inline functions for dynamic linking.
   Copyright (C) 1995-2005,2006,2008,2011 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */

/* This macro is used as a callback from elf_machine_rel{a,} when a
   static TLS reloc is about to be performed.  Since (in dl-load.c) we
   permit dynamic loading of objects that might use such relocs, we
   have to check whether each use is actually doable.  If the object
   whose TLS segment the reference resolves to was allocated space in
   the static TLS block at startup, then it's ok.  Otherwise, we make
   an attempt to allocate it in surplus space on the fly.  If that
   can't be done, we fall back to the error that DF_STATIC_TLS is
   intended to produce.  */
/* NOTE(review): the FORCED_DYNAMIC_TLS_OFFSET case presumably marks a
   map whose TLS was already forced to dynamic allocation, so the call
   below is expected to diagnose the failure itself -- confirm against
   _dl_allocate_static_tls in dl-tls.c.  */
#define CHECK_STATIC_TLS(map, sym_map)					\
    do {								\
      if (__builtin_expect ((sym_map)->l_tls_offset == NO_TLS_OFFSET	\
			    || ((sym_map)->l_tls_offset			\
				== FORCED_DYNAMIC_TLS_OFFSET), 0))	\
	_dl_allocate_static_tls (sym_map);				\
    } while (0)
36
/* Non-fatal variant of CHECK_STATIC_TLS: evaluates to nonzero if the
   referenced object's TLS is (or can on the fly be made) part of the
   static TLS block, and to zero otherwise.  A map already forced to
   dynamic TLS fails immediately; a map with no offset assigned yet
   triggers an allocation attempt via _dl_try_allocate_static_tls.  */
#define TRY_STATIC_TLS(map, sym_map)					\
  (__builtin_expect ((sym_map)->l_tls_offset				\
		     != FORCED_DYNAMIC_TLS_OFFSET, 1)			\
   && (__builtin_expect ((sym_map)->l_tls_offset != NO_TLS_OFFSET, 1)	\
       || _dl_try_allocate_static_tls (sym_map) == 0))

/* Try to allocate static TLS space for MAP; returns 0 on success.  */
int internal_function _dl_try_allocate_static_tls (struct link_map *map);
#include <elf.h>
#include <assert.h>

#ifdef RESOLVE_MAP
/* Prototypes for the machine-specific relocation handlers that
   <dl-machine.h> must define when RESOLVE_MAP is in effect.

   We pass reloc_addr as a pointer to void, as opposed to a pointer to
   ElfW(Addr), because not all architectures can assume that the
   relocated address is properly aligned, whereas the compiler is
   entitled to assume that a pointer to a type is properly aligned for
   the type.  Even if we cast the pointer back to some other type with
   less strict alignment requirements, the compiler might still
   remember that the pointer was originally more aligned, thereby
   optimizing away alignment tests or using word instructions for
   copying memory, breaking the very code written to handle the
   unaligned cases.  */
# if ! ELF_MACHINE_NO_REL
/* REL-format (implicit addend) relocation handlers.  */
auto inline void __attribute__((always_inline))
elf_machine_rel (struct link_map *map, const ElfW(Rel) *reloc,
		 const ElfW(Sym) *sym, const struct r_found_version *version,
		 void *const reloc_addr, int skip_ifunc);
auto inline void __attribute__((always_inline))
elf_machine_rel_relative (ElfW(Addr) l_addr, const ElfW(Rel) *reloc,
			  void *const reloc_addr);
# endif
# if ! ELF_MACHINE_NO_RELA
/* RELA-format (explicit addend) relocation handlers.  */
auto inline void __attribute__((always_inline))
elf_machine_rela (struct link_map *map, const ElfW(Rela) *reloc,
		  const ElfW(Sym) *sym, const struct r_found_version *version,
		  void *const reloc_addr, int skip_ifunc);
auto inline void __attribute__((always_inline))
elf_machine_rela_relative (ElfW(Addr) l_addr, const ElfW(Rela) *reloc,
			   void *const reloc_addr);
# endif
/* Lazy (PLT) relocation handler; takes Rel or Rela entries depending
   on which format the machine's PLT relocations use.  */
# if ELF_MACHINE_NO_RELA || defined ELF_MACHINE_PLT_REL
auto inline void __attribute__((always_inline))
elf_machine_lazy_rel (struct link_map *map,
		      ElfW(Addr) l_addr, const ElfW(Rel) *reloc,
		      int skip_ifunc);
# else
auto inline void __attribute__((always_inline))
elf_machine_lazy_rel (struct link_map *map,
		      ElfW(Addr) l_addr, const ElfW(Rela) *reloc,
		      int skip_ifunc);
# endif
#endif
89
#include <dl-machine.h>

#ifndef VERSYMIDX
/* Index into l_info for version tag SYM (DT_VERSYM etc.): version
   tags are stored after the standard and per-processor entries.  */
# define VERSYMIDX(sym)	(DT_NUM + DT_THISPROCNUM + DT_VERSIONTAGIDX (sym))
#endif

96
/* Read the dynamic section at DYN and fill in INFO with indices DT_*.
   L is the link map being set up; L->l_ld points at its dynamic
   section.  TEMP, when non-null, receives relocated copies of the
   d_ptr entries so a read-only dynamic section is never written (see
   ADJUST_DYN_INFO below); it must hold DL_RO_DYN_TEMP_CNT entries.  */
#ifndef RESOLVE_MAP
static
#else
auto
#endif
inline void __attribute__ ((unused, always_inline))
elf_get_dynamic_info (struct link_map *l, ElfW(Dyn) *temp)
{
  ElfW(Dyn) *dyn = l->l_ld;
  ElfW(Dyn) **info;
  /* Unsigned counterpart of d_tag so each range check below rejects
     out-of-range (including negative) tags with one comparison.  */
#if __ELF_NATIVE_CLASS == 32
  typedef Elf32_Word d_tag_utype;
#elif __ELF_NATIVE_CLASS == 64
  typedef Elf64_Xword d_tag_utype;
#endif

#ifndef RTLD_BOOTSTRAP
  /* A map without a dynamic section has nothing to extract.  */
  if (dyn == NULL)
    return;
#endif

  info = l->l_info;

  /* Partition the tags into l_info's layout: standard tags, then
     per-processor, version, extra, value, and address tags.  Tags in
     none of these ranges are ignored.  */
  while (dyn->d_tag != DT_NULL)
    {
      if ((d_tag_utype) dyn->d_tag < DT_NUM)
	info[dyn->d_tag] = dyn;
      else if (dyn->d_tag >= DT_LOPROC &&
	       dyn->d_tag < DT_LOPROC + DT_THISPROCNUM)
	info[dyn->d_tag - DT_LOPROC + DT_NUM] = dyn;
      else if ((d_tag_utype) DT_VERSIONTAGIDX (dyn->d_tag) < DT_VERSIONTAGNUM)
	info[VERSYMIDX (dyn->d_tag)] = dyn;
      else if ((d_tag_utype) DT_EXTRATAGIDX (dyn->d_tag) < DT_EXTRANUM)
	info[DT_EXTRATAGIDX (dyn->d_tag) + DT_NUM + DT_THISPROCNUM
	     + DT_VERSIONTAGNUM] = dyn;
      else if ((d_tag_utype) DT_VALTAGIDX (dyn->d_tag) < DT_VALNUM)
	info[DT_VALTAGIDX (dyn->d_tag) + DT_NUM + DT_THISPROCNUM
	     + DT_VERSIONTAGNUM + DT_EXTRANUM] = dyn;
      else if ((d_tag_utype) DT_ADDRTAGIDX (dyn->d_tag) < DT_ADDRNUM)
	info[DT_ADDRTAGIDX (dyn->d_tag) + DT_NUM + DT_THISPROCNUM
	     + DT_VERSIONTAGNUM + DT_EXTRANUM + DT_VALNUM] = dyn;
      ++dyn;
    }

/* Number of TEMP entries a caller must supply; must cover every
   ADJUST_DYN_INFO invocation below (currently 9 possible, but the REL
   and RELA ones are never both compiled in -- verify if the list is
   ever extended, the assert below only fires at runtime).  */
#define DL_RO_DYN_TEMP_CNT	8

#ifndef DL_RO_DYN_SECTION
  /* Don't adjust .dynamic unnecessarily.  */
  if (l->l_addr != 0)
    {
      ElfW(Addr) l_addr = l->l_addr;
      int cnt = 0;

      /* Turn the link-time d_ptr value of TAG into a runtime address
	 by adding the load bias.  With TEMP the original entry is left
	 untouched and l_info is redirected at an adjusted copy.  */
# define ADJUST_DYN_INFO(tag) \
      do								      \
	if (info[tag] != NULL)						      \
	  {								      \
	    if (temp)							      \
	      {								      \
		temp[cnt].d_tag = info[tag]->d_tag;			      \
		temp[cnt].d_un.d_ptr = info[tag]->d_un.d_ptr + l_addr;	      \
		info[tag] = temp + cnt++;				      \
	      }								      \
	    else							      \
	      info[tag]->d_un.d_ptr += l_addr;				      \
	  }								      \
      while (0)

      ADJUST_DYN_INFO (DT_HASH);
      ADJUST_DYN_INFO (DT_PLTGOT);
      ADJUST_DYN_INFO (DT_STRTAB);
      ADJUST_DYN_INFO (DT_SYMTAB);
# if ! ELF_MACHINE_NO_RELA
      ADJUST_DYN_INFO (DT_RELA);
# endif
# if ! ELF_MACHINE_NO_REL
      ADJUST_DYN_INFO (DT_REL);
# endif
      ADJUST_DYN_INFO (DT_JMPREL);
      ADJUST_DYN_INFO (VERSYMIDX (DT_VERSYM));
      ADJUST_DYN_INFO (DT_ADDRTAGIDX (DT_GNU_HASH) + DT_NUM + DT_THISPROCNUM
		       + DT_VERSIONTAGNUM + DT_EXTRANUM + DT_VALNUM);
# undef ADJUST_DYN_INFO
      assert (cnt <= DL_RO_DYN_TEMP_CNT);
    }
#endif
  /* Sanity-check that the PLT relocation format matches what this
     machine supports.  */
  if (info[DT_PLTREL] != NULL)
    {
#if ELF_MACHINE_NO_RELA
      assert (info[DT_PLTREL]->d_un.d_val == DT_REL);
#elif ELF_MACHINE_NO_REL
      assert (info[DT_PLTREL]->d_un.d_val == DT_RELA);
#else
      assert (info[DT_PLTREL]->d_un.d_val == DT_REL
	      || info[DT_PLTREL]->d_un.d_val == DT_RELA);
#endif
    }
#if ! ELF_MACHINE_NO_RELA
  if (info[DT_RELA] != NULL)
    assert (info[DT_RELAENT]->d_un.d_val == sizeof (ElfW(Rela)));
# endif
# if ! ELF_MACHINE_NO_REL
  if (info[DT_REL] != NULL)
    assert (info[DT_RELENT]->d_un.d_val == sizeof (ElfW(Rel)));
#endif
#ifdef RTLD_BOOTSTRAP
  /* Only the bind now flags are allowed.  */
  assert (info[VERSYMIDX (DT_FLAGS_1)] == NULL
	  || (info[VERSYMIDX (DT_FLAGS_1)]->d_un.d_val & ~DF_1_NOW) == 0);
  assert (info[DT_FLAGS] == NULL
	  || (info[DT_FLAGS]->d_un.d_val & ~DF_BIND_NOW) == 0);
  /* Flags must not be set for ld.so.  */
  assert (info[DT_RUNPATH] == NULL);
  assert (info[DT_RPATH] == NULL);
#else
  if (info[DT_FLAGS] != NULL)
    {
      /* Flags are used.  Translate to the old form where available.
	 Since these l_info entries are only tested for NULL pointers it
	 is ok if they point to the DT_FLAGS entry.  */
      l->l_flags = info[DT_FLAGS]->d_un.d_val;

      if (l->l_flags & DF_SYMBOLIC)
	info[DT_SYMBOLIC] = info[DT_FLAGS];
      if (l->l_flags & DF_TEXTREL)
	info[DT_TEXTREL] = info[DT_FLAGS];
      if (l->l_flags & DF_BIND_NOW)
	info[DT_BIND_NOW] = info[DT_FLAGS];
    }
  if (info[VERSYMIDX (DT_FLAGS_1)] != NULL)
    {
      l->l_flags_1 = info[VERSYMIDX (DT_FLAGS_1)]->d_un.d_val;

      if (l->l_flags_1 & DF_1_NOW)
	info[DT_BIND_NOW] = info[VERSYMIDX (DT_FLAGS_1)];
    }
  if (info[DT_RUNPATH] != NULL)
    /* If both RUNPATH and RPATH are given, the latter is ignored.  */
    info[DT_RPATH] = NULL;
#endif
}
239
#ifdef RESOLVE_MAP

/* Nonzero while relocating ld.so itself during bootstrap (see the
   RTLD_BOOTSTRAP-only assertions in elf_get_dynamic_info above); the
   relocation-driving macros below use it to take simplified paths.  */
# ifdef RTLD_BOOTSTRAP
#  define ELF_DURING_STARTUP (1)
# else
#  define ELF_DURING_STARTUP (0)
# endif

/* Get the definitions of `elf_dynamic_do_rel' and `elf_dynamic_do_rela'.
   These functions are almost identical, so we use cpp magic to avoid
   duplicating their code.  It cannot be done in a more general function
   because we must be able to completely inline.  */

/* On some machines, notably SPARC, DT_REL* includes DT_JMPREL in its
   range.  Note that according to the ELF spec, this is completely legal!
   But conditionally define things so that on machines we know this will
   not happen we do something more optimal.  */

# ifdef ELF_MACHINE_PLTREL_OVERLAP
/* The DT_JMPREL (PLT) relocations lie inside the DT_REL* range, so the
   work is split into three subranges: the non-PLT relocations before
   DT_JMPREL, the PLT relocations themselves (the only lazy range), and
   any non-PLT relocations after them.  TEST_REL selects whether
   DT_PLTREL must be consulted to confirm the PLT uses this RELOC
   format.  Ranges left with size 0 make the processing loop a no-op
   for that entry.  */
# define _ELF_DYNAMIC_DO_RELOC(RELOC, reloc, map, do_lazy, skip_ifunc, test_rel) \
  do {									      \
    struct { ElfW(Addr) start, size; ElfW(Word) nrelative; int lazy; }	      \
      ranges[3];							      \
    int ranges_index;							      \
									      \
    ranges[0].lazy = ranges[2].lazy = 0;				      \
    ranges[1].lazy = 1;							      \
    ranges[0].size = ranges[1].size = ranges[2].size = 0;		      \
    ranges[0].nrelative = ranges[1].nrelative = ranges[2].nrelative = 0;      \
									      \
    if ((map)->l_info[DT_##RELOC])					      \
      {									      \
	ranges[0].start = D_PTR ((map), l_info[DT_##RELOC]);		      \
	ranges[0].size = (map)->l_info[DT_##RELOC##SZ]->d_un.d_val;	      \
	/* DT_RELCOUNT/DT_RELACOUNT gives the number of leading	      \
	   R_*_RELATIVE relocations; clamp it to the range size.  */	      \
	if (map->l_info[VERSYMIDX (DT_##RELOC##COUNT)] != NULL)		      \
	  ranges[0].nrelative						      \
	    = MIN (map->l_info[VERSYMIDX (DT_##RELOC##COUNT)]->d_un.d_val,    \
		   ranges[0].size / sizeof (ElfW(reloc)));		      \
      }									      \
									      \
    if ((do_lazy)							      \
	&& (map)->l_info[DT_PLTREL]					      \
	&& (!test_rel || (map)->l_info[DT_PLTREL]->d_un.d_val == DT_##RELOC)) \
      {									      \
	/* Carve the PLT subrange out of the middle of range 0.  */	      \
	ranges[1].start = D_PTR ((map), l_info[DT_JMPREL]);		      \
	ranges[1].size = (map)->l_info[DT_PLTRELSZ]->d_un.d_val;	      \
	ranges[2].start = ranges[1].start + ranges[1].size;		      \
	ranges[2].size = ranges[0].start + ranges[0].size - ranges[2].start;  \
	ranges[0].size = ranges[1].start - ranges[0].start;		      \
      }									      \
									      \
    for (ranges_index = 0; ranges_index < 3; ++ranges_index)		      \
      elf_dynamic_do_##reloc ((map),					      \
			      ranges[ranges_index].start,		      \
			      ranges[ranges_index].size,		      \
			      ranges[ranges_index].nrelative,		      \
			      ranges[ranges_index].lazy,		      \
			      skip_ifunc);				      \
  } while (0)
# else
/* PLT relocations do not overlap the DT_REL* range: at most two
   subranges are needed, and when the PLT relocations immediately
   follow the others (and no lazy processing is wanted) the two are
   merged into one non-lazy pass.  TEST_REL selects whether DT_PLTREL
   must be consulted to confirm the PLT uses this RELOC format.  */
# define _ELF_DYNAMIC_DO_RELOC(RELOC, reloc, map, do_lazy, skip_ifunc, test_rel) \
  do {									      \
    struct { ElfW(Addr) start, size; ElfW(Word) nrelative; int lazy; }	      \
      ranges[2] = { { 0, 0, 0, 0 }, { 0, 0, 0, 0 } };			      \
									      \
    if ((map)->l_info[DT_##RELOC])					      \
      {									      \
	ranges[0].start = D_PTR ((map), l_info[DT_##RELOC]);		      \
	ranges[0].size = (map)->l_info[DT_##RELOC##SZ]->d_un.d_val;	      \
	/* DT_RELCOUNT/DT_RELACOUNT gives the number of leading	      \
	   R_*_RELATIVE relocations; clamp it to the range size.  */	      \
	if (map->l_info[VERSYMIDX (DT_##RELOC##COUNT)] != NULL)		      \
	  ranges[0].nrelative						      \
	    = MIN (map->l_info[VERSYMIDX (DT_##RELOC##COUNT)]->d_un.d_val,    \
		   ranges[0].size / sizeof (ElfW(reloc)));		      \
      }									      \
    if ((map)->l_info[DT_PLTREL]					      \
	&& (!test_rel || (map)->l_info[DT_PLTREL]->d_un.d_val == DT_##RELOC)) \
      {									      \
	ElfW(Addr) start = D_PTR ((map), l_info[DT_JMPREL]);		      \
									      \
	if (! ELF_DURING_STARTUP					      \
	    && ((do_lazy)						      \
		/* This test does not only detect whether the relocation      \
		   sections are in the right order, it also checks whether    \
		   there is a DT_REL/DT_RELA section.  */		      \
		|| __builtin_expect (ranges[0].start + ranges[0].size	      \
				     != start, 0)))			      \
	  {								      \
	    ranges[1].start = start;					      \
	    ranges[1].size = (map)->l_info[DT_PLTRELSZ]->d_un.d_val;	      \
	    ranges[1].lazy = (do_lazy);					      \
	  }								      \
	else								      \
	  {								      \
	    /* Combine processing the sections.  */			      \
	    assert (ranges[0].start + ranges[0].size == start);		      \
	    ranges[0].size += (map)->l_info[DT_PLTRELSZ]->d_un.d_val;	      \
	  }								      \
      }									      \
									      \
    if (ELF_DURING_STARTUP)						      \
      /* At startup only the first (merged) range exists.  */		      \
      elf_dynamic_do_##reloc ((map), ranges[0].start, ranges[0].size,	      \
			      ranges[0].nrelative, 0, skip_ifunc);	      \
    else								      \
      {									      \
	int ranges_index;						      \
	for (ranges_index = 0; ranges_index < 2; ++ranges_index)	      \
	  elf_dynamic_do_##reloc ((map),				      \
				  ranges[ranges_index].start,		      \
				  ranges[ranges_index].size,		      \
				  ranges[ranges_index].nrelative,	      \
				  ranges[ranges_index].lazy,		      \
				  skip_ifunc);				      \
      }									      \
  } while (0)
# endif
355
/* Nonzero when this machine supports both REL and RELA relocations, in
   which case DT_PLTREL must be tested to learn which format DT_JMPREL
   uses (the test_rel argument of _ELF_DYNAMIC_DO_RELOC).  */
# if ELF_MACHINE_NO_REL || ELF_MACHINE_NO_RELA
#  define _ELF_CHECK_REL 0
# else
#  define _ELF_CHECK_REL 1
# endif

/* Instantiate elf_dynamic_do_rel from do-rel.h and define the driver
   macro for REL-format relocations; a no-op on RELA-only machines.  */
# if ! ELF_MACHINE_NO_REL
#  include "do-rel.h"
#  define ELF_DYNAMIC_DO_REL(map, lazy, skip_ifunc) \
  _ELF_DYNAMIC_DO_RELOC (REL, Rel, map, lazy, skip_ifunc, _ELF_CHECK_REL)
# else
#  define ELF_DYNAMIC_DO_REL(map, lazy, skip_ifunc) /* Nothing to do.  */
# endif

/* Likewise for RELA-format relocations; a no-op on REL-only machines.  */
# if ! ELF_MACHINE_NO_RELA
#  define DO_RELA
#  include "do-rel.h"
#  define ELF_DYNAMIC_DO_RELA(map, lazy, skip_ifunc) \
  _ELF_DYNAMIC_DO_RELOC (RELA, Rela, map, lazy, skip_ifunc, _ELF_CHECK_REL)
# else
#  define ELF_DYNAMIC_DO_RELA(map, lazy, skip_ifunc) /* Nothing to do.  */
# endif
378
/* Perform all dynamic relocations for MAP: first let the machine code
   set up the runtime (PLT) machinery -- which may veto lazy binding,
   hence EDR_LAZY -- then process the REL and RELA relocation ranges.
   NOTE(review): skip_ifunc is passed straight through to the
   machine-specific handlers; presumably it suppresses IFUNC resolver
   calls -- confirm in dl-machine.h/do-rel.h.

   This can't just be an inline function because GCC is too dumb
   to inline functions containing inlines themselves.  */
# define ELF_DYNAMIC_RELOCATE(map, lazy, consider_profile, skip_ifunc) \
  do {									      \
    int edr_lazy = elf_machine_runtime_setup ((map), (lazy),		      \
					      (consider_profile));	      \
    ELF_DYNAMIC_DO_REL ((map), edr_lazy, skip_ifunc);			      \
    ELF_DYNAMIC_DO_RELA ((map), edr_lazy, skip_ifunc);			      \
  } while (0)

#endif