/* Relocate a shared object and resolve its references to other loaded objects.
   Copyright (C) 1995-2004, 2005, 2006 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */
d66e34cd | 19 | |
#include <errno.h>
#include <libintl.h>
#include <stdlib.h>
#include <unistd.h>
#include <ldsodefs.h>
#include <sys/mman.h>
#include <sys/param.h>
#include <sys/types.h>

#include "dynamic-link.h"

/* Statistics function: count relocations satisfied from the per-map
   symbol lookup cache (used by the RESOLVE_MAP callback below).  The
   counter only exists in the dynamic linker proper; in static builds
   this is a no-op.  */
#ifdef SHARED
# define bump_num_cache_relocations() ++GL(dl_num_cache_relocations)
#else
# define bump_num_cache_relocations() ((void) 0)
#endif

3996f34b | 37 | |
/* We are trying to perform a static TLS relocation in MAP, but it was
   dynamically loaded.  This can only work if there is enough surplus in
   the static TLS area already allocated for each running thread.  If this
   object's TLS segment is too big to fit, we fail.  If it fits,
   we set MAP->l_tls_offset and return.
   This function intentionally does not return any value but signals error
   directly, as static TLS should be rare and code handling it should
   not be inlined as much as possible.  */
void
internal_function __attribute_noinline__
_dl_allocate_static_tls (struct link_map *map)
{
  /* If the alignment requirements are too high fail.  NB: the 'fail:'
     label inside this block is also the target of the gotos below, so
     both failure modes report the same error.  */
  if (map->l_tls_align > GL(dl_tls_static_align))
    {
    fail:
      _dl_signal_error (0, map->l_name, NULL, N_("\
cannot allocate memory in static TLS block"));
    }

#if TLS_TCB_AT_TP
  /* TCB sits at the thread pointer; TLS blocks grow downwards from it.  */
  size_t freebytes;
  size_t n;
  size_t blsize;

  freebytes = GL(dl_tls_static_size) - GL(dl_tls_static_used) - TLS_TCB_SIZE;

  blsize = map->l_tls_blocksize + map->l_tls_firstbyte_offset;
  if (freebytes < blsize)
    goto fail;

  /* Round the remaining space down to a multiple of the required
     alignment so the assigned offset is properly aligned.  */
  n = (freebytes - blsize) / map->l_tls_align;

  size_t offset = GL(dl_tls_static_used) + (freebytes - n * map->l_tls_align
					    - map->l_tls_firstbyte_offset);

  map->l_tls_offset = GL(dl_tls_static_used) = offset;
#elif TLS_DTV_AT_TP
  /* DTV sits at the thread pointer; TLS blocks grow upwards after it.  */
  size_t used;
  size_t check;

  size_t offset = roundup (GL(dl_tls_static_used), map->l_tls_align);
  used = offset + map->l_tls_blocksize;
  check = used;
  /* dl_tls_static_used includes the TCB at the beginning.  */

  if (check > GL(dl_tls_static_size))
    goto fail;

  map->l_tls_offset = offset;
  GL(dl_tls_static_used) = used;
#else
# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
#endif

  /* If the object is not yet relocated we cannot initialize the
     static TLS region.  Delay it.  */
  if (map->l_real->l_relocated)
    {
#ifdef SHARED
      if (__builtin_expect (THREAD_DTV()[0].counter != GL(dl_tls_generation),
			    0))
	/* Update the slot information data for at least the generation of
	   the DSO we are allocating data for.  */
	(void) _dl_update_slotinfo (map->l_tls_modid);
#endif

      GL(dl_init_static_tls) (map);
    }
  else
    map->l_need_tls_init = 1;
}
110 | ||
111 | /* Initialize static TLS area and DTV for current (only) thread. | |
112 | libpthread implementations should provide their own hook | |
113 | to handle all threads. */ | |
114 | void | |
115 | _dl_nothread_init_static_tls (struct link_map *map) | |
116 | { | |
11bf311e | 117 | #if TLS_TCB_AT_TP |
9722e6f3 | 118 | void *dest = (char *) THREAD_SELF - map->l_tls_offset; |
11bf311e | 119 | #elif TLS_DTV_AT_TP |
9722e6f3 | 120 | void *dest = (char *) THREAD_SELF + map->l_tls_offset + TLS_PRE_TCB_SIZE; |
11bf311e UD |
121 | #else |
122 | # error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined" | |
123 | #endif | |
9722e6f3 UD |
124 | |
125 | /* Fill in the DTV slot so that a later LD/GD access will find it. */ | |
af272d4f | 126 | dtv_t *dtv = THREAD_DTV (); |
e13e33e2 | 127 | assert (map->l_tls_modid <= dtv[-1].counter); |
af272d4f UD |
128 | dtv[map->l_tls_modid].pointer.val = dest; |
129 | dtv[map->l_tls_modid].pointer.is_static = true; | |
9722e6f3 UD |
130 | |
131 | /* Initialize the memory. */ | |
132 | memset (__mempcpy (dest, map->l_tls_initimage, map->l_tls_initimage_size), | |
133 | '\0', map->l_tls_blocksize - map->l_tls_initimage_size); | |
c56baa87 | 134 | } |
c56baa87 RM |
135 | |
136 | ||
/* Perform all relocations for the object L, resolving symbol references
   against the search scopes in SCOPE.  If LAZY is nonzero, PLT
   relocations may be deferred until first call.  CONSIDER_PROFILING
   selects the profiling/auditing code path, which forces eager binding
   and allocates L->l_reloc_result.  Errors do not return; they are
   reported through _dl_signal_error or _dl_fatal_printf.  */
void
_dl_relocate_object (struct link_map *l, struct r_scope_elem *scope[],
		     int lazy, int consider_profiling)
{
  /* Stack-allocated list of read-only PT_LOAD segments temporarily made
     writable for DT_TEXTREL processing; protections restored below.  */
  struct textrels
  {
    caddr_t start;
    size_t len;
    int prot;			/* Original protection to restore.  */
    struct textrels *next;
  } *textrels = NULL;
  /* Initialize it to make the compiler happy.  */
  const char *errstring = NULL;

#ifdef SHARED
  /* If we are auditing, install the same handlers we need for profiling.  */
  consider_profiling |= GLRO(dl_audit) != NULL;
#elif defined PROF
  /* Never use dynamic linker profiling for gprof profiling code.  */
# define consider_profiling 0
#endif

  if (l->l_relocated)
    return;

  /* If DT_BIND_NOW is set relocate all references in this object.  We
     do not do this if we are profiling, of course.  */
  // XXX Correct for auditing?
  if (!consider_profiling
      && __builtin_expect (l->l_info[DT_BIND_NOW] != NULL, 0))
    lazy = 0;

  if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_RELOC, 0))
    _dl_debug_printf ("\nrelocation processing: %s%s\n",
		      l->l_name[0] ? l->l_name : rtld_progname,
		      lazy ? " (lazy)" : "");

  /* DT_TEXTREL is now in level 2 and might phase out at some time.
     But we rewrite the DT_FLAGS entry to a DT_TEXTREL entry to make
     testing easier and therefore it will be available at all time.  */
  if (__builtin_expect (l->l_info[DT_TEXTREL] != NULL, 0))
    {
      /* Bletch.  We must make read-only segments writable
	 long enough to relocate them.  */
      const ElfW(Phdr) *ph;
      for (ph = l->l_phdr; ph < &l->l_phdr[l->l_phnum]; ++ph)
	if (ph->p_type == PT_LOAD && (ph->p_flags & PF_W) == 0)
	  {
	    struct textrels *newp;

	    newp = (struct textrels *) alloca (sizeof (*newp));
	    /* Round the segment out to full page boundaries.  */
	    newp->len = (((ph->p_vaddr + ph->p_memsz + GLRO(dl_pagesize) - 1)
			  & ~(GLRO(dl_pagesize) - 1))
			 - (ph->p_vaddr & ~(GLRO(dl_pagesize) - 1)));
	    newp->start = ((ph->p_vaddr & ~(GLRO(dl_pagesize) - 1))
			   + (caddr_t) l->l_addr);

	    if (__mprotect (newp->start, newp->len, PROT_READ|PROT_WRITE) < 0)
	      {
		errstring = N_("cannot make segment writable for relocation");
	      call_error:
		_dl_signal_error (errno, l->l_name, NULL, errstring);
	      }

#if (PF_R | PF_W | PF_X) == 7 && (PROT_READ | PROT_WRITE | PROT_EXEC) == 7
	    /* PF_* and PROT_* bits coincide; translate with one shift.  */
	    newp->prot = (PF_TO_PROT
			  >> ((ph->p_flags & (PF_R | PF_W | PF_X)) * 4)) & 0xf;
#else
	    newp->prot = 0;
	    if (ph->p_flags & PF_R)
	      newp->prot |= PROT_READ;
	    if (ph->p_flags & PF_W)
	      newp->prot |= PROT_WRITE;
	    if (ph->p_flags & PF_X)
	      newp->prot |= PROT_EXEC;
#endif
	    newp->next = textrels;
	    textrels = newp;
	  }
    }

  {
    /* Do the actual relocation of the object's GOT and other data.  */

    /* String table object symbols.  */
    const char *strtab = (const void *) D_PTR (l, l_info[DT_STRTAB]);

    /* This macro is used as a callback from the ELF_DYNAMIC_RELOCATE code.
       Local symbols resolve to L itself; global symbols first consult the
       one-entry per-map lookup cache before doing a full scope search.  */
#define RESOLVE_MAP(ref, version, r_type) \
    (ELFW(ST_BIND) ((*ref)->st_info) != STB_LOCAL			      \
     ? ((__builtin_expect ((*ref) == l->l_lookup_cache.sym, 0)		      \
	 && elf_machine_type_class (r_type) == l->l_lookup_cache.type_class)  \
	? (bump_num_cache_relocations (),				      \
	   (*ref) = l->l_lookup_cache.ret,				      \
	   l->l_lookup_cache.value)					      \
	: ({ lookup_t _lr;						      \
	     int _tc = elf_machine_type_class (r_type);			      \
	     l->l_lookup_cache.type_class = _tc;			      \
	     l->l_lookup_cache.sym = (*ref);				      \
	     const struct r_found_version *v = NULL;			      \
	     int flags = DL_LOOKUP_ADD_DEPENDENCY;			      \
	     if ((version) != NULL && (version)->hash != 0)		      \
	       {							      \
		 v = (version);						      \
		 flags = 0;						      \
	       }							      \
	     _lr = _dl_lookup_symbol_x (strtab + (*ref)->st_name, l, (ref),   \
					scope, v, _tc, flags, NULL);	      \
	     l->l_lookup_cache.ret = (*ref);				      \
	     l->l_lookup_cache.value = _lr; }))				      \
     : l)

    /* This macro is used as a callback from elf_machine_rel{a,} when a
       static TLS reloc is about to be performed.  Since (in dl-load.c) we
       permit dynamic loading of objects that might use such relocs, we
       have to check whether each use is actually doable.  If the object
       whose TLS segment the reference resolves to was allocated space in
       the static TLS block at startup, then it's ok.  Otherwise, we make
       an attempt to allocate it in surplus space on the fly.  If that
       can't be done, we fall back to the error that DF_STATIC_TLS is
       intended to produce.  */
#define CHECK_STATIC_TLS(map, sym_map) \
    do {								      \
      if (__builtin_expect ((sym_map)->l_tls_offset == NO_TLS_OFFSET, 0))     \
	_dl_allocate_static_tls (sym_map);				      \
    } while (0)

    /* Included a second time, inside the function, so the machine
       relocation code picks up the RESOLVE_MAP and CHECK_STATIC_TLS
       callbacks defined above.  */
#include "dynamic-link.h"

    ELF_DYNAMIC_RELOCATE (l, lazy, consider_profiling);

#ifndef PROF
    if (__builtin_expect (consider_profiling, 0))
      {
	/* Allocate the array which will contain the already found
	   relocations.  If the shared object lacks a PLT (for example
	   if it only contains lead function) the l_info[DT_PLTRELSZ]
	   will be NULL.  */
	if (l->l_info[DT_PLTRELSZ] == NULL)
	  {
	    errstring = N_("%s: no PLTREL found in object %s\n");
	  fatal:
	    _dl_fatal_printf (errstring,
			      rtld_progname ?: "<program name unknown>",
			      l->l_name);
	  }

	l->l_reloc_result = calloc (sizeof (l->l_reloc_result[0]),
				    l->l_info[DT_PLTRELSZ]->d_un.d_val);
	if (l->l_reloc_result == NULL)
	  {
	    errstring = N_("\
%s: out of memory to store relocation results for %s\n");
	    goto fatal;
	  }
      }
#endif
  }

  /* Mark the object so we know this work has been done.  */
  l->l_relocated = 1;

  /* Undo the segment protection changes.  */
  while (__builtin_expect (textrels != NULL, 0))
    {
      if (__mprotect (textrels->start, textrels->len, textrels->prot) < 0)
	{
	  errstring = N_("cannot restore segment prot after reloc");
	  goto call_error;
	}

      textrels = textrels->next;
    }

  /* In case we can protect the data now that the relocations are
     done, do it.  */
  if (l->l_relro_size != 0)
    _dl_protect_relro (l);
}
154d10bd | 316 | |
ed20b3d9 | 317 | |
75631a57 UD |
318 | void internal_function |
319 | _dl_protect_relro (struct link_map *l) | |
320 | { | |
afdca0f2 UD |
321 | ElfW(Addr) start = ((l->l_addr + l->l_relro_addr) |
322 | & ~(GLRO(dl_pagesize) - 1)); | |
75631a57 | 323 | ElfW(Addr) end = ((l->l_addr + l->l_relro_addr + l->l_relro_size) |
afdca0f2 | 324 | & ~(GLRO(dl_pagesize) - 1)); |
75631a57 UD |
325 | |
326 | if (start != end | |
327 | && __mprotect ((void *) start, end - start, PROT_READ) < 0) | |
328 | { | |
154d10bd | 329 | static const char errstring[] = N_("\ |
ed20b3d9 | 330 | cannot apply additional memory protection after relocation"); |
154d10bd | 331 | _dl_signal_error (errno, l->l_name, NULL, errstring); |
ed20b3d9 | 332 | } |
d66e34cd | 333 | } |
421c80d2 RM |
334 | |
335 | void | |
567678b6 | 336 | internal_function __attribute_noinline__ |
ea41b926 | 337 | _dl_reloc_bad_type (struct link_map *map, unsigned int type, int plt) |
421c80d2 | 338 | { |
75bfdfc7 UD |
339 | extern const char INTUSE(_itoa_lower_digits)[] attribute_hidden; |
340 | #define DIGIT(b) INTUSE(_itoa_lower_digits)[(b) & 0xf]; | |
ea41b926 UD |
341 | |
342 | /* XXX We cannot translate these messages. */ | |
58436415 UD |
343 | static const char msg[2][32 |
344 | #if __ELF_NATIVE_CLASS == 64 | |
345 | + 6 | |
346 | #endif | |
347 | ] = { "unexpected reloc type 0x", | |
348 | "unexpected PLT reloc type 0x" }; | |
ea41b926 UD |
349 | char msgbuf[sizeof (msg[0])]; |
350 | char *cp; | |
351 | ||
352 | cp = __stpcpy (msgbuf, msg[plt]); | |
58436415 UD |
353 | #if __ELF_NATIVE_CLASS == 64 |
354 | if (__builtin_expect(type > 0xff, 0)) | |
355 | { | |
356 | *cp++ = DIGIT (type >> 28); | |
357 | *cp++ = DIGIT (type >> 24); | |
358 | *cp++ = DIGIT (type >> 20); | |
359 | *cp++ = DIGIT (type >> 16); | |
360 | *cp++ = DIGIT (type >> 12); | |
361 | *cp++ = DIGIT (type >> 8); | |
362 | } | |
363 | #endif | |
ea41b926 | 364 | *cp++ = DIGIT (type >> 4); |
f6fe5826 UD |
365 | *cp++ = DIGIT (type); |
366 | *cp = '\0'; | |
ea41b926 | 367 | |
154d10bd | 368 | _dl_signal_error (0, map->l_name, NULL, msgbuf); |
421c80d2 | 369 | } |