elf/dl-deps.c
/* Load the dependencies of a mapped object.
   Copyright (C) 1996-2003, 2004-2007, 2010-2012
   Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <atomic.h>
#include <assert.h>
#include <dlfcn.h>
#include <errno.h>
#include <libintl.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/param.h>
#include <ldsodefs.h>

#include <dl-dst.h>

/* Whether a shared object references one or more auxiliary objects
   is signaled by the AUXTAG entry in l_info.  */
#define AUXTAG    (DT_NUM + DT_THISPROCNUM + DT_VERSIONTAGNUM \
                   + DT_EXTRATAGIDX (DT_AUXILIARY))
/* Whether a shared object references one or more filter objects
   is signaled by the FILTERTAG entry in l_info.  */
#define FILTERTAG (DT_NUM + DT_THISPROCNUM + DT_VERSIONTAGNUM \
                   + DT_EXTRATAGIDX (DT_FILTER))


/* When loading auxiliary objects we must ignore errors.  It's ok if
   an object is missing.  */
struct openaux_args
{
  /* The arguments to openaux.  */
  struct link_map *map;
  int trace_mode;
  int open_mode;
  const char *strtab;
  const char *name;

  /* The return value of openaux.  */
  struct link_map *aux;
};

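/* Helper invoked through _dl_catch_error below: map in a single
   dependency so that a failure is reported via the error string instead
   of terminating the dependency walk outright.  */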
static void
openaux (void *a)
{
  struct openaux_args *args = (struct openaux_args *) a;

  args->aux = _dl_map_object (args->map, args->name,
                              (args->map->l_type == lt_executable
                               ? lt_library : args->map->l_type),
                              args->trace_mode, args->open_mode,
                              args->map->l_ns);
}

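/* Collect MAP and, recursively, every object reachable through the
   l_initfini lists into LIST, skipping objects whose l_reserved mark is
   already set.  Returns the number of entries added; the caller is
   expected to clear the l_reserved bits afterwards.  */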
static ptrdiff_t
internal_function
_dl_build_local_scope (struct link_map **list, struct link_map *map)
{
  struct link_map **p = list;
  struct link_map **q;

  *p++ = map;
  map->l_reserved = 1;
  if (map->l_initfini)
    for (q = map->l_initfini + 1; *q; ++q)
      if (! (*q)->l_reserved)
        p += _dl_build_local_scope (p, *q);
  return p - list;
}


/* We use a very special kind of list to track the path
   through the list of loaded shared objects.  We have to
   produce a flat list with unique members of all involved objects.
*/
struct list
{
  int done;                     /* Nonzero if this map was processed.  */
  struct link_map *map;         /* The data.  */
  struct list *next;            /* Elements for normal list.  */
};


/* Macro to expand DST.  It is a macro since we use `alloca'.  */
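/* For example, a DT_NEEDED entry of "$ORIGIN/libfoo.so" has $ORIGIN
   replaced by the directory containing the referencing object, so for
   an object in /usr/bin the result would be "/usr/bin/libfoo.so"
   (illustrative values only).  */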
#define expand_dst(l, str, fatal) \
  ({ \
    const char *__str = (str); \
    const char *__result = __str; \
    size_t __dst_cnt = DL_DST_COUNT (__str, 0); \
    \
    if (__dst_cnt != 0) \
      { \
        char *__newp; \
        \
        /* DST must not appear in SUID/SGID programs.  */ \
        if (INTUSE(__libc_enable_secure)) \
          _dl_signal_error (0, __str, NULL, N_("\
DST not allowed in SUID/SGID programs")); \
        \
        __newp = (char *) alloca (DL_DST_REQUIRED (l, __str, strlen (__str), \
                                                   __dst_cnt)); \
        \
        __result = _dl_dst_substitute (l, __str, __newp, 0); \
        \
        if (*__result == '\0') \
          { \
            /* The replacement for the DST is not known.  We cannot \
               proceed.  */ \
            if (fatal) \
              _dl_signal_error (0, __str, NULL, N_("\
empty dynamic string token substitution")); \
            else \
              { \
                /* This is for DT_AUXILIARY.  */ \
                if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_LIBS, 0)) \
                  _dl_debug_printf (N_("\
cannot load auxiliary `%s' because of empty dynamic string token " \
                                    "substitution\n"), __str); \
                continue; \
              } \
          } \
      } \
    \
    __result; })


void
internal_function
_dl_map_object_deps (struct link_map *map,
                     struct link_map **preloads, unsigned int npreloads,
                     int trace_mode, int open_mode)
{
  struct list *known = __alloca (sizeof *known * (1 + npreloads + 1));
  struct list *runp, *tail;
  unsigned int nlist, i;
  /* Object name.  */
  const char *name;
  int errno_saved;
  int errno_reason;
  const char *errstring;
  const char *objname;

  auto inline void preload (struct link_map *map);

  inline void preload (struct link_map *map)
    {
      known[nlist].done = 0;
      known[nlist].map = map;
      known[nlist].next = &known[nlist + 1];

      ++nlist;
      /* We use `l_reserved' as a mark bit to detect objects we have
         already put in the search list and avoid adding duplicate
         elements later in the list.  */
      map->l_reserved = 1;
    }

  /* No loaded object so far.  */
  nlist = 0;

  /* First load MAP itself.  */
  preload (map);

  /* Add the preloaded items after MAP but before any of its dependencies.  */
  for (i = 0; i < npreloads; ++i)
    preload (preloads[i]);

  /* Terminate the lists.  */
  known[nlist - 1].next = NULL;

  /* Pointer to last unique object.  */
  tail = &known[nlist - 1];

  /* No alloca'd space yet.  */
  struct link_map **needed_space = NULL;
  size_t needed_space_bytes = 0;

  /* Process each element of the search list, loading each of its
     auxiliary objects and immediate dependencies.  Auxiliary objects
     will be added in the list before the object itself and
     dependencies will be appended to the list as we step through it.
     This produces a flat, ordered list that represents a
     breadth-first search of the dependency tree.

     The whole process is complicated by the fact that we had better
     use alloca for the temporary list elements.  But using alloca means
     we cannot use recursive function calls.  */
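  /* For example, if MAP needs A and B, and A in turn needs C, the list
     built here ends up as MAP, A, B, C (illustrative names).  */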
  errno_saved = errno;
  errno_reason = 0;
  errstring = NULL;
  errno = 0;
  name = NULL;
  for (runp = known; runp; )
    {
      struct link_map *l = runp->map;
      struct link_map **needed = NULL;
      unsigned int nneeded = 0;

      /* Unless otherwise stated, this object is handled.  */
      runp->done = 1;

      /* Allocate a temporary record to contain the references to the
         dependencies of this object.  */
      if (l->l_searchlist.r_list == NULL && l->l_initfini == NULL
          && l != map && l->l_ldnum > 0)
        {
          size_t new_size = l->l_ldnum * sizeof (struct link_map *);

          if (new_size > needed_space_bytes)
            needed_space
              = extend_alloca (needed_space, needed_space_bytes, new_size);

          needed = needed_space;
        }

      if (l->l_info[DT_NEEDED] || l->l_info[AUXTAG] || l->l_info[FILTERTAG])
        {
          const char *strtab = (const void *) D_PTR (l, l_info[DT_STRTAB]);
          struct openaux_args args;
          struct list *orig;
          const ElfW(Dyn) *d;

          args.strtab = strtab;
          args.map = l;
          args.trace_mode = trace_mode;
          args.open_mode = open_mode;
          orig = runp;

          for (d = l->l_ld; d->d_tag != DT_NULL; ++d)
            if (__builtin_expect (d->d_tag, DT_NEEDED) == DT_NEEDED)
              {
                /* Map in the needed object.  */
                struct link_map *dep;

                /* Recognize DSTs.  */
                name = expand_dst (l, strtab + d->d_un.d_val, 0);
                /* Store the name in the argument structure.  */
                args.name = name;

                bool malloced;
                int err = _dl_catch_error (&objname, &errstring, &malloced,
                                           openaux, &args);
                if (__builtin_expect (errstring != NULL, 0))
                  {
                    char *new_errstring = strdupa (errstring);
                    objname = strdupa (objname);
                    if (malloced)
                      free ((char *) errstring);
                    errstring = new_errstring;

                    if (err)
                      errno_reason = err;
                    else
                      errno_reason = -1;
                    goto out;
                  }
                else
                  dep = args.aux;

                if (! dep->l_reserved)
                  {
                    /* Allocate new entry.  */
                    struct list *newp;

                    newp = alloca (sizeof (struct list));

                    /* Append DEP to the list.  */
                    newp->map = dep;
                    newp->done = 0;
                    newp->next = NULL;
                    tail->next = newp;
                    tail = newp;
                    ++nlist;
                    /* Set the mark bit that says it's already in the list.  */
                    dep->l_reserved = 1;
                  }

                /* Remember this dependency.  */
                if (needed != NULL)
                  needed[nneeded++] = dep;
              }
            else if (d->d_tag == DT_AUXILIARY || d->d_tag == DT_FILTER)
              {
                struct list *newp;

                /* Recognize DSTs.  */
                name = expand_dst (l, strtab + d->d_un.d_val,
                                   d->d_tag == DT_AUXILIARY);
                /* Store the name in the argument structure.  */
                args.name = name;

                if (d->d_tag == DT_AUXILIARY)
                  {
                    /* Say that we are about to load an auxiliary library.  */
                    if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_LIBS,
                                          0))
                      _dl_debug_printf ("load auxiliary object=%s"
                                        " requested by file=%s\n",
                                        name,
                                        l->l_name[0]
                                        ? l->l_name : rtld_progname);

                    /* We must be prepared that the addressed shared
                       object is not available.  */
                    bool malloced;
                    (void) _dl_catch_error (&objname, &errstring, &malloced,
                                            openaux, &args);
                    if (__builtin_expect (errstring != NULL, 0))
                      {
                        /* We are not interested in the error message.  */
                        assert (errstring != NULL);
                        if (malloced)
                          free ((char *) errstring);

                        /* Simply ignore this error and continue the work.  */
                        continue;
                      }
                  }
                else
                  {
                    /* Say that we are about to load a filtered object.  */
                    if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_LIBS,
                                          0))
                      _dl_debug_printf ("load filtered object=%s"
                                        " requested by file=%s\n",
                                        name,
                                        l->l_name[0]
                                        ? l->l_name : rtld_progname);

                    /* For filter objects the dependency must be available.  */
                    bool malloced;
                    int err = _dl_catch_error (&objname, &errstring, &malloced,
                                               openaux, &args);
                    if (__builtin_expect (errstring != NULL, 0))
                      {
                        char *new_errstring = strdupa (errstring);
                        objname = strdupa (objname);
                        if (malloced)
                          free ((char *) errstring);
                        errstring = new_errstring;

                        if (err)
                          errno_reason = err;
                        else
                          errno_reason = -1;
                        goto out;
                      }
                  }

                /* The auxiliary object is actually available.
                   Incorporate the map in all the lists.  */

                /* Allocate new entry.  This always has to be done.  */
                newp = alloca (sizeof (struct list));

                /* We want to insert the new map before the current one,
                   but we have no back links.  So we copy the contents of
                   the current entry over.  Note that ORIG and NEWP now
                   have switched their meanings.  */
                memcpy (newp, orig, sizeof (*newp));

                /* Initialize new entry.  */
                orig->done = 0;
                orig->map = args.aux;

                /* Remember this dependency.  */
                if (needed != NULL)
                  needed[nneeded++] = args.aux;

                /* We must handle two situations here: the map is new,
                   so we must add it in all three lists.  If the map
                   is already known, we have two further possibilities:
                   - if the object is before the current map in the
                     search list, we do nothing.  It is already found
                     early enough
                   - if the object is after the current one, we must
                     move it just before the current map to make sure
                     the symbols are found early enough
                */
                if (args.aux->l_reserved)
                  {
                    /* The object is already somewhere in the list.
                       Locate it first.  */
                    struct list *late;

                    /* This object is already in the search list we
                       are building.  Don't add a duplicate pointer.
                       Just added by _dl_map_object.  */
                    for (late = newp; late->next != NULL; late = late->next)
                      if (late->next->map == args.aux)
                        break;

                    if (late->next != NULL)
                      {
                        /* The object is somewhere behind the current
                           position in the search path.  We have to
                           move it to this earlier position.  */
                        orig->next = newp;

                        /* Now remove the later entry from the list
                           and adjust the tail pointer.  */
                        if (tail == late->next)
                          tail = late;
                        late->next = late->next->next;

                        /* We must move the object earlier in the chain.  */
                        if (args.aux->l_prev != NULL)
                          args.aux->l_prev->l_next = args.aux->l_next;
                        if (args.aux->l_next != NULL)
                          args.aux->l_next->l_prev = args.aux->l_prev;

                        args.aux->l_prev = newp->map->l_prev;
                        newp->map->l_prev = args.aux;
                        if (args.aux->l_prev != NULL)
                          args.aux->l_prev->l_next = args.aux;
                        args.aux->l_next = newp->map;
                      }
                    else
                      {
                        /* The object must be somewhere earlier in the
                           list.  Undo to the current list element what
                           we did above.  */
                        memcpy (orig, newp, sizeof (*newp));
                        continue;
                      }
                  }
                else
                  {
                    /* This is easy.  We just add the symbol right here.  */
                    orig->next = newp;
                    ++nlist;
                    /* Set the mark bit that says it's already in the list.  */
                    args.aux->l_reserved = 1;

                    /* The only problem is that in the double linked
                       list of all objects we don't have this new
                       object at the correct place.  Correct this here.  */
                    if (args.aux->l_prev)
                      args.aux->l_prev->l_next = args.aux->l_next;
                    if (args.aux->l_next)
                      args.aux->l_next->l_prev = args.aux->l_prev;

                    args.aux->l_prev = newp->map->l_prev;
                    newp->map->l_prev = args.aux;
                    if (args.aux->l_prev != NULL)
                      args.aux->l_prev->l_next = args.aux;
                    args.aux->l_next = newp->map;
                  }

                /* Move the tail pointer if necessary.  */
                if (orig == tail)
                  tail = newp;

                /* Move on the insert point.  */
                orig = newp;
              }
        }

      /* Terminate the list of dependencies and store the array address.  */
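      /* The single allocation below holds this object, its NULL-terminated
         list of direct dependencies, and a second copy of those entries
         in the same block.  */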
      if (needed != NULL)
        {
          needed[nneeded++] = NULL;

          struct link_map **l_initfini = (struct link_map **)
            malloc ((2 * nneeded + 1) * sizeof needed[0]);
          if (l_initfini == NULL)
            _dl_signal_error (ENOMEM, map->l_name, NULL,
                              N_("cannot allocate dependency list"));
          l_initfini[0] = l;
          memcpy (&l_initfini[1], needed, nneeded * sizeof needed[0]);
          memcpy (&l_initfini[nneeded + 1], l_initfini,
                  nneeded * sizeof needed[0]);
          atomic_write_barrier ();
          l->l_initfini = l_initfini;
        }

      /* If we have no auxiliary objects just go on to the next map.  */
      if (runp->done)
        do
          runp = runp->next;
        while (runp != NULL && runp->done);
    }

 out:
  if (errno == 0 && errno_saved != 0)
    __set_errno (errno_saved);

  struct link_map **old_l_initfini = NULL;
  if (map->l_initfini != NULL && map->l_type == lt_loaded)
    {
      /* This object was previously loaded as a dependency and we have
         a separate l_initfini list.  We don't need it anymore.  */
      assert (map->l_searchlist.r_list == NULL);
      old_l_initfini = map->l_initfini;
    }

  /* Store the search list we built in the object.  It will be used for
     searches in the scope of this object.  */
  struct link_map **l_initfini =
    (struct link_map **) malloc ((2 * nlist + 1)
                                 * sizeof (struct link_map *));
  if (l_initfini == NULL)
    _dl_signal_error (ENOMEM, map->l_name, NULL,
                      N_("cannot allocate symbol search list"));


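  /* The allocation above serves two purposes: the first nlist + 1 entries
     become MAP's sorted l_initfini list below, while the remaining nlist
     entries hold the symbol search list.  */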
  map->l_searchlist.r_list = &l_initfini[nlist + 1];
  map->l_searchlist.r_nlist = nlist;

  for (nlist = 0, runp = known; runp; runp = runp->next)
    {
      if (__builtin_expect (trace_mode, 0) && runp->map->l_faked)
        /* This can happen when we trace the loading.  */
        --map->l_searchlist.r_nlist;
      else
        map->l_searchlist.r_list[nlist++] = runp->map;

      /* Now clear all the mark bits we set in the objects on the search list
         to avoid duplicates, so the next call starts fresh.  */
      runp->map->l_reserved = 0;
    }

  if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_PRELINK, 0) != 0
      && map == GL(dl_ns)[LM_ID_BASE]._ns_loaded)
    {
      /* If we are to compute conflicts, we have to build local scope
         for each library, not just the ultimate loader.  */
      for (i = 0; i < nlist; ++i)
        {
          struct link_map *l = map->l_searchlist.r_list[i];
          unsigned int j, cnt;

          /* The local scope has already been computed.  */
          if (l == map
              || (l->l_local_scope[0]
                  && l->l_local_scope[0]->r_nlist) != 0)
            continue;

          if (l->l_info[AUXTAG] || l->l_info[FILTERTAG])
            {
              /* As the current DT_AUXILIARY/DT_FILTER implementation needs
                 to be rewritten, there is no need to bother with prelinking
                 the old implementation.  */
              _dl_signal_error (EINVAL, l->l_name, NULL, N_("\
Filters not supported with LD_TRACE_PRELINKING"));
            }

          cnt = _dl_build_local_scope (l_initfini, l);
          assert (cnt <= nlist);
          for (j = 0; j < cnt; j++)
            {
              l_initfini[j]->l_reserved = 0;
              if (j && __builtin_expect (l_initfini[j]->l_info[DT_SYMBOLIC]
                                         != NULL, 0))
                l->l_symbolic_in_local_scope = true;
            }

          l->l_local_scope[0] =
            (struct r_scope_elem *) malloc (sizeof (struct r_scope_elem)
                                            + (cnt
                                               * sizeof (struct link_map *)));
          if (l->l_local_scope[0] == NULL)
            _dl_signal_error (ENOMEM, map->l_name, NULL,
                              N_("cannot allocate symbol search list"));
          l->l_local_scope[0]->r_nlist = cnt;
          l->l_local_scope[0]->r_list =
            (struct link_map **) (l->l_local_scope[0] + 1);
          memcpy (l->l_local_scope[0]->r_list, l_initfini,
                  cnt * sizeof (struct link_map *));
        }
    }

  /* Maybe we can remove some relocation dependencies now.  */
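  /* Relocation dependencies recorded earlier that now appear as regular
     dependencies in the search list are redundant, so drop them from
     map->l_reldeps where possible.  */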
  assert (map->l_searchlist.r_list[0] == map);
  struct link_map_reldeps *l_reldeps = NULL;
  if (map->l_reldeps != NULL)
    {
      for (i = 1; i < nlist; ++i)
        map->l_searchlist.r_list[i]->l_reserved = 1;

      struct link_map **list = &map->l_reldeps->list[0];
      for (i = 0; i < map->l_reldeps->act; ++i)
        if (list[i]->l_reserved)
          {
            /* Need to allocate new array of relocation dependencies.  */
            l_reldeps = malloc (sizeof (*l_reldeps)
                                + map->l_reldepsmax
                                  * sizeof (struct link_map *));
            if (l_reldeps == NULL)
              /* Bad luck, keep the reldeps duplicated between
                 map->l_reldeps->list and map->l_initfini lists.  */
              ;
            else
              {
                unsigned int j = i;
                memcpy (&l_reldeps->list[0], &list[0],
                        i * sizeof (struct link_map *));
                for (i = i + 1; i < map->l_reldeps->act; ++i)
                  if (!list[i]->l_reserved)
                    l_reldeps->list[j++] = list[i];
                l_reldeps->act = j;
              }
          }

      for (i = 1; i < nlist; ++i)
        map->l_searchlist.r_list[i]->l_reserved = 0;
    }

  /* Sort the initializer list to take dependencies into account.  The binary
     itself will always be initialized last.  */
  memcpy (l_initfini, map->l_searchlist.r_list,
          nlist * sizeof (struct link_map *));
  if (__builtin_expect (nlist > 1, 1))
    {
      /* We can skip looking for the binary itself which is at the front
         of the search list.  */
      i = 1;
      uint16_t seen[nlist];
      memset (seen, 0, nlist * sizeof (seen[0]));
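      /* The loop below repeatedly moves an object behind the last object
         that depends on it; SEEN counts how often each position has been
         revisited so that cyclic dependencies do not make the sort loop
         forever.  */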
      while (1)
        {
          /* Keep track of which object we looked at this round.  */
          ++seen[i];
          struct link_map *thisp = l_initfini[i];

          /* Find the last object in the list for which the current one is
             a dependency and move the current object behind the object
             with the dependency.  */
          unsigned int k = nlist - 1;
          while (k > i)
            {
              struct link_map **runp = l_initfini[k]->l_initfini;
              if (runp != NULL)
                /* Look through the dependencies of the object.  */
                while (*runp != NULL)
                  if (__builtin_expect (*runp++ == thisp, 0))
                    {
                      /* Move the current object to the back past the last
                         object with it as the dependency.  */
                      memmove (&l_initfini[i], &l_initfini[i + 1],
                               (k - i) * sizeof (l_initfini[0]));
                      l_initfini[k] = thisp;

                      if (seen[i + 1] > nlist - i)
                        {
                          ++i;
                          goto next_clear;
                        }

                      uint16_t this_seen = seen[i];
                      memmove (&seen[i], &seen[i + 1],
                               (k - i) * sizeof (seen[0]));
                      seen[k] = this_seen;

                      goto next;
                    }

              --k;
            }

          if (++i == nlist)
            break;
        next_clear:
          memset (&seen[i], 0, (nlist - i) * sizeof (seen[0]));

        next:;
        }
    }

  /* Terminate the list of dependencies.  */
  l_initfini[nlist] = NULL;
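  /* Publish the new list only after its contents are complete; the write
     barrier orders the stores above before the store to map->l_initfini
     below.  */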
  atomic_write_barrier ();
  map->l_initfini = l_initfini;
  if (l_reldeps != NULL)
    {
      atomic_write_barrier ();
      void *old_l_reldeps = map->l_reldeps;
      map->l_reldeps = l_reldeps;
      _dl_scope_free (old_l_reldeps);
    }
  if (old_l_initfini != NULL)
    map->l_orig_initfini = old_l_initfini;

  if (errno_reason)
    _dl_signal_error (errno_reason == -1 ? 0 : errno_reason, objname,
                      NULL, errstring);
}