/* Load the dependencies of a mapped object.
   Copyright (C) 1996-2003, 2004, 2005, 2006, 2007, 2010
   Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */

#include <atomic.h>
#include <assert.h>
#include <dlfcn.h>
#include <errno.h>
#include <libintl.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/param.h>
#include <ldsodefs.h>

#include <dl-dst.h>

/* Whether a shared object references one or more auxiliary objects
   is signaled by the AUXTAG entry in l_info.  */
#define AUXTAG (DT_NUM + DT_THISPROCNUM + DT_VERSIONTAGNUM \
                + DT_EXTRATAGIDX (DT_AUXILIARY))
/* Whether a shared object references one or more filter objects
   is signaled by the FILTERTAG entry in l_info.  */
#define FILTERTAG (DT_NUM + DT_THISPROCNUM + DT_VERSIONTAGNUM \
                   + DT_EXTRATAGIDX (DT_FILTER))


/* When loading auxiliary objects we must ignore errors.  It's ok if
   an object is missing.  */
struct openaux_args
  {
    /* The arguments to openaux.  */
    struct link_map *map;
    int trace_mode;
    int open_mode;
    const char *strtab;
    const char *name;

    /* The return value of openaux.  */
    struct link_map *aux;
  };

static void
openaux (void *a)
{
  struct openaux_args *args = (struct openaux_args *) a;

  args->aux = _dl_map_object (args->map, args->name, 0,
                              (args->map->l_type == lt_executable
                               ? lt_library : args->map->l_type),
                              args->trace_mode, args->open_mode,
                              args->map->l_ns);
}

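/* Flatten the dependency closure of MAP into LIST, depth first,
   marking each visited object via `l_reserved'.  Returns the number
   of entries written; the caller is responsible for clearing the
   mark bits again.  */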
static ptrdiff_t
internal_function
_dl_build_local_scope (struct link_map **list, struct link_map *map)
{
  struct link_map **p = list;
  struct link_map **q;

  *p++ = map;
  map->l_reserved = 1;
  if (map->l_initfini)
    for (q = map->l_initfini + 1; *q; ++q)
      if (! (*q)->l_reserved)
        p += _dl_build_local_scope (p, *q);
  return p - list;
}


/* We use a very special kind of list to track the path
   through the list of loaded shared objects.  We have to
   produce a flat list with unique members of all involved objects.  */
struct list
  {
    int done;                   /* Nonzero if this map was processed.  */
    struct link_map *map;       /* The data.  */
    struct list *next;          /* Elements for normal list.  */
  };


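/* A dynamic string token (DST) such as $ORIGIN, $LIB or $PLATFORM may
   appear in DT_NEEDED, DT_AUXILIARY and DT_FILTER strings; it is
   replaced via _dl_dst_substitute before the name is used.  */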
/* Macro to expand DST.  It is a macro since we use `alloca'.  */
#define expand_dst(l, str, fatal) \
  ({ \
    const char *__str = (str); \
    const char *__result = __str; \
    size_t __dst_cnt = DL_DST_COUNT (__str, 0); \
 \
    if (__dst_cnt != 0) \
      { \
        char *__newp; \
 \
        /* DST must not appear in SUID/SGID programs.  */ \
        if (INTUSE(__libc_enable_secure)) \
          _dl_signal_error (0, __str, NULL, N_("\
DST not allowed in SUID/SGID programs")); \
 \
        __newp = (char *) alloca (DL_DST_REQUIRED (l, __str, strlen (__str), \
                                                   __dst_cnt)); \
 \
        __result = _dl_dst_substitute (l, __str, __newp, 0); \
 \
        if (*__result == '\0') \
          { \
            /* The replacement for the DST is not known.  We can't \
               proceed.  */ \
            if (fatal) \
              _dl_signal_error (0, __str, NULL, N_("\
empty dynamic string token substitution")); \
            else \
              { \
                /* This is for DT_AUXILIARY.  */ \
                if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_LIBS, 0)) \
                  _dl_debug_printf (N_("\
cannot load auxiliary `%s' because of empty dynamic string token " \
                                    "substitution\n"), __str); \
                continue; \
              } \
          } \
      } \
 \
    __result; })


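/* Load the direct and transitive dependencies of MAP, together with
   any objects in PRELOADS, then compute MAP's symbol search list and
   the order in which the constructors have to run.  TRACE_MODE and
   OPEN_MODE are passed through to _dl_map_object.  */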
void
internal_function
_dl_map_object_deps (struct link_map *map,
                     struct link_map **preloads, unsigned int npreloads,
                     int trace_mode, int open_mode)
{
  struct list *known = __alloca (sizeof *known * (1 + npreloads + 1));
  struct list *runp, *tail;
  unsigned int nlist, i;
  /* Object name.  */
  const char *name;
  int errno_saved;
  int errno_reason;
  const char *errstring;
  const char *objname;

  auto inline void preload (struct link_map *map);

  inline void preload (struct link_map *map)
    {
      known[nlist].done = 0;
      known[nlist].map = map;
      known[nlist].next = &known[nlist + 1];

      ++nlist;
      /* We use `l_reserved' as a mark bit to detect objects we have
         already put in the search list and avoid adding duplicate
         elements later in the list.  */
      map->l_reserved = 1;
    }

  /* No loaded object so far.  */
  nlist = 0;

  /* First load MAP itself.  */
  preload (map);

  /* Add the preloaded items after MAP but before any of its dependencies.  */
  for (i = 0; i < npreloads; ++i)
    preload (preloads[i]);

  /* Terminate the lists.  */
  known[nlist - 1].next = NULL;

  /* Pointer to last unique object.  */
  tail = &known[nlist - 1];

  /* Process each element of the search list, loading each of its
     auxiliary objects and immediate dependencies.  Auxiliary objects
     will be added in the list before the object itself and
     dependencies will be appended to the list as we step through it.
     This produces a flat, ordered list that represents a
     breadth-first search of the dependency tree.

     The whole process is complicated by the fact that we prefer to
     use alloca for the temporary list elements, and using alloca
     means we cannot use recursive function calls.  */
  errno_saved = errno;
  errno_reason = 0;
  errstring = NULL;
  errno = 0;
  name = NULL;
  for (runp = known; runp; )
    {
      struct link_map *l = runp->map;
      struct link_map **needed = NULL;
      unsigned int nneeded = 0;

      /* Unless otherwise stated, this object is handled.  */
      runp->done = 1;

      /* Allocate a temporary record to contain the references to the
         dependencies of this object.  */
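      /* L_LDNUM is the number of entries in the dynamic section and is
         therefore an upper bound on the number of DT_NEEDED,
         DT_AUXILIARY and DT_FILTER entries this object can have.  */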
      if (l->l_searchlist.r_list == NULL && l->l_initfini == NULL
          && l != map && l->l_ldnum > 0)
        needed = (struct link_map **) alloca (l->l_ldnum
                                              * sizeof (struct link_map *));

      if (l->l_info[DT_NEEDED] || l->l_info[AUXTAG] || l->l_info[FILTERTAG])
        {
          const char *strtab = (const void *) D_PTR (l, l_info[DT_STRTAB]);
          struct openaux_args args;
          struct list *orig;
          const ElfW(Dyn) *d;

          args.strtab = strtab;
          args.map = l;
          args.trace_mode = trace_mode;
          args.open_mode = open_mode;
          orig = runp;

          for (d = l->l_ld; d->d_tag != DT_NULL; ++d)
            if (__builtin_expect (d->d_tag, DT_NEEDED) == DT_NEEDED)
              {
                /* Map in the needed object.  */
                struct link_map *dep;

                /* Recognize DSTs.  */
                name = expand_dst (l, strtab + d->d_un.d_val, 0);
                /* Store the name in the argument structure.  */
                args.name = name;

                bool malloced;
                int err = _dl_catch_error (&objname, &errstring, &malloced,
                                           openaux, &args);
                if (__builtin_expect (errstring != NULL, 0))
                  {
                    char *new_errstring = strdupa (errstring);
                    objname = strdupa (objname);
                    if (malloced)
                      free ((char *) errstring);
                    errstring = new_errstring;

                    if (err)
                      errno_reason = err;
                    else
                      errno_reason = -1;
                    goto out;
                  }
                else
                  dep = args.aux;

                if (! dep->l_reserved)
                  {
                    /* Allocate new entry.  */
                    struct list *newp;

                    newp = alloca (sizeof (struct list));

                    /* Append DEP to the list.  */
                    newp->map = dep;
                    newp->done = 0;
                    newp->next = NULL;
                    tail->next = newp;
                    tail = newp;
                    ++nlist;
                    /* Set the mark bit that says it's already in the list.  */
                    dep->l_reserved = 1;
                  }

                /* Remember this dependency.  */
                if (needed != NULL)
                  needed[nneeded++] = dep;
              }
            else if (d->d_tag == DT_AUXILIARY || d->d_tag == DT_FILTER)
              {
                struct list *newp;

                /* Recognize DSTs.  */
                name = expand_dst (l, strtab + d->d_un.d_val,
                                   d->d_tag == DT_AUXILIARY);
                /* Store the name in the argument structure.  */
                args.name = name;

                if (d->d_tag == DT_AUXILIARY)
                  {
                    /* Say that we are about to load an auxiliary library.  */
                    if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_LIBS,
                                          0))
                      _dl_debug_printf ("load auxiliary object=%s"
                                        " requested by file=%s\n",
                                        name,
                                        l->l_name[0]
                                        ? l->l_name : rtld_progname);

                    /* We must be prepared that the addressed shared
                       object is not available.  */
                    bool malloced;
                    (void) _dl_catch_error (&objname, &errstring, &malloced,
                                            openaux, &args);
                    if (__builtin_expect (errstring != NULL, 0))
                      {
                        /* We are not interested in the error message.  */
                        assert (errstring != NULL);
                        if (malloced)
                          free ((char *) errstring);

                        /* Simply ignore this error and continue the work.  */
                        continue;
                      }
                  }
                else
                  {
                    /* Say that we are about to load a filter object.  */
                    if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_LIBS,
                                          0))
                      _dl_debug_printf ("load filtered object=%s"
                                        " requested by file=%s\n",
                                        name,
                                        l->l_name[0]
                                        ? l->l_name : rtld_progname);

                    /* For filter objects the dependency must be available.  */
                    bool malloced;
                    int err = _dl_catch_error (&objname, &errstring, &malloced,
                                               openaux, &args);
                    if (__builtin_expect (errstring != NULL, 0))
                      {
                        char *new_errstring = strdupa (errstring);
                        objname = strdupa (objname);
                        if (malloced)
                          free ((char *) errstring);
                        errstring = new_errstring;

                        if (err)
                          errno_reason = err;
                        else
                          errno_reason = -1;
                        goto out;
                      }
                  }

                /* The auxiliary object is actually available.
                   Incorporate the map in all the lists.  */

                /* Allocate new entry.  This always has to be done.  */
                newp = alloca (sizeof (struct list));

                /* We want to insert the new map before the current one,
                   but we have no back links.  So we copy the contents of
                   the current entry over.  Note that ORIG and NEWP now
                   have switched their meanings.  */
                memcpy (newp, orig, sizeof (*newp));

                /* Initialize new entry.  */
                orig->done = 0;
                orig->map = args.aux;

                /* Remember this dependency.  */
                if (needed != NULL)
                  needed[nneeded++] = args.aux;

                /* We must handle two situations here: the map is new,
                   so we must add it in all three lists.  If the map
                   is already known, we have two further possibilities:
                   - if the object is before the current map in the
                     search list, we do nothing.  It is already found
                     early enough.
                   - if the object is after the current one, we must
                     move it just before the current map to make sure
                     the symbols are found early enough.  */
                if (args.aux->l_reserved)
                  {
                    /* The object is already somewhere in the list.
                       Locate it first.  */
                    struct list *late;

                    /* This object is already in the search list we
                       are building.  Don't add a duplicate pointer.
                       Just added by _dl_map_object.  */
                    for (late = newp; late->next != NULL; late = late->next)
                      if (late->next->map == args.aux)
                        break;

                    if (late->next != NULL)
                      {
                        /* The object is somewhere behind the current
                           position in the search path.  We have to
                           move it to this earlier position.  */
                        orig->next = newp;

                        /* Now remove the later entry from the list
                           and adjust the tail pointer.  */
                        if (tail == late->next)
                          tail = late;
                        late->next = late->next->next;

                        /* We must move the object earlier in the chain.  */
                        if (args.aux->l_prev != NULL)
                          args.aux->l_prev->l_next = args.aux->l_next;
                        if (args.aux->l_next != NULL)
                          args.aux->l_next->l_prev = args.aux->l_prev;

                        args.aux->l_prev = newp->map->l_prev;
                        newp->map->l_prev = args.aux;
                        if (args.aux->l_prev != NULL)
                          args.aux->l_prev->l_next = args.aux;
                        args.aux->l_next = newp->map;
                      }
                    else
                      {
                        /* The object must be somewhere earlier in the
                           list.  Undo the change made to the current
                           list element above.  */
                        memcpy (orig, newp, sizeof (*newp));
                        continue;
                      }
                  }
                else
                  {
                    /* This is easy.  We just add the new map right here.  */
                    orig->next = newp;
                    ++nlist;
                    /* Set the mark bit that says it's already in the list.  */
                    args.aux->l_reserved = 1;

                    /* The only problem is that in the doubly linked
                       list of all objects we don't have this new
                       object at the correct place.  Correct this here.  */
                    if (args.aux->l_prev)
                      args.aux->l_prev->l_next = args.aux->l_next;
                    if (args.aux->l_next)
                      args.aux->l_next->l_prev = args.aux->l_prev;

                    args.aux->l_prev = newp->map->l_prev;
                    newp->map->l_prev = args.aux;
                    if (args.aux->l_prev != NULL)
                      args.aux->l_prev->l_next = args.aux;
                    args.aux->l_next = newp->map;
                  }

                /* Move the tail pointer if necessary.  */
                if (orig == tail)
                  tail = newp;

                /* Advance the insert point.  */
                orig = newp;
              }
        }

      /* Terminate the list of dependencies and store the array address.  */
      if (needed != NULL)
        {
          needed[nneeded++] = NULL;

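          /* The new array holds this object itself, its NULL-terminated
             list of direct dependencies, and in its upper half an
             unterminated duplicate of those entries.  */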
          struct link_map **l_initfini = (struct link_map **)
            malloc ((2 * nneeded + 1) * sizeof needed[0]);
          if (l_initfini == NULL)
            _dl_signal_error (ENOMEM, map->l_name, NULL,
                              N_("cannot allocate dependency list"));
          l_initfini[0] = l;
          memcpy (&l_initfini[1], needed, nneeded * sizeof needed[0]);
          memcpy (&l_initfini[nneeded + 1], l_initfini,
                  nneeded * sizeof needed[0]);
          atomic_write_barrier ();
          l->l_initfini = l_initfini;
        }

      /* If we have no auxiliary objects just go on to the next map.  */
      if (runp->done)
        do
          runp = runp->next;
        while (runp != NULL && runp->done);
    }

 out:
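  /* If mapping the dependencies did not set a new errno value, restore
     the value that was current when this function was entered.  */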
  if (errno == 0 && errno_saved != 0)
    __set_errno (errno_saved);

  struct link_map **old_l_initfini = NULL;
  if (map->l_initfini != NULL && map->l_type == lt_loaded)
    {
      /* This object was previously loaded as a dependency and we have
         a separate l_initfini list.  We don't need it anymore.  */
      assert (map->l_searchlist.r_list == NULL);
      old_l_initfini = map->l_initfini;
    }

  /* Store the search list we built in the object.  It will be used for
     searches in the scope of this object.  */
  struct link_map **l_initfini =
    (struct link_map **) malloc ((2 * nlist + 1)
                                 * sizeof (struct link_map *));
  if (l_initfini == NULL)
    _dl_signal_error (ENOMEM, map->l_name, NULL,
                      N_("cannot allocate symbol search list"));


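  /* The upper half of the allocation, starting at index NLIST + 1,
     holds the symbol search list; the lower half is filled in at the
     end of this function with the NULL-terminated initialization
     order.  */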
  map->l_searchlist.r_list = &l_initfini[nlist + 1];
  map->l_searchlist.r_nlist = nlist;

  for (nlist = 0, runp = known; runp; runp = runp->next)
    {
      if (__builtin_expect (trace_mode, 0) && runp->map->l_faked)
        /* This can happen when we trace the loading.  */
        --map->l_searchlist.r_nlist;
      else
        map->l_searchlist.r_list[nlist++] = runp->map;

      /* Now clear all the mark bits we set in the objects on the search list
         to avoid duplicates, so the next call starts fresh.  */
      runp->map->l_reserved = 0;
    }

  if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_PRELINK, 0) != 0
      && map == GL(dl_ns)[LM_ID_BASE]._ns_loaded)
    {
      /* If we are to compute conflicts, we have to build local scope
         for each library, not just the ultimate loader.  */
      for (i = 0; i < nlist; ++i)
        {
          struct link_map *l = map->l_searchlist.r_list[i];
          unsigned int j, cnt;

          /* The local scope has already been computed.  */
          if (l == map
              || (l->l_local_scope[0]
                  && l->l_local_scope[0]->r_nlist) != 0)
            continue;

          if (l->l_info[AUXTAG] || l->l_info[FILTERTAG])
            {
              /* As the current DT_AUXILIARY/DT_FILTER implementation needs
                 to be rewritten, there is no need to bother with prelinking
                 the old implementation.  */
              _dl_signal_error (EINVAL, l->l_name, NULL, N_("\
Filters not supported with LD_TRACE_PRELINKING"));
            }

          cnt = _dl_build_local_scope (l_initfini, l);
          assert (cnt <= nlist);
          for (j = 0; j < cnt; j++)
            {
              l_initfini[j]->l_reserved = 0;
              if (j && __builtin_expect (l_initfini[j]->l_info[DT_SYMBOLIC]
                                         != NULL, 0))
                l->l_symbolic_in_local_scope = true;
            }

          l->l_local_scope[0] =
            (struct r_scope_elem *) malloc (sizeof (struct r_scope_elem)
                                            + (cnt
                                               * sizeof (struct link_map *)));
          if (l->l_local_scope[0] == NULL)
            _dl_signal_error (ENOMEM, map->l_name, NULL,
                              N_("cannot allocate symbol search list"));
          l->l_local_scope[0]->r_nlist = cnt;
          l->l_local_scope[0]->r_list =
            (struct link_map **) (l->l_local_scope[0] + 1);
          memcpy (l->l_local_scope[0]->r_list, l_initfini,
                  cnt * sizeof (struct link_map *));
        }
    }

  /* Maybe we can remove some relocation dependencies now.  */
  assert (map->l_searchlist.r_list[0] == map);
  struct link_map_reldeps *l_reldeps = NULL;
  if (map->l_reldeps != NULL)
    {
      for (i = 1; i < nlist; ++i)
        map->l_searchlist.r_list[i]->l_reserved = 1;

      struct link_map **list = &map->l_reldeps->list[0];
      for (i = 0; i < map->l_reldeps->act; ++i)
        if (list[i]->l_reserved)
          {
            /* Need to allocate a new array of relocation dependencies.  */
            l_reldeps = malloc (sizeof (*l_reldeps)
                                + map->l_reldepsmax
                                  * sizeof (struct link_map *));
            if (l_reldeps == NULL)
              /* Bad luck, keep the reldeps duplicated between
                 map->l_reldeps->list and map->l_initfini lists.  */
              ;
            else
              {
                unsigned int j = i;
                memcpy (&l_reldeps->list[0], &list[0],
                        i * sizeof (struct link_map *));
                for (i = i + 1; i < map->l_reldeps->act; ++i)
                  if (!list[i]->l_reserved)
                    l_reldeps->list[j++] = list[i];
                l_reldeps->act = j;
              }
          }

      for (i = 1; i < nlist; ++i)
        map->l_searchlist.r_list[i]->l_reserved = 0;
    }

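  /* The pass below starts from the breadth-first search order and moves
     each object in front of everything that depends on it being moved,
     i.e. every object ends up before the objects it depends on;
     _dl_init later walks l_initfini from the end towards the front, so
     the constructors of dependencies run first.  */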
  /* Now determine the order in which the initialization has to happen.  */
  memcpy (l_initfini, map->l_searchlist.r_list,
          nlist * sizeof (struct link_map *));
  /* We can skip looking for the binary itself which is at the front
     of the search list.  Look through the list backward so that circular
     dependencies do not change the order.  */
  for (i = 1; i < nlist; ++i)
    {
      struct link_map *l = map->l_searchlist.r_list[i];
      unsigned int j;
      unsigned int k;

      /* Find the place in the initfini list where the map is currently
         located.  */
      for (j = 1; l_initfini[j] != l; ++j)
        ;

      /* Find all objects for which the current one is a dependency and
         move the found objects (if necessary) in front.  */
      for (k = j + 1; k < nlist; ++k)
        {
          struct link_map **runp;

          runp = l_initfini[k]->l_initfini;
          if (runp != NULL)
            {
              while (*runp != NULL)
                if (__builtin_expect (*runp++ == l, 0))
                  {
                    struct link_map *here = l_initfini[k];

                    /* Move it now.  */
                    memmove (&l_initfini[j] + 1, &l_initfini[j],
                             (k - j) * sizeof (struct link_map *));
                    l_initfini[j] = here;

                    /* Don't insert further matches before the last
                       entry moved to the front.  */
                    ++j;

                    break;
                  }
            }
        }
    }
  /* Terminate the list of dependencies.  */
  l_initfini[nlist] = NULL;
  atomic_write_barrier ();
  map->l_initfini = l_initfini;
  if (l_reldeps != NULL)
    {
      atomic_write_barrier ();
      void *old_l_reldeps = map->l_reldeps;
      map->l_reldeps = l_reldeps;
      _dl_scope_free (old_l_reldeps);
    }
  if (old_l_initfini != NULL)
    _dl_scope_free (old_l_initfini);

  if (errno_reason)
    _dl_signal_error (errno_reason == -1 ? 0 : errno_reason, objname,
                      NULL, errstring);
}