/* Load the dependencies of a mapped object.
   Copyright (C) 1996-2013 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <atomic.h>
#include <assert.h>
#include <dlfcn.h>
#include <errno.h>
#include <libintl.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/param.h>
#include <ldsodefs.h>

#include <dl-dst.h>

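/* DT_AUXILIARY and DT_FILTER are "extra" dynamic tags.  In a link map's
   l_info array their entries are stored after the regular (DT_NUM),
   processor-specific (DT_THISPROCNUM) and version (DT_VERSIONTAGNUM)
   entries, at the offset computed by DT_EXTRATAGIDX.  The following two
   macros name those array slots.  */
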
/* Whether a shared object references one or more auxiliary objects
   is signaled by the AUXTAG entry in l_info.  */
#define AUXTAG (DT_NUM + DT_THISPROCNUM + DT_VERSIONTAGNUM \
                + DT_EXTRATAGIDX (DT_AUXILIARY))
/* Whether a shared object references one or more filter objects
   is signaled by the FILTERTAG entry in l_info.  */
#define FILTERTAG (DT_NUM + DT_THISPROCNUM + DT_VERSIONTAGNUM \
                   + DT_EXTRATAGIDX (DT_FILTER))


/* When loading auxiliary objects we must ignore errors.  It's ok if
   an object is missing.  */
struct openaux_args
{
  /* The arguments to openaux.  */
  struct link_map *map;
  int trace_mode;
  int open_mode;
  const char *strtab;
  const char *name;

  /* The return value of openaux.  */
  struct link_map *aux;
};

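/* Callback run through _dl_catch_error: map the object named ARGS->name
   as a dependency of ARGS->map.  Catching the error lets the caller
   decide whether a failure is fatal or, as for auxiliary objects, can
   be ignored.  */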
static void
openaux (void *a)
{
  struct openaux_args *args = (struct openaux_args *) a;

  args->aux = _dl_map_object (args->map, args->name,
                              (args->map->l_type == lt_executable
                               ? lt_library : args->map->l_type),
                              args->trace_mode, args->open_mode,
                              args->map->l_ns);
}

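/* Fill LIST with MAP followed by the transitive closure of MAP's
   l_initfini dependencies, depth first, skipping objects whose
   l_reserved mark is already set.  Returns the number of entries
   written; the caller is responsible for clearing the marks again.  */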
static ptrdiff_t
internal_function
_dl_build_local_scope (struct link_map **list, struct link_map *map)
{
  struct link_map **p = list;
  struct link_map **q;

  *p++ = map;
  map->l_reserved = 1;
  if (map->l_initfini)
    for (q = map->l_initfini + 1; *q; ++q)
      if (! (*q)->l_reserved)
        p += _dl_build_local_scope (p, *q);
  return p - list;
}


/* We use a very special kind of list to track the path
   through the list of loaded shared objects.  We have to
   produce a flat list with unique members of all involved objects.  */
struct list
{
  int done;              /* Nonzero if this map was processed.  */
  struct link_map *map;  /* The data.  */
  struct list *next;     /* Elements for normal list.  */
};


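/* Names in DT_NEEDED, DT_AUXILIARY and DT_FILTER entries may contain
   dynamic string tokens (DSTs) such as $ORIGIN, $LIB or $PLATFORM which
   have to be replaced by their values before the name can be used.  */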
/* Macro to expand DST.  It is a macro since we use `alloca'.  */
#define expand_dst(l, str, fatal) \
  ({ \
    const char *__str = (str); \
    const char *__result = __str; \
    size_t __dst_cnt = DL_DST_COUNT (__str, 0); \
 \
    if (__dst_cnt != 0) \
      { \
        char *__newp; \
 \
        /* DST must not appear in SUID/SGID programs.  */ \
        if (INTUSE(__libc_enable_secure)) \
          _dl_signal_error (0, __str, NULL, N_("\
DST not allowed in SUID/SGID programs")); \
 \
        __newp = (char *) alloca (DL_DST_REQUIRED (l, __str, strlen (__str), \
                                                   __dst_cnt)); \
 \
        __result = _dl_dst_substitute (l, __str, __newp, 0); \
 \
        if (*__result == '\0') \
          { \
            /* The replacement for the DST is not known.  We cannot \
               proceed.  */ \
            if (fatal) \
              _dl_signal_error (0, __str, NULL, N_("\
empty dynamic string token substitution")); \
            else \
              { \
                /* This is for DT_AUXILIARY.  */ \
                if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_LIBS, 0)) \
                  _dl_debug_printf (N_("\
cannot load auxiliary `%s' because of empty dynamic string token " \
"substitution\n"), __str); \
                continue; \
              } \
          } \
      } \
 \
    __result; })


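/* Load the dependencies of MAP: the objects named in its DT_NEEDED,
   DT_AUXILIARY and DT_FILTER entries and, transitively, in those of the
   objects loaded for them.  The NPRELOADS objects in PRELOADS are placed
   right after MAP in the search order.  The resulting breadth-first
   search list is stored in MAP->l_searchlist and a dependency-sorted
   copy in MAP->l_initfini.  */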
void
internal_function
_dl_map_object_deps (struct link_map *map,
                     struct link_map **preloads, unsigned int npreloads,
                     int trace_mode, int open_mode)
{
  struct list *known = __alloca (sizeof *known * (1 + npreloads + 1));
  struct list *runp, *tail;
  unsigned int nlist, i;
  /* Object name.  */
  const char *name;
  int errno_saved;
  int errno_reason;
  const char *errstring;
  const char *objname;

  auto inline void preload (struct link_map *map);

  inline void preload (struct link_map *map)
    {
      known[nlist].done = 0;
      known[nlist].map = map;
      known[nlist].next = &known[nlist + 1];

      ++nlist;
      /* We use `l_reserved' as a mark bit to detect objects we have
         already put in the search list and avoid adding duplicate
         elements later in the list.  */
      map->l_reserved = 1;
    }

  /* No loaded object so far.  */
  nlist = 0;

  /* First load MAP itself.  */
  preload (map);

  /* Add the preloaded items after MAP but before any of its dependencies.  */
  for (i = 0; i < npreloads; ++i)
    preload (preloads[i]);

  /* Terminate the lists.  */
  known[nlist - 1].next = NULL;

  /* Pointer to last unique object.  */
  tail = &known[nlist - 1];

  /* No alloca'd space yet.  */
  struct link_map **needed_space = NULL;
  size_t needed_space_bytes = 0;

  /* Process each element of the search list, loading each of its
     auxiliary objects and immediate dependencies.  Auxiliary objects
     will be added in the list before the object itself and
     dependencies will be appended to the list as we step through it.
     This produces a flat, ordered list that represents a
     breadth-first search of the dependency tree.

     The whole process is complicated by the fact that we should
     preferably use alloca for the temporary list elements.  But using
     alloca means we cannot use recursive function calls.  */
  errno_saved = errno;
  errno_reason = 0;
  errstring = NULL;
  errno = 0;
  name = NULL;
  for (runp = known; runp; )
    {
      struct link_map *l = runp->map;
      struct link_map **needed = NULL;
      unsigned int nneeded = 0;

      /* Unless otherwise stated, this object is handled.  */
      runp->done = 1;

      /* Allocate a temporary record to contain the references to the
         dependencies of this object.  */
      if (l->l_searchlist.r_list == NULL && l->l_initfini == NULL
          && l != map && l->l_ldnum > 0)
        {
          size_t new_size = l->l_ldnum * sizeof (struct link_map *);

          if (new_size > needed_space_bytes)
            needed_space
              = extend_alloca (needed_space, needed_space_bytes, new_size);

          needed = needed_space;
        }

      if (l->l_info[DT_NEEDED] || l->l_info[AUXTAG] || l->l_info[FILTERTAG])
        {
          const char *strtab = (const void *) D_PTR (l, l_info[DT_STRTAB]);
          struct openaux_args args;
          struct list *orig;
          const ElfW(Dyn) *d;

          args.strtab = strtab;
          args.map = l;
          args.trace_mode = trace_mode;
          args.open_mode = open_mode;
          orig = runp;

          for (d = l->l_ld; d->d_tag != DT_NULL; ++d)
            if (__builtin_expect (d->d_tag, DT_NEEDED) == DT_NEEDED)
              {
                /* Map in the needed object.  */
                struct link_map *dep;

                /* Recognize DSTs.  */
                name = expand_dst (l, strtab + d->d_un.d_val, 0);
                /* Store the name in the argument structure.  */
                args.name = name;

                bool malloced;
                int err = _dl_catch_error (&objname, &errstring, &malloced,
                                           openaux, &args);
                if (__builtin_expect (errstring != NULL, 0))
                  {
                    char *new_errstring = strdupa (errstring);
                    objname = strdupa (objname);
                    if (malloced)
                      free ((char *) errstring);
                    errstring = new_errstring;

                    if (err)
                      errno_reason = err;
                    else
                      errno_reason = -1;
                    goto out;
                  }
                else
                  dep = args.aux;

                if (! dep->l_reserved)
                  {
                    /* Allocate new entry.  */
                    struct list *newp;

                    newp = alloca (sizeof (struct list));

                    /* Append DEP to the list.  */
                    newp->map = dep;
                    newp->done = 0;
                    newp->next = NULL;
                    tail->next = newp;
                    tail = newp;
                    ++nlist;
                    /* Set the mark bit that says it's already in the list.  */
                    dep->l_reserved = 1;
                  }

                /* Remember this dependency.  */
                if (needed != NULL)
                  needed[nneeded++] = dep;
              }
            else if (d->d_tag == DT_AUXILIARY || d->d_tag == DT_FILTER)
              {
                struct list *newp;

                /* Recognize DSTs.  */
                name = expand_dst (l, strtab + d->d_un.d_val,
                                   d->d_tag == DT_AUXILIARY);
                /* Store the name in the argument structure.  */
                args.name = name;

                if (d->d_tag == DT_AUXILIARY)
                  {
                    /* Say that we are about to load an auxiliary library.  */
                    if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_LIBS,
                                          0))
                      _dl_debug_printf ("load auxiliary object=%s"
                                        " requested by file=%s\n",
                                        name,
                                        l->l_name[0]
                                        ? l->l_name : rtld_progname);

                    /* We must be prepared that the addressed shared
                       object is not available.  */
                    bool malloced;
                    (void) _dl_catch_error (&objname, &errstring, &malloced,
                                            openaux, &args);
                    if (__builtin_expect (errstring != NULL, 0))
                      {
                        /* We are not interested in the error message.  */
                        assert (errstring != NULL);
                        if (malloced)
                          free ((char *) errstring);

                        /* Simply ignore this error and continue the work.  */
                        continue;
                      }
                  }
                else
                  {
                    /* Say that we are about to load a filtered object.  */
                    if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_LIBS,
                                          0))
                      _dl_debug_printf ("load filtered object=%s"
                                        " requested by file=%s\n",
                                        name,
                                        l->l_name[0]
                                        ? l->l_name : rtld_progname);

                    /* For filter objects the dependency must be available.  */
                    bool malloced;
                    int err = _dl_catch_error (&objname, &errstring, &malloced,
                                               openaux, &args);
                    if (__builtin_expect (errstring != NULL, 0))
                      {
                        char *new_errstring = strdupa (errstring);
                        objname = strdupa (objname);
                        if (malloced)
                          free ((char *) errstring);
                        errstring = new_errstring;

                        if (err)
                          errno_reason = err;
                        else
                          errno_reason = -1;
                        goto out;
                      }
                  }

                /* The auxiliary object is actually available.
                   Incorporate the map in all the lists.  */

                /* Allocate new entry.  This always has to be done.  */
                newp = alloca (sizeof (struct list));

                /* We want to insert the new map before the current one,
                   but we have no back links.  So we copy the contents of
                   the current entry over.  Note that ORIG and NEWP now
                   have switched their meanings.  */
                memcpy (newp, orig, sizeof (*newp));

                /* Initialize new entry.  */
                orig->done = 0;
                orig->map = args.aux;

                /* Remember this dependency.  */
                if (needed != NULL)
                  needed[nneeded++] = args.aux;

                /* We must handle two situations here: the map is new,
                   so we must add it in all three lists.  If the map
                   is already known, we have two further possibilities:
                   - if the object is before the current map in the
                     search list, we do nothing.  It is already found
                     early
                   - if the object is after the current one, we must
                     move it just before the current map to make sure
                     the symbols are found early enough  */
                if (args.aux->l_reserved)
                  {
                    /* The object is already somewhere in the list.
                       Locate it first.  */
                    struct list *late;

                    /* This object is already in the search list we
                       are building.  Don't add a duplicate pointer.
                       Just added by _dl_map_object.  */
                    for (late = newp; late->next != NULL; late = late->next)
                      if (late->next->map == args.aux)
                        break;

                    if (late->next != NULL)
                      {
                        /* The object is somewhere behind the current
                           position in the search path.  We have to
                           move it to this earlier position.  */
                        orig->next = newp;

                        /* Now remove the later entry from the list
                           and adjust the tail pointer.  */
                        if (tail == late->next)
                          tail = late;
                        late->next = late->next->next;

                        /* We must move the object earlier in the chain.  */
                        if (args.aux->l_prev != NULL)
                          args.aux->l_prev->l_next = args.aux->l_next;
                        if (args.aux->l_next != NULL)
                          args.aux->l_next->l_prev = args.aux->l_prev;

                        args.aux->l_prev = newp->map->l_prev;
                        newp->map->l_prev = args.aux;
                        if (args.aux->l_prev != NULL)
                          args.aux->l_prev->l_next = args.aux;
                        args.aux->l_next = newp->map;
                      }
                    else
                      {
                        /* The object must be somewhere earlier in the
                           list.  Undo to the current list element what
                           we did above.  */
                        memcpy (orig, newp, sizeof (*newp));
                        continue;
                      }
                  }
                else
                  {
                    /* This is easy.  We just add the map right here.  */
                    orig->next = newp;
                    ++nlist;
                    /* Set the mark bit that says it's already in the list.  */
                    args.aux->l_reserved = 1;

                    /* The only problem is that in the doubly linked
                       list of all objects we don't have this new
                       object at the correct place.  Correct this here.  */
                    if (args.aux->l_prev)
                      args.aux->l_prev->l_next = args.aux->l_next;
                    if (args.aux->l_next)
                      args.aux->l_next->l_prev = args.aux->l_prev;

                    args.aux->l_prev = newp->map->l_prev;
                    newp->map->l_prev = args.aux;
                    if (args.aux->l_prev != NULL)
                      args.aux->l_prev->l_next = args.aux;
                    args.aux->l_next = newp->map;
                  }

                /* Move the tail pointer if necessary.  */
                if (orig == tail)
                  tail = newp;

                /* Move on the insert point.  */
                orig = newp;
              }
        }

      /* Terminate the list of dependencies and store the array address.  */
      if (needed != NULL)
        {
          needed[nneeded++] = NULL;

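          /* The lower half of the array holds L followed by its
             NULL-terminated dependency list; the upper half, starting at
             index NNEEDED + 1, receives a copy of the first NNEEDED
             entries.  */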
          struct link_map **l_initfini = (struct link_map **)
            malloc ((2 * nneeded + 1) * sizeof needed[0]);
          if (l_initfini == NULL)
            _dl_signal_error (ENOMEM, map->l_name, NULL,
                              N_("cannot allocate dependency list"));
          l_initfini[0] = l;
          memcpy (&l_initfini[1], needed, nneeded * sizeof needed[0]);
          memcpy (&l_initfini[nneeded + 1], l_initfini,
                  nneeded * sizeof needed[0]);
          atomic_write_barrier ();
          l->l_initfini = l_initfini;
          l->l_free_initfini = 1;
        }

      /* If we have no auxiliary objects just go on to the next map.  */
      if (runp->done)
        do
          runp = runp->next;
        while (runp != NULL && runp->done);
    }

 out:
  if (errno == 0 && errno_saved != 0)
    __set_errno (errno_saved);

  struct link_map **old_l_initfini = NULL;
  if (map->l_initfini != NULL && map->l_type == lt_loaded)
    {
      /* This object was previously loaded as a dependency and we have
         a separate l_initfini list.  We don't need it anymore.  */
      assert (map->l_searchlist.r_list == NULL);
      old_l_initfini = map->l_initfini;
    }

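  /* A single allocation provides both lists: the first nlist + 1 slots
     become the NULL-terminated init/fini array built below, and the
     remaining nlist slots (starting at index nlist + 1) hold the symbol
     search list.  */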
  /* Store the search list we built in the object.  It will be used for
     searches in the scope of this object.  */
  struct link_map **l_initfini =
    (struct link_map **) malloc ((2 * nlist + 1)
                                 * sizeof (struct link_map *));
  if (l_initfini == NULL)
    _dl_signal_error (ENOMEM, map->l_name, NULL,
                      N_("cannot allocate symbol search list"));

  map->l_searchlist.r_list = &l_initfini[nlist + 1];
  map->l_searchlist.r_nlist = nlist;

  for (nlist = 0, runp = known; runp; runp = runp->next)
    {
      if (__builtin_expect (trace_mode, 0) && runp->map->l_faked)
        /* This can happen when we trace the loading.  */
        --map->l_searchlist.r_nlist;
      else
        map->l_searchlist.r_list[nlist++] = runp->map;

      /* Now clear all the mark bits we set in the objects on the search list
         to avoid duplicates, so the next call starts fresh.  */
      runp->map->l_reserved = 0;
    }

  if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_PRELINK, 0) != 0
      && map == GL(dl_ns)[LM_ID_BASE]._ns_loaded)
    {
      /* If we are to compute conflicts, we have to build local scope
         for each library, not just the ultimate loader.  */
      for (i = 0; i < nlist; ++i)
        {
          struct link_map *l = map->l_searchlist.r_list[i];
          unsigned int j, cnt;

          /* The local scope has already been computed.  */
          if (l == map
              || (l->l_local_scope[0]
                  && l->l_local_scope[0]->r_nlist) != 0)
            continue;

          if (l->l_info[AUXTAG] || l->l_info[FILTERTAG])
            {
              /* As the current DT_AUXILIARY/DT_FILTER implementation needs
                 to be rewritten, there is no need to bother with prelinking
                 the old implementation.  */
              _dl_signal_error (EINVAL, l->l_name, NULL, N_("\
Filters not supported with LD_TRACE_PRELINKING"));
            }

          cnt = _dl_build_local_scope (l_initfini, l);
          assert (cnt <= nlist);
          for (j = 0; j < cnt; j++)
            {
              l_initfini[j]->l_reserved = 0;
              if (j && __builtin_expect (l_initfini[j]->l_info[DT_SYMBOLIC]
                                         != NULL, 0))
                l->l_symbolic_in_local_scope = true;
            }

          l->l_local_scope[0] =
            (struct r_scope_elem *) malloc (sizeof (struct r_scope_elem)
                                            + (cnt
                                               * sizeof (struct link_map *)));
          if (l->l_local_scope[0] == NULL)
            _dl_signal_error (ENOMEM, map->l_name, NULL,
                              N_("cannot allocate symbol search list"));
          l->l_local_scope[0]->r_nlist = cnt;
          l->l_local_scope[0]->r_list =
            (struct link_map **) (l->l_local_scope[0] + 1);
          memcpy (l->l_local_scope[0]->r_list, l_initfini,
                  cnt * sizeof (struct link_map *));
        }
    }

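  /* Objects recorded as relocation dependencies in l_reldeps that are
     now part of the search list are already covered by l_initfini, so
     they can be dropped from l_reldeps.  */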
  /* Maybe we can remove some relocation dependencies now.  */
  assert (map->l_searchlist.r_list[0] == map);
  struct link_map_reldeps *l_reldeps = NULL;
  if (map->l_reldeps != NULL)
    {
      for (i = 1; i < nlist; ++i)
        map->l_searchlist.r_list[i]->l_reserved = 1;

      struct link_map **list = &map->l_reldeps->list[0];
      for (i = 0; i < map->l_reldeps->act; ++i)
        if (list[i]->l_reserved)
          {
            /* Need to allocate new array of relocation dependencies.  */
            l_reldeps = malloc (sizeof (*l_reldeps)
                                + map->l_reldepsmax
                                  * sizeof (struct link_map *));
            if (l_reldeps == NULL)
              /* Bad luck, keep the reldeps duplicated between
                 map->l_reldeps->list and map->l_initfini lists.  */
              ;
            else
              {
                unsigned int j = i;
                memcpy (&l_reldeps->list[0], &list[0],
                        i * sizeof (struct link_map *));
                for (i = i + 1; i < map->l_reldeps->act; ++i)
                  if (!list[i]->l_reserved)
                    l_reldeps->list[j++] = list[i];
                l_reldeps->act = j;
              }
          }

      for (i = 1; i < nlist; ++i)
        map->l_searchlist.r_list[i]->l_reserved = 0;
    }

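  /* The sort below repeatedly moves an object back past the last object
     that lists it as a dependency.  The seen[] counters bound how often a
     slot is revisited so that dependency cycles cannot prevent the loop
     from terminating.  */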
  /* Sort the initializer list to take dependencies into account.  The binary
     itself will always be initialized last.  */
  memcpy (l_initfini, map->l_searchlist.r_list,
          nlist * sizeof (struct link_map *));
  if (__builtin_expect (nlist > 1, 1))
    {
      /* We can skip looking for the binary itself which is at the front
         of the search list.  */
      i = 1;
      uint16_t seen[nlist];
      memset (seen, 0, nlist * sizeof (seen[0]));
      while (1)
        {
          /* Keep track of which object we looked at this round.  */
          ++seen[i];
          struct link_map *thisp = l_initfini[i];

          /* Find the last object in the list for which the current one is
             a dependency and move the current object behind the object
             with the dependency.  */
          unsigned int k = nlist - 1;
          while (k > i)
            {
              struct link_map **runp = l_initfini[k]->l_initfini;
              if (runp != NULL)
                /* Look through the dependencies of the object.  */
                while (*runp != NULL)
                  if (__builtin_expect (*runp++ == thisp, 0))
                    {
                      /* Move the current object to the back past the last
                         object with it as the dependency.  */
                      memmove (&l_initfini[i], &l_initfini[i + 1],
                               (k - i) * sizeof (l_initfini[0]));
                      l_initfini[k] = thisp;

                      if (seen[i + 1] > nlist - i)
                        {
                          ++i;
                          goto next_clear;
                        }

                      uint16_t this_seen = seen[i];
                      memmove (&seen[i], &seen[i + 1],
                               (k - i) * sizeof (seen[0]));
                      seen[k] = this_seen;

                      goto next;
                    }

              --k;
            }

          if (++i == nlist)
            break;
        next_clear:
          memset (&seen[i], 0, (nlist - i) * sizeof (seen[0]));

        next:;
        }
    }

  /* Terminate the list of dependencies.  */
  l_initfini[nlist] = NULL;
  atomic_write_barrier ();
  map->l_initfini = l_initfini;
  map->l_free_initfini = 1;
  if (l_reldeps != NULL)
    {
      atomic_write_barrier ();
      void *old_l_reldeps = map->l_reldeps;
      map->l_reldeps = l_reldeps;
      _dl_scope_free (old_l_reldeps);
    }
  if (old_l_initfini != NULL)
    _dl_scope_free (old_l_initfini);

  if (errno_reason)
    _dl_signal_error (errno_reason == -1 ? 0 : errno_reason, objname,
                      NULL, errstring);
}