/* Profiling of shared libraries.
   Copyright (C) 1997 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@cygnus.com>, 1997.
   Based on the BSD mcount implementation.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Library General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Library General Public License for more details.

   You should have received a copy of the GNU Library General Public
   License along with the GNU C Library; see the file COPYING.LIB.  If not,
   write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
   Boston, MA 02111-1307, USA.  */

#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include <link.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/gmon.h>
#include <sys/gmon_out.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <atomicity.h>

/* The LD_PROFILE feature has to be implemented differently from the
   normal profiling done with the gmon/ functions.  The problem is that
   an arbitrary number of processes can run with profiling enabled at the
   same time, and all of them write their results to the same file.  To
   support this one could either implement a complicated scheme to merge
   the contents of two profiling runs, or extend the file format to allow
   more than one data set.  With the second solution the file could grow
   beyond any limit, and with both solutions the concurrency of writing
   the results remains a hard problem.

   A much simpler method is to use mmap to map the same file into all
   participating processes and to modify the data in the mmap'ed area,
   which automatically updates the file on disk as well.  Using the
   MAP_SHARED option of mmap(2) this can be done without much trouble
   for any number of processes.

   This approach is very different from normal profiling.  We have to
   keep the profiling data in memory in exactly the form in which it is
   written to disk.  The normal format used by gprof is not usable for
   this: it is optimized for size and writes the tags as single bytes,
   which leaves the following 32/64 bit values unaligned.

   Therefore we use a new format.  It looks like this:

                  0  1  2  3    <- byte in 32 bit word
     0000         g  m  o  n
     0004         *version*     <- GMON_SHOBJ_VERSION
     0008         00 00 00 00
     000c         00 00 00 00
     0010         00 00 00 00

     0014         *tag*         <- GMON_TAG_TIME_HIST
     0018         ?? ?? ?? ??
                  ?? ?? ?? ??   <- 32/64 bit LowPC
     0018+A       ?? ?? ?? ??
                  ?? ?? ?? ??   <- 32/64 bit HighPC
     0018+2*A     *histsize*
     001c+2*A     *profrate*
     0020+2*A     s  e  c  o
     0024+2*A     n  d  s  \0
     0028+2*A     \0 \0 \0 \0
     002c+2*A     \0 \0 \0
     002f+2*A     s

     0030+2*A     ?? ?? ?? ??   <- Count data
     ...          ...
     0030+2*A+K   ?? ?? ?? ??

     0030+2*A+K   *tag*         <- GMON_TAG_CG_ARC
     0034+2*A+K   *lastused*
     0038+2*A+K   ?? ?? ?? ??
                  ?? ?? ?? ??   <- FromPC#1
     0038+3*A+K   ?? ?? ?? ??
                  ?? ?? ?? ??   <- ToPC#1
     0038+4*A+K   ?? ?? ?? ??   <- Count#1
     ...          ...           ...
     0038+(2*(CN-1)+2)*A+(CN-1)*4+K   ?? ?? ?? ??
                  ?? ?? ?? ??   <- FromPC#CN
     0038+(2*(CN-1)+3)*A+(CN-1)*4+K   ?? ?? ?? ??
                  ?? ?? ?? ??   <- ToPC#CN
     0038+(2*CN+2)*A+(CN-1)*4+K       ?? ?? ?? ??   <- Count#CN

   We put (for now?) no basic block information in the file since this
   would introduce race conditions among all the processes that want to
   write it.

   `K' is the size of the count data in bytes, computed as

                textsize / HISTFRACTION

   `CN' in the above table is the number of call graph arcs.  Normally,
   the table is sparse and the profiling code writes out only those
   entries which are really used in the program run.  But since we must
   not extend this table (the profiling file) we keep them all here.  So
   CN can be computed in advance as

                MINARCS <= textsize*(ARCDENSITY/100) <= MAXARCS

   (A small illustrative computation of the resulting file size can be
   found after the `here_cg_arc_record' definition below.)

   The remaining question is how to build the data structures we can
   work with from this data.  We need the from set and must associate
   the froms with all the associated tos.  We will do this by
   constructing these data structures at program start.  To do this we
   simply visit all entries in the call graph table and add each one to
   the appropriate list.  */

extern char *_strerror_internal __P ((int, char *buf, size_t));

extern int __profile_frequency __P ((void));

/* We define a special type to address the elements of the arc table.
   This is basically the `gmon_cg_arc_record' format but it includes
   the room for the tag and it uses real types.  */
struct here_cg_arc_record
  {
    uintptr_t from_pc;
    uintptr_t self_pc;
    uint32_t count;
  } __attribute__ ((packed));

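/* The following helper is only an illustration of the file layout described
   above; it is not part of the original implementation and nothing in this
   file calls it.  With K = TEXT_BYTES / HISTFRACTION and CN clamped between
   MINARCS and MAXARCS it computes the total file size, which is the value
   _dl_start_profile below stores in EXPECTED_SIZE.  */
static size_t __attribute__ ((unused))
_dl_profile_file_size_sketch (size_t text_bytes)
{
  /* K: textsize / HISTFRACTION bytes of 16 bit counters.  */
  size_t count_bytes = text_bytes / HISTFRACTION;

  /* CN: number of preallocated call graph arc records.  */
  size_t arc_count = text_bytes * ARCDENSITY / 100;
  if (arc_count < MINARCS)
    arc_count = MINARCS;
  if (arc_count > MAXARCS)
    arc_count = MAXARCS;

  return (sizeof (struct gmon_hdr)              /* "gmon", version, spare.  */
          + 4 + sizeof (struct gmon_hist_hdr)   /* Histogram tag + header.  */
          + count_bytes                         /* Count data.  */
          + 4 + 4                               /* Arc tag + *lastused*.  */
          + arc_count * sizeof (struct here_cg_arc_record));
}
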
static struct here_cg_arc_record *data;

/* This is the number of entries which have been incorporated into the
   tos set.  */
static uint32_t narcs;
/* This is a pointer to the object representing the number of entries
   currently in the mmap'ed file.  It does not have to be the same as
   NARCS at any point in time.  If the two are equal, all entries from
   the file are in our lists.  */
static uint32_t *narcsp;

/* Description of the currently profiled object.  */
static long int state = GMON_PROF_OFF;

static volatile uint16_t *kcount;
static size_t kcountsize;

struct here_fromstruct
  {
    struct here_cg_arc_record volatile *here;
    uint16_t link;
  };

static uint16_t *tos;
static size_t tossize;

static struct here_fromstruct *froms;
static size_t fromssize;
static size_t fromlimit;
static size_t fromidx;

static uintptr_t lowpc;
static uintptr_t highpc;
static size_t textsize;
static unsigned int hashfraction;
static unsigned int log_hashfraction;

/* This is the information about the mmaped memory.  */
static struct gmon_hdr *addr;
static off_t expected_size;

/* See profil(2) where this is described.  */
static int s_scale;
#define SCALE_1_TO_1    0x10000L


/* Set up profiling data to profile object described by MAP.  The output
   file is found (or created) in OUTPUT_DIR.  */
void
_dl_start_profile (struct link_map *map, const char *output_dir)
{
  char *filename;
  int fd;
  struct stat st;
  const ElfW(Phdr) *ph;
  ElfW(Addr) mapstart = ~((ElfW(Addr)) 0);
  ElfW(Addr) mapend = 0;
  struct gmon_hdr gmon_hdr;
  struct gmon_hist_hdr hist_hdr;
  char *hist, *cp;
  size_t idx;

  /* Compute the size of the sections which contain program code.  */
  for (ph = map->l_phdr; ph < &map->l_phdr[map->l_phnum]; ++ph)
    if (ph->p_type == PT_LOAD && (ph->p_flags & PF_X))
      {
        ElfW(Addr) start = (ph->p_vaddr & ~(_dl_pagesize - 1));
        ElfW(Addr) end = ((ph->p_vaddr + ph->p_memsz + _dl_pagesize - 1)
                          & ~(_dl_pagesize - 1));

        if (start < mapstart)
          mapstart = start;
        if (end > mapend)
          mapend = end;
      }

  /* Now we can compute the size of the profiling data.  This is done
     with the same formulas as in `monstartup' (see gmon.c).  */
  state = GMON_PROF_OFF;
  lowpc = ROUNDDOWN (mapstart + map->l_addr,
                     HISTFRACTION * sizeof (HISTCOUNTER));
  highpc = ROUNDUP (mapend + map->l_addr,
                    HISTFRACTION * sizeof (HISTCOUNTER));
  textsize = highpc - lowpc;
  kcountsize = textsize / HISTFRACTION;
  hashfraction = HASHFRACTION;
  if ((HASHFRACTION & (HASHFRACTION - 1)) == 0)
    /* If HASHFRACTION is a power of two, mcount can use shifting
       instead of integer division.  Precompute the shift amount.  */
    log_hashfraction = __builtin_ffs (hashfraction * sizeof (*froms)) - 1;
  else
    log_hashfraction = -1;
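  /* As a concrete illustration of the shift optimization above (the actual
     numbers depend on the architecture's <sys/gmon.h>): assume HASHFRACTION
     is 2 and a froms entry occupies 8 bytes.  Then
     hashfraction * sizeof (*froms) == 16, __builtin_ffs (16) - 1 == 4, and
     `selfpc >> 4' in _dl_mcount below yields the same result as the
     division `selfpc / 16'.  */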
  tossize = textsize / HASHFRACTION;
  fromlimit = textsize * ARCDENSITY / 100;
  if (fromlimit < MINARCS)
    fromlimit = MINARCS;
  if (fromlimit > MAXARCS)
    fromlimit = MAXARCS;
  fromssize = fromlimit * sizeof (struct here_fromstruct);

  expected_size = (sizeof (struct gmon_hdr)
                   + 4 + sizeof (struct gmon_hist_hdr) + kcountsize
                   + 4 + 4 + fromlimit * sizeof (struct here_cg_arc_record));

  /* Create the gmon_hdr we expect or write.  */
  memset (&gmon_hdr, '\0', sizeof (struct gmon_hdr));
  memcpy (&gmon_hdr.cookie[0], GMON_MAGIC, sizeof (gmon_hdr.cookie));
  *(int32_t *) gmon_hdr.version = GMON_SHOBJ_VERSION;

  /* Create the hist_hdr we expect or write.  */
  *(char **) hist_hdr.low_pc = (char *) mapstart;
  *(char **) hist_hdr.high_pc = (char *) mapend;
  *(int32_t *) hist_hdr.hist_size = kcountsize / sizeof (HISTCOUNTER);
  *(int32_t *) hist_hdr.prof_rate = __profile_frequency ();
  strncpy (hist_hdr.dimen, "seconds", sizeof (hist_hdr.dimen));
  hist_hdr.dimen_abbrev = 's';

  /* First determine the output name.  We write in the directory
     OUTPUT_DIR and the name is composed from the shared object's
     soname (or the file name) and the suffix ".profile".  */
  filename = (char *) alloca (strlen (output_dir) + 1 + strlen (_dl_profile)
                              + sizeof ".profile");
  cp = __stpcpy (filename, output_dir);
  *cp++ = '/';
  __stpcpy (__stpcpy (cp, _dl_profile), ".profile");

  fd = __open (filename, O_RDWR | O_CREAT, 0666);
  if (fd == -1)
    /* We cannot write the profiling data so don't do anything.  */
    return;

  if (fstat (fd, &st) < 0 || !S_ISREG (st.st_mode))
    {
      /* Not stat'able or not a regular file => don't use it.  */
      __close (fd);
      return;
    }

  /* Test the size.  If it does not match the size we computed above
     from MAP we don't use the file and warn the user.  */
  if (st.st_size == 0)
    {
      /* We have to create the file.  */
      char buf[_dl_pagesize];

      memset (buf, '\0', _dl_pagesize);

      if (__lseek (fd, expected_size & ~(_dl_pagesize - 1), SEEK_SET) == -1)
        {
          char buf[400];
          int errnum;
        cannot_create:
          errnum = errno;
          __close (fd);
          _dl_sysdep_error (filename, ": cannot create file: ",
                            _strerror_internal (errnum, buf, sizeof buf),
                            "\n", NULL);
          return;
        }

      if (TEMP_FAILURE_RETRY (__write (fd, buf, (expected_size
                                                 & (_dl_pagesize - 1)))) < 0)
        goto cannot_create;
    }
  else if (st.st_size != expected_size)
    {
      __close (fd);
    wrong_format:

      if (addr != NULL)
        __munmap ((void *) addr, expected_size);

      _dl_sysdep_error (filename,
                        ": file is not a correct profile data file for `",
                        _dl_profile, "'\n", NULL);
      return;
    }

  addr = (struct gmon_hdr *) __mmap (NULL, expected_size, PROT_READ|PROT_WRITE,
                                     MAP_SHARED|MAP_FILE, fd, 0);
  if (addr == (struct gmon_hdr *) MAP_FAILED)
    {
      char buf[400];
      int errnum = errno;
      __close (fd);
      _dl_sysdep_error (filename, ": cannot map file: ",
                        _strerror_internal (errnum, buf, sizeof buf),
                        "\n", NULL);
      return;
    }

  /* We don't need the file descriptor anymore.  */
  __close (fd);

  /* Pointer to data after the header.  */
  hist = (char *) (addr + 1);
  kcount = (uint16_t *) ((char *) hist + sizeof (uint32_t)
                         + sizeof (struct gmon_hist_hdr));

  /* Compute pointer to array of the arc information.  */
  data = (struct here_cg_arc_record *) ((char *) kcount + kcountsize
                                        + 2 * sizeof (uint32_t));
  narcsp = (uint32_t *) ((char *) kcount + kcountsize + sizeof (uint32_t));

  if (st.st_size == 0)
    {
      /* Create the signature.  */
      memcpy (addr, &gmon_hdr, sizeof (struct gmon_hdr));

      *(uint32_t *) hist = GMON_TAG_TIME_HIST;
      memcpy (hist + sizeof (uint32_t), &hist_hdr,
              sizeof (struct gmon_hist_hdr));

      *(uint32_t *) (hist + sizeof (uint32_t) + sizeof (struct gmon_hist_hdr)
                     + kcountsize) = GMON_TAG_CG_ARC;
    }
  else
    {
      /* Test the signature in the file.  */
      if (memcmp (addr, &gmon_hdr, sizeof (struct gmon_hdr)) != 0
          || *(uint32_t *) hist != GMON_TAG_TIME_HIST
          || memcmp (hist + sizeof (uint32_t), &hist_hdr,
                     sizeof (struct gmon_hist_hdr)) != 0
          || (*(uint32_t *) (hist + sizeof (uint32_t)
                             + sizeof (struct gmon_hist_hdr) + kcountsize)
              != GMON_TAG_CG_ARC))
        goto wrong_format;
    }

  /* Allocate memory for the froms data and the pointer to the tos records.  */
  tos = (uint16_t *) calloc (tossize + fromssize, 1);
  if (tos == NULL)
    {
      __munmap ((void *) addr, expected_size);
      _dl_sysdep_fatal ("Out of memory while initializing profiler", NULL);
      /* NOTREACHED */
    }

  froms = (struct here_fromstruct *) ((char *) tos + tossize);
  fromidx = 0;

  /* Now we have to process all the arc count entries.  BTW: it is
     not critical whether the *NARCSP value changes in the meantime.
     Before we enter a new entry into the tos set we check that
     everything is available in TOS.  This happens in _dl_mcount.

     Loading the entries in reverse order should help to get the most
     frequently used entries to the front of the lists.  */
  for (idx = narcs = *narcsp; idx > 0; )
    {
      size_t to_index;
      size_t newfromidx;
      --idx;
      to_index = ((data[idx].self_pc - lowpc)
                  / (hashfraction * sizeof (*tos)));
      newfromidx = fromidx++;
      froms[newfromidx].here = &data[idx];
      froms[newfromidx].link = tos[to_index];
      tos[to_index] = newfromidx;
    }
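  /* After this loop the TOS/FROMS pair forms a simple hash table with
     chaining (purely explanatory note): tos[TO_INDEX] holds the index of
     the most recently added froms entry whose callee address hashes to
     TO_INDEX, each froms entry points via HERE at its arc record in the
     mmap'ed file, and LINK holds the index of the next entry in the same
     chain (a value of 0 ends the chain).  */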

  /* Set up counting data.  */
  if (kcountsize < highpc - lowpc)
    s_scale = ((double) kcountsize / (highpc - lowpc)) * SCALE_1_TO_1;
  else
    s_scale = SCALE_1_TO_1;
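  /* For example (illustrative numbers only): with HISTFRACTION == 2 we have
     kcountsize == textsize / 2, so the first branch applies and s_scale
     becomes 0x8000, i.e. half of SCALE_1_TO_1; profil then spreads the
     samples for TEXTSIZE bytes of code over TEXTSIZE / 2 bytes of 16 bit
     counters.  */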

  /* Start the profiler.  */
  __profil ((void *) kcount, kcountsize, lowpc, s_scale);

  /* Turn on profiling.  */
  state = GMON_PROF_ON;
}


void
_dl_mcount (ElfW(Addr) frompc, ElfW(Addr) selfpc)
{
  uint16_t *topcindex;
  size_t i, fromindex;
  struct here_fromstruct *fromp;

  if (! compare_and_swap (&state, GMON_PROF_ON, GMON_PROF_BUSY))
    return;

  /* Compute relative addresses.  The shared object can be loaded at
     any address.  The value of FROMPC could be anything.  We cannot
     restrict it in any way, so we just set it to a fixed value (0) in
     case it is outside the allowed range.  These calls show up as
     calls from <external> in the gprof output.  */
  frompc -= lowpc;
  if (frompc >= textsize)
    frompc = 0;
  selfpc -= lowpc;
  if (selfpc >= textsize)
    goto done;

  /* Getting here we now have to find out whether the location was
     already used.  If yes we are lucky and only have to increment a
     counter (this also has to be done atomically).  If the entry is
     new things get complicated...  */

  /* Avoid the integer division if possible.  */
  if ((HASHFRACTION & (HASHFRACTION - 1)) == 0)
    i = selfpc >> log_hashfraction;
  else
    i = selfpc / (hashfraction * sizeof (*tos));

  topcindex = &tos[i];
  fromindex = *topcindex;

  if (fromindex == 0)
    goto check_new_or_add;

  fromp = &froms[fromindex];

  /* We have to look through the chain of arcs to see whether there is
     already an entry for our arc.  */
  while (fromp->here->from_pc != frompc)
    {
      if (fromp->link != 0)
        do
          fromp = &froms[fromp->link];
        while (fromp->link != 0 && fromp->here->from_pc != frompc);

      if (fromp->link == 0)
        {
          topcindex = &fromp->link;

        check_new_or_add:
          /* Our entry is not among the entries we read so far from the
             data file.  Now see whether we have to update the list.  */
          while (narcs != *narcsp)
            {
              size_t to_index;
              size_t newfromidx;
              to_index = ((data[narcs].self_pc - lowpc)
                          / (hashfraction * sizeof (*tos)));
              newfromidx = fromidx++;
              froms[newfromidx].here = &data[narcs];
              froms[newfromidx].link = tos[to_index];
              tos[to_index] = newfromidx;
              ++narcs;
            }

          /* If we still have no entry stop searching and insert.  */
          if (*topcindex == 0)
            {
              fromidx = 1 + exchange_and_add (narcsp, 1);
              ++narcs;

              /* In rare cases it could happen that all entries in FROMS
                 are occupied.  So we cannot count this anymore.  */
              if (fromidx >= fromlimit)
                goto done;

              *topcindex = fromindex;
              fromp = &froms[fromindex];

              fromp = &froms[fromp->link];

              fromp->link = 0;
              fromp->here->from_pc = frompc;
              fromp->here->count = 0;

              break;
            }

          fromp = &froms[*topcindex];
        }
      else
        /* Found it.  */
        break;
    }

  /* Increment the counter.  */
  atomic_add (&fromp->here->count, 1);

 done:
  state = GMON_PROF_ON;
}
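
/* Usage note (not part of the original file): this code is driven by the
   dynamic linker when the LD_PROFILE environment variable names a shared
   object.  With something like

       LD_PROFILE=libfoo.so.1 LD_PROFILE_OUTPUT=/tmp/prof ./app

   the profiling data ends up in /tmp/prof/libfoo.so.1.profile, following
   the name construction in _dl_start_profile above, and can then be
   examined with a tool that understands this format, such as sprof
   (e.g. `sprof libfoo.so.1 /tmp/prof/libfoo.so.1.profile').  The names and
   paths here are only examples.  */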