/* Cache memory handling.
   Copyright (C) 2004-2006, 2008, 2009, 2012 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2004.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published
   by the Free Software Foundation; version 2 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software Foundation,
   Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.  */

#include <assert.h>
#include <errno.h>
#include <error.h>
#include <fcntl.h>
#include <inttypes.h>
#include <libintl.h>
#include <limits.h>
#include <obstack.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/param.h>

#include "dbg_log.h"
#include "nscd.h"


/* qsort comparison function: order hash entries by the address of
   the entry itself in the data area.  */
static int
sort_he (const void *p1, const void *p2)
{
  struct hashentry *h1 = *(struct hashentry **) p1;
  struct hashentry *h2 = *(struct hashentry **) p2;

  if (h1 < h2)
    return -1;
  if (h1 > h2)
    return 1;
  return 0;
}


/* qsort comparison function: order hash entries by the offset of the
   data (packet) they reference.  */
static int
sort_he_data (const void *p1, const void *p2)
{
  struct hashentry *h1 = *(struct hashentry **) p1;
  struct hashentry *h2 = *(struct hashentry **) p2;

  if (h1->packet < h2->packet)
    return -1;
  if (h1->packet > h2->packet)
    return 1;
  return 0;
}


/* Basic definitions for the bitmap implementation.  Only BITMAP_T
   needs to be changed to choose a different word size.  */
#define BITMAP_T uint8_t
#define BITS (CHAR_BIT * sizeof (BITMAP_T))
#define ALLBITS ((((BITMAP_T) 1) << BITS) - 1)
#define HIGHBIT (((BITMAP_T) 1) << (BITS - 1))


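/* With the uint8_t word size above, BITS is 8, ALLBITS is 0xff, and
   HIGHBIT is 0x80.  (The shift in ALLBITS does not overflow because
   integer promotion widens the operand to int before shifting.)

   Worked example for markrange below, assuming purely for
   illustration that BLOCK_ALIGN is 8: marking start = 16, len = 20
   covers blocks 2, 3, and 4 (len rounds up to three blocks), so it
   sets bits 2-4 of mark[0], i.e. mark[0] |= 0x1c.  */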
static void
markrange (BITMAP_T *mark, ref_t start, size_t len)
{
  /* Adjust parameters for block alignment.  */
  assert ((start & BLOCK_ALIGN_M1) == 0);
  start /= BLOCK_ALIGN;
  len = (len + BLOCK_ALIGN_M1) / BLOCK_ALIGN;

  size_t elem = start / BITS;

  if (start % BITS != 0)
    {
      if (start % BITS + len <= BITS)
        {
          /* All fits in the partial byte.  */
          mark[elem] |= (ALLBITS >> (BITS - len)) << (start % BITS);
          return;
        }

      mark[elem++] |= ALLBITS << (start % BITS);
      len -= BITS - (start % BITS);
    }

  while (len >= BITS)
    {
      mark[elem++] = ALLBITS;
      len -= BITS;
    }

  if (len > 0)
    mark[elem] |= ALLBITS >> (BITS - len);
}


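/* Garbage collect and compact the data area of DB.  As implemented
   below: mark every BLOCK_ALIGN-sized block reachable from the hash
   table in a bitmap, collect pointers to all hash entries (sorted
   once by entry address and once by data address), then slide each
   run of live blocks down over the gaps, fixing up every stored
   offset by the displacement before the memmove operations are
   finally performed.  */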
void
gc (struct database_dyn *db)
{
  /* We need write access.  */
  pthread_rwlock_wrlock (&db->lock);

  /* And the memory handling lock.  */
  pthread_mutex_lock (&db->memlock);

  /* We need an array representing the data area.  All memory
     allocation is BLOCK_ALIGN aligned so this is the level at which
     we have to look at the memory.  We use a mark and sweep algorithm
     where the marks are placed in this array.  */
  assert (db->head->first_free % BLOCK_ALIGN == 0);

  BITMAP_T *mark;
  bool mark_use_malloc;
  /* Account for the stack space the caller (prune_cache) might
     already be using for its dynamically sized array.  If that array
     was too large the caller malloc'ed it instead, so it occupies no
     stack space and is not counted.  */
  size_t stack_used = sizeof (bool) * db->head->module;
  if (__builtin_expect (stack_used > MAX_STACK_USE, 0))
    stack_used = 0;
  size_t nmark = (db->head->first_free / BLOCK_ALIGN + BITS - 1) / BITS;
  size_t memory_needed = nmark * sizeof (BITMAP_T);
  if (__builtin_expect (stack_used + memory_needed <= MAX_STACK_USE, 1))
    {
      mark = (BITMAP_T *) alloca_account (memory_needed, stack_used);
      mark_use_malloc = false;
      memset (mark, '\0', memory_needed);
    }
  else
    {
      mark = (BITMAP_T *) xcalloc (1, memory_needed);
      mark_use_malloc = true;
    }
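
  /* Scratch memory strategy used throughout this function: as long
     as the running total in STACK_USED stays below MAX_STACK_USE,
     allocations come from the stack via alloca_account, which also
     adds the size to STACK_USED; anything larger falls back to the
     heap and is released at the end.  */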

  /* Create an array that can hold pointers to all the hash entries,
     in two copies: HE, later sorted by entry address, and HE_DATA,
     later sorted by data address.  */
  memory_needed = 2 * db->head->nentries * sizeof (struct hashentry *);
  struct hashentry **he;
  struct hashentry **he_data;
  bool he_use_malloc;
  if (__builtin_expect (stack_used + memory_needed <= MAX_STACK_USE, 1))
    {
      he = alloca_account (memory_needed, stack_used);
      he_use_malloc = false;
    }
  else
    {
      he = xmalloc (memory_needed);
      he_use_malloc = true;
    }
  he_data = &he[db->head->nentries];

  size_t cnt = 0;
  for (size_t idx = 0; idx < db->head->module; ++idx)
    {
      ref_t *prevp = &db->head->array[idx];
      ref_t run = *prevp;

      while (run != ENDREF)
        {
          assert (cnt < db->head->nentries);
          he[cnt] = (struct hashentry *) (db->data + run);

          he[cnt]->prevp = prevp;
          prevp = &he[cnt]->next;

          /* This is the hash entry itself.  */
          markrange (mark, run, sizeof (struct hashentry));

          /* Add the information for the data itself.  We do this
             only for the one special entry marked with FIRST.  */
          if (he[cnt]->first)
            {
              struct datahead *dh
                = (struct datahead *) (db->data + he[cnt]->packet);
              markrange (mark, he[cnt]->packet, dh->allocsize);
            }

          run = he[cnt]->next;

          ++cnt;
        }
    }
  assert (cnt == db->head->nentries);

  /* Sort the entries by the addresses of the referenced data.  All
     the entries pointing to the same DATAHEAD object will have the
     same key.  Stability of the sorting is unimportant.  */
  memcpy (he_data, he, cnt * sizeof (struct hashentry *));
  qsort (he_data, cnt, sizeof (struct hashentry *), sort_he_data);

  /* Sort the entries by their address.  */
  qsort (he, cnt, sizeof (struct hashentry *), sort_he);
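
  /* The sweep below walks the data area in increasing address order,
     in which hash entries and data blocks are interleaved.  The two
     sorted views let it advance through HE (by entry address) and
     HE_DATA (by data address) in lockstep with that walk, so every
     stored reference is fixed up exactly once.  */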

#define obstack_chunk_alloc xmalloc
#define obstack_chunk_free free
  struct obstack ob;
  obstack_init (&ob);

  /* Determine the highest used address.  */
  size_t high = nmark;
  while (high > 0 && mark[high - 1] == 0)
    --high;

  /* No memory used.  */
  if (high == 0)
    {
      db->head->first_free = 0;
      goto out;
    }

  /* Determine the highest offset.  */
  BITMAP_T mask = HIGHBIT;
  ref_t highref = (high * BITS - 1) * BLOCK_ALIGN;
  while ((mark[high - 1] & mask) == 0)
    {
      mask >>= 1;
      highref -= BLOCK_ALIGN;
    }
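  /* This loop leaves MASK at the bit position of the highest marked
     block in the last used word; the "no gap" test below relies on
     that value.  */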

  /* Now we can iterate over the MARK array and find bits which are not
     set.  These represent memory which can be recovered.  */
  size_t byte = 0;
  /* Find the first gap.  */
  while (byte < high && mark[byte] == ALLBITS)
    ++byte;

  if (byte == high
      || (byte == high - 1 && (mark[byte] & ~(mask | (mask - 1))) == 0))
    /* No gap.  */
    goto out;

  mask = 1;
  cnt = 0;
  while ((mark[byte] & mask) != 0)
    {
      ++cnt;
      mask <<= 1;
    }
  ref_t off_free = (byte * BITS + cnt) * BLOCK_ALIGN;
  assert (off_free <= db->head->first_free);

  struct hashentry **next_hash = he;
  struct hashentry **next_data = he_data;

  /* Skip over the hash entries in the first block which does not get
     moved.  */
  while (next_hash < &he[db->head->nentries]
         && *next_hash < (struct hashentry *) (db->data + off_free))
    ++next_hash;

  while (next_data < &he_data[db->head->nentries]
         && (*next_data)->packet < off_free)
    ++next_data;


  /* Now we start modifying the data.  Make sure all readers of the
     data are aware of this and temporarily don't use the data.  */
  ++db->head->gc_cycle;
  assert ((db->head->gc_cycle & 1) == 1);
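  /* An odd GC_CYCLE value signals "garbage collection in progress"
     to readers of the shared memory; the nscd client code is expected
     to avoid the mapping, or retry a lookup, when it observes an odd
     or changed counter.  */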


  /* We do not perform the move operations right away.  First all
     references are fixed up while the data is still in place; the
     moves themselves are collected in a list and carried out at the
     end.  */
  struct moveinfo
  {
    void *from;
    void *to;
    size_t size;
    struct moveinfo *next;
  } *moves = NULL;
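  /* MOVES always points at the tail of a circular singly linked list,
     so moves->next is the head and a new element can be appended in
     constant time (see the append code below).  */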

  while (byte < high)
    {
      /* Search for the next filled block.  BYTE is the index of the
         entry in MARK, MASK is the bit, and CNT is the bit number.
         OFF_ALLOC is the corresponding offset.  */
      if ((mark[byte] & ~(mask - 1)) == 0)
        {
          /* No other bit set in the same element of MARK.  Search in the
             following memory.  */
          do
            ++byte;
          while (byte < high && mark[byte] == 0);

          if (byte == high)
            /* That was it.  */
            break;

          mask = 1;
          cnt = 0;
        }
      /* Find the exact bit.  */
      while ((mark[byte] & mask) == 0)
        {
          ++cnt;
          mask <<= 1;
        }

      ref_t off_alloc = (byte * BITS + cnt) * BLOCK_ALIGN;
      assert (off_alloc <= db->head->first_free);

      /* Find the end of the used area.  */
      if ((mark[byte] & ~(mask - 1)) == (BITMAP_T) ~(mask - 1))
        {
          /* All other bits set.  Search the next bytes in MARK.  */
          do
            ++byte;
          while (byte < high && mark[byte] == ALLBITS);

          mask = 1;
          cnt = 0;
        }
      if (byte < high)
        {
          /* Find the exact bit.  */
          while ((mark[byte] & mask) != 0)
            {
              ++cnt;
              mask <<= 1;
            }
        }

      ref_t off_allocend = (byte * BITS + cnt) * BLOCK_ALIGN;
      assert (off_allocend <= db->head->first_free);
      /* Now we know that we can copy the area from OFF_ALLOC to
         OFF_ALLOCEND (not included) to the memory starting at
         OFF_FREE.  First fix up all the entries for the
         displacement.  */
      ref_t disp = off_alloc - off_free;

      struct moveinfo *new_move;
      if (__builtin_expect (stack_used + sizeof (*new_move) <= MAX_STACK_USE,
                            1))
        new_move = alloca_account (sizeof (*new_move), stack_used);
      else
        new_move = obstack_alloc (&ob, sizeof (*new_move));
      new_move->from = db->data + off_alloc;
      new_move->to = db->data + off_free;
      new_move->size = off_allocend - off_alloc;
      /* Keep the list circular so we are always able to append at the
         end.  */
      if (moves == NULL)
        moves = new_move->next = new_move;
      else
        {
          new_move->next = moves->next;
          moves = moves->next = new_move;
        }

      /* The following loop will prepare to move this much data.  */
      off_free += off_allocend - off_alloc;

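      /* Hash entries and data blocks are interleaved in the area
         being moved; NEXT_HASH and NEXT_DATA advance through the two
         sorted arrays in step with OFF_ALLOC so each object is
         matched to the reference(s) that must be adjusted.  */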
      while (off_alloc < off_allocend)
        {
          /* Determine whether the next entry is for a hash entry or
             the data.  */
          if ((struct hashentry *) (db->data + off_alloc) == *next_hash)
            {
              /* Just correct the forward reference.  */
              *(*next_hash++)->prevp -= disp;

              off_alloc += ((sizeof (struct hashentry) + BLOCK_ALIGN_M1)
                            & ~BLOCK_ALIGN_M1);
            }
          else
            {
              assert (next_data < &he_data[db->head->nentries]);
              assert ((*next_data)->packet == off_alloc);

              struct datahead *dh = (struct datahead *) (db->data + off_alloc);
              do
                {
                  assert ((*next_data)->key >= (*next_data)->packet);
                  assert ((*next_data)->key + (*next_data)->len
                          <= (*next_data)->packet + dh->allocsize);

                  (*next_data)->packet -= disp;
                  (*next_data)->key -= disp;
                  ++next_data;
                }
              while (next_data < &he_data[db->head->nentries]
                     && (*next_data)->packet == off_alloc);

              off_alloc += (dh->allocsize + BLOCK_ALIGN_M1) & ~BLOCK_ALIGN_M1;
            }
        }
      assert (off_alloc == off_allocend);

      assert (off_alloc <= db->head->first_free);
      if (off_alloc == db->head->first_free)
        /* We are done, that was the last block.  */
        break;
    }
  assert (next_hash == &he[db->head->nentries]);
  assert (next_data == &he_data[db->head->nentries]);

  /* Now perform the actual moves.  */
  if (moves != NULL)
    {
      struct moveinfo *runp = moves->next;
      do
        {
          assert ((char *) runp->to >= db->data);
          assert ((char *) runp->to + runp->size
                  <= db->data + db->head->first_free);
          assert ((char *) runp->from >= db->data);
          assert ((char *) runp->from + runp->size
                  <= db->data + db->head->first_free);

          /* The regions may overlap.  */
          memmove (runp->to, runp->from, runp->size);
          runp = runp->next;
        }
      while (runp != moves->next);

      if (__builtin_expect (debug_level >= 3, 0))
        dbg_log (_("freed %zu bytes in %s cache"),
                 db->head->first_free
                 - ((char *) moves->to + moves->size - db->data),
                 dbnames[db - dbs]);

      /* The byte past the end of the last copied block is the next
         available byte.  */
      db->head->first_free = (char *) moves->to + moves->size - db->data;

      /* Consistency check.  */
      if (__builtin_expect (debug_level >= 3, 0))
        {
          for (size_t idx = 0; idx < db->head->module; ++idx)
            {
              ref_t run = db->head->array[idx];
              size_t cnt = 0;

              while (run != ENDREF)
                {
                  if (run + sizeof (struct hashentry) > db->head->first_free)
                    {
                      dbg_log ("entry %zu in hash bucket %zu out of bounds: "
                               "%" PRIu32 "+%zu > %zu\n",
                               cnt, idx, run, sizeof (struct hashentry),
                               (size_t) db->head->first_free);
                      break;
                    }

                  struct hashentry *he = (struct hashentry *) (db->data + run);

                  if (he->key + he->len > db->head->first_free)
                    dbg_log ("key of entry %zu in hash bucket %zu out of "
                             "bounds: %" PRIu32 "+%zu > %zu\n",
                             cnt, idx, he->key, (size_t) he->len,
                             (size_t) db->head->first_free);

                  if (he->packet + sizeof (struct datahead)
                      > db->head->first_free)
                    dbg_log ("packet of entry %zu in hash bucket %zu out of "
                             "bounds: %" PRIu32 "+%zu > %zu\n",
                             cnt, idx, he->packet, sizeof (struct datahead),
                             (size_t) db->head->first_free);
                  else
                    {
                      struct datahead *dh = (struct datahead *) (db->data
                                                                 + he->packet);
                      if (he->packet + dh->allocsize
                          > db->head->first_free)
                        dbg_log ("full key of entry %zu in hash bucket %zu "
                                 "out of bounds: %" PRIu32 "+%zu > %zu",
                                 cnt, idx, he->packet, (size_t) dh->allocsize,
                                 (size_t) db->head->first_free);
                    }

                  run = he->next;
                  ++cnt;
                }
            }
        }
    }

  /* Make sure the data on disk is updated.  */
  if (db->persistent)
    msync (db->head, db->data + db->head->first_free - (char *) db->head,
           MS_ASYNC);


  /* Now we are done modifying the data.  */
  ++db->head->gc_cycle;
  assert ((db->head->gc_cycle & 1) == 0);

  /* We are done.  */
 out:
  pthread_mutex_unlock (&db->memlock);
  pthread_rwlock_unlock (&db->lock);

  if (he_use_malloc)
    free (he);
  if (mark_use_malloc)
    free (mark);

  obstack_free (&ob, NULL);
}


void *
mempool_alloc (struct database_dyn *db, size_t len, int data_alloc)
{
  /* Make sure LEN is a multiple of our maximum alignment so we can
     keep track of used memory in multiples of this alignment value.  */
  if ((len & BLOCK_ALIGN_M1) != 0)
    len += BLOCK_ALIGN - (len & BLOCK_ALIGN_M1);

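  /* For example, if BLOCK_ALIGN were 8 (so BLOCK_ALIGN_M1 == 7), a
     request of len == 13 has 13 & 7 == 5, so 8 - 5 == 3 is added and
     the allocation is rounded up to 16 bytes.  */
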
  if (data_alloc)
    pthread_rwlock_rdlock (&db->lock);

  pthread_mutex_lock (&db->memlock);

  assert ((db->head->first_free & BLOCK_ALIGN_M1) == 0);

  bool tried_resize = false;
  void *res;
 retry:
  res = db->data + db->head->first_free;

  if (__builtin_expect (db->head->first_free + len > db->head->data_size, 0))
    {
      if (! tried_resize)
        {
          /* Try to resize the database.  Grow the size by 1/8th, or
             by twice the request if that is larger.  */
          size_t oldtotal = (sizeof (struct database_pers_head)
                             + roundup (db->head->module * sizeof (ref_t),
                                        ALIGN)
                             + db->head->data_size);
          size_t new_data_size = (db->head->data_size
                                  + MAX (2 * len, db->head->data_size / 8));
          size_t newtotal = (sizeof (struct database_pers_head)
                             + roundup (db->head->module * sizeof (ref_t), ALIGN)
                             + new_data_size);
          if (newtotal > db->max_db_size)
            {
              new_data_size -= newtotal - db->max_db_size;
              newtotal = db->max_db_size;
            }

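          /* Illustrative numbers (not from the source): with a 32 MiB
             data area and a 64-byte request, MAX (128, 32 MiB / 8)
             grows the data area by 4 MiB; the total is then clamped
             to max_db_size, shrinking new_data_size by the
             overshoot.  */
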
          if (db->mmap_used && newtotal > oldtotal
              /* We only have to adjust the file size.  The new pages
                 become magically available.  */
              && TEMP_FAILURE_RETRY_VAL (posix_fallocate (db->wr_fd, oldtotal,
                                                          newtotal
                                                          - oldtotal)) == 0)
            {
              db->head->data_size = new_data_size;
              tried_resize = true;
              goto retry;
            }
        }

      if (data_alloc)
        pthread_rwlock_unlock (&db->lock);

      if (! db->last_alloc_failed)
        {
          dbg_log (_("no more memory for database '%s'"), dbnames[db - dbs]);

          db->last_alloc_failed = true;
        }

      ++db->head->addfailed;

      /* No luck.  */
      res = NULL;
    }
  else
    {
      db->head->first_free += len;

      db->last_alloc_failed = false;
    }

  pthread_mutex_unlock (&db->memlock);

  return res;
}