/* Copyright (c) 1998-2018 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@cygnus.com>, 1998.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published
   by the Free Software Foundation; version 2 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, see <http://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <atomic.h>
#include <errno.h>
#include <error.h>
#include <inttypes.h>
#include <limits.h>
#include <stdlib.h>
#include <string.h>
#include <libintl.h>
#include <arpa/inet.h>
#include <sys/mman.h>
#include <sys/param.h>
#include <sys/stat.h>
#include <sys/uio.h>
#include <nss.h>

#include "nscd.h"
#include "dbg_log.h"


/* Wrapper functions with error checking for standard functions.  */
extern void *xcalloc (size_t n, size_t s);


/* Number of times a value is reloaded without being used.  UINT_MAX
   means unlimited.  */
unsigned int reload_count = DEFAULT_RELOAD_LIMIT;
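
/* Worked example (added commentary, not part of the original source):
   with `reload-count 5' in nscd.conf, an entry whose lifetime expires
   five times in a row without an intervening cache hit is discarded by
   prune_cache instead of being refreshed again; a hit resets the
   counter (see the nreloads handling in cache_search below).  */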

static time_t (*const readdfcts[LASTREQ]) (struct database_dyn *,
                                           struct hashentry *,
                                           struct datahead *) =
{
  [GETPWBYNAME] = readdpwbyname,
  [GETPWBYUID] = readdpwbyuid,
  [GETGRBYNAME] = readdgrbyname,
  [GETGRBYGID] = readdgrbygid,
  [GETHOSTBYNAME] = readdhstbyname,
  [GETHOSTBYNAMEv6] = readdhstbynamev6,
  [GETHOSTBYADDR] = readdhstbyaddr,
  [GETHOSTBYADDRv6] = readdhstbyaddrv6,
  [GETAI] = readdhstai,
  [INITGROUPS] = readdinitgroups,
  [GETSERVBYNAME] = readdservbyname,
  [GETSERVBYPORT] = readdservbyport,
  [GETNETGRENT] = readdgetnetgrent,
  [INNETGR] = readdinnetgr
};
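
/* Added commentary (not in the original source): this dispatch table is
   indexed by request_type.  prune_cache uses it to refresh an expired
   entry in place and asserts that the slot for the entry's type is
   non-NULL before calling through it.  */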


/* Search the cache for a matching entry and return a pointer to its
   datahead when found.  Negative (not-found) results are cached like
   positive ones; in that case the returned datahead has its `notfound'
   flag set.  If no usable entry exists, return NULL.

   This function must be called with the read-lock held.  */
struct datahead *
cache_search (request_type type, const void *key, size_t len,
              struct database_dyn *table, uid_t owner)
{
  unsigned long int hash = __nss_hash (key, len) % table->head->module;

  unsigned long int nsearched = 0;
  struct datahead *result = NULL;

  ref_t work = table->head->array[hash];
  while (work != ENDREF)
    {
      ++nsearched;

      struct hashentry *here = (struct hashentry *) (table->data + work);

      if (type == here->type && len == here->len
          && memcmp (key, table->data + here->key, len) == 0
          && here->owner == owner)
        {
          /* We found the entry.  Increment the appropriate counter.  */
          struct datahead *dh
            = (struct datahead *) (table->data + here->packet);

          /* See whether we must ignore the entry.  */
          if (dh->usable)
            {
              /* We do not synchronize the memory here.  The statistics
                 data is not crucial, we synchronize only once in a while
                 in the cleanup threads.  */
              if (dh->notfound)
                ++table->head->neghit;
              else
                {
                  ++table->head->poshit;

                  if (dh->nreloads != 0)
                    dh->nreloads = 0;
                }

              result = dh;
              break;
            }
        }

      work = here->next;
    }

  if (nsearched > table->head->maxnsearched)
    table->head->maxnsearched = nsearched;

  return result;
}
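
/* Illustrative sketch (added commentary, not part of the original
   source): a worker thread handling a GETPWBYNAME request might call
   the function roughly like this, where `db', `keybuf', `keylen' and
   `send_response' are hypothetical names:

       pthread_rwlock_rdlock (&db->lock);
       struct datahead *dh = cache_search (GETPWBYNAME, keybuf, keylen,
                                           db, 0);
       if (dh != NULL && !dh->notfound)
         send_response (fd, dh);
       pthread_rwlock_unlock (&db->lock);
*/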

/* Add a new entry to the cache.  The return value is zero if the function
   call was successful.

   This function must be called with the read-lock held.

   We modify the table but we nevertheless only acquire a read-lock.
   This is OK since we use operations which would be safe even without
   locking, provided the `prune_cache' function is not removing entries
   at the same time; its removal phase takes the write lock, so holding
   the read lock excludes it and reduces the chance of conflicts.  */
int
cache_add (int type, const void *key, size_t len, struct datahead *packet,
           bool first, struct database_dyn *table,
           uid_t owner, bool prune_wakeup)
{
  if (__glibc_unlikely (debug_level >= 2))
    {
      const char *str;
      char buf[INET6_ADDRSTRLEN + 1];
      if (type == GETHOSTBYADDR || type == GETHOSTBYADDRv6)
        str = inet_ntop (type == GETHOSTBYADDR ? AF_INET : AF_INET6,
                         key, buf, sizeof (buf));
      else
        str = key;

      dbg_log (_("add new entry \"%s\" of type %s for %s to cache%s"),
               str, serv2str[type], dbnames[table - dbs],
               first ? _(" (first)") : "");
    }

  unsigned long int hash = __nss_hash (key, len) % table->head->module;
  struct hashentry *newp;

  newp = mempool_alloc (table, sizeof (struct hashentry), 0);
  /* If we cannot allocate memory, just do not do anything.  */
  if (newp == NULL)
    {
      /* If necessary mark the entry as unusable so that lookups will
         not use it.  */
      if (first)
        packet->usable = false;

      return -1;
    }

  newp->type = type;
  newp->first = first;
  newp->len = len;
  newp->key = (char *) key - table->data;
  assert (newp->key + newp->len <= table->head->first_free);
  newp->owner = owner;
  newp->packet = (char *) packet - table->data;
  assert ((newp->packet & BLOCK_ALIGN_M1) == 0);

  /* Put the new entry in the first position.  */
  /* TODO: Review concurrency.  Use atomic_exchange_release.  */
  newp->next = atomic_load_relaxed (&table->head->array[hash]);
  while (!atomic_compare_exchange_weak_release (&table->head->array[hash],
                                                (ref_t *) &newp->next,
                                                (ref_t) ((char *) newp
                                                         - table->data)));
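
  /* Added commentary (not in the original source): the loop above is a
     lock-free push onto the hash chain.  newp->next is fully written
     before the bucket head is swung to the new entry, and a failed CAS
     stores the freshly observed head back into newp->next, so readers
     traversing the chain always see a consistent list.  */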

  /* Update the statistics.  */
  if (packet->notfound)
    ++table->head->negmiss;
  else if (first)
    ++table->head->posmiss;

  /* We depend on this value being correct and at least as high as the
     real number of entries.  */
  atomic_increment (&table->head->nentries);

  /* It does not matter that we are not loading the just-incremented
     value, this is just for statistics.  */
  unsigned long int nentries = table->head->nentries;
  if (nentries > table->head->maxnentries)
    table->head->maxnentries = nentries;

  if (table->persistent)
    // XXX async OK?
    msync ((void *) table->head,
           (char *) &table->head->array[hash] - (char *) table->head
           + sizeof (ref_t), MS_ASYNC);
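
  /* Added commentary (not in the original source): MS_ASYNC only
     schedules write-back of the dirty pages covering the header and the
     updated bucket; the call returns immediately, so the worker thread
     is not blocked here.  */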

  /* We do not have to worry about the pruning thread if we are
     re-adding the data since this is done by the pruning thread.  We
     also do not have to do anything in case this is not the first
     time the data is entered since different data heads all have the
     same timeout.  */
  if (first && prune_wakeup)
    {
      /* Perhaps the prune thread for the table has not run in a long
         time.  Wake it if necessary.  */
      pthread_mutex_lock (&table->prune_lock);
      time_t next_wakeup = table->wakeup_time;
      bool do_wakeup = false;
      if (next_wakeup > packet->timeout + CACHE_PRUNE_INTERVAL)
        {
          table->wakeup_time = packet->timeout;
          do_wakeup = true;
        }
      pthread_mutex_unlock (&table->prune_lock);
      if (do_wakeup)
        pthread_cond_signal (&table->prune_cond);
    }
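
  /* Added commentary (not in the original source): the wakeup time is
     pulled forward only when the new entry expires more than
     CACHE_PRUNE_INTERVAL seconds before the currently scheduled run;
     otherwise the regular schedule already covers it and the prune
     thread is left asleep.  */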

  return 0;
}
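
/* Illustrative sketch (added commentary, not part of the original
   source): callers place both the key and the datahead inside the
   table's mapped memory pool first (the asserts above require this)
   and only then link them into the hash table, roughly:

       struct datahead *dh = (struct datahead *) mempool_alloc (db,
                                                                total, 1);
       ... fill in dh (allocsize, timeout, payload) and copy the key
       into the same record ...
       (void) cache_add (GETPWBYNAME, key_copy, keylen, dh, true, db,
                         owner, true);

   Here `db', `total', `key_copy', `keylen' and `owner' are
   hypothetical names.  */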

/* Walk through the table and remove all entries whose lifetime ended.

   We have a problem here.  To actually remove the entries we must get
   the write-lock.  But since we want to keep the time we have the
   lock as short as possible we cannot simply acquire the lock when we
   start looking for timed-out entries.

   Therefore we do it in two stages: first we look for entries which
   must be invalidated and remember them.  Then we get the lock and
   actually remove them.  This is complicated by the way we have to
   free the data structures since some hash table entries share the same
   data.

   The return value is the number of seconds until the next pruning
   pass is due.  */
time_t
prune_cache (struct database_dyn *table, time_t now, int fd)
{
  size_t cnt = table->head->module;

  /* If this table is not actually used don't do anything.  */
  if (cnt == 0)
    {
      if (fd != -1)
        {
          /* Reply to the INVALIDATE initiator.  */
          int32_t resp = 0;
          writeall (fd, &resp, sizeof (resp));
        }

      /* No need to do this again anytime soon.  */
      return 24 * 60 * 60;
    }

  /* If we check for the modification of the underlying file we invalidate
     the entries also in this case.  */
  if (table->check_file && now != LONG_MAX)
    {
      struct traced_file *runp = table->traced_files;

      while (runp != NULL)
        {
#ifdef HAVE_INOTIFY
          if (runp->inotify_descr[TRACED_FILE] == -1)
#endif
            {
              struct stat64 st;

              if (stat64 (runp->fname, &st) < 0)
                {
                  /* Print a diagnostic that the traced file was missing.
                     We must not disable tracing since the file might return
                     shortly and we want to reload it at the next pruning.
                     Disabling tracing here would go against the configuration
                     as specified by the user via check-files.  */
                  char buf[128];
                  dbg_log (_("checking for monitored file `%s': %s"),
                           runp->fname, strerror_r (errno, buf, sizeof (buf)));
                }
              else
                {
                  /* This must be `!=' to catch cases where users turn the
                     clocks back and we still want to detect any time
                     difference in mtime.  */
                  if (st.st_mtime != runp->mtime)
                    {
                      dbg_log (_("monitored file `%s` changed (mtime)"),
                               runp->fname);
                      /* The file changed.  Invalidate all entries.  */
                      now = LONG_MAX;
                      runp->mtime = st.st_mtime;
#ifdef HAVE_INOTIFY
                      /* Attempt to install a watch on the file.  */
                      install_watches (runp);
#endif
                    }
                }
            }

          runp = runp->next;
        }
    }
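
  /* Added commentary (not in the original source): when inotify is
     available and a watch is already installed for a file
     (inotify_descr[TRACED_FILE] != -1), the stat-based mtime poll above
     is skipped for it; changes are then reported through the inotify
     descriptor instead.  */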

  /* We run through the table and find values which are not valid anymore.

     Note that for the initial step, finding the entries to be removed,
     we don't need to get any lock.  It is at all times assured that the
     linked lists are set up correctly and that no second thread prunes
     the cache.  */
  bool *mark;
  size_t memory_needed = cnt * sizeof (bool);
  bool mark_use_alloca;
  if (__glibc_likely (memory_needed <= MAX_STACK_USE))
    {
      mark = alloca (cnt * sizeof (bool));
      memset (mark, '\0', memory_needed);
      mark_use_alloca = true;
    }
  else
    {
      mark = xcalloc (1, memory_needed);
      mark_use_alloca = false;
    }
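
  /* Added commentary (not in the original source): `mark' holds one
     flag per hash bucket.  It is stack-allocated only while it fits in
     MAX_STACK_USE bytes; for very large tables it comes from the heap
     so the thread's stack cannot overflow.  */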
  size_t first = cnt + 1;
  size_t last = 0;
  char *const data = table->data;
  bool any = false;

  if (__glibc_unlikely (debug_level > 2))
    dbg_log (_("pruning %s cache; time %ld"),
             dbnames[table - dbs], (long int) now);

#define NO_TIMEOUT LONG_MAX
  time_t next_timeout = NO_TIMEOUT;
  do
    {
      ref_t run = table->head->array[--cnt];

      while (run != ENDREF)
        {
          struct hashentry *runp = (struct hashentry *) (data + run);
          struct datahead *dh = (struct datahead *) (data + runp->packet);

          /* Some debug support.  */
          if (__glibc_unlikely (debug_level > 2))
            {
              char buf[INET6_ADDRSTRLEN];
              const char *str;

              if (runp->type == GETHOSTBYADDR || runp->type == GETHOSTBYADDRv6)
                {
                  inet_ntop (runp->type == GETHOSTBYADDR ? AF_INET : AF_INET6,
                             data + runp->key, buf, sizeof (buf));
                  str = buf;
                }
              else
                str = data + runp->key;

              dbg_log (_("considering %s entry \"%s\", timeout %" PRIu64),
                       serv2str[runp->type], str, dh->timeout);
            }

          /* Check whether the entry timed out.  */
          if (dh->timeout < now)
            {
              /* This hash bucket could contain entries which need to
                 be looked at.  */
              mark[cnt] = true;

              first = MIN (first, cnt);
              last = MAX (last, cnt);

              /* We only have to look at the data of the first entries
                 since the count information is kept in the data part
                 which is shared.  */
              if (runp->first)
                {
                  /* At this point there are two choices: we reload the
                     value or we discard it.  Do not change NRELOADS if
                     we do not reload the record.  */
                  if ((reload_count != UINT_MAX
                       && __builtin_expect (dh->nreloads >= reload_count, 0))
                      /* We always remove negative entries.  */
                      || dh->notfound
                      /* Discard everything if the user explicitly
                         requests it.  */
                      || now == LONG_MAX)
                    {
                      /* Remove the value.  */
                      dh->usable = false;

                      /* We definitely have some garbage entries now.  */
                      any = true;
                    }
                  else
                    {
                      /* Reload the value.  We do this only for the
                         initially used key, not the additionally
                         added derived value.  */
                      assert (runp->type < LASTREQ
                              && readdfcts[runp->type] != NULL);

                      time_t timeout = readdfcts[runp->type] (table, runp, dh);
                      next_timeout = MIN (next_timeout, timeout);

                      /* If the entry has been replaced, we might need
                         cleanup.  */
                      any |= !dh->usable;
                    }
                }
            }
          else
            {
              assert (dh->usable);
              next_timeout = MIN (next_timeout, dh->timeout);
            }

          run = runp->next;
        }
    }
  while (cnt > 0);
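
  /* Added commentary (not in the original source): at this point the
     first, lock-free stage is complete.  mark[] flags every bucket that
     holds at least one dead entry, [first, last] bounds the range of
     such buckets, `any' says whether garbage collection will be needed,
     and next_timeout is the earliest expiry among the surviving
     entries.  */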

  if (__glibc_unlikely (fd != -1))
    {
      /* Reply to the INVALIDATE initiator that the cache has been
         invalidated.  */
      int32_t resp = 0;
      writeall (fd, &resp, sizeof (resp));
    }

  if (first <= last)
    {
      struct hashentry *head = NULL;

      /* Now we have to get the write lock since we are about to modify
         the table.  */
      if (__glibc_unlikely (pthread_rwlock_trywrlock (&table->lock) != 0))
        {
          ++table->head->wrlockdelayed;
          pthread_rwlock_wrlock (&table->lock);
        }

      while (first <= last)
        {
          if (mark[first])
            {
              ref_t *old = &table->head->array[first];
              ref_t run = table->head->array[first];

              assert (run != ENDREF);
              do
                {
                  struct hashentry *runp = (struct hashentry *) (data + run);
                  struct datahead *dh
                    = (struct datahead *) (data + runp->packet);

                  if (! dh->usable)
                    {
                      /* We need the list only for debugging but it is
                         more costly to avoid creating the list than
                         doing it.  */
                      runp->dellist = head;
                      head = runp;

                      /* No need for an atomic operation, we have the
                         write lock.  */
                      --table->head->nentries;

                      run = *old = runp->next;
                    }
                  else
                    {
                      old = &runp->next;
                      run = runp->next;
                    }
                }
              while (run != ENDREF);
            }

          ++first;
        }
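
      /* Added commentary (not in the original source): `old' always
         holds the address of the link pointing at the current entry
         (the bucket head or the previous entry's `next' field), so the
         unlink above works uniformly for head, middle and tail
         entries.  */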

      /* It's all done.  */
      pthread_rwlock_unlock (&table->lock);

      /* Make sure the data is saved to disk.  */
      if (table->persistent)
        msync (table->head,
               data + table->head->first_free - (char *) table->head,
               MS_ASYNC);

      /* One extra pass if we do debugging.  */
      if (__glibc_unlikely (debug_level > 0))
        {
          struct hashentry *runp = head;

          while (runp != NULL)
            {
              char buf[INET6_ADDRSTRLEN];
              const char *str;

              if (runp->type == GETHOSTBYADDR || runp->type == GETHOSTBYADDRv6)
                {
                  inet_ntop (runp->type == GETHOSTBYADDR ? AF_INET : AF_INET6,
                             data + runp->key, buf, sizeof (buf));
                  str = buf;
                }
              else
                str = data + runp->key;

              dbg_log ("remove %s entry \"%s\"", serv2str[runp->type], str);

              runp = runp->dellist;
            }
        }
    }

  if (__glibc_unlikely (! mark_use_alloca))
    free (mark);

  /* Run garbage collection if any entry has been removed or replaced.  */
  if (any)
    gc (table);

  /* If there is no entry in the database and we therefore have no new
     timeout value, tell the caller to wake up in 24 hours.  */
  return next_timeout == NO_TIMEOUT ? 24 * 60 * 60 : next_timeout - now;
}