nscd/netgroupcache.c (thirdparty/glibc.git, commit 02cda13644c85ff5d14f703e522758d0707093c4)
/* Cache handling for netgroup lookup.
   Copyright (C) 2011 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@gmail.com>, 2011.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published
   by the Free Software Foundation; version 2 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software Foundation,
   Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.  */

#include <alloca.h>
#include <assert.h>
#include <errno.h>
#include <libintl.h>
#include <stdbool.h>
#include <unistd.h>
#include <sys/mman.h>

#include "../inet/netgroup.h"
#include "nscd.h"
#include "dbg_log.h"
#ifdef HAVE_SENDFILE
# include <kernel-features.h>
#endif


/* This is the standard reply in case the service is disabled.  */
static const netgroup_response_header disabled =
{
  .version = NSCD_VERSION,
  .found = -1,
  .nresults = 0,
  .result_len = 0
};

/* This is the struct describing how to write this record.  */
const struct iovec netgroup_iov_disabled =
{
  .iov_base = (void *) &disabled,
  .iov_len = sizeof (disabled)
};


/* This is the standard reply in case we haven't found the dataset.  */
static const netgroup_response_header notfound =
{
  .version = NSCD_VERSION,
  .found = 0,
  .nresults = 0,
  .result_len = 0
};


struct dataset
{
  struct datahead head;
  netgroup_response_header resp;
  char strdata[0];
};

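/* Resolve the netgroup KEY through the NSS "netgroup" service, flatten all
   (host,user,domain) triples -- including those of nested groups -- into a
   single cache record, send the reply to the client on FD where one is
   waiting, and return the record's timeout.  *RESULTP is set to the dataset
   so addinnetgrX can reuse it.  */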
static time_t
addgetnetgrentX (struct database_dyn *db, int fd, request_header *req,
                 const char *key, uid_t uid, struct hashentry *he,
                 struct datahead *dh, struct dataset **resultp)
{
  if (__builtin_expect (debug_level > 0, 0))
    {
      if (he == NULL)
        dbg_log (_("Haven't found \"%s\" in netgroup cache!"), key);
      else
        dbg_log (_("Reloading \"%s\" in netgroup cache!"), key);
    }

  static service_user *netgroup_database;
  time_t timeout;
  struct dataset *dataset;
  bool cacheable = false;
  ssize_t total;

  char *key_copy = NULL;
  struct __netgrent data;
  size_t buflen = MAX (1024, sizeof (*dataset) + req->key_len);
  size_t buffilled = sizeof (*dataset);
  char *buffer = NULL;
  size_t nentries = 0;
  bool use_malloc = false;
  size_t group_len = strlen (key) + 1;
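  /* FIRST_NEEDED provides stack storage for the initial work-list element,
     the requested group itself; the union is sized so that the flexible
     name array in struct name_list can hold GROUP_LEN bytes.  */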
  union
  {
    struct name_list elem;
    char mem[sizeof (struct name_list) + group_len];
  } first_needed;

  if (netgroup_database == NULL
      && __nss_database_lookup ("netgroup", NULL, NULL, &netgroup_database))
    {
      /* No such service.  */
      total = sizeof (notfound);
      timeout = time (NULL) + db->negtimeout;

      if (fd != -1)
        TEMP_FAILURE_RETRY (send (fd, &notfound, total, MSG_NOSIGNAL));

      dataset = mempool_alloc (db, sizeof (struct dataset) + req->key_len, 1);
      /* If we cannot permanently store the result, so be it.  */
      if (dataset != NULL)
        {
          dataset->head.allocsize = sizeof (struct dataset) + req->key_len;
          dataset->head.recsize = total;
          dataset->head.notfound = true;
          dataset->head.nreloads = 0;
          dataset->head.usable = true;

          /* Compute the timeout time.  */
          timeout = dataset->head.timeout = time (NULL) + db->negtimeout;
          dataset->head.ttl = db->negtimeout;

          /* This is the reply.  */
          memcpy (&dataset->resp, &notfound, total);

          /* Copy the key data.  */
          memcpy (dataset->strdata, key, req->key_len);

          cacheable = true;
        }

      goto writeout;
    }

  memset (&data, '\0', sizeof (data));
  buffer = alloca (buflen);
  first_needed.elem.next = &first_needed.elem;
  memcpy (first_needed.elem.name, key, group_len);
  data.needed_groups = &first_needed.elem;

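  /* DATA.needed_groups is a circular, singly-linked work list of netgroups
     that still have to be expanded; it starts out containing only KEY.  As
     each group is taken off the list it is moved onto DATA.known_groups,
     and nested groups are queued only if they are on neither list, so
     mutually recursive netgroup definitions cannot cause an endless
     loop.  */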
  while (data.needed_groups != NULL)
    {
      /* Add the next group to the list of those which are known.  */
      struct name_list *this_group = data.needed_groups->next;
      if (this_group == data.needed_groups)
        data.needed_groups = NULL;
      else
        data.needed_groups->next = this_group->next;
      this_group->next = data.known_groups;
      data.known_groups = this_group;

      union
      {
        enum nss_status (*f) (const char *, struct __netgrent *);
        void *ptr;
      } setfct;

      service_user *nip = netgroup_database;
      int no_more = __nss_lookup (&nip, "setnetgrent", NULL, &setfct.ptr);
      while (!no_more)
        {
          enum nss_status status
            = DL_CALL_FCT (*setfct.f, (data.known_groups->name, &data));

          if (status == NSS_STATUS_SUCCESS)
            {
              union
              {
                enum nss_status (*f) (struct __netgrent *, char *, size_t,
                                      int *);
                void *ptr;
              } getfct;
              getfct.ptr = __nss_lookup_function (nip, "getnetgrent_r");
              if (getfct.f != NULL)
                while (1)
                  {
                    int e;
                    status = getfct.f (&data, buffer + buffilled,
                                       buflen - buffilled, &e);
                    if (status == NSS_STATUS_RETURN)
                      /* This was the last one for this group.  Look
                         at next group if available.  */
                      break;
                    if (status == NSS_STATUS_SUCCESS)
                      {
                        if (data.type == triple_val)
                          {
                            const char *nhost = data.val.triple.host;
                            const char *nuser = data.val.triple.user;
                            const char *ndomain = data.val.triple.domain;

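                            /* getnetgrent_r placed the three strings
                               somewhere in the still-free part of BUFFER.
                               They are copied below, in host/user/domain
                               order, to the front of that free area; if the
                               pointers are not already in that order the
                               copy could overwrite a string before it has
                               been read, so move the strings out of the
                               way first.  */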
                            if (data.val.triple.host > data.val.triple.user
                                || data.val.triple.user > data.val.triple.domain)
                              {
                                const char *last = MAX (nhost,
                                                        MAX (nuser, ndomain));
                                size_t bufused = (last + strlen (last) + 1
                                                  - buffer);

                                /* We have to make temporary copies.  */
                                size_t hostlen = strlen (nhost) + 1;
                                size_t userlen = strlen (nuser) + 1;
                                size_t domainlen = strlen (ndomain) + 1;
                                size_t needed = hostlen + userlen + domainlen;
                                if (buflen - req->key_len - bufused < needed)
                                  {
                                    size_t newsize = MAX (2 * buflen,
                                                          buflen + 2 * needed);
                                    if (use_malloc || newsize > 1024 * 1024)
                                      {
                                        buflen = newsize;
                                        char *newbuf = xrealloc (use_malloc
                                                                 ? buffer
                                                                 : NULL,
                                                                 buflen);

                                        buffer = newbuf;
                                        use_malloc = true;
                                      }
                                    else
                                      extend_alloca (buffer, buflen, newsize);
                                  }

                                nhost = memcpy (buffer + bufused,
                                                nhost, hostlen);
                                nuser = memcpy ((char *) nhost + hostlen,
                                                nuser, userlen);
                                ndomain = memcpy ((char *) nuser + userlen,
                                                  ndomain, domainlen);
                              }

                            char *wp = buffer + buffilled;
                            wp = stpcpy (wp, nhost) + 1;
                            wp = stpcpy (wp, nuser) + 1;
                            wp = stpcpy (wp, ndomain) + 1;
                            buffilled = wp - buffer;
                            ++nentries;
                          }
                        else
                          {
                            /* Check that the group has not been
                               requested before.  */
                            struct name_list *runp = data.needed_groups;
                            if (runp != NULL)
                              while (1)
                                {
                                  if (strcmp (runp->name, data.val.group) == 0)
                                    break;

                                  runp = runp->next;
                                  if (runp == data.needed_groups)
                                    {
                                      runp = NULL;
                                      break;
                                    }
                                }

                            if (runp == NULL)
                              {
                                runp = data.known_groups;
                                while (runp != NULL)
                                  if (strcmp (runp->name, data.val.group) == 0)
                                    break;
                                  else
                                    runp = runp->next;
                              }

                            if (runp == NULL)
                              {
                                /* A new group is requested.  */
                                size_t namelen = strlen (data.val.group) + 1;
                                struct name_list *newg = alloca (sizeof (*newg)
                                                                 + namelen);
                                memcpy (newg->name, data.val.group, namelen);
                                if (data.needed_groups == NULL)
                                  data.needed_groups = newg->next = newg;
                                else
                                  {
                                    newg->next = data.needed_groups->next;
                                    data.needed_groups->next = newg;
                                    data.needed_groups = newg;
                                  }
                              }
                          }
                      }
                    else if (status == NSS_STATUS_UNAVAIL && e == ERANGE)
                      {
                        size_t newsize = 2 * buflen;
                        if (use_malloc || newsize > 1024 * 1024)
                          {
                            buflen = newsize;
                            char *newbuf = xrealloc (use_malloc
                                                     ? buffer : NULL, buflen);

                            buffer = newbuf;
                            use_malloc = true;
                          }
                        else
                          extend_alloca (buffer, buflen, newsize);
                      }
                  }

              enum nss_status (*endfct) (struct __netgrent *);
              endfct = __nss_lookup_function (nip, "endnetgrent");
              if (endfct != NULL)
                (void) DL_CALL_FCT (*endfct, (&data));

              break;
            }

          no_more = __nss_next2 (&nip, "setnetgrent", NULL, &setfct.ptr,
                                 status, 0);
        }
    }

  total = buffilled;

  /* Fill in the dataset.  */
  dataset = (struct dataset *) buffer;
  dataset->head.allocsize = total + req->key_len;
  dataset->head.recsize = total - offsetof (struct dataset, resp);
  dataset->head.notfound = false;
  dataset->head.nreloads = he == NULL ? 0 : (dh->nreloads + 1);
  dataset->head.usable = true;
  dataset->head.ttl = db->postimeout;
  timeout = dataset->head.timeout = time (NULL) + dataset->head.ttl;

  dataset->resp.version = NSCD_VERSION;
  dataset->resp.found = 1;
  dataset->resp.nresults = nentries;
  dataset->resp.result_len = buffilled - sizeof (*dataset);

  assert (buflen - buffilled >= req->key_len);
  key_copy = memcpy (buffer + buffilled, key, req->key_len);
  buffilled += req->key_len;

  /* Now we can determine whether on refill we have to create a new
     record or not.  */
  if (he != NULL)
    {
      assert (fd == -1);

      if (dataset->head.allocsize == dh->allocsize
          && dataset->head.recsize == dh->recsize
          && memcmp (&dataset->resp, dh->data,
                     dh->allocsize - offsetof (struct dataset, resp)) == 0)
        {
          /* The data has not changed.  We will just bump the timeout
             value.  Note that the new record has been allocated on
             the stack and need not be freed.  */
          dh->timeout = dataset->head.timeout;
          dh->ttl = dataset->head.ttl;
          ++dh->nreloads;
          dataset = (struct dataset *) dh;

          goto out;
        }
    }

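  /* Move the record, which so far lives in BUFFER (stack or malloc'ed
     memory), into the database's memory pool so that it can be linked into
     the cache and, for a persistent database, written back to disk.  */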
  {
    struct dataset *newp
      = (struct dataset *) mempool_alloc (db, total + req->key_len, 1);
    if (__builtin_expect (newp != NULL, 1))
      {
        /* Adjust pointer into the memory block.  */
        key_copy = (char *) newp + (key_copy - buffer);

        dataset = memcpy (newp, dataset, total + req->key_len);
        cacheable = true;

        if (he != NULL)
          /* Mark the old record as obsolete.  */
          dh->usable = false;
      }
  }

  if (he == NULL && fd != -1)
    {
      /* We write the dataset before inserting it to the database
         since while inserting this thread might block and so would
         unnecessarily let the receiver wait.  */
    writeout:
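      /* When the record lives in the memory-mapped database file, prefer
         sendfile so the kernel can copy the reply straight from the backing
         file to the socket; otherwise, or if sendfile turns out to be
         unsupported, fall back to a plain write of the response.  */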
#ifdef HAVE_SENDFILE
      if (__builtin_expect (db->mmap_used, 1) && cacheable)
        {
          assert (db->wr_fd != -1);
          assert ((char *) &dataset->resp > (char *) db->data);
          assert ((char *) dataset - (char *) db->head + total
                  <= (sizeof (struct database_pers_head)
                      + db->head->module * sizeof (ref_t)
                      + db->head->data_size));
# ifndef __ASSUME_SENDFILE
          ssize_t written =
# endif
            sendfileall (fd, db->wr_fd, (char *) &dataset->resp
                         - (char *) db->head, dataset->head.recsize);
# ifndef __ASSUME_SENDFILE
          if (written == -1 && errno == ENOSYS)
            goto use_write;
# endif
        }
      else
#endif
        {
#if defined HAVE_SENDFILE && !defined __ASSUME_SENDFILE
        use_write:
#endif
          writeall (fd, &dataset->resp, dataset->head.recsize);
        }
    }

  if (cacheable)
    {
      /* If necessary, we also propagate the data to disk.  */
      if (db->persistent)
        {
          // XXX async OK?
          uintptr_t pval = (uintptr_t) dataset & ~pagesize_m1;
          msync ((void *) pval,
                 ((uintptr_t) dataset & pagesize_m1) + total + req->key_len,
                 MS_ASYNC);
        }

      (void) cache_add (req->type, key_copy, req->key_len, &dataset->head,
                        true, db, uid, he == NULL);

      pthread_rwlock_unlock (&db->lock);

      /* Mark the old entry as obsolete.  */
      if (dh != NULL)
        dh->usable = false;
    }

 out:
  if (use_malloc)
    free (buffer);

  *resultp = dataset;

  return timeout;
}

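/* Answer an INNETGR request: look up (or, if necessary, build via
   addgetnetgrentX) the GETNETGRENT record for the group named at the start
   of KEY and search its triples for the requested host/user/domain
   combination.  */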
static time_t
addinnetgrX (struct database_dyn *db, int fd, request_header *req,
             char *key, uid_t uid, struct hashentry *he,
             struct datahead *dh)
{
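  /* KEY consists of four consecutive NUL-terminated strings: the group
     name followed by the host, user and domain to test.  An empty string
     leaves the corresponding pointer NULL, which acts as a wildcard in the
     comparison below.  */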
  const char *group = key;
  key = (char *) rawmemchr (key, '\0') + 1;
  size_t group_len = key - group - 1;
  const char *host = *key++ ? key : NULL;
  if (host != NULL)
    key = (char *) rawmemchr (key, '\0') + 1;
  const char *user = *key++ ? key : NULL;
  if (user != NULL)
    key = (char *) rawmemchr (key, '\0') + 1;
  const char *domain = *key++ ? key : NULL;

  if (__builtin_expect (debug_level > 0, 0))
    {
      if (he == NULL)
        dbg_log (_("Haven't found \"%s (%s,%s,%s)\" in netgroup cache!"),
                 group, host ?: "", user ?: "", domain ?: "");
      else
        dbg_log (_("Reloading \"%s (%s,%s,%s)\" in netgroup cache!"),
                 group, host ?: "", user ?: "", domain ?: "");
    }

  struct dataset *result = (struct dataset *) cache_search (GETNETGRENT,
                                                            group, group_len,
                                                            db, uid);
  time_t timeout;
  if (result != NULL)
    timeout = result->head.timeout;
  else
    {
      request_header req_get =
        {
          .type = GETNETGRENT,
          .key_len = group_len
        };
      timeout = addgetnetgrentX (db, -1, &req_get, group, uid, NULL, NULL,
                                 &result);
    }

  struct indataset
  {
    struct datahead head;
    innetgroup_response_header resp;
  } *dataset
      = (struct indataset *) mempool_alloc (db,
                                            sizeof (*dataset) + req->key_len,
                                            1);
  struct indataset dataset_mem;
  bool cacheable = true;
  if (__builtin_expect (dataset == NULL, 0))
    {
      cacheable = false;
      dataset = &dataset_mem;
    }

  dataset->head.allocsize = sizeof (*dataset) + req->key_len;
  dataset->head.recsize = sizeof (innetgroup_response_header);
  dataset->head.notfound = result->head.notfound;
  dataset->head.nreloads = he == NULL ? 0 : (dh->nreloads + 1);
  dataset->head.usable = true;
  dataset->head.ttl = result->head.ttl;
  dataset->head.timeout = timeout;

  dataset->resp.version = NSCD_VERSION;
  dataset->resp.found = result->resp.found;
  /* Until we find a matching entry the result is 0.  */
  dataset->resp.result = 0;

  char *key_copy = memcpy ((char *) (dataset + 1), group, req->key_len);

  if (dataset->resp.found)
    {
      const char *triplets = (const char *) (&result->resp + 1);

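      /* The GETNETGRENT record stores RESULT->resp.nresults triples as
         consecutive NUL-terminated host, user and domain strings.  Scan
         them until one matches the requested fields; a NULL field matches
         any value.  */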
      for (nscd_ssize_t i = result->resp.nresults; i > 0; --i)
        {
          bool success = true;

          if (host != NULL)
            success = strcmp (host, triplets) == 0;
          triplets = (const char *) rawmemchr (triplets, '\0') + 1;

          if (success && user != NULL)
            success = strcmp (user, triplets) == 0;
          triplets = (const char *) rawmemchr (triplets, '\0') + 1;

          if (success && (domain == NULL || strcmp (domain, triplets) == 0))
            {
              dataset->resp.result = 1;
              break;
            }
          triplets = (const char *) rawmemchr (triplets, '\0') + 1;
        }
    }

  if (he != NULL && dh->data[0].innetgroupdata.result == dataset->resp.result)
    {
      /* The data has not changed.  We will just bump the timeout
         value.  Note that the new record has been allocated on
         the stack and need not be freed.  */
      dh->timeout = timeout;
      dh->ttl = dataset->head.ttl;
      ++dh->nreloads;
      return timeout;
    }

  if (he == NULL)
    {
      /* We write the dataset before inserting it to the database
         since while inserting this thread might block and so would
         unnecessarily let the receiver wait.  */
      assert (fd != -1);

#ifdef HAVE_SENDFILE
      if (__builtin_expect (db->mmap_used, 1) && cacheable)
        {
          assert (db->wr_fd != -1);
          assert ((char *) &dataset->resp > (char *) db->data);
          assert ((char *) dataset - (char *) db->head + sizeof (*dataset)
                  <= (sizeof (struct database_pers_head)
                      + db->head->module * sizeof (ref_t)
                      + db->head->data_size));
# ifndef __ASSUME_SENDFILE
          ssize_t written =
# endif
            sendfileall (fd, db->wr_fd,
                         (char *) &dataset->resp - (char *) db->head,
                         sizeof (innetgroup_response_header));
# ifndef __ASSUME_SENDFILE
          if (written == -1 && errno == ENOSYS)
            goto use_write;
# endif
        }
      else
#endif
        {
#if defined HAVE_SENDFILE && !defined __ASSUME_SENDFILE
        use_write:
#endif
          writeall (fd, &dataset->resp, sizeof (innetgroup_response_header));
        }
    }

  if (cacheable)
    {
      /* If necessary, we also propagate the data to disk.  */
      if (db->persistent)
        {
          // XXX async OK?
          uintptr_t pval = (uintptr_t) dataset & ~pagesize_m1;
          msync ((void *) pval,
                 ((uintptr_t) dataset & pagesize_m1) + sizeof (*dataset)
                 + req->key_len,
                 MS_ASYNC);
        }

      (void) cache_add (req->type, key_copy, req->key_len, &dataset->head,
                        true, db, uid, he == NULL);

      pthread_rwlock_unlock (&db->lock);

      /* Mark the old entry as obsolete.  */
      if (dh != NULL)
        dh->usable = false;
    }

  return timeout;
}

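/* Entry points used by the rest of nscd.  The add* functions handle fresh
   client requests arriving on FD; the readd* variants refresh an existing
   hash entry in place, with no client waiting (fd is passed as -1).  */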
void
addgetnetgrent (struct database_dyn *db, int fd, request_header *req,
                void *key, uid_t uid)
{
  struct dataset *ignore;

  addgetnetgrentX (db, fd, req, key, uid, NULL, NULL, &ignore);
}


time_t
readdgetnetgrent (struct database_dyn *db, struct hashentry *he,
                  struct datahead *dh)
{
  request_header req =
    {
      .type = GETNETGRENT,
      .key_len = he->len
    };
  struct dataset *ignore;

  return addgetnetgrentX (db, -1, &req, db->data + he->key, he->owner, he, dh,
                          &ignore);
}


void
addinnetgr (struct database_dyn *db, int fd, request_header *req,
            void *key, uid_t uid)
{
  addinnetgrX (db, fd, req, key, uid, NULL, NULL);
}


time_t
readdinnetgr (struct database_dyn *db, struct hashentry *he,
              struct datahead *dh)
{
  request_header req =
    {
      .type = INNETGR,
      .key_len = he->len
    };

  return addinnetgrX (db, -1, &req, db->data + he->key, he->owner, he, dh);
}