]> git.ipfire.org Git - thirdparty/glibc.git/blame - nscd/nscd_helper.c
tunables: Avoid getenv calls and disable glibc.malloc.check by default
[thirdparty/glibc.git] / nscd / nscd_helper.c
CommitLineData
/* Copyright (C) 1998-2017 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@cygnus.com>, 1998.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */
c207f23b
UD
18
19#include <assert.h>
20#include <errno.h>
21#include <fcntl.h>
22#include <stdbool.h>
cfe1fc10 23#include <stddef.h>
c6dfed24 24#include <stdlib.h>
1a77d37f 25#include <string.h>
0b890d59 26#include <time.h>
c207f23b 27#include <unistd.h>
e054f494 28#include <stdint.h>
c207f23b
UD
29#include <sys/mman.h>
30#include <sys/poll.h>
31#include <sys/socket.h>
32#include <sys/stat.h>
7529e67e 33#include <sys/time.h>
c207f23b
UD
34#include <sys/uio.h>
35#include <sys/un.h>
36#include <not-cancel.h>
37#include <nis/rpcsvc/nis.h>
c418b1ba 38#include <kernel-features.h>
c207f23b
UD
39
40#include "nscd-client.h"
41
42
f7140274 43/* Extra time we wait if the socket is still receiving data. This
cfca0aa3 44 value is in milliseconds. Note that the other side is nscd on the
f7140274
UD
45 local machine and it is already transmitting data. So the wait
46 time need not be long. */
47#define EXTRA_RECEIVE_TIME 200
48
49
50static int
51wait_on_socket (int sock, long int usectmo)
52{
53 struct pollfd fds[1];
54 fds[0].fd = sock;
55 fds[0].events = POLLIN | POLLERR | POLLHUP;
56 int n = __poll (fds, 1, usectmo);
57 if (n == -1 && __builtin_expect (errno == EINTR, 0))
58 {
59 /* Handle the case where the poll() call is interrupted by a
60 signal. We cannot just use TEMP_FAILURE_RETRY since it might
61 lead to infinite loops. */
62 struct timeval now;
63 (void) __gettimeofday (&now, NULL);
64 long int end = now.tv_sec * 1000 + usectmo + (now.tv_usec + 500) / 1000;
65 long int timeout = usectmo;
66 while (1)
67 {
68 n = __poll (fds, 1, timeout);
69 if (n != -1 || errno != EINTR)
70 break;
71
72 /* Recompute the timeout time. */
73 (void) __gettimeofday (&now, NULL);
74 timeout = end - (now.tv_sec * 1000 + (now.tv_usec + 500) / 1000);
75 }
76 }
77
78 return n;
79}
80
81
d2dc7d84
UD
82ssize_t
83__readall (int fd, void *buf, size_t len)
84{
85 size_t n = len;
86 ssize_t ret;
87 do
88 {
f7140274 89 again:
d2dc7d84
UD
90 ret = TEMP_FAILURE_RETRY (__read (fd, buf, n));
91 if (ret <= 0)
f7140274
UD
92 {
93 if (__builtin_expect (ret < 0 && errno == EAGAIN, 0)
94 /* The socket is still receiving data. Wait a bit more. */
95 && wait_on_socket (fd, EXTRA_RECEIVE_TIME) > 0)
96 goto again;
97
98 break;
99 }
d2dc7d84
UD
100 buf = (char *) buf + ret;
101 n -= ret;
102 }
103 while (n > 0);
104 return ret < 0 ? ret : len - n;
105}
106
107
108ssize_t
109__readvall (int fd, const struct iovec *iov, int iovcnt)
110{
111 ssize_t ret = TEMP_FAILURE_RETRY (__readv (fd, iov, iovcnt));
112 if (ret <= 0)
f7140274 113 {
a1ffb40e 114 if (__glibc_likely (ret == 0 || errno != EAGAIN))
f7140274
UD
115 /* A genuine error or no data to read. */
116 return ret;
117
118 /* The data has not all yet been received. Do as if we have not
119 read anything yet. */
120 ret = 0;
121 }
d2dc7d84
UD
122
123 size_t total = 0;
124 for (int i = 0; i < iovcnt; ++i)
125 total += iov[i].iov_len;
126
127 if (ret < total)
128 {
129 struct iovec iov_buf[iovcnt];
130 ssize_t r = ret;
131
132 struct iovec *iovp = memcpy (iov_buf, iov, iovcnt * sizeof (*iov));
133 do
134 {
135 while (iovp->iov_len <= r)
136 {
137 r -= iovp->iov_len;
138 --iovcnt;
139 ++iovp;
140 }
141 iovp->iov_base = (char *) iovp->iov_base + r;
142 iovp->iov_len -= r;
f7140274 143 again:
d2dc7d84
UD
144 r = TEMP_FAILURE_RETRY (__readv (fd, iovp, iovcnt));
145 if (r <= 0)
f7140274
UD
146 {
147 if (__builtin_expect (r < 0 && errno == EAGAIN, 0)
148 /* The socket is still receiving data. Wait a bit more. */
149 && wait_on_socket (fd, EXTRA_RECEIVE_TIME) > 0)
150 goto again;
151
152 break;
153 }
d2dc7d84
UD
154 ret += r;
155 }
156 while (ret < total);
157 if (r < 0)
158 ret = r;
159 }
160 return ret;
161}
162
163
c207f23b 164static int
58a2d52e 165open_socket (request_type type, const char *key, size_t keylen)
c207f23b 166{
c418b1ba
UD
167 int sock;
168
52fb79d6 169 sock = __socket (PF_UNIX, SOCK_STREAM | SOCK_CLOEXEC | SOCK_NONBLOCK, 0);
c207f23b
UD
170 if (sock < 0)
171 return -1;
172
c6dfed24 173 size_t real_sizeof_reqdata = sizeof (request_header) + keylen;
58a2d52e
UD
174 struct
175 {
176 request_header req;
c6dfed24
RM
177 char key[];
178 } *reqdata = alloca (real_sizeof_reqdata);
58a2d52e 179
c207f23b
UD
180 struct sockaddr_un sun;
181 sun.sun_family = AF_UNIX;
182 strcpy (sun.sun_path, _PATH_NSCDSOCKET);
183 if (__connect (sock, (struct sockaddr *) &sun, sizeof (sun)) < 0
184 && errno != EINPROGRESS)
185 goto out;
186
c6dfed24
RM
187 reqdata->req.version = NSCD_VERSION;
188 reqdata->req.type = type;
189 reqdata->req.key_len = keylen;
58a2d52e 190
c6dfed24 191 memcpy (reqdata->key, key, keylen);
58a2d52e
UD
192
193 bool first_try = true;
194 struct timeval tvend;
f2ccf983
UD
195 /* Fake initializing tvend. */
196 asm ("" : "=m" (tvend));
58a2d52e
UD
197 while (1)
198 {
199#ifndef MSG_NOSIGNAL
200# define MSG_NOSIGNAL 0
201#endif
c6dfed24 202 ssize_t wres = TEMP_FAILURE_RETRY (__send (sock, reqdata,
6eea8e0d 203 real_sizeof_reqdata,
58a2d52e 204 MSG_NOSIGNAL));
a1ffb40e 205 if (__glibc_likely (wres == (ssize_t) real_sizeof_reqdata))
58a2d52e
UD
206 /* We managed to send the request. */
207 return sock;
208
209 if (wres != -1 || errno != EAGAIN)
210 /* Something is really wrong, no chance to continue. */
211 break;
212
213 /* The daemon is busy wait for it. */
214 int to;
f2ccf983
UD
215 struct timeval now;
216 (void) __gettimeofday (&now, NULL);
58a2d52e
UD
217 if (first_try)
218 {
f2ccf983
UD
219 tvend.tv_usec = now.tv_usec;
220 tvend.tv_sec = now.tv_sec + 5;
58a2d52e
UD
221 to = 5 * 1000;
222 first_try = false;
223 }
224 else
f2ccf983
UD
225 to = ((tvend.tv_sec - now.tv_sec) * 1000
226 + (tvend.tv_usec - now.tv_usec) / 1000);
58a2d52e
UD
227
228 struct pollfd fds[1];
229 fds[0].fd = sock;
230 fds[0].events = POLLOUT | POLLERR | POLLHUP;
231 if (__poll (fds, 1, to) <= 0)
232 /* The connection timed out or broke down. */
233 break;
234
235 /* We try to write again. */
236 }
c207f23b
UD
237
238 out:
239 close_not_cancel_no_status (sock);
240
241 return -1;
242}
243
244
245void
246__nscd_unmap (struct mapped_database *mapped)
247{
248 assert (mapped->counter == 0);
62417d7e 249 __munmap ((void *) mapped->head, mapped->mapsize);
c207f23b
UD
250 free (mapped);
251}
252
253
/* Try to get a file descriptor for the shared memory segment
   containing the database.  */
3a2c0242
UD
256struct mapped_database *
257__nscd_get_mapping (request_type type, const char *key,
258 struct mapped_database **mappedp)
c207f23b
UD
259{
260 struct mapped_database *result = NO_MAPPING;
261#ifdef SCM_RIGHTS
262 const size_t keylen = strlen (key) + 1;
c207f23b
UD
263 int saved_errno = errno;
264
265 int mapfd = -1;
58a2d52e 266 char resdata[keylen];
c207f23b 267
58a2d52e
UD
268 /* Open a socket and send the request. */
269 int sock = open_socket (type, key, keylen);
c207f23b
UD
270 if (sock < 0)
271 goto out;
272
c207f23b
UD
273 /* Room for the data sent along with the file descriptor. We expect
274 the key name back. */
f3c54060
UD
275 uint64_t mapsize;
276 struct iovec iov[2];
c207f23b
UD
277 iov[0].iov_base = resdata;
278 iov[0].iov_len = keylen;
f3c54060
UD
279 iov[1].iov_base = &mapsize;
280 iov[1].iov_len = sizeof (mapsize);
c207f23b 281
7529e67e
UD
282 union
283 {
284 struct cmsghdr hdr;
285 char bytes[CMSG_SPACE (sizeof (int))];
286 } buf;
f3c54060 287 struct msghdr msg = { .msg_iov = iov, .msg_iovlen = 2,
7529e67e
UD
288 .msg_control = buf.bytes,
289 .msg_controllen = sizeof (buf) };
c207f23b
UD
290 struct cmsghdr *cmsg = CMSG_FIRSTHDR (&msg);
291
292 cmsg->cmsg_level = SOL_SOCKET;
293 cmsg->cmsg_type = SCM_RIGHTS;
294 cmsg->cmsg_len = CMSG_LEN (sizeof (int));
295
7529e67e
UD
296 /* This access is well-aligned since BUF is correctly aligned for an
297 int and CMSG_DATA preserves this alignment. */
6cc8844f 298 memset (CMSG_DATA (cmsg), '\xff', sizeof (int));
c207f23b
UD
299
300 msg.msg_controllen = cmsg->cmsg_len;
301
f7140274 302 if (wait_on_socket (sock, 5 * 1000) <= 0)
c207f23b
UD
303 goto out_close2;
304
dde0763a
UD
305# ifndef MSG_CMSG_CLOEXEC
306# define MSG_CMSG_CLOEXEC 0
307# endif
f3c54060 308 ssize_t n = TEMP_FAILURE_RETRY (__recvmsg (sock, &msg, MSG_CMSG_CLOEXEC));
aadd7d9d 309
5da4373c
UD
310 if (__builtin_expect (CMSG_FIRSTHDR (&msg) == NULL
311 || (CMSG_FIRSTHDR (&msg)->cmsg_len
312 != CMSG_LEN (sizeof (int))), 0))
313 goto out_close2;
11bf311e 314
6cc8844f
UD
315 int *ip = (void *) CMSG_DATA (cmsg);
316 mapfd = *ip;
ee5d5755 317
a1ffb40e 318 if (__glibc_unlikely (n != keylen && n != keylen + sizeof (mapsize)))
f3c54060
UD
319 goto out_close;
320
a1ffb40e 321 if (__glibc_unlikely (strcmp (resdata, key) != 0))
c207f23b
UD
322 goto out_close;
323
a1ffb40e 324 if (__glibc_unlikely (n == keylen))
f3c54060
UD
325 {
326 struct stat64 st;
327 if (__builtin_expect (fstat64 (mapfd, &st) != 0, 0)
328 || __builtin_expect (st.st_size < sizeof (struct database_pers_head),
329 0))
330 goto out_close;
331
332 mapsize = st.st_size;
333 }
334
c207f23b 335 /* The file is large enough, map it now. */
f3c54060 336 void *mapping = __mmap (NULL, mapsize, PROT_READ, MAP_SHARED, mapfd, 0);
a1ffb40e 337 if (__glibc_likely (mapping != MAP_FAILED))
c207f23b 338 {
0adfcc05
UD
339 /* Check whether the database is correct and up-to-date. */
340 struct database_pers_head *head = mapping;
341
342 if (__builtin_expect (head->version != DB_VERSION, 0)
343 || __builtin_expect (head->header_size != sizeof (*head), 0)
27c377dd
UD
344 /* Catch some misconfiguration. The server should catch
345 them now but some older versions did not. */
346 || __builtin_expect (head->module == 0, 0)
0adfcc05
UD
347 /* This really should not happen but who knows, maybe the update
348 thread got stuck. */
349 || __builtin_expect (! head->nscd_certainly_running
350 && (head->timestamp + MAPPING_TIMEOUT
351 < time (NULL)), 0))
c207f23b 352 {
0adfcc05 353 out_unmap:
f3c54060 354 __munmap (mapping, mapsize);
c207f23b
UD
355 goto out_close;
356 }
357
0adfcc05
UD
358 size_t size = (sizeof (*head) + roundup (head->module * sizeof (ref_t),
359 ALIGN)
360 + head->data_size);
361
a1ffb40e 362 if (__glibc_unlikely (mapsize < size))
0adfcc05
UD
363 goto out_unmap;
364
365 /* Allocate a record for the mapping. */
366 struct mapped_database *newp = malloc (sizeof (*newp));
367 if (newp == NULL)
368 /* Ugh, after all we went through the memory allocation failed. */
369 goto out_unmap;
370
c207f23b 371 newp->head = mapping;
0adfcc05
UD
372 newp->data = ((char *) mapping + head->header_size
373 + roundup (head->module * sizeof (ref_t), ALIGN));
c207f23b 374 newp->mapsize = size;
0adfcc05 375 newp->datasize = head->data_size;
c207f23b
UD
376 /* Set counter to 1 to show it is usable. */
377 newp->counter = 1;
378
379 result = newp;
380 }
381
382 out_close:
383 __close (mapfd);
384 out_close2:
385 __close (sock);
386 out:
387 __set_errno (saved_errno);
388#endif /* SCM_RIGHTS */
389
390 struct mapped_database *oldval = *mappedp;
391 *mappedp = result;
392
393 if (oldval != NULL && atomic_decrement_val (&oldval->counter) == 0)
394 __nscd_unmap (oldval);
395
396 return result;
397}
398
c207f23b
UD
399struct mapped_database *
400__nscd_get_map_ref (request_type type, const char *name,
388df58d 401 volatile struct locked_map_ptr *mapptr, int *gc_cyclep)
c207f23b
UD
402{
403 struct mapped_database *cur = mapptr->mapped;
404 if (cur == NO_MAPPING)
405 return cur;
406
509072a0
AJ
407 if (!__nscd_acquire_maplock (mapptr))
408 return NO_MAPPING;
c207f23b
UD
409
410 cur = mapptr->mapped;
411
a1ffb40e 412 if (__glibc_likely (cur != NO_MAPPING))
c207f23b
UD
413 {
414 /* If not mapped or timestamp not updated, request new map. */
415 if (cur == NULL
081fc592 416 || (cur->head->nscd_certainly_running == 0
0b25a49a
UD
417 && cur->head->timestamp + MAPPING_TIMEOUT < time (NULL))
418 || cur->head->data_size > cur->datasize)
3a2c0242
UD
419 cur = __nscd_get_mapping (type, name,
420 (struct mapped_database **) &mapptr->mapped);
c207f23b 421
a1ffb40e 422 if (__glibc_likely (cur != NO_MAPPING))
c207f23b
UD
423 {
424 if (__builtin_expect (((*gc_cyclep = cur->head->gc_cycle) & 1) != 0,
425 0))
426 cur = NO_MAPPING;
427 else
428 atomic_increment (&cur->counter);
429 }
430 }
431
432 mapptr->lock = 0;
433
434 return cur;
435}
436
437
5078fff6
JJ
/* Using sizeof (hashentry) is not always correct to determine the size of
   the data structure as found in the nscd cache.  The program could be
   a 64-bit process and nscd could be a 32-bit process.  In this case
   sizeof (hashentry) would overestimate the size.  The following is
   the minimum size of such an entry, good enough for our tests here.  */
#define MINIMUM_HASHENTRY_SIZE \
  (offsetof (struct hashentry, dellist) + sizeof (int32_t))
445
446
1a77d37f
JJ
447/* Don't return const struct datahead *, as eventhough the record
448 is normally constant, it can change arbitrarily during nscd
449 garbage collection. */
450struct datahead *
c207f23b 451__nscd_cache_search (request_type type, const char *key, size_t keylen,
cfe1fc10 452 const struct mapped_database *mapped, size_t datalen)
c207f23b
UD
453{
454 unsigned long int hash = __nis_hash (key, keylen) % mapped->head->module;
0b25a49a 455 size_t datasize = mapped->datasize;
c207f23b 456
a6fa5328 457 ref_t trail = mapped->head->array[hash];
cfe1fc10 458 trail = atomic_forced_read (trail);
a6fa5328 459 ref_t work = trail;
5078fff6
JJ
460 size_t loop_cnt = datasize / (MINIMUM_HASHENTRY_SIZE
461 + offsetof (struct datahead, data) / 2);
a6fa5328
UD
462 int tick = 0;
463
5078fff6 464 while (work != ENDREF && work + MINIMUM_HASHENTRY_SIZE <= datasize)
c207f23b
UD
465 {
466 struct hashentry *here = (struct hashentry *) (mapped->data + work);
cfe1fc10 467 ref_t here_key, here_packet;
c207f23b 468
27822ce6 469#if !_STRING_ARCH_unaligned
1a77d37f
JJ
470 /* Although during garbage collection when moving struct hashentry
471 records around we first copy from old to new location and then
472 adjust pointer from previous hashentry to it, there is no barrier
473 between those memory writes. It is very unlikely to hit it,
474 so check alignment only if a misaligned load can crash the
475 application. */
476 if ((uintptr_t) here & (__alignof__ (*here) - 1))
477 return NULL;
478#endif
479
0b25a49a
UD
480 if (type == here->type
481 && keylen == here->len
cfe1fc10
JJ
482 && (here_key = atomic_forced_read (here->key)) + keylen <= datasize
483 && memcmp (key, mapped->data + here_key, keylen) == 0
484 && ((here_packet = atomic_forced_read (here->packet))
485 + sizeof (struct datahead) <= datasize))
c207f23b
UD
486 {
487 /* We found the entry. Increment the appropriate counter. */
1a77d37f 488 struct datahead *dh
cfe1fc10 489 = (struct datahead *) (mapped->data + here_packet);
c207f23b 490
27822ce6 491#if !_STRING_ARCH_unaligned
1a77d37f
JJ
492 if ((uintptr_t) dh & (__alignof__ (*dh) - 1))
493 return NULL;
494#endif
495
c207f23b
UD
496 /* See whether we must ignore the entry or whether something
497 is wrong because garbage collection is in progress. */
cfe1fc10
JJ
498 if (dh->usable
499 && here_packet + dh->allocsize <= datasize
500 && (here_packet + offsetof (struct datahead, data) + datalen
501 <= datasize))
c207f23b
UD
502 return dh;
503 }
504
cfe1fc10 505 work = atomic_forced_read (here->next);
8c7661bc
UD
506 /* Prevent endless loops. This should never happen but perhaps
507 the database got corrupted, accidentally or deliberately. */
95410b7b 508 if (work == trail || loop_cnt-- == 0)
8c7661bc 509 break;
a6fa5328
UD
510 if (tick)
511 {
512 struct hashentry *trailelem;
513 trailelem = (struct hashentry *) (mapped->data + trail);
514
27822ce6 515#if !_STRING_ARCH_unaligned
a6fa5328
UD
516 /* We have to redo the checks. Maybe the data changed. */
517 if ((uintptr_t) trailelem & (__alignof__ (*trailelem) - 1))
518 return NULL;
519#endif
cfe1fc10 520
5078fff6 521 if (trail + MINIMUM_HASHENTRY_SIZE > datasize)
cfe1fc10
JJ
522 return NULL;
523
524 trail = atomic_forced_read (trailelem->next);
a6fa5328
UD
525 }
526 tick = 1 - tick;
c207f23b
UD
527 }
528
529 return NULL;
530}
531
532
533/* Create a socket connected to a name. */
534int
535__nscd_open_socket (const char *key, size_t keylen, request_type type,
536 void *response, size_t responselen)
537{
58a2d52e
UD
538 /* This should never happen and it is something the nscd daemon
539 enforces, too. He it helps to limit the amount of stack
540 used. */
541 if (keylen > MAXKEYLEN)
542 return -1;
543
c207f23b
UD
544 int saved_errno = errno;
545
58a2d52e 546 int sock = open_socket (type, key, keylen);
c207f23b
UD
547 if (sock >= 0)
548 {
58a2d52e 549 /* Wait for data. */
f7140274 550 if (wait_on_socket (sock, 5 * 1000) > 0)
7529e67e 551 {
58a2d52e
UD
552 ssize_t nbytes = TEMP_FAILURE_RETRY (__read (sock, response,
553 responselen));
7529e67e
UD
554 if (nbytes == (ssize_t) responselen)
555 return sock;
c207f23b
UD
556 }
557
558 close_not_cancel_no_status (sock);
559 }
560
561 __set_errno (saved_errno);
562
563 return -1;
564}