]> git.ipfire.org Git - thirdparty/glibc.git/blame - nscd/nscd_helper.c
nscd: Use time_t for return type of addgetnetgrentX
[thirdparty/glibc.git] / nscd / nscd_helper.c
CommitLineData
dff8da6b 1/* Copyright (C) 1998-2024 Free Software Foundation, Inc.
c207f23b 2 This file is part of the GNU C Library.
c207f23b
UD
3
4 The GNU C Library is free software; you can redistribute it and/or
5 modify it under the terms of the GNU Lesser General Public
6 License as published by the Free Software Foundation; either
7 version 2.1 of the License, or (at your option) any later version.
8
9 The GNU C Library is distributed in the hope that it will be useful,
10 but WITHOUT ANY WARRANTY; without even the implied warranty of
11 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 Lesser General Public License for more details.
13
14 You should have received a copy of the GNU Lesser General Public
59ba27a6 15 License along with the GNU C Library; if not, see
5a82c748 16 <https://www.gnu.org/licenses/>. */
c207f23b
UD
17
18#include <assert.h>
19#include <errno.h>
20#include <fcntl.h>
21#include <stdbool.h>
cfe1fc10 22#include <stddef.h>
c6dfed24 23#include <stdlib.h>
1a77d37f 24#include <string.h>
0b890d59 25#include <time.h>
c207f23b 26#include <unistd.h>
e054f494 27#include <stdint.h>
c207f23b 28#include <sys/mman.h>
2faa42da 29#include <sys/param.h>
c207f23b
UD
30#include <sys/poll.h>
31#include <sys/socket.h>
32#include <sys/stat.h>
7529e67e 33#include <sys/time.h>
c207f23b
UD
34#include <sys/uio.h>
35#include <sys/un.h>
36#include <not-cancel.h>
c418b1ba 37#include <kernel-features.h>
67f36c79 38#include <nss.h>
481d01fa 39#include <struct___timespec64.h>
c207f23b
UD
40
41#include "nscd-client.h"
c207f23b 42
f7140274 43/* Extra time we wait if the socket is still receiving data. This
cfca0aa3 44 value is in milliseconds. Note that the other side is nscd on the
f7140274
UD
45 local machine and it is already transmitting data. So the wait
46 time need not be long. */
47#define EXTRA_RECEIVE_TIME 200
48
49
50static int
51wait_on_socket (int sock, long int usectmo)
52{
53 struct pollfd fds[1];
54 fds[0].fd = sock;
55 fds[0].events = POLLIN | POLLERR | POLLHUP;
56 int n = __poll (fds, 1, usectmo);
57 if (n == -1 && __builtin_expect (errno == EINTR, 0))
58 {
59 /* Handle the case where the poll() call is interrupted by a
60 signal. We cannot just use TEMP_FAILURE_RETRY since it might
61 lead to infinite loops. */
481d01fa
LM
62 struct __timespec64 now;
63 __clock_gettime64 (CLOCK_REALTIME, &now);
64 int64_t end = (now.tv_sec * 1000 + usectmo
65 + (now.tv_nsec + 500000) / 1000000);
f7140274
UD
66 long int timeout = usectmo;
67 while (1)
68 {
69 n = __poll (fds, 1, timeout);
70 if (n != -1 || errno != EINTR)
71 break;
72
73 /* Recompute the timeout time. */
481d01fa 74 __clock_gettime64 (CLOCK_REALTIME, &now);
4a39c34c
ZW
75 timeout = end - ((now.tv_sec * 1000
76 + (now.tv_nsec + 500000) / 1000000));
f7140274
UD
77 }
78 }
79
80 return n;
81}
82
83
d2dc7d84
UD
84ssize_t
85__readall (int fd, void *buf, size_t len)
86{
87 size_t n = len;
88 ssize_t ret;
89 do
90 {
f7140274 91 again:
d2dc7d84
UD
92 ret = TEMP_FAILURE_RETRY (__read (fd, buf, n));
93 if (ret <= 0)
f7140274
UD
94 {
95 if (__builtin_expect (ret < 0 && errno == EAGAIN, 0)
96 /* The socket is still receiving data. Wait a bit more. */
97 && wait_on_socket (fd, EXTRA_RECEIVE_TIME) > 0)
98 goto again;
99
100 break;
101 }
d2dc7d84
UD
102 buf = (char *) buf + ret;
103 n -= ret;
104 }
105 while (n > 0);
106 return ret < 0 ? ret : len - n;
107}
108
109
/* Scatter-read into the IOVCNT buffers described by IOV until every
   buffer is full, retrying short reads and transient EAGAINs like
   __readall.  Returns the total number of bytes read, which is less
   than the sum of the iovec lengths only on EOF, or a negative value
   on error.  */
ssize_t
__readvall (int fd, const struct iovec *iov, int iovcnt)
{
  ssize_t ret = TEMP_FAILURE_RETRY (__readv (fd, iov, iovcnt));
  if (ret <= 0)
    {
      if (__glibc_likely (ret == 0 || errno != EAGAIN))
	/* A genuine error or no data to read.  */
	return ret;

      /* The data has not all yet been received.  Do as if we have not
	 read anything yet.  */
      ret = 0;
    }

  /* Total number of bytes the caller expects.  */
  size_t total = 0;
  for (int i = 0; i < iovcnt; ++i)
    total += iov[i].iov_len;

  if (ret < total)
    {
      /* Work on a private copy of the iovec array: we must advance
	 iov_base/iov_len as data arrives, and IOV is const.  */
      struct iovec iov_buf[iovcnt];
      ssize_t r = ret;

      struct iovec *iovp = memcpy (iov_buf, iov, iovcnt * sizeof (*iov));
      do
	{
	  /* Skip over the buffers the last read filled completely.
	     IOVCNT shrinks in step with IOVP so the __readv below only
	     sees the not-yet-full tail of the array.  */
	  while (iovp->iov_len <= r)
	    {
	      r -= iovp->iov_len;
	      --iovcnt;
	      ++iovp;
	    }
	  /* Account for a partially filled buffer.  */
	  iovp->iov_base = (char *) iovp->iov_base + r;
	  iovp->iov_len -= r;
	again:
	  r = TEMP_FAILURE_RETRY (__readv (fd, iovp, iovcnt));
	  if (r <= 0)
	    {
	      if (__builtin_expect (r < 0 && errno == EAGAIN, 0)
		  /* The socket is still receiving data.  Wait a bit more.  */
		  && wait_on_socket (fd, EXTRA_RECEIVE_TIME) > 0)
		goto again;

	      break;
	    }
	  ret += r;
	}
      while (ret < total);
      /* Report a hard read error in preference to a partial count.  */
      if (r < 0)
	ret = r;
    }
  return ret;
}
164
165
/* Open a non-blocking UNIX stream socket to the local nscd daemon and
   send a request of TYPE followed by the KEYLEN bytes at KEY.
   Returns the connected socket (request fully sent, reply not yet
   read) or -1 on failure.  */
static int
open_socket (request_type type, const char *key, size_t keylen)
{
  int sock;

  sock = __socket (PF_UNIX, SOCK_STREAM | SOCK_CLOEXEC | SOCK_NONBLOCK, 0);
  if (sock < 0)
    return -1;

  /* Request layout: fixed header immediately followed by the key
     bytes.  Callers bound KEYLEN (see the MAXKEYLEN check in
     __nscd_open_socket), so the alloca stays small.  */
  size_t real_sizeof_reqdata = sizeof (request_header) + keylen;
  struct
  {
    request_header req;
    char key[];
  } *reqdata = alloca (real_sizeof_reqdata);

  struct sockaddr_un sun;
  sun.sun_family = AF_UNIX;
  strcpy (sun.sun_path, _PATH_NSCDSOCKET);
  /* The socket is non-blocking, so EINPROGRESS just means the
     connection is still being established; the __poll below waits
     for writability.  */
  if (__connect (sock, (struct sockaddr *) &sun, sizeof (sun)) < 0
      && errno != EINPROGRESS)
    goto out;

  reqdata->req.version = NSCD_VERSION;
  reqdata->req.type = type;
  reqdata->req.key_len = keylen;

  memcpy (reqdata->key, key, keylen);

  bool first_try = true;
  struct __timespec64 tvend = { 0, 0 };
  while (1)
    {
#ifndef MSG_NOSIGNAL
# define MSG_NOSIGNAL 0
#endif
      ssize_t wres = TEMP_FAILURE_RETRY (__send (sock, reqdata,
						 real_sizeof_reqdata,
						 MSG_NOSIGNAL));
      if (__glibc_likely (wres == (ssize_t) real_sizeof_reqdata))
	/* We managed to send the request.  */
	return sock;

      if (wres != -1 || errno != EAGAIN)
	/* Something is really wrong, no chance to continue.  */
	break;

      /* The daemon is busy, wait for it.  Retry for at most five
	 seconds measured from the first failed send; TVEND holds the
	 absolute deadline.  */
      int to;
      struct __timespec64 now;
      __clock_gettime64 (CLOCK_REALTIME, &now);
      if (first_try)
	{
	  tvend.tv_nsec = now.tv_nsec;
	  tvend.tv_sec = now.tv_sec + 5;
	  to = 5 * 1000;
	  first_try = false;
	}
      else
	/* Remaining time until the deadline, in milliseconds.  */
	to = ((tvend.tv_sec - now.tv_sec) * 1000
	      + (tvend.tv_nsec - now.tv_nsec) / 1000000);

      struct pollfd fds[1];
      fds[0].fd = sock;
      fds[0].events = POLLOUT | POLLERR | POLLHUP;
      if (__poll (fds, 1, to) <= 0)
	/* The connection timed out or broke down.  */
	break;

      /* We try to write again.  */
    }

 out:
  __close_nocancel_nostatus (sock);

  return -1;
}
243
244
245void
246__nscd_unmap (struct mapped_database *mapped)
247{
248 assert (mapped->counter == 0);
62417d7e 249 __munmap ((void *) mapped->head, mapped->mapsize);
c207f23b
UD
250 free (mapped);
251}
252
253
7f0d9e61 254/* Try to get a file descriptor for the shared memory segment
c207f23b 255 containing the database. */
3a2c0242
UD
/* Ask nscd (via a TYPE/KEY request) for the file descriptor of the
   shared memory database, validate and mmap it, and publish the new
   mapping through *MAPPEDP, dropping the reference on whatever was
   there before.  Returns the new mapping record, or NO_MAPPING on any
   failure.  errno is preserved across the whole operation.  */
struct mapped_database *
__nscd_get_mapping (request_type type, const char *key,
		    struct mapped_database **mappedp)
{
  struct mapped_database *result = NO_MAPPING;
#ifdef SCM_RIGHTS
  const size_t keylen = strlen (key) + 1;
  int saved_errno = errno;

  int mapfd = -1;
  char resdata[keylen];

  /* Open a socket and send the request.  */
  int sock = open_socket (type, key, keylen);
  if (sock < 0)
    goto out;

  /* Room for the data sent along with the file descriptor.  We expect
     the key name back.  Newer nscd versions also append the mapping
     size as a 64-bit value; older ones do not (handled below via the
     fstat fallback).  */
  uint64_t mapsize;
  struct iovec iov[2];
  iov[0].iov_base = resdata;
  iov[0].iov_len = keylen;
  iov[1].iov_base = &mapsize;
  iov[1].iov_len = sizeof (mapsize);

  union
  {
    struct cmsghdr hdr;
    char bytes[CMSG_SPACE (sizeof (int))];
  } buf;
  struct msghdr msg = { .msg_iov = iov, .msg_iovlen = 2,
			.msg_control = buf.bytes,
			.msg_controllen = sizeof (buf) };
  struct cmsghdr *cmsg = CMSG_FIRSTHDR (&msg);

  cmsg->cmsg_level = SOL_SOCKET;
  cmsg->cmsg_type = SCM_RIGHTS;
  cmsg->cmsg_len = CMSG_LEN (sizeof (int));

  /* This access is well-aligned since BUF is correctly aligned for an
     int and CMSG_DATA preserves this alignment.  Pre-fill with an
     invalid fd pattern so a short ancillary message is detectable.  */
  memset (CMSG_DATA (cmsg), '\xff', sizeof (int));

  msg.msg_controllen = cmsg->cmsg_len;

  if (wait_on_socket (sock, 5 * 1000) <= 0)
    goto out_close2;

# ifndef MSG_CMSG_CLOEXEC
#  define MSG_CMSG_CLOEXEC 0
# endif
  ssize_t n = TEMP_FAILURE_RETRY (__recvmsg (sock, &msg, MSG_CMSG_CLOEXEC));

  /* Make sure we actually received a descriptor-carrying control
     message of the expected size before trusting its payload.  */
  if (__builtin_expect (CMSG_FIRSTHDR (&msg) == NULL
			|| (CMSG_FIRSTHDR (&msg)->cmsg_len
			    != CMSG_LEN (sizeof (int))), 0))
    goto out_close2;

  int *ip = (void *) CMSG_DATA (cmsg);
  mapfd = *ip;

  /* Accept only the two valid reply shapes: key alone, or key plus
     the 64-bit mapping size.  */
  if (__glibc_unlikely (n != keylen && n != keylen + sizeof (mapsize)))
    goto out_close;

  /* The echoed key must match what we asked for.  */
  if (__glibc_unlikely (strcmp (resdata, key) != 0))
    goto out_close;

  if (__glibc_unlikely (n == keylen))
    {
      /* Older protocol: no size in the reply, determine it from the
	 file itself.  */
      struct __stat64_t64 st;
      if (__glibc_unlikely (__fstat64_time64 (mapfd, &st) != 0)
	  || __builtin_expect (st.st_size < sizeof (struct database_pers_head),
			       0))
	goto out_close;

      mapsize = st.st_size;
    }

  /* The file is large enough, map it now.  */
  void *mapping = __mmap (NULL, mapsize, PROT_READ, MAP_SHARED, mapfd, 0);
  if (__glibc_likely (mapping != MAP_FAILED))
    {
      /* Check whether the database is correct and up-to-date.  */
      struct database_pers_head *head = mapping;

      if (__builtin_expect (head->version != DB_VERSION, 0)
	  || __builtin_expect (head->header_size != sizeof (*head), 0)
	  /* Catch some misconfiguration.  The server should catch
	     them now but some older versions did not.  */
	  || __builtin_expect (head->module == 0, 0)
	  /* This really should not happen but who knows, maybe the update
	     thread got stuck.  */
	  || __builtin_expect (! head->nscd_certainly_running
			       && (head->timestamp + MAPPING_TIMEOUT
				   < time_now ()), 0))
	{
	out_unmap:
	  __munmap (mapping, mapsize);
	  goto out_close;
	}

      /* The minimum size the header claims the database occupies:
	 header + hash array (rounded to ALIGN) + data area.  */
      size_t size = (sizeof (*head) + roundup (head->module * sizeof (ref_t),
					       ALIGN)
		     + head->data_size);

      if (__glibc_unlikely (mapsize < size))
	goto out_unmap;

      /* Allocate a record for the mapping.  */
      struct mapped_database *newp = malloc (sizeof (*newp));
      if (newp == NULL)
	/* Ugh, after all we went through the memory allocation failed.  */
	goto out_unmap;

      newp->head = mapping;
      newp->data = ((char *) mapping + head->header_size
		    + roundup (head->module * sizeof (ref_t), ALIGN));
      newp->mapsize = size;
      newp->datasize = head->data_size;
      /* Set counter to 1 to show it is usable.  */
      newp->counter = 1;

      result = newp;
    }

 out_close:
  __close (mapfd);
 out_close2:
  __close (sock);
 out:
  __set_errno (saved_errno);
#endif	/* SCM_RIGHTS */

  /* Publish the result (even NO_MAPPING, which suppresses further
     attempts) and drop our reference to the previous mapping,
     destroying it if we held the last one.  NOTE(review): the caller
     is expected to hold the map lock here — confirm against
     __nscd_get_map_ref.  */
  struct mapped_database *oldval = *mappedp;
  *mappedp = result;

  if (oldval != NULL && atomic_fetch_add_relaxed (&oldval->counter, -1) == 1)
    __nscd_unmap (oldval);

  return result;
}
398
c207f23b
UD
/* Return a referenced mapping of the database for request TYPE/NAME,
   refreshing it via __nscd_get_mapping if it is missing, stale, or
   has grown.  On success the mapping's reference count has been
   incremented (caller must release it) and *GC_CYCLEP holds the
   garbage-collection cycle counter observed.  Returns NO_MAPPING if
   the database is unusable or GC is currently in progress.  */
struct mapped_database *
__nscd_get_map_ref (request_type type, const char *name,
		    volatile struct locked_map_ptr *mapptr, int *gc_cyclep)
{
  /* Fast path: a previous attempt already decided there is no usable
     mapping; don't take the lock.  */
  struct mapped_database *cur = mapptr->mapped;
  if (cur == NO_MAPPING)
    return cur;

  if (!__nscd_acquire_maplock (mapptr))
    return NO_MAPPING;

  /* Re-read under the lock; another thread may have changed it.  */
  cur = mapptr->mapped;

  if (__glibc_likely (cur != NO_MAPPING))
    {
      /* If not mapped or timestamp not updated, request new map.  */
      if (cur == NULL
	  || (cur->head->nscd_certainly_running == 0
	      && cur->head->timestamp + MAPPING_TIMEOUT < time_now ())
	  || cur->head->data_size > cur->datasize)
	cur = __nscd_get_mapping (type, name,
				  (struct mapped_database **) &mapptr->mapped);

      if (__glibc_likely (cur != NO_MAPPING))
	{
	  /* An odd GC cycle counter means garbage collection is in
	     progress; the data cannot be trusted right now.  */
	  if (__builtin_expect (((*gc_cyclep = cur->head->gc_cycle) & 1) != 0,
				0))
	    cur = NO_MAPPING;
	  else
	    atomic_fetch_add_relaxed (&cur->counter, 1);
	}
    }

  /* Release the map lock.  */
  mapptr->lock = 0;

  return cur;
}
436
437
5078fff6
JJ
438/* Using sizeof (hashentry) is not always correct to determine the size of
439 the data structure as found in the nscd cache. The program could be
440 a 64-bit process and nscd could be a 32-bit process. In this case
441 sizeof (hashentry) would overestimate the size. The following is
442 the minimum size of such an entry, good enough for our tests here. */
443#define MINIMUM_HASHENTRY_SIZE \
444 (offsetof (struct hashentry, dellist) + sizeof (int32_t))
445
7f0d9e61 446/* Don't return const struct datahead *, as even though the record
1a77d37f
JJ
447 is normally constant, it can change arbitrarily during nscd
448 garbage collection. */
/* Look up KEY (KEYLEN bytes) of request TYPE in the mapped nscd
   database.  DATALEN is the minimum payload size the caller needs.
   Returns a pointer to the datahead record inside the mapping, or
   NULL if not found or if any offset/alignment check fails.  The
   mapping is written concurrently by nscd's garbage collector, so
   every reference read from it is re-validated against the data area
   bounds before use.  */
struct datahead *
__nscd_cache_search (request_type type, const char *key, size_t keylen,
		     const struct mapped_database *mapped, size_t datalen)
{
  unsigned long int hash = __nss_hash (key, keylen) % mapped->head->module;
  size_t datasize = mapped->datasize;

  /* TRAIL follows the chain at half speed to detect cycles; WORK is
     the cursor.  */
  ref_t trail = mapped->head->array[hash];
  trail = atomic_forced_read (trail);
  ref_t work = trail;
  /* Upper bound on chain length derived from the data area size —
     a second guard against corrupted/looping chains.  */
  size_t loop_cnt = datasize / (MINIMUM_HASHENTRY_SIZE
				+ offsetof (struct datahead, data) / 2);
  int tick = 0;

  while (work != ENDREF && work + MINIMUM_HASHENTRY_SIZE <= datasize)
    {
      struct hashentry *here = (struct hashentry *) (mapped->data + work);
      ref_t here_key, here_packet;

      /* Although during garbage collection when moving struct hashentry
	 records around we first copy from old to new location and then
	 adjust pointer from previous hashentry to it, there is no barrier
	 between those memory writes.  It is very unlikely to hit it,
	 so check alignment only if a misaligned load can crash the
	 application.  */
      if ((uintptr_t) here & (__alignof__ (*here) - 1))
	return NULL;

      /* Match type and key, re-checking that the key and packet
	 references read from shared memory stay within the data
	 area.  */
      if (type == here->type
	  && keylen == here->len
	  && (here_key = atomic_forced_read (here->key)) + keylen <= datasize
	  && memcmp (key, mapped->data + here_key, keylen) == 0
	  && ((here_packet = atomic_forced_read (here->packet))
	      + sizeof (struct datahead) <= datasize))
	{
	  /* We found the entry.  Increment the appropriate counter.  */
	  struct datahead *dh
	    = (struct datahead *) (mapped->data + here_packet);

	  if ((uintptr_t) dh & (__alignof__ (*dh) - 1))
	    return NULL;

	  /* See whether we must ignore the entry or whether something
	     is wrong because garbage collection is in progress.  */
	  if (dh->usable
	      && here_packet + dh->allocsize <= datasize
	      && (here_packet + offsetof (struct datahead, data) + datalen
		  <= datasize))
	    return dh;
	}

      work = atomic_forced_read (here->next);
      /* Prevent endless loops.  This should never happen but perhaps
	 the database got corrupted, accidentally or deliberately.  */
      if (work == trail || loop_cnt-- == 0)
	break;
      /* Advance TRAIL every other iteration (tortoise-and-hare).  */
      if (tick)
	{
	  struct hashentry *trailelem;
	  trailelem = (struct hashentry *) (mapped->data + trail);

	  /* We have to redo the checks.  Maybe the data changed.  */
	  if ((uintptr_t) trailelem & (__alignof__ (*trailelem) - 1))
	    return NULL;

	  if (trail + MINIMUM_HASHENTRY_SIZE > datasize)
	    return NULL;

	  trail = atomic_forced_read (trailelem->next);
	}
      tick = 1 - tick;
    }

  return NULL;
}
524
525
526/* Create a socket connected to a name. */
527int
528__nscd_open_socket (const char *key, size_t keylen, request_type type,
529 void *response, size_t responselen)
530{
58a2d52e
UD
531 /* This should never happen and it is something the nscd daemon
532 enforces, too. He it helps to limit the amount of stack
533 used. */
534 if (keylen > MAXKEYLEN)
535 return -1;
536
c207f23b
UD
537 int saved_errno = errno;
538
58a2d52e 539 int sock = open_socket (type, key, keylen);
c207f23b
UD
540 if (sock >= 0)
541 {
58a2d52e 542 /* Wait for data. */
f7140274 543 if (wait_on_socket (sock, 5 * 1000) > 0)
7529e67e 544 {
58a2d52e
UD
545 ssize_t nbytes = TEMP_FAILURE_RETRY (__read (sock, response,
546 responselen));
7529e67e
UD
547 if (nbytes == (ssize_t) responselen)
548 return sock;
c207f23b
UD
549 }
550
c181840c 551 __close_nocancel_nostatus (sock);
c207f23b
UD
552 }
553
554 __set_errno (saved_errno);
555
556 return -1;
557}