src/basic/io-util.c
/* SPDX-License-Identifier: LGPL-2.1+ */

#include <errno.h>
#include <limits.h>
#include <poll.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

#include "alloc-util.h"
#include "io-util.h"
#include "log.h"
#include "string-util.h"
#include "time-util.h"

int flush_fd(int fd) {
        struct pollfd pollfd = {
                .fd = fd,
                .events = POLLIN,
        };
        int count = 0;

        /* Read from the specified file descriptor, until POLLIN is not set anymore, throwing away everything
         * read. Note that some file descriptors (notably IP sockets) will trigger POLLIN even when no data can be
         * read (due to IP packet checksum mismatches), hence this function only stays non-blocking if the fd it is
         * given was set to non-blocking too. */

        for (;;) {
                char buf[LINE_MAX];
                ssize_t l;
                int r;

                r = poll(&pollfd, 1, 0);
                if (r < 0) {
                        if (errno == EINTR)
                                continue;

                        return -errno;

                } else if (r == 0)
                        return count;

                l = read(fd, buf, sizeof(buf));
                if (l < 0) {

                        if (errno == EINTR)
                                continue;

                        if (errno == EAGAIN)
                                return count;

                        return -errno;
                } else if (l == 0)
                        return count;

                count += (int) l;
        }
}

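/* Reads up to nbytes bytes from fd into buf, restarting on EINTR and (if do_poll) waiting for the fd to
 * become readable on EAGAIN. Returns the number of bytes read, which is short only on EOF or if an error
 * occurs after partial progress; returns a negative errno if nothing could be read at all. */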
ssize_t loop_read(int fd, void *buf, size_t nbytes, bool do_poll) {
        uint8_t *p = buf;
        ssize_t n = 0;

        assert(fd >= 0);
        assert(buf);

        /* If called with nbytes == 0, let's call read() at least
         * once, to validate the operation */

        if (nbytes > (size_t) SSIZE_MAX)
                return -EINVAL;

        do {
                ssize_t k;

                k = read(fd, p, nbytes);
                if (k < 0) {
                        if (errno == EINTR)
                                continue;

                        if (errno == EAGAIN && do_poll) {

                                /* We knowingly ignore any return value here,
                                 * and expect that any error/EOF is reported
                                 * via read() */

                                (void) fd_wait_for_event(fd, POLLIN, USEC_INFINITY);
                                continue;
                        }

                        return n > 0 ? n : -errno;
                }

                if (k == 0)
                        return n;

                assert((size_t) k <= nbytes);

                p += k;
                nbytes -= k;
                n += k;
        } while (nbytes > 0);

        return n;
}

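/* Like loop_read(), but returns 0 only if exactly nbytes bytes could be read, and -EIO otherwise. */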
int loop_read_exact(int fd, void *buf, size_t nbytes, bool do_poll) {
        ssize_t n;

        n = loop_read(fd, buf, nbytes, do_poll);
        if (n < 0)
                return (int) n;
        if ((size_t) n != nbytes)
                return -EIO;

        return 0;
}

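/* Writes the full buffer of nbytes bytes to fd, restarting on EINTR and (if do_poll) waiting for the fd to
 * become writable on EAGAIN. Returns 0 on success or a negative errno on failure. */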
int loop_write(int fd, const void *buf, size_t nbytes, bool do_poll) {
        const uint8_t *p = buf;

        assert(fd >= 0);
        assert(buf);

        if (_unlikely_(nbytes > (size_t) SSIZE_MAX))
                return -EINVAL;

        do {
                ssize_t k;

                k = write(fd, p, nbytes);
                if (k < 0) {
                        if (errno == EINTR)
                                continue;

                        if (errno == EAGAIN && do_poll) {
                                /* We knowingly ignore any return value here,
                                 * and expect that any error/EOF is reported
                                 * via write() */

                                (void) fd_wait_for_event(fd, POLLOUT, USEC_INFINITY);
                                continue;
                        }

                        return -errno;
                }

                if (_unlikely_(nbytes > 0 && k == 0)) /* Can't really happen */
                        return -EIO;

                assert((size_t) k <= nbytes);

                p += k;
                nbytes -= k;
        } while (nbytes > 0);

        return 0;
}

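/* Illustrative sketch (not part of the original source): how loop_read_exact() and loop_write() might be
 * combined to read a fixed-size record and echo it back. The function name and record size are made up for
 * the example. */
#if 0
static int echo_record(int fd) {
        uint8_t record[64];
        int r;

        /* Fail unless the full record could be read */
        r = loop_read_exact(fd, record, sizeof(record), /* do_poll= */ true);
        if (r < 0)
                return r;

        /* Write the whole record back, retrying on short writes */
        return loop_write(fd, record, sizeof(record), /* do_poll= */ true);
}
#endif

/* Checks, without blocking, whether the peer of the pipe (or connection-oriented socket) referenced by fd
 * hung up. Returns positive if POLLHUP is reported, 0 if not (or if no event is pending), and a negative
 * errno on failure. */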
int pipe_eof(int fd) {
        struct pollfd pollfd = {
                .fd = fd,
                .events = POLLIN|POLLHUP,
        };

        int r;

        r = poll(&pollfd, 1, 0);
        if (r < 0)
                return -errno;

        if (r == 0)
                return 0;

        return pollfd.revents & POLLHUP;
}

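/* Waits up to t microseconds (or forever, if t is USEC_INFINITY) for one of the specified poll events on
 * fd. Returns the revents mask reported by ppoll(), 0 on timeout, or a negative errno on failure. */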
int fd_wait_for_event(int fd, int event, usec_t t) {

        struct pollfd pollfd = {
                .fd = fd,
                .events = event,
        };

        struct timespec ts;
        int r;

        r = ppoll(&pollfd, 1, t == USEC_INFINITY ? NULL : timespec_store(&ts, t), NULL);
        if (r < 0)
                return -errno;
        if (r == 0)
                return 0;

        return pollfd.revents;
}

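/* Returns the length of the run of NUL bytes at the beginning of p, looking at no more than sz bytes. */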
static size_t nul_length(const uint8_t *p, size_t sz) {
        size_t n = 0;

        while (sz > 0) {
                if (*p != 0)
                        break;

                n++;
                p++;
                sz--;
        }

        return n;
}

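/* Writes the buffer to fd, but turns longer runs of NUL bytes (as well as NUL runs at the very beginning or
 * end of the buffer) into lseek() calls, so that the result becomes a sparse file where the file system
 * supports it. Returns the number of bytes covered (written or skipped), or a negative errno. */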
ssize_t sparse_write(int fd, const void *p, size_t sz, size_t run_length) {
        const uint8_t *q, *w, *e;
        ssize_t l;

        q = w = p;
        e = q + sz;
        while (q < e) {
                size_t n;

                n = nul_length(q, e - q);

                /* If there are more than the specified run length of
                 * NUL bytes, or if this is the beginning or the end
                 * of the buffer, then seek instead of write */
                if ((n > run_length) ||
                    (n > 0 && q == p) ||
                    (n > 0 && q + n >= e)) {
                        if (q > w) {
                                l = write(fd, w, q - w);
                                if (l < 0)
                                        return -errno;
                                if (l != q - w)
                                        return -EIO;
                        }

                        if (lseek(fd, n, SEEK_CUR) == (off_t) -1)
                                return -errno;

                        q += n;
                        w = q;
                } else if (n > 0)
                        q += n;
                else
                        q++;
        }

        if (q > w) {
                l = write(fd, w, q - w);
                if (l < 0)
                        return -errno;
                if (l != q - w)
                        return -EIO;
        }

        return q - (const uint8_t*) p;
}

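/* The two helpers below join field and value into one newly allocated string and append it to the given
 * iovec array, incrementing *n_iovec on success. They return the new string (owned by the iovec array from
 * then on) or NULL on allocation failure; the _free variant also frees value, whether or not the operation
 * succeeded. */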
char* set_iovec_string_field(struct iovec *iovec, size_t *n_iovec, const char *field, const char *value) {
        char *x;

        x = strjoin(field, value);
        if (x)
                iovec[(*n_iovec)++] = IOVEC_MAKE_STRING(x);
        return x;
}

char* set_iovec_string_field_free(struct iovec *iovec, size_t *n_iovec, const char *field, char *value) {
        char *x;

        x = set_iovec_string_field(iovec, n_iovec, field, value);
        free(value);
        return x;
}

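/* struct iovec_wrapper bundles a dynamically allocated array of struct iovec entries with its element count
 * and allocation size. iovw_new() allocates an empty wrapper, iovw_free_contents() releases the array (and,
 * optionally, the referenced buffers), and iovw_free_free()/iovw_free() additionally free the wrapper
 * itself, with and without freeing the buffers, respectively. */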
struct iovec_wrapper *iovw_new(void) {
        return malloc0(sizeof(struct iovec_wrapper));
}

void iovw_free_contents(struct iovec_wrapper *iovw, bool free_vectors) {
        if (free_vectors)
                for (size_t i = 0; i < iovw->count; i++)
                        free(iovw->iovec[i].iov_base);

        iovw->iovec = mfree(iovw->iovec);
        iovw->count = 0;
        iovw->size_bytes = 0;
}

struct iovec_wrapper *iovw_free_free(struct iovec_wrapper *iovw) {
        iovw_free_contents(iovw, true);

        return mfree(iovw);
}

struct iovec_wrapper *iovw_free(struct iovec_wrapper *iovw) {
        iovw_free_contents(iovw, false);

        return mfree(iovw);
}

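/* Appends a (data, len) entry to the wrapper, growing the iovec array as necessary. Refuses to grow beyond
 * IOV_MAX entries and returns -E2BIG in that case. Note that the data is referenced, not copied. */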
int iovw_put(struct iovec_wrapper *iovw, void *data, size_t len) {
        if (iovw->count >= IOV_MAX)
                return -E2BIG;

        if (!GREEDY_REALLOC(iovw->iovec, iovw->size_bytes, iovw->count + 1))
                return log_oom();

        iovw->iovec[iovw->count++] = IOVEC_MAKE(data, len);
        return 0;
}

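/* Joins field and value into a newly allocated string and appends it to the wrapper. On success the
 * allocation is owned by the wrapper; on failure it is released again. The _free variant additionally frees
 * value in all cases. */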
int iovw_put_string_field(struct iovec_wrapper *iovw, const char *field, const char *value) {
        _cleanup_free_ char *x = NULL;
        int r;

        x = strjoin(field, value);
        if (!x)
                return log_oom();

        r = iovw_put(iovw, x, strlen(x));
        if (r >= 0)
                TAKE_PTR(x);

        return r;
}

int iovw_put_string_field_free(struct iovec_wrapper *iovw, const char *field, char *value) {
        _cleanup_free_ _unused_ char *free_ptr = value;

        return iovw_put_string_field(iovw, field, value);
}

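/* Adjusts all iov_base pointers after the memory block they point into has been moved from old to new, e.g.
 * after a realloc() of the backing buffer. */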
void iovw_rebase(struct iovec_wrapper *iovw, char *old, char *new) {
        size_t i;

        for (i = 0; i < iovw->count; i++)
                iovw->iovec[i].iov_base = (char*) iovw->iovec[i].iov_base - old + new;
}

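/* Returns the total number of bytes referenced by all iovec entries of the wrapper. */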
size_t iovw_size(struct iovec_wrapper *iovw) {
        size_t n = 0, i;

        for (i = 0; i < iovw->count; i++)
                n += iovw->iovec[i].iov_len;

        return n;
}
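
/* Illustrative sketch (not part of the original source): how the iovec_wrapper helpers above might be used
 * to collect a few journal-style "FIELD=value" entries and hand them to writev(3). The function name, fd
 * and field contents are made up for the example. */
#if 0
static int write_example_fields(int fd) {
        struct iovec_wrapper *iovw;
        int r;

        iovw = iovw_new();
        if (!iovw)
                return -ENOMEM;

        r = iovw_put_string_field(iovw, "MESSAGE=", "hello world");
        if (r < 0)
                goto finish;

        r = iovw_put_string_field(iovw, "PRIORITY=", "6");
        if (r < 0)
                goto finish;

        /* The entries reference allocations owned by the wrapper, so they stay valid until it is freed. */
        if (writev(fd, iovw->iovec, (int) iovw->count) < 0)
                r = -errno;

finish:
        iovw_free_free(iovw);
        return r;
}
#endif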